   1// SPDX-License-Identifier: ISC
   2/*
   3 * Copyright (c) 2005-2011 Atheros Communications Inc.
   4 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
   5 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
   6 */
   7#include "core.h"
   8#include "debug.h"
   9#include "mac.h"
  10#include "hw.h"
  11#include "wmi.h"
  12#include "wmi-ops.h"
  13#include "wmi-tlv.h"
  14#include "p2p.h"
  15#include "testmode.h"
  16#include <linux/bitfield.h>
  17
  18/***************/
  19/* TLV helpers */
   20/***************/
  21
  22struct wmi_tlv_policy {
  23	size_t min_len;
  24};
  25
  26static const struct wmi_tlv_policy wmi_tlv_policies[] = {
  27	[WMI_TLV_TAG_ARRAY_BYTE]
  28		= { .min_len = 0 },
  29	[WMI_TLV_TAG_ARRAY_UINT32]
  30		= { .min_len = 0 },
  31	[WMI_TLV_TAG_STRUCT_SCAN_EVENT]
  32		= { .min_len = sizeof(struct wmi_scan_event) },
  33	[WMI_TLV_TAG_STRUCT_MGMT_RX_HDR]
  34		= { .min_len = sizeof(struct wmi_tlv_mgmt_rx_ev) },
  35	[WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT]
  36		= { .min_len = sizeof(struct wmi_chan_info_event) },
  37	[WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT]
  38		= { .min_len = sizeof(struct wmi_vdev_start_response_event) },
  39	[WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT]
  40		= { .min_len = sizeof(struct wmi_peer_sta_kickout_event) },
  41	[WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT]
  42		= { .min_len = sizeof(struct wmi_host_swba_event) },
  43	[WMI_TLV_TAG_STRUCT_TIM_INFO]
  44		= { .min_len = sizeof(struct wmi_tim_info) },
  45	[WMI_TLV_TAG_STRUCT_P2P_NOA_INFO]
  46		= { .min_len = sizeof(struct wmi_p2p_noa_info) },
  47	[WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT]
  48		= { .min_len = sizeof(struct wmi_tlv_svc_rdy_ev) },
  49	[WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES]
  50		= { .min_len = sizeof(struct hal_reg_capabilities) },
  51	[WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ]
  52		= { .min_len = sizeof(struct wlan_host_mem_req) },
  53	[WMI_TLV_TAG_STRUCT_READY_EVENT]
  54		= { .min_len = sizeof(struct wmi_tlv_rdy_ev) },
  55	[WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT]
  56		= { .min_len = sizeof(struct wmi_tlv_bcn_tx_status_ev) },
  57	[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT]
  58		= { .min_len = sizeof(struct wmi_tlv_diag_data_ev) },
  59	[WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT]
  60		= { .min_len = sizeof(struct wmi_tlv_p2p_noa_ev) },
  61	[WMI_TLV_TAG_STRUCT_ROAM_EVENT]
  62		= { .min_len = sizeof(struct wmi_tlv_roam_ev) },
  63	[WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO]
  64		= { .min_len = sizeof(struct wmi_tlv_wow_event_info) },
  65	[WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT]
  66		= { .min_len = sizeof(struct wmi_tlv_tx_pause_ev) },
  67};
  68
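/* Walk a buffer of TLVs: validate each header against the remaining buffer
 * length and the per-tag minimum from wmi_tlv_policies, then pass each value
 * to the iter() callback. Iteration stops at the first malformed TLV or the
 * first non-zero callback return value.
 */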
  69static int
  70ath10k_wmi_tlv_iter(struct ath10k *ar, const void *ptr, size_t len,
  71		    int (*iter)(struct ath10k *ar, u16 tag, u16 len,
  72				const void *ptr, void *data),
  73		    void *data)
  74{
  75	const void *begin = ptr;
  76	const struct wmi_tlv *tlv;
  77	u16 tlv_tag, tlv_len;
  78	int ret;
  79
  80	while (len > 0) {
  81		if (len < sizeof(*tlv)) {
  82			ath10k_dbg(ar, ATH10K_DBG_WMI,
  83				   "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
  84				   ptr - begin, len, sizeof(*tlv));
  85			return -EINVAL;
  86		}
  87
  88		tlv = ptr;
  89		tlv_tag = __le16_to_cpu(tlv->tag);
  90		tlv_len = __le16_to_cpu(tlv->len);
  91		ptr += sizeof(*tlv);
  92		len -= sizeof(*tlv);
  93
  94		if (tlv_len > len) {
  95			ath10k_dbg(ar, ATH10K_DBG_WMI,
  96				   "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
  97				   tlv_tag, ptr - begin, len, tlv_len);
  98			return -EINVAL;
  99		}
 100
 101		if (tlv_tag < ARRAY_SIZE(wmi_tlv_policies) &&
 102		    wmi_tlv_policies[tlv_tag].min_len &&
 103		    wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
 104			ath10k_dbg(ar, ATH10K_DBG_WMI,
 105				   "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n",
 106				   tlv_tag, ptr - begin, tlv_len,
 107				   wmi_tlv_policies[tlv_tag].min_len);
 108			return -EINVAL;
 109		}
 110
 111		ret = iter(ar, tlv_tag, tlv_len, ptr, data);
 112		if (ret)
 113			return ret;
 114
 115		ptr += tlv_len;
 116		len -= tlv_len;
 117	}
 118
 119	return 0;
 120}
 121
 122static int ath10k_wmi_tlv_iter_parse(struct ath10k *ar, u16 tag, u16 len,
 123				     const void *ptr, void *data)
 124{
 125	const void **tb = data;
 126
 127	if (tag < WMI_TLV_TAG_MAX)
 128		tb[tag] = ptr;
 129
 130	return 0;
 131}
 132
 133static int ath10k_wmi_tlv_parse(struct ath10k *ar, const void **tb,
 134				const void *ptr, size_t len)
 135{
 136	return ath10k_wmi_tlv_iter(ar, ptr, len, ath10k_wmi_tlv_iter_parse,
 137				   (void *)tb);
 138}
 139
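/* Parse an event buffer into a tag-indexed table of TLV value pointers. The
 * table has WMI_TLV_TAG_MAX entries and must be kfree()'d by the caller; if
 * a tag occurs more than once, the last occurrence wins.
 */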
 140static const void **
 141ath10k_wmi_tlv_parse_alloc(struct ath10k *ar, const void *ptr,
 142			   size_t len, gfp_t gfp)
 143{
 144	const void **tb;
 145	int ret;
 146
 147	tb = kcalloc(WMI_TLV_TAG_MAX, sizeof(*tb), gfp);
 148	if (!tb)
 149		return ERR_PTR(-ENOMEM);
 150
 151	ret = ath10k_wmi_tlv_parse(ar, tb, ptr, len);
 152	if (ret) {
 153		kfree(tb);
 154		return ERR_PTR(ret);
 155	}
 156
 157	return tb;
 158}
 159
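/* Return the value length recorded in the TLV header preceding ptr. Only
 * valid for pointers taken from a parsed TLV value, e.g. entries of the
 * table returned by ath10k_wmi_tlv_parse_alloc().
 */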
 160static u16 ath10k_wmi_tlv_len(const void *ptr)
 161{
 162	return __le16_to_cpu((((const struct wmi_tlv *)ptr) - 1)->len);
 163}
 164
 165/**************/
 166/* TLV events */
 167/**************/
 168static int ath10k_wmi_tlv_event_bcn_tx_status(struct ath10k *ar,
 169					      struct sk_buff *skb)
 170{
 171	const void **tb;
 172	const struct wmi_tlv_bcn_tx_status_ev *ev;
 173	struct ath10k_vif *arvif;
 174	u32 vdev_id, tx_status;
 175	int ret;
 176
 177	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
 178	if (IS_ERR(tb)) {
 179		ret = PTR_ERR(tb);
 180		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
 181		return ret;
 182	}
 183
 184	ev = tb[WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT];
 185	if (!ev) {
 186		kfree(tb);
 187		return -EPROTO;
 188	}
 189
 190	tx_status = __le32_to_cpu(ev->tx_status);
 191	vdev_id = __le32_to_cpu(ev->vdev_id);
 192
 193	switch (tx_status) {
 194	case WMI_TLV_BCN_TX_STATUS_OK:
 195		break;
 196	case WMI_TLV_BCN_TX_STATUS_XRETRY:
 197	case WMI_TLV_BCN_TX_STATUS_DROP:
 198	case WMI_TLV_BCN_TX_STATUS_FILTERED:
 199		/* FIXME: It's probably worth telling mac80211 to stop the
 200		 * interface as it is crippled.
 201		 */
 202		ath10k_warn(ar, "received bcn tmpl tx status on vdev %i: %d",
 203			    vdev_id, tx_status);
 204		break;
 205	}
 206
 207	arvif = ath10k_get_arvif(ar, vdev_id);
 208	if (arvif && arvif->is_up && arvif->vif->bss_conf.csa_active)
 209		ieee80211_queue_work(ar->hw, &arvif->ap_csa_work);
 210
 211	kfree(tb);
 212	return 0;
 213}
 214
 215static void ath10k_wmi_tlv_event_vdev_delete_resp(struct ath10k *ar,
 216						  struct sk_buff *skb)
 217{
 218	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_DELETE_RESP_EVENTID\n");
 219	complete(&ar->vdev_delete_done);
 220}
 221
 222static int ath10k_wmi_tlv_parse_peer_stats_info(struct ath10k *ar, u16 tag, u16 len,
 223						const void *ptr, void *data)
 224{
 225	const struct wmi_tlv_peer_stats_info *stat = ptr;
 226	struct ieee80211_sta *sta;
 227	struct ath10k_sta *arsta;
 228
 229	if (tag != WMI_TLV_TAG_STRUCT_PEER_STATS_INFO)
 230		return -EPROTO;
 231
 232	ath10k_dbg(ar, ATH10K_DBG_WMI,
 233		   "wmi tlv stats peer addr %pMF rx rate code 0x%x bit rate %d kbps\n",
 234		   stat->peer_macaddr.addr,
 235		   __le32_to_cpu(stat->last_rx_rate_code),
 236		   __le32_to_cpu(stat->last_rx_bitrate_kbps));
 237
 238	ath10k_dbg(ar, ATH10K_DBG_WMI,
 239		   "wmi tlv stats tx rate code 0x%x bit rate %d kbps\n",
 240		   __le32_to_cpu(stat->last_tx_rate_code),
 241		   __le32_to_cpu(stat->last_tx_bitrate_kbps));
 242
 243	rcu_read_lock();
 244	sta = ieee80211_find_sta_by_ifaddr(ar->hw, stat->peer_macaddr.addr, NULL);
 245	if (!sta) {
 246		rcu_read_unlock();
  247		ath10k_warn(ar, "couldn't find station for peer stats\n");
 248		return -EINVAL;
 249	}
 250
 251	arsta = (struct ath10k_sta *)sta->drv_priv;
 252	arsta->rx_rate_code = __le32_to_cpu(stat->last_rx_rate_code);
 253	arsta->rx_bitrate_kbps = __le32_to_cpu(stat->last_rx_bitrate_kbps);
 254	arsta->tx_rate_code = __le32_to_cpu(stat->last_tx_rate_code);
 255	arsta->tx_bitrate_kbps = __le32_to_cpu(stat->last_tx_bitrate_kbps);
 256	rcu_read_unlock();
 257
 258	return 0;
 259}
 260
 261static int ath10k_wmi_tlv_op_pull_peer_stats_info(struct ath10k *ar,
 262						  struct sk_buff *skb)
 263{
 264	const void **tb;
 265	const struct wmi_tlv_peer_stats_info_ev *ev;
 266	const void *data;
 267	u32 num_peer_stats;
 268	int ret;
 269
 270	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
 271	if (IS_ERR(tb)) {
 272		ret = PTR_ERR(tb);
 273		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
 274		return ret;
 275	}
 276
 277	ev = tb[WMI_TLV_TAG_STRUCT_PEER_STATS_INFO_EVENT];
 278	data = tb[WMI_TLV_TAG_ARRAY_STRUCT];
 279
 280	if (!ev || !data) {
 281		kfree(tb);
 282		return -EPROTO;
 283	}
 284
 285	num_peer_stats = __le32_to_cpu(ev->num_peers);
 286
 287	ath10k_dbg(ar, ATH10K_DBG_WMI,
 288		   "wmi tlv peer stats info update peer vdev id %d peers %i more data %d\n",
 289		   __le32_to_cpu(ev->vdev_id),
 290		   num_peer_stats,
 291		   __le32_to_cpu(ev->more_data));
 292
 293	ret = ath10k_wmi_tlv_iter(ar, data, ath10k_wmi_tlv_len(data),
 294				  ath10k_wmi_tlv_parse_peer_stats_info, NULL);
 295	if (ret)
 296		ath10k_warn(ar, "failed to parse stats info tlv: %d\n", ret);
 297
 298	kfree(tb);
 299	return 0;
 300}
 301
 302static void ath10k_wmi_tlv_event_peer_stats_info(struct ath10k *ar,
 303						 struct sk_buff *skb)
 304{
 305	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PEER_STATS_INFO_EVENTID\n");
 306	ath10k_wmi_tlv_op_pull_peer_stats_info(ar, skb);
 307	complete(&ar->peer_stats_info_complete);
 308}
 309
 310static int ath10k_wmi_tlv_event_diag_data(struct ath10k *ar,
 311					  struct sk_buff *skb)
 312{
 313	const void **tb;
 314	const struct wmi_tlv_diag_data_ev *ev;
 315	const struct wmi_tlv_diag_item *item;
 316	const void *data;
 317	int ret, num_items, len;
 318
 319	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
 320	if (IS_ERR(tb)) {
 321		ret = PTR_ERR(tb);
 322		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
 323		return ret;
 324	}
 325
 326	ev = tb[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT];
 327	data = tb[WMI_TLV_TAG_ARRAY_BYTE];
 328	if (!ev || !data) {
 329		kfree(tb);
 330		return -EPROTO;
 331	}
 332
 333	num_items = __le32_to_cpu(ev->num_items);
 334	len = ath10k_wmi_tlv_len(data);
 335
 336	while (num_items--) {
 337		if (len == 0)
 338			break;
 339		if (len < sizeof(*item)) {
 340			ath10k_warn(ar, "failed to parse diag data: can't fit item header\n");
 341			break;
 342		}
 343
 344		item = data;
 345
 346		if (len < sizeof(*item) + __le16_to_cpu(item->len)) {
 347			ath10k_warn(ar, "failed to parse diag data: item is too long\n");
 348			break;
 349		}
 350
 351		trace_ath10k_wmi_diag_container(ar,
 352						item->type,
 353						__le32_to_cpu(item->timestamp),
 354						__le32_to_cpu(item->code),
 355						__le16_to_cpu(item->len),
 356						item->payload);
 357
 358		len -= sizeof(*item);
 359		len -= roundup(__le16_to_cpu(item->len), 4);
 360
 361		data += sizeof(*item);
 362		data += roundup(__le16_to_cpu(item->len), 4);
 363	}
 364
 365	if (num_items != -1 || len != 0)
 366		ath10k_warn(ar, "failed to parse diag data event: num_items %d len %d\n",
 367			    num_items, len);
 368
 369	kfree(tb);
 370	return 0;
 371}
 372
 373static int ath10k_wmi_tlv_event_diag(struct ath10k *ar,
 374				     struct sk_buff *skb)
 375{
 376	const void **tb;
 377	const void *data;
 378	int ret, len;
 379
 380	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
 381	if (IS_ERR(tb)) {
 382		ret = PTR_ERR(tb);
 383		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
 384		return ret;
 385	}
 386
 387	data = tb[WMI_TLV_TAG_ARRAY_BYTE];
 388	if (!data) {
 389		kfree(tb);
 390		return -EPROTO;
 391	}
 392	len = ath10k_wmi_tlv_len(data);
 393
 394	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv diag event len %d\n", len);
 395	trace_ath10k_wmi_diag(ar, data, len);
 396
 397	kfree(tb);
 398	return 0;
 399}
 400
 401static int ath10k_wmi_tlv_event_p2p_noa(struct ath10k *ar,
 402					struct sk_buff *skb)
 403{
 404	const void **tb;
 405	const struct wmi_tlv_p2p_noa_ev *ev;
 406	const struct wmi_p2p_noa_info *noa;
 407	int ret, vdev_id;
 408
 409	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
 410	if (IS_ERR(tb)) {
 411		ret = PTR_ERR(tb);
 412		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
 413		return ret;
 414	}
 415
 416	ev = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT];
 417	noa = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_INFO];
 418
 419	if (!ev || !noa) {
 420		kfree(tb);
 421		return -EPROTO;
 422	}
 423
 424	vdev_id = __le32_to_cpu(ev->vdev_id);
 425
 426	ath10k_dbg(ar, ATH10K_DBG_WMI,
 427		   "wmi tlv p2p noa vdev_id %i descriptors %u\n",
 428		   vdev_id, noa->num_descriptors);
 429
 430	ath10k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa);
 431	kfree(tb);
 432	return 0;
 433}
 434
 435static int ath10k_wmi_tlv_event_tx_pause(struct ath10k *ar,
 436					 struct sk_buff *skb)
 437{
 438	const void **tb;
 439	const struct wmi_tlv_tx_pause_ev *ev;
 440	int ret, vdev_id;
 441	u32 pause_id, action, vdev_map, peer_id, tid_map;
 442
 443	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
 444	if (IS_ERR(tb)) {
 445		ret = PTR_ERR(tb);
 446		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
 447		return ret;
 448	}
 449
 450	ev = tb[WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT];
 451	if (!ev) {
 452		kfree(tb);
 453		return -EPROTO;
 454	}
 455
 456	pause_id = __le32_to_cpu(ev->pause_id);
 457	action = __le32_to_cpu(ev->action);
 458	vdev_map = __le32_to_cpu(ev->vdev_map);
 459	peer_id = __le32_to_cpu(ev->peer_id);
 460	tid_map = __le32_to_cpu(ev->tid_map);
 461
 462	ath10k_dbg(ar, ATH10K_DBG_WMI,
 463		   "wmi tlv tx pause pause_id %u action %u vdev_map 0x%08x peer_id %u tid_map 0x%08x\n",
 464		   pause_id, action, vdev_map, peer_id, tid_map);
 465
 466	switch (pause_id) {
 467	case WMI_TLV_TX_PAUSE_ID_MCC:
 468	case WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA:
 469	case WMI_TLV_TX_PAUSE_ID_P2P_GO_PS:
 470	case WMI_TLV_TX_PAUSE_ID_AP_PS:
 471	case WMI_TLV_TX_PAUSE_ID_IBSS_PS:
 472		for (vdev_id = 0; vdev_map; vdev_id++) {
 473			if (!(vdev_map & BIT(vdev_id)))
 474				continue;
 475
 476			vdev_map &= ~BIT(vdev_id);
 477			ath10k_mac_handle_tx_pause_vdev(ar, vdev_id, pause_id,
 478							action);
 479		}
 480		break;
 481	case WMI_TLV_TX_PAUSE_ID_AP_PEER_PS:
 482	case WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD:
 483	case WMI_TLV_TX_PAUSE_ID_STA_ADD_BA:
 484	case WMI_TLV_TX_PAUSE_ID_HOST:
 485		ath10k_dbg(ar, ATH10K_DBG_MAC,
 486			   "mac ignoring unsupported tx pause id %d\n",
 487			   pause_id);
 488		break;
 489	default:
 490		ath10k_dbg(ar, ATH10K_DBG_MAC,
  491			   "mac ignoring unknown tx pause id %d\n",
 492			   pause_id);
 493		break;
 494	}
 495
 496	kfree(tb);
 497	return 0;
 498}
 499
 500static void ath10k_wmi_tlv_event_rfkill_state_change(struct ath10k *ar,
 501						     struct sk_buff *skb)
 502{
 503	const struct wmi_tlv_rfkill_state_change_ev *ev;
 504	const void **tb;
 505	bool radio;
 506	int ret;
 507
 508	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
 509	if (IS_ERR(tb)) {
 510		ret = PTR_ERR(tb);
 511		ath10k_warn(ar,
 512			    "failed to parse rfkill state change event: %d\n",
 513			    ret);
 514		return;
 515	}
 516
 517	ev = tb[WMI_TLV_TAG_STRUCT_RFKILL_EVENT];
 518	if (!ev) {
 519		kfree(tb);
 520		return;
 521	}
 522
 523	ath10k_dbg(ar, ATH10K_DBG_MAC,
 524		   "wmi tlv rfkill state change gpio %d type %d radio_state %d\n",
 525		   __le32_to_cpu(ev->gpio_pin_num),
 526		   __le32_to_cpu(ev->int_type),
 527		   __le32_to_cpu(ev->radio_state));
 528
 529	radio = (__le32_to_cpu(ev->radio_state) == WMI_TLV_RFKILL_RADIO_STATE_ON);
 530
 531	spin_lock_bh(&ar->data_lock);
 532
 533	if (!radio)
 534		ar->hw_rfkill_on = true;
 535
 536	spin_unlock_bh(&ar->data_lock);
 537
 538	/* notify cfg80211 radio state change */
 539	ath10k_mac_rfkill_enable_radio(ar, radio);
 540	wiphy_rfkill_set_hw_state(ar->hw->wiphy, !radio);
 541}
 542
 543static int ath10k_wmi_tlv_event_temperature(struct ath10k *ar,
 544					    struct sk_buff *skb)
 545{
 546	const struct wmi_tlv_pdev_temperature_event *ev;
 547
 548	ev = (struct wmi_tlv_pdev_temperature_event *)skb->data;
 549	if (WARN_ON(skb->len < sizeof(*ev)))
 550		return -EPROTO;
 551
 552	ath10k_thermal_event_temperature(ar, __le32_to_cpu(ev->temperature));
 553	return 0;
 554}
 555
 556static void ath10k_wmi_event_tdls_peer(struct ath10k *ar, struct sk_buff *skb)
 557{
 558	struct ieee80211_sta *station;
 559	const struct wmi_tlv_tdls_peer_event *ev;
 560	const void **tb;
 561	struct ath10k_vif *arvif;
 562
 563	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
 564	if (IS_ERR(tb)) {
 565		ath10k_warn(ar, "tdls peer failed to parse tlv");
 566		return;
 567	}
 568	ev = tb[WMI_TLV_TAG_STRUCT_TDLS_PEER_EVENT];
 569	if (!ev) {
 570		kfree(tb);
 571		ath10k_warn(ar, "tdls peer NULL event");
 572		return;
 573	}
 574
 575	switch (__le32_to_cpu(ev->peer_reason)) {
 576	case WMI_TDLS_TEARDOWN_REASON_TX:
 577	case WMI_TDLS_TEARDOWN_REASON_RSSI:
 578	case WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT:
 579		rcu_read_lock();
 580		station = ieee80211_find_sta_by_ifaddr(ar->hw,
 581						       ev->peer_macaddr.addr,
 582						       NULL);
 583		if (!station) {
 584			ath10k_warn(ar, "did not find station from tdls peer event");
 585			goto exit;
 586		}
 587
 588		arvif = ath10k_get_arvif(ar, __le32_to_cpu(ev->vdev_id));
 589		if (!arvif) {
 590			ath10k_warn(ar, "no vif for vdev_id %d found",
 591				    __le32_to_cpu(ev->vdev_id));
 592			goto exit;
 593		}
 594
 595		ieee80211_tdls_oper_request(
 596					arvif->vif, station->addr,
 597					NL80211_TDLS_TEARDOWN,
 598					WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE,
 599					GFP_ATOMIC
 600					);
 601		break;
 602	default:
 603		kfree(tb);
 604		return;
 605	}
 606
 607exit:
 608	rcu_read_unlock();
 609	kfree(tb);
 610}
 611
 612static int ath10k_wmi_tlv_event_peer_delete_resp(struct ath10k *ar,
 613						 struct sk_buff *skb)
 614{
 615	struct wmi_peer_delete_resp_ev_arg *arg;
 616	struct wmi_tlv *tlv_hdr;
 617
 618	tlv_hdr = (struct wmi_tlv *)skb->data;
 619	arg = (struct wmi_peer_delete_resp_ev_arg *)tlv_hdr->value;
 620
 621	ath10k_dbg(ar, ATH10K_DBG_WMI, "vdev id %d", arg->vdev_id);
 622	ath10k_dbg(ar, ATH10K_DBG_WMI, "peer mac addr %pM", &arg->peer_addr);
 623	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer delete response\n");
 624
 625	complete(&ar->peer_delete_done);
 626
 627	return 0;
 628}
 629
 630/***********/
 631/* TLV ops */
 632/***********/
 633
 634static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
 635{
 636	struct wmi_cmd_hdr *cmd_hdr;
 637	enum wmi_tlv_event_id id;
 638	bool consumed;
 639
 640	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
 641	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
 642
 643	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
 644		goto out;
 645
 646	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
 647
 648	consumed = ath10k_tm_event_wmi(ar, id, skb);
 649
  650	/* The ready event must be handled normally also in UTF mode so that we
  651	 * know the UTF firmware has booted; other events are simply bypassed to
  652	 * testmode.
  653	 */
 654	if (consumed && id != WMI_TLV_READY_EVENTID) {
 655		ath10k_dbg(ar, ATH10K_DBG_WMI,
 656			   "wmi tlv testmode consumed 0x%x\n", id);
 657		goto out;
 658	}
 659
 660	switch (id) {
 661	case WMI_TLV_MGMT_RX_EVENTID:
 662		ath10k_wmi_event_mgmt_rx(ar, skb);
 663		/* mgmt_rx() owns the skb now! */
 664		return;
 665	case WMI_TLV_SCAN_EVENTID:
 666		ath10k_wmi_event_scan(ar, skb);
 667		break;
 668	case WMI_TLV_CHAN_INFO_EVENTID:
 669		ath10k_wmi_event_chan_info(ar, skb);
 670		break;
 671	case WMI_TLV_ECHO_EVENTID:
 672		ath10k_wmi_event_echo(ar, skb);
 673		break;
 674	case WMI_TLV_DEBUG_MESG_EVENTID:
 675		ath10k_wmi_event_debug_mesg(ar, skb);
 676		break;
 677	case WMI_TLV_UPDATE_STATS_EVENTID:
 678		ath10k_wmi_event_update_stats(ar, skb);
 679		break;
 680	case WMI_TLV_PEER_STATS_INFO_EVENTID:
 681		ath10k_wmi_tlv_event_peer_stats_info(ar, skb);
 682		break;
 683	case WMI_TLV_VDEV_START_RESP_EVENTID:
 684		ath10k_wmi_event_vdev_start_resp(ar, skb);
 685		break;
 686	case WMI_TLV_VDEV_STOPPED_EVENTID:
 687		ath10k_wmi_event_vdev_stopped(ar, skb);
 688		break;
 689	case WMI_TLV_VDEV_DELETE_RESP_EVENTID:
 690		ath10k_wmi_tlv_event_vdev_delete_resp(ar, skb);
 691		break;
 692	case WMI_TLV_PEER_STA_KICKOUT_EVENTID:
 693		ath10k_wmi_event_peer_sta_kickout(ar, skb);
 694		break;
 695	case WMI_TLV_HOST_SWBA_EVENTID:
 696		ath10k_wmi_event_host_swba(ar, skb);
 697		break;
 698	case WMI_TLV_TBTTOFFSET_UPDATE_EVENTID:
 699		ath10k_wmi_event_tbttoffset_update(ar, skb);
 700		break;
 701	case WMI_TLV_PHYERR_EVENTID:
 702		ath10k_wmi_event_phyerr(ar, skb);
 703		break;
 704	case WMI_TLV_ROAM_EVENTID:
 705		ath10k_wmi_event_roam(ar, skb);
 706		break;
 707	case WMI_TLV_PROFILE_MATCH:
 708		ath10k_wmi_event_profile_match(ar, skb);
 709		break;
 710	case WMI_TLV_DEBUG_PRINT_EVENTID:
 711		ath10k_wmi_event_debug_print(ar, skb);
 712		break;
 713	case WMI_TLV_PDEV_QVIT_EVENTID:
 714		ath10k_wmi_event_pdev_qvit(ar, skb);
 715		break;
 716	case WMI_TLV_WLAN_PROFILE_DATA_EVENTID:
 717		ath10k_wmi_event_wlan_profile_data(ar, skb);
 718		break;
 719	case WMI_TLV_RTT_MEASUREMENT_REPORT_EVENTID:
 720		ath10k_wmi_event_rtt_measurement_report(ar, skb);
 721		break;
 722	case WMI_TLV_TSF_MEASUREMENT_REPORT_EVENTID:
 723		ath10k_wmi_event_tsf_measurement_report(ar, skb);
 724		break;
 725	case WMI_TLV_RTT_ERROR_REPORT_EVENTID:
 726		ath10k_wmi_event_rtt_error_report(ar, skb);
 727		break;
 728	case WMI_TLV_WOW_WAKEUP_HOST_EVENTID:
 729		ath10k_wmi_event_wow_wakeup_host(ar, skb);
 730		break;
 731	case WMI_TLV_DCS_INTERFERENCE_EVENTID:
 732		ath10k_wmi_event_dcs_interference(ar, skb);
 733		break;
 734	case WMI_TLV_PDEV_TPC_CONFIG_EVENTID:
 735		ath10k_wmi_event_pdev_tpc_config(ar, skb);
 736		break;
 737	case WMI_TLV_PDEV_FTM_INTG_EVENTID:
 738		ath10k_wmi_event_pdev_ftm_intg(ar, skb);
 739		break;
 740	case WMI_TLV_GTK_OFFLOAD_STATUS_EVENTID:
 741		ath10k_wmi_event_gtk_offload_status(ar, skb);
 742		break;
 743	case WMI_TLV_GTK_REKEY_FAIL_EVENTID:
 744		ath10k_wmi_event_gtk_rekey_fail(ar, skb);
 745		break;
 746	case WMI_TLV_TX_DELBA_COMPLETE_EVENTID:
 747		ath10k_wmi_event_delba_complete(ar, skb);
 748		break;
 749	case WMI_TLV_TX_ADDBA_COMPLETE_EVENTID:
 750		ath10k_wmi_event_addba_complete(ar, skb);
 751		break;
 752	case WMI_TLV_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
 753		ath10k_wmi_event_vdev_install_key_complete(ar, skb);
 754		break;
 755	case WMI_TLV_SERVICE_READY_EVENTID:
 756		ath10k_wmi_event_service_ready(ar, skb);
 757		return;
 758	case WMI_TLV_READY_EVENTID:
 759		ath10k_wmi_event_ready(ar, skb);
 760		break;
 761	case WMI_TLV_SERVICE_AVAILABLE_EVENTID:
 762		ath10k_wmi_event_service_available(ar, skb);
 763		break;
 764	case WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID:
 765		ath10k_wmi_tlv_event_bcn_tx_status(ar, skb);
 766		break;
 767	case WMI_TLV_DIAG_DATA_CONTAINER_EVENTID:
 768		ath10k_wmi_tlv_event_diag_data(ar, skb);
 769		break;
 770	case WMI_TLV_DIAG_EVENTID:
 771		ath10k_wmi_tlv_event_diag(ar, skb);
 772		break;
 773	case WMI_TLV_P2P_NOA_EVENTID:
 774		ath10k_wmi_tlv_event_p2p_noa(ar, skb);
 775		break;
 776	case WMI_TLV_TX_PAUSE_EVENTID:
 777		ath10k_wmi_tlv_event_tx_pause(ar, skb);
 778		break;
 779	case WMI_TLV_RFKILL_STATE_CHANGE_EVENTID:
 780		ath10k_wmi_tlv_event_rfkill_state_change(ar, skb);
 781		break;
 782	case WMI_TLV_PDEV_TEMPERATURE_EVENTID:
 783		ath10k_wmi_tlv_event_temperature(ar, skb);
 784		break;
 785	case WMI_TLV_TDLS_PEER_EVENTID:
 786		ath10k_wmi_event_tdls_peer(ar, skb);
 787		break;
 788	case WMI_TLV_PEER_DELETE_RESP_EVENTID:
 789		ath10k_wmi_tlv_event_peer_delete_resp(ar, skb);
 790		break;
 791	case WMI_TLV_MGMT_TX_COMPLETION_EVENTID:
 792		ath10k_wmi_event_mgmt_tx_compl(ar, skb);
 793		break;
 794	case WMI_TLV_MGMT_TX_BUNDLE_COMPLETION_EVENTID:
 795		ath10k_wmi_event_mgmt_tx_bundle_compl(ar, skb);
 796		break;
 797	default:
 798		ath10k_dbg(ar, ATH10K_DBG_WMI, "Unknown eventid: %d\n", id);
 799		break;
 800	}
 801
 802out:
 803	dev_kfree_skb(skb);
 804}
 805
 806static int ath10k_wmi_tlv_op_pull_scan_ev(struct ath10k *ar,
 807					  struct sk_buff *skb,
 808					  struct wmi_scan_ev_arg *arg)
 809{
 810	const void **tb;
 811	const struct wmi_scan_event *ev;
 812	int ret;
 813
 814	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
 815	if (IS_ERR(tb)) {
 816		ret = PTR_ERR(tb);
 817		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
 818		return ret;
 819	}
 820
 821	ev = tb[WMI_TLV_TAG_STRUCT_SCAN_EVENT];
 822	if (!ev) {
 823		kfree(tb);
 824		return -EPROTO;
 825	}
 826
 827	arg->event_type = ev->event_type;
 828	arg->reason = ev->reason;
 829	arg->channel_freq = ev->channel_freq;
 830	arg->scan_req_id = ev->scan_req_id;
 831	arg->scan_id = ev->scan_id;
 832	arg->vdev_id = ev->vdev_id;
 833
 834	kfree(tb);
 835	return 0;
 836}
 837
 838static int
 839ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev(struct ath10k *ar, struct sk_buff *skb,
 840					struct wmi_tlv_mgmt_tx_compl_ev_arg *arg)
 841{
 842	const void **tb;
 843	const struct wmi_tlv_mgmt_tx_compl_ev *ev;
 844	int ret;
 845
 846	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
 847	if (IS_ERR(tb)) {
 848		ret = PTR_ERR(tb);
 849		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
 850		return ret;
 851	}
 852
  853	ev = tb[WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_EVENT];
	if (!ev) {
		kfree(tb);
		return -EPROTO;
	}
  854
  855	arg->desc_id = ev->desc_id;
 856	arg->status = ev->status;
 857	arg->pdev_id = ev->pdev_id;
 858	arg->ppdu_id = ev->ppdu_id;
 859
 860	if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
 861		arg->ack_rssi = ev->ack_rssi;
 862
 863	kfree(tb);
 864	return 0;
 865}
 866
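/* The bundle completion event carries several WMI_TLV_TAG_ARRAY_UINT32
 * arrays that share the same tag, so the parser below assigns them in the
 * order they arrive: desc_ids, then status, then ppdu_ids, then ack_rssi.
 * The *_done flags track which array is expected next.
 */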
 867struct wmi_tlv_tx_bundle_compl_parse {
 868	const __le32 *num_reports;
 869	const __le32 *desc_ids;
 870	const __le32 *status;
 871	const __le32 *ppdu_ids;
 872	const __le32 *ack_rssi;
 873	bool desc_ids_done;
 874	bool status_done;
 875	bool ppdu_ids_done;
 876	bool ack_rssi_done;
 877};
 878
 879static int
 880ath10k_wmi_tlv_mgmt_tx_bundle_compl_parse(struct ath10k *ar, u16 tag, u16 len,
 881					  const void *ptr, void *data)
 882{
 883	struct wmi_tlv_tx_bundle_compl_parse *bundle_tx_compl = data;
 884
 885	switch (tag) {
 886	case WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_BUNDLE_EVENT:
 887		bundle_tx_compl->num_reports = ptr;
 888		break;
 889	case WMI_TLV_TAG_ARRAY_UINT32:
 890		if (!bundle_tx_compl->desc_ids_done) {
 891			bundle_tx_compl->desc_ids_done = true;
 892			bundle_tx_compl->desc_ids = ptr;
 893		} else if (!bundle_tx_compl->status_done) {
 894			bundle_tx_compl->status_done = true;
 895			bundle_tx_compl->status = ptr;
 896		} else if (!bundle_tx_compl->ppdu_ids_done) {
 897			bundle_tx_compl->ppdu_ids_done = true;
 898			bundle_tx_compl->ppdu_ids = ptr;
 899		} else if (!bundle_tx_compl->ack_rssi_done) {
 900			bundle_tx_compl->ack_rssi_done = true;
 901			bundle_tx_compl->ack_rssi = ptr;
 902		}
 903		break;
 904	default:
 905		break;
 906	}
 907	return 0;
 908}
 909
 910static int ath10k_wmi_tlv_op_pull_mgmt_tx_bundle_compl_ev(
 911				struct ath10k *ar, struct sk_buff *skb,
 912				struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg *arg)
 913{
 914	struct wmi_tlv_tx_bundle_compl_parse bundle_tx_compl = { };
 915	int ret;
 916
 917	ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
 918				  ath10k_wmi_tlv_mgmt_tx_bundle_compl_parse,
 919				  &bundle_tx_compl);
 920	if (ret) {
 921		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
 922		return ret;
 923	}
 924
 925	if (!bundle_tx_compl.num_reports || !bundle_tx_compl.desc_ids ||
 926	    !bundle_tx_compl.status)
 927		return -EPROTO;
 928
 929	arg->num_reports = *bundle_tx_compl.num_reports;
 930	arg->desc_ids = bundle_tx_compl.desc_ids;
 931	arg->status = bundle_tx_compl.status;
 932	arg->ppdu_ids = bundle_tx_compl.ppdu_ids;
 933
 934	if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
 935		arg->ack_rssi = bundle_tx_compl.ack_rssi;
 936
 937	return 0;
 938}
 939
 940static int ath10k_wmi_tlv_op_pull_mgmt_rx_ev(struct ath10k *ar,
 941					     struct sk_buff *skb,
 942					     struct wmi_mgmt_rx_ev_arg *arg)
 943{
 944	const void **tb;
 945	const struct wmi_tlv_mgmt_rx_ev *ev;
 946	const u8 *frame;
 947	u32 msdu_len;
 948	int ret, i;
 949
 950	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
 951	if (IS_ERR(tb)) {
 952		ret = PTR_ERR(tb);
 953		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
 954		return ret;
 955	}
 956
 957	ev = tb[WMI_TLV_TAG_STRUCT_MGMT_RX_HDR];
 958	frame = tb[WMI_TLV_TAG_ARRAY_BYTE];
 959
 960	if (!ev || !frame) {
 961		kfree(tb);
 962		return -EPROTO;
 963	}
 964
 965	arg->channel = ev->channel;
 966	arg->buf_len = ev->buf_len;
 967	arg->status = ev->status;
 968	arg->snr = ev->snr;
 969	arg->phy_mode = ev->phy_mode;
 970	arg->rate = ev->rate;
 971
 972	for (i = 0; i < ARRAY_SIZE(ev->rssi); i++)
 973		arg->rssi[i] = ev->rssi[i];
 974
 975	msdu_len = __le32_to_cpu(arg->buf_len);
 976
 977	if (skb->len < (frame - skb->data) + msdu_len) {
 978		kfree(tb);
 979		return -EPROTO;
 980	}
 981
  982	/* strip everything before `frame` and limit the skb to msdu_len bytes */
 983	skb_trim(skb, 0);
 984	skb_put(skb, frame - skb->data);
 985	skb_pull(skb, frame - skb->data);
 986	skb_put(skb, msdu_len);
 987
 988	kfree(tb);
 989	return 0;
 990}
 991
 992static int ath10k_wmi_tlv_op_pull_ch_info_ev(struct ath10k *ar,
 993					     struct sk_buff *skb,
 994					     struct wmi_ch_info_ev_arg *arg)
 995{
 996	const void **tb;
 997	const struct wmi_tlv_chan_info_event *ev;
 998	int ret;
 999
1000	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1001	if (IS_ERR(tb)) {
1002		ret = PTR_ERR(tb);
1003		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1004		return ret;
1005	}
1006
1007	ev = tb[WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT];
1008	if (!ev) {
1009		kfree(tb);
1010		return -EPROTO;
1011	}
1012
1013	arg->err_code = ev->err_code;
1014	arg->freq = ev->freq;
1015	arg->cmd_flags = ev->cmd_flags;
1016	arg->noise_floor = ev->noise_floor;
1017	arg->rx_clear_count = ev->rx_clear_count;
1018	arg->cycle_count = ev->cycle_count;
1019	if (test_bit(ATH10K_FW_FEATURE_SINGLE_CHAN_INFO_PER_CHANNEL,
1020		     ar->running_fw->fw_file.fw_features))
1021		arg->mac_clk_mhz = ev->mac_clk_mhz;
1022
1023	kfree(tb);
1024	return 0;
1025}
1026
1027static int
1028ath10k_wmi_tlv_op_pull_vdev_start_ev(struct ath10k *ar, struct sk_buff *skb,
1029				     struct wmi_vdev_start_ev_arg *arg)
1030{
1031	const void **tb;
1032	const struct wmi_vdev_start_response_event *ev;
1033	int ret;
1034
1035	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1036	if (IS_ERR(tb)) {
1037		ret = PTR_ERR(tb);
1038		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1039		return ret;
1040	}
1041
1042	ev = tb[WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT];
1043	if (!ev) {
1044		kfree(tb);
1045		return -EPROTO;
1046	}
1047
1048	skb_pull(skb, sizeof(*ev));
1049	arg->vdev_id = ev->vdev_id;
1050	arg->req_id = ev->req_id;
1051	arg->resp_type = ev->resp_type;
1052	arg->status = ev->status;
1053
1054	kfree(tb);
1055	return 0;
1056}
1057
1058static int ath10k_wmi_tlv_op_pull_peer_kick_ev(struct ath10k *ar,
1059					       struct sk_buff *skb,
1060					       struct wmi_peer_kick_ev_arg *arg)
1061{
1062	const void **tb;
1063	const struct wmi_peer_sta_kickout_event *ev;
1064	int ret;
1065
1066	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1067	if (IS_ERR(tb)) {
1068		ret = PTR_ERR(tb);
1069		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1070		return ret;
1071	}
1072
1073	ev = tb[WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT];
1074	if (!ev) {
1075		kfree(tb);
1076		return -EPROTO;
1077	}
1078
1079	arg->mac_addr = ev->peer_macaddr.addr;
1080
1081	kfree(tb);
1082	return 0;
1083}
1084
1085struct wmi_tlv_swba_parse {
1086	const struct wmi_host_swba_event *ev;
1087	bool tim_done;
1088	bool noa_done;
1089	size_t n_tim;
1090	size_t n_noa;
1091	struct wmi_swba_ev_arg *arg;
1092};
1093
1094static int ath10k_wmi_tlv_swba_tim_parse(struct ath10k *ar, u16 tag, u16 len,
1095					 const void *ptr, void *data)
1096{
1097	struct wmi_tlv_swba_parse *swba = data;
1098	struct wmi_tim_info_arg *tim_info_arg;
1099	const struct wmi_tim_info *tim_info_ev = ptr;
1100
1101	if (tag != WMI_TLV_TAG_STRUCT_TIM_INFO)
1102		return -EPROTO;
1103
1104	if (swba->n_tim >= ARRAY_SIZE(swba->arg->tim_info))
1105		return -ENOBUFS;
1106
1107	if (__le32_to_cpu(tim_info_ev->tim_len) >
1108	     sizeof(tim_info_ev->tim_bitmap)) {
1109		ath10k_warn(ar, "refusing to parse invalid swba structure\n");
1110		return -EPROTO;
1111	}
1112
1113	tim_info_arg = &swba->arg->tim_info[swba->n_tim];
1114	tim_info_arg->tim_len = tim_info_ev->tim_len;
1115	tim_info_arg->tim_mcast = tim_info_ev->tim_mcast;
1116	tim_info_arg->tim_bitmap = tim_info_ev->tim_bitmap;
1117	tim_info_arg->tim_changed = tim_info_ev->tim_changed;
1118	tim_info_arg->tim_num_ps_pending = tim_info_ev->tim_num_ps_pending;
1119
1120	swba->n_tim++;
1121
1122	return 0;
1123}
1124
1125static int ath10k_wmi_tlv_swba_noa_parse(struct ath10k *ar, u16 tag, u16 len,
1126					 const void *ptr, void *data)
1127{
1128	struct wmi_tlv_swba_parse *swba = data;
1129
1130	if (tag != WMI_TLV_TAG_STRUCT_P2P_NOA_INFO)
1131		return -EPROTO;
1132
1133	if (swba->n_noa >= ARRAY_SIZE(swba->arg->noa_info))
1134		return -ENOBUFS;
1135
1136	swba->arg->noa_info[swba->n_noa++] = ptr;
1137	return 0;
1138}
1139
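/* A host SWBA event carries the SWBA header plus two ARRAY_STRUCT sections;
 * the parser below treats the first array as TIM info entries and the second
 * as P2P NoA info entries. ath10k_wmi_tlv_op_pull_swba_ev() then checks that
 * one of each was parsed per vdev set in the beacon vdev map.
 */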
1140static int ath10k_wmi_tlv_swba_parse(struct ath10k *ar, u16 tag, u16 len,
1141				     const void *ptr, void *data)
1142{
1143	struct wmi_tlv_swba_parse *swba = data;
1144	int ret;
1145
1146	switch (tag) {
1147	case WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT:
1148		swba->ev = ptr;
1149		break;
1150	case WMI_TLV_TAG_ARRAY_STRUCT:
1151		if (!swba->tim_done) {
1152			swba->tim_done = true;
1153			ret = ath10k_wmi_tlv_iter(ar, ptr, len,
1154						  ath10k_wmi_tlv_swba_tim_parse,
1155						  swba);
1156			if (ret)
1157				return ret;
1158		} else if (!swba->noa_done) {
1159			swba->noa_done = true;
1160			ret = ath10k_wmi_tlv_iter(ar, ptr, len,
1161						  ath10k_wmi_tlv_swba_noa_parse,
1162						  swba);
1163			if (ret)
1164				return ret;
1165		}
1166		break;
1167	default:
1168		break;
1169	}
1170	return 0;
1171}
1172
1173static int ath10k_wmi_tlv_op_pull_swba_ev(struct ath10k *ar,
1174					  struct sk_buff *skb,
1175					  struct wmi_swba_ev_arg *arg)
1176{
1177	struct wmi_tlv_swba_parse swba = { .arg = arg };
1178	u32 map;
1179	size_t n_vdevs;
1180	int ret;
1181
1182	ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
1183				  ath10k_wmi_tlv_swba_parse, &swba);
1184	if (ret) {
1185		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1186		return ret;
1187	}
1188
1189	if (!swba.ev)
1190		return -EPROTO;
1191
1192	arg->vdev_map = swba.ev->vdev_map;
1193
1194	for (map = __le32_to_cpu(arg->vdev_map), n_vdevs = 0; map; map >>= 1)
1195		if (map & BIT(0))
1196			n_vdevs++;
1197
1198	if (n_vdevs != swba.n_tim ||
1199	    n_vdevs != swba.n_noa)
1200		return -EPROTO;
1201
1202	return 0;
1203}
1204
1205static int ath10k_wmi_tlv_op_pull_phyerr_ev_hdr(struct ath10k *ar,
1206						struct sk_buff *skb,
1207						struct wmi_phyerr_hdr_arg *arg)
1208{
1209	const void **tb;
1210	const struct wmi_tlv_phyerr_ev *ev;
1211	const void *phyerrs;
1212	int ret;
1213
1214	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1215	if (IS_ERR(tb)) {
1216		ret = PTR_ERR(tb);
1217		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1218		return ret;
1219	}
1220
1221	ev = tb[WMI_TLV_TAG_STRUCT_COMB_PHYERR_RX_HDR];
1222	phyerrs = tb[WMI_TLV_TAG_ARRAY_BYTE];
1223
1224	if (!ev || !phyerrs) {
1225		kfree(tb);
1226		return -EPROTO;
1227	}
1228
1229	arg->num_phyerrs  = __le32_to_cpu(ev->num_phyerrs);
1230	arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32);
1231	arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32);
1232	arg->buf_len = __le32_to_cpu(ev->buf_len);
1233	arg->phyerrs = phyerrs;
1234
1235	kfree(tb);
1236	return 0;
1237}
1238
1239#define WMI_TLV_ABI_VER_NS0 0x5F414351
1240#define WMI_TLV_ABI_VER_NS1 0x00004C4D
1241#define WMI_TLV_ABI_VER_NS2 0x00000000
1242#define WMI_TLV_ABI_VER_NS3 0x00000000
1243
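/* WMI_TLV_ABI_VER0 packs the major version into the top byte and the minor
 * version into the low 24 bits. These values are advertised in the init
 * command and compared against the service ready event below; a mismatch is
 * treated as an unsupported firmware ABI.
 */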
1244#define WMI_TLV_ABI_VER0_MAJOR 1
1245#define WMI_TLV_ABI_VER0_MINOR 0
1246#define WMI_TLV_ABI_VER0 ((((WMI_TLV_ABI_VER0_MAJOR) << 24) & 0xFF000000) | \
1247			  (((WMI_TLV_ABI_VER0_MINOR) <<  0) & 0x00FFFFFF))
1248#define WMI_TLV_ABI_VER1 53
1249
1250static int
1251ath10k_wmi_tlv_parse_mem_reqs(struct ath10k *ar, u16 tag, u16 len,
1252			      const void *ptr, void *data)
1253{
1254	struct wmi_svc_rdy_ev_arg *arg = data;
1255	int i;
1256
1257	if (tag != WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ)
1258		return -EPROTO;
1259
1260	for (i = 0; i < ARRAY_SIZE(arg->mem_reqs); i++) {
1261		if (!arg->mem_reqs[i]) {
1262			arg->mem_reqs[i] = ptr;
1263			return 0;
1264		}
1265	}
1266
1267	return -ENOMEM;
1268}
1269
1270struct wmi_tlv_svc_rdy_parse {
1271	const struct hal_reg_capabilities *reg;
1272	const struct wmi_tlv_svc_rdy_ev *ev;
1273	const __le32 *svc_bmap;
1274	const struct wlan_host_mem_req *mem_reqs;
1275	bool svc_bmap_done;
1276	bool dbs_hw_mode_done;
1277};
1278
1279static int ath10k_wmi_tlv_svc_rdy_parse(struct ath10k *ar, u16 tag, u16 len,
1280					const void *ptr, void *data)
1281{
1282	struct wmi_tlv_svc_rdy_parse *svc_rdy = data;
1283
1284	switch (tag) {
1285	case WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT:
1286		svc_rdy->ev = ptr;
1287		break;
1288	case WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES:
1289		svc_rdy->reg = ptr;
1290		break;
1291	case WMI_TLV_TAG_ARRAY_STRUCT:
1292		svc_rdy->mem_reqs = ptr;
1293		break;
1294	case WMI_TLV_TAG_ARRAY_UINT32:
1295		if (!svc_rdy->svc_bmap_done) {
1296			svc_rdy->svc_bmap_done = true;
1297			svc_rdy->svc_bmap = ptr;
1298		} else if (!svc_rdy->dbs_hw_mode_done) {
1299			svc_rdy->dbs_hw_mode_done = true;
1300		}
1301		break;
1302	default:
1303		break;
1304	}
1305	return 0;
1306}
1307
1308static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
1309					     struct sk_buff *skb,
1310					     struct wmi_svc_rdy_ev_arg *arg)
1311{
1312	const struct hal_reg_capabilities *reg;
1313	const struct wmi_tlv_svc_rdy_ev *ev;
1314	const __le32 *svc_bmap;
1315	const struct wlan_host_mem_req *mem_reqs;
1316	struct wmi_tlv_svc_rdy_parse svc_rdy = { };
1317	int ret;
1318
1319	ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
1320				  ath10k_wmi_tlv_svc_rdy_parse, &svc_rdy);
1321	if (ret) {
1322		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1323		return ret;
1324	}
1325
1326	ev = svc_rdy.ev;
1327	reg = svc_rdy.reg;
1328	svc_bmap = svc_rdy.svc_bmap;
1329	mem_reqs = svc_rdy.mem_reqs;
1330
1331	if (!ev || !reg || !svc_bmap || !mem_reqs)
1332		return -EPROTO;
1333
1334	/* This is an internal ABI compatibility check for WMI TLV so check it
1335	 * here instead of the generic WMI code.
1336	 */
1337	ath10k_dbg(ar, ATH10K_DBG_WMI,
1338		   "wmi tlv abi 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x\n",
1339		   __le32_to_cpu(ev->abi.abi_ver0), WMI_TLV_ABI_VER0,
1340		   __le32_to_cpu(ev->abi.abi_ver_ns0), WMI_TLV_ABI_VER_NS0,
1341		   __le32_to_cpu(ev->abi.abi_ver_ns1), WMI_TLV_ABI_VER_NS1,
1342		   __le32_to_cpu(ev->abi.abi_ver_ns2), WMI_TLV_ABI_VER_NS2,
1343		   __le32_to_cpu(ev->abi.abi_ver_ns3), WMI_TLV_ABI_VER_NS3);
1344
1345	if (__le32_to_cpu(ev->abi.abi_ver0) != WMI_TLV_ABI_VER0 ||
1346	    __le32_to_cpu(ev->abi.abi_ver_ns0) != WMI_TLV_ABI_VER_NS0 ||
1347	    __le32_to_cpu(ev->abi.abi_ver_ns1) != WMI_TLV_ABI_VER_NS1 ||
1348	    __le32_to_cpu(ev->abi.abi_ver_ns2) != WMI_TLV_ABI_VER_NS2 ||
1349	    __le32_to_cpu(ev->abi.abi_ver_ns3) != WMI_TLV_ABI_VER_NS3) {
1350		return -ENOTSUPP;
1351	}
1352
1353	arg->min_tx_power = ev->hw_min_tx_power;
1354	arg->max_tx_power = ev->hw_max_tx_power;
1355	arg->ht_cap = ev->ht_cap_info;
1356	arg->vht_cap = ev->vht_cap_info;
1357	arg->vht_supp_mcs = ev->vht_supp_mcs;
1358	arg->sw_ver0 = ev->abi.abi_ver0;
1359	arg->sw_ver1 = ev->abi.abi_ver1;
1360	arg->fw_build = ev->fw_build_vers;
1361	arg->phy_capab = ev->phy_capability;
1362	arg->num_rf_chains = ev->num_rf_chains;
1363	arg->eeprom_rd = reg->eeprom_rd;
1364	arg->low_2ghz_chan = reg->low_2ghz_chan;
1365	arg->high_2ghz_chan = reg->high_2ghz_chan;
1366	arg->low_5ghz_chan = reg->low_5ghz_chan;
1367	arg->high_5ghz_chan = reg->high_5ghz_chan;
1368	arg->num_mem_reqs = ev->num_mem_reqs;
1369	arg->service_map = svc_bmap;
1370	arg->service_map_len = ath10k_wmi_tlv_len(svc_bmap);
1371	arg->sys_cap_info = ev->sys_cap_info;
1372
1373	ret = ath10k_wmi_tlv_iter(ar, mem_reqs, ath10k_wmi_tlv_len(mem_reqs),
1374				  ath10k_wmi_tlv_parse_mem_reqs, arg);
1375	if (ret) {
1376		ath10k_warn(ar, "failed to parse mem_reqs tlv: %d\n", ret);
1377		return ret;
1378	}
1379
1380	return 0;
1381}
1382
1383static int ath10k_wmi_tlv_op_pull_rdy_ev(struct ath10k *ar,
1384					 struct sk_buff *skb,
1385					 struct wmi_rdy_ev_arg *arg)
1386{
1387	const void **tb;
1388	const struct wmi_tlv_rdy_ev *ev;
1389	int ret;
1390
1391	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1392	if (IS_ERR(tb)) {
1393		ret = PTR_ERR(tb);
1394		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1395		return ret;
1396	}
1397
1398	ev = tb[WMI_TLV_TAG_STRUCT_READY_EVENT];
1399	if (!ev) {
1400		kfree(tb);
1401		return -EPROTO;
1402	}
1403
1404	arg->sw_version = ev->abi.abi_ver0;
1405	arg->abi_version = ev->abi.abi_ver1;
1406	arg->status = ev->status;
1407	arg->mac_addr = ev->mac_addr.addr;
1408
1409	kfree(tb);
1410	return 0;
1411}
1412
1413static int ath10k_wmi_tlv_svc_avail_parse(struct ath10k *ar, u16 tag, u16 len,
1414					  const void *ptr, void *data)
1415{
1416	struct wmi_svc_avail_ev_arg *arg = data;
1417
1418	switch (tag) {
1419	case WMI_TLV_TAG_STRUCT_SERVICE_AVAILABLE_EVENT:
1420		arg->service_map_ext_valid = true;
1421		arg->service_map_ext_len = *(__le32 *)ptr;
1422		arg->service_map_ext = ptr + sizeof(__le32);
1423		return 0;
1424	default:
1425		break;
1426	}
1427
1428	return 0;
1429}
1430
1431static int ath10k_wmi_tlv_op_pull_svc_avail(struct ath10k *ar,
1432					    struct sk_buff *skb,
1433					    struct wmi_svc_avail_ev_arg *arg)
1434{
1435	int ret;
1436
1437	ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
1438				  ath10k_wmi_tlv_svc_avail_parse, arg);
1439
1440	if (ret) {
1441		ath10k_warn(ar, "failed to parse svc_avail tlv: %d\n", ret);
1442		return ret;
1443	}
1444
1445	return 0;
1446}
1447
1448static void ath10k_wmi_tlv_pull_vdev_stats(const struct wmi_tlv_vdev_stats *src,
1449					   struct ath10k_fw_stats_vdev *dst)
1450{
1451	int i;
1452
1453	dst->vdev_id = __le32_to_cpu(src->vdev_id);
1454	dst->beacon_snr = __le32_to_cpu(src->beacon_snr);
1455	dst->data_snr = __le32_to_cpu(src->data_snr);
1456	dst->num_rx_frames = __le32_to_cpu(src->num_rx_frames);
1457	dst->num_rts_fail = __le32_to_cpu(src->num_rts_fail);
1458	dst->num_rts_success = __le32_to_cpu(src->num_rts_success);
1459	dst->num_rx_err = __le32_to_cpu(src->num_rx_err);
1460	dst->num_rx_discard = __le32_to_cpu(src->num_rx_discard);
1461	dst->num_tx_not_acked = __le32_to_cpu(src->num_tx_not_acked);
1462
1463	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames); i++)
1464		dst->num_tx_frames[i] =
1465			__le32_to_cpu(src->num_tx_frames[i]);
1466
1467	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_retries); i++)
1468		dst->num_tx_frames_retries[i] =
1469			__le32_to_cpu(src->num_tx_frames_retries[i]);
1470
1471	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_failures); i++)
1472		dst->num_tx_frames_failures[i] =
1473			__le32_to_cpu(src->num_tx_frames_failures[i]);
1474
1475	for (i = 0; i < ARRAY_SIZE(src->tx_rate_history); i++)
1476		dst->tx_rate_history[i] =
1477			__le32_to_cpu(src->tx_rate_history[i]);
1478
1479	for (i = 0; i < ARRAY_SIZE(src->beacon_rssi_history); i++)
1480		dst->beacon_rssi_history[i] =
1481			__le32_to_cpu(src->beacon_rssi_history[i]);
1482}
1483
1484static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
1485					   struct sk_buff *skb,
1486					   struct ath10k_fw_stats *stats)
1487{
1488	const void **tb;
1489	const struct wmi_tlv_stats_ev *ev;
1490	u32 num_peer_stats_extd;
1491	const void *data;
1492	u32 num_pdev_stats;
1493	u32 num_vdev_stats;
1494	u32 num_peer_stats;
1495	u32 num_bcnflt_stats;
1496	u32 num_chan_stats;
1497	size_t data_len;
1498	u32 stats_id;
1499	int ret;
1500	int i;
1501
1502	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1503	if (IS_ERR(tb)) {
1504		ret = PTR_ERR(tb);
1505		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1506		return ret;
1507	}
1508
1509	ev = tb[WMI_TLV_TAG_STRUCT_STATS_EVENT];
1510	data = tb[WMI_TLV_TAG_ARRAY_BYTE];
1511
1512	if (!ev || !data) {
1513		kfree(tb);
1514		return -EPROTO;
1515	}
1516
1517	data_len = ath10k_wmi_tlv_len(data);
1518	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
1519	num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
1520	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
1521	num_bcnflt_stats = __le32_to_cpu(ev->num_bcnflt_stats);
1522	num_chan_stats = __le32_to_cpu(ev->num_chan_stats);
1523	stats_id = __le32_to_cpu(ev->stats_id);
1524	num_peer_stats_extd = __le32_to_cpu(ev->num_peer_stats_extd);
1525
1526	ath10k_dbg(ar, ATH10K_DBG_WMI,
1527		   "wmi tlv stats update pdev %i vdev %i peer %i bcnflt %i chan %i peer_extd %i\n",
1528		   num_pdev_stats, num_vdev_stats, num_peer_stats,
1529		   num_bcnflt_stats, num_chan_stats, num_peer_stats_extd);
1530
1531	for (i = 0; i < num_pdev_stats; i++) {
1532		const struct wmi_pdev_stats *src;
1533		struct ath10k_fw_stats_pdev *dst;
1534
1535		src = data;
1536		if (data_len < sizeof(*src)) {
1537			kfree(tb);
1538			return -EPROTO;
1539		}
1540
1541		data += sizeof(*src);
1542		data_len -= sizeof(*src);
1543
1544		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
1545		if (!dst)
1546			continue;
1547
1548		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
1549		ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
1550		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
1551		list_add_tail(&dst->list, &stats->pdevs);
1552	}
1553
1554	for (i = 0; i < num_vdev_stats; i++) {
1555		const struct wmi_tlv_vdev_stats *src;
1556		struct ath10k_fw_stats_vdev *dst;
1557
1558		src = data;
1559		if (data_len < sizeof(*src)) {
1560			kfree(tb);
1561			return -EPROTO;
1562		}
1563
1564		data += sizeof(*src);
1565		data_len -= sizeof(*src);
1566
1567		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
1568		if (!dst)
1569			continue;
1570
1571		ath10k_wmi_tlv_pull_vdev_stats(src, dst);
1572		list_add_tail(&dst->list, &stats->vdevs);
1573	}
1574
1575	for (i = 0; i < num_peer_stats; i++) {
1576		const struct wmi_10x_peer_stats *src;
1577		struct ath10k_fw_stats_peer *dst;
1578
1579		src = data;
1580		if (data_len < sizeof(*src)) {
1581			kfree(tb);
1582			return -EPROTO;
1583		}
1584
1585		data += sizeof(*src);
1586		data_len -= sizeof(*src);
1587
1588		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
1589		if (!dst)
1590			continue;
1591
1592		ath10k_wmi_pull_peer_stats(&src->old, dst);
1593		dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
1594
1595		if (stats_id & WMI_TLV_STAT_PEER_EXTD) {
1596			const struct wmi_tlv_peer_stats_extd *extd;
1597			unsigned long rx_duration_high;
1598
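			/* Extended peer stats follow the array of basic peer
			 * stats in the buffer: skip the remaining basic
			 * records, then index the i-th extended record.
			 */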
1599			extd = data + sizeof(*src) * (num_peer_stats - i - 1)
1600			       + sizeof(*extd) * i;
1601
1602			dst->rx_duration = __le32_to_cpu(extd->rx_duration);
1603			rx_duration_high = __le32_to_cpu
1604						(extd->rx_duration_high);
1605
1606			if (test_bit(WMI_TLV_PEER_RX_DURATION_HIGH_VALID_BIT,
1607				     &rx_duration_high)) {
1608				rx_duration_high =
1609					FIELD_GET(WMI_TLV_PEER_RX_DURATION_HIGH_MASK,
1610						  rx_duration_high);
1611				dst->rx_duration |= (u64)rx_duration_high <<
1612						    WMI_TLV_PEER_RX_DURATION_SHIFT;
1613			}
1614		}
1615
1616		list_add_tail(&dst->list, &stats->peers);
1617	}
1618
1619	kfree(tb);
1620	return 0;
1621}
1622
1623static int ath10k_wmi_tlv_op_pull_roam_ev(struct ath10k *ar,
1624					  struct sk_buff *skb,
1625					  struct wmi_roam_ev_arg *arg)
1626{
1627	const void **tb;
1628	const struct wmi_tlv_roam_ev *ev;
1629	int ret;
1630
1631	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1632	if (IS_ERR(tb)) {
1633		ret = PTR_ERR(tb);
1634		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1635		return ret;
1636	}
1637
1638	ev = tb[WMI_TLV_TAG_STRUCT_ROAM_EVENT];
1639	if (!ev) {
1640		kfree(tb);
1641		return -EPROTO;
1642	}
1643
1644	arg->vdev_id = ev->vdev_id;
1645	arg->reason = ev->reason;
1646	arg->rssi = ev->rssi;
1647
1648	kfree(tb);
1649	return 0;
1650}
1651
1652static int
1653ath10k_wmi_tlv_op_pull_wow_ev(struct ath10k *ar, struct sk_buff *skb,
1654			      struct wmi_wow_ev_arg *arg)
1655{
1656	const void **tb;
1657	const struct wmi_tlv_wow_event_info *ev;
1658	int ret;
1659
1660	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1661	if (IS_ERR(tb)) {
1662		ret = PTR_ERR(tb);
1663		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1664		return ret;
1665	}
1666
1667	ev = tb[WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO];
1668	if (!ev) {
1669		kfree(tb);
1670		return -EPROTO;
1671	}
1672
1673	arg->vdev_id = __le32_to_cpu(ev->vdev_id);
1674	arg->flag = __le32_to_cpu(ev->flag);
1675	arg->wake_reason = __le32_to_cpu(ev->wake_reason);
1676	arg->data_len = __le32_to_cpu(ev->data_len);
1677
1678	kfree(tb);
1679	return 0;
1680}
1681
1682static int ath10k_wmi_tlv_op_pull_echo_ev(struct ath10k *ar,
1683					  struct sk_buff *skb,
1684					  struct wmi_echo_ev_arg *arg)
1685{
1686	const void **tb;
1687	const struct wmi_echo_event *ev;
1688	int ret;
1689
1690	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1691	if (IS_ERR(tb)) {
1692		ret = PTR_ERR(tb);
1693		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1694		return ret;
1695	}
1696
1697	ev = tb[WMI_TLV_TAG_STRUCT_ECHO_EVENT];
1698	if (!ev) {
1699		kfree(tb);
1700		return -EPROTO;
1701	}
1702
1703	arg->value = ev->value;
1704
1705	kfree(tb);
1706	return 0;
1707}
1708
1709static struct sk_buff *
1710ath10k_wmi_tlv_op_gen_pdev_suspend(struct ath10k *ar, u32 opt)
1711{
1712	struct wmi_tlv_pdev_suspend *cmd;
1713	struct wmi_tlv *tlv;
1714	struct sk_buff *skb;
1715
1716	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1717	if (!skb)
1718		return ERR_PTR(-ENOMEM);
1719
1720	tlv = (void *)skb->data;
1721	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SUSPEND_CMD);
1722	tlv->len = __cpu_to_le16(sizeof(*cmd));
1723	cmd = (void *)tlv->value;
1724	cmd->opt = __cpu_to_le32(opt);
1725
1726	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev suspend\n");
1727	return skb;
1728}
1729
1730static struct sk_buff *
1731ath10k_wmi_tlv_op_gen_pdev_resume(struct ath10k *ar)
1732{
1733	struct wmi_tlv_resume_cmd *cmd;
1734	struct wmi_tlv *tlv;
1735	struct sk_buff *skb;
1736
1737	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1738	if (!skb)
1739		return ERR_PTR(-ENOMEM);
1740
1741	tlv = (void *)skb->data;
1742	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_RESUME_CMD);
1743	tlv->len = __cpu_to_le16(sizeof(*cmd));
1744	cmd = (void *)tlv->value;
1745	cmd->reserved = __cpu_to_le32(0);
1746
1747	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev resume\n");
1748	return skb;
1749}
1750
1751static struct sk_buff *
1752ath10k_wmi_tlv_op_gen_pdev_set_rd(struct ath10k *ar,
1753				  u16 rd, u16 rd2g, u16 rd5g,
1754				  u16 ctl2g, u16 ctl5g,
1755				  enum wmi_dfs_region dfs_reg)
1756{
1757	struct wmi_tlv_pdev_set_rd_cmd *cmd;
1758	struct wmi_tlv *tlv;
1759	struct sk_buff *skb;
1760
1761	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1762	if (!skb)
1763		return ERR_PTR(-ENOMEM);
1764
1765	tlv = (void *)skb->data;
1766	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_REGDOMAIN_CMD);
1767	tlv->len = __cpu_to_le16(sizeof(*cmd));
1768	cmd = (void *)tlv->value;
1769	cmd->regd = __cpu_to_le32(rd);
1770	cmd->regd_2ghz = __cpu_to_le32(rd2g);
1771	cmd->regd_5ghz = __cpu_to_le32(rd5g);
1772	cmd->conform_limit_2ghz = __cpu_to_le32(ctl2g);
1773	cmd->conform_limit_5ghz = __cpu_to_le32(ctl5g);
1774
1775	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set rd\n");
1776	return skb;
1777}
1778
1779static enum wmi_txbf_conf ath10k_wmi_tlv_txbf_conf_scheme(struct ath10k *ar)
1780{
1781	return WMI_TXBF_CONF_AFTER_ASSOC;
1782}
1783
1784static struct sk_buff *
1785ath10k_wmi_tlv_op_gen_pdev_set_param(struct ath10k *ar, u32 param_id,
1786				     u32 param_value)
1787{
1788	struct wmi_tlv_pdev_set_param_cmd *cmd;
1789	struct wmi_tlv *tlv;
1790	struct sk_buff *skb;
1791
1792	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1793	if (!skb)
1794		return ERR_PTR(-ENOMEM);
1795
1796	tlv = (void *)skb->data;
1797	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_PARAM_CMD);
1798	tlv->len = __cpu_to_le16(sizeof(*cmd));
1799	cmd = (void *)tlv->value;
1800	cmd->param_id = __cpu_to_le32(param_id);
1801	cmd->param_value = __cpu_to_le32(param_value);
1802
1803	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set param %d value 0x%x\n",
1804		   param_id, param_value);
1805	return skb;
1806}
1807
1808static void
1809ath10k_wmi_tlv_put_host_mem_chunks(struct ath10k *ar, void *host_mem_chunks)
1810{
1811	struct host_memory_chunk_tlv *chunk;
1812	struct wmi_tlv *tlv;
1813	dma_addr_t paddr;
1814	int i;
1815	__le16 tlv_len, tlv_tag;
1816
1817	tlv_tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WLAN_HOST_MEMORY_CHUNK);
1818	tlv_len = __cpu_to_le16(sizeof(*chunk));
1819	for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
1820		tlv = host_mem_chunks;
1821		tlv->tag = tlv_tag;
1822		tlv->len = tlv_len;
1823		chunk = (void *)tlv->value;
1824
1825		chunk->ptr = __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
1826		chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len);
1827		chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
1828
1829		if (test_bit(WMI_SERVICE_SUPPORT_EXTEND_ADDRESS,
1830			     ar->wmi.svc_map)) {
1831			paddr = ar->wmi.mem_chunks[i].paddr;
1832			chunk->ptr_high = __cpu_to_le32(upper_32_bits(paddr));
1833		}
1834
1835		ath10k_dbg(ar, ATH10K_DBG_WMI,
1836			   "wmi-tlv chunk %d len %d, addr 0x%llx, id 0x%x\n",
1837			   i,
1838			   ar->wmi.mem_chunks[i].len,
1839			   (unsigned long long)ar->wmi.mem_chunks[i].paddr,
1840			   ar->wmi.mem_chunks[i].req_id);
1841
1842		host_mem_chunks += sizeof(*tlv);
1843		host_mem_chunks += sizeof(*chunk);
1844	}
1845}
1846
1847static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
1848{
1849	struct sk_buff *skb;
1850	struct wmi_tlv *tlv;
1851	struct wmi_tlv_init_cmd *cmd;
1852	struct wmi_tlv_resource_config *cfg;
1853	void *chunks;
1854	size_t len, chunks_len;
1855	void *ptr;
1856
1857	chunks_len = ar->wmi.num_mem_chunks *
1858		     (sizeof(struct host_memory_chunk_tlv) + sizeof(*tlv));
1859	len = (sizeof(*tlv) + sizeof(*cmd)) +
1860	      (sizeof(*tlv) + sizeof(*cfg)) +
1861	      (sizeof(*tlv) + chunks_len);
1862
1863	skb = ath10k_wmi_alloc_skb(ar, len);
1864	if (!skb)
1865		return ERR_PTR(-ENOMEM);
1866
1867	ptr = skb->data;
1868
1869	tlv = ptr;
1870	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_INIT_CMD);
1871	tlv->len = __cpu_to_le16(sizeof(*cmd));
1872	cmd = (void *)tlv->value;
1873	ptr += sizeof(*tlv);
1874	ptr += sizeof(*cmd);
1875
1876	tlv = ptr;
1877	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_RESOURCE_CONFIG);
1878	tlv->len = __cpu_to_le16(sizeof(*cfg));
1879	cfg = (void *)tlv->value;
1880	ptr += sizeof(*tlv);
1881	ptr += sizeof(*cfg);
1882
1883	tlv = ptr;
1884	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
1885	tlv->len = __cpu_to_le16(chunks_len);
1886	chunks = (void *)tlv->value;
1887
1888	ptr += sizeof(*tlv);
1889	ptr += chunks_len;
1890
1891	cmd->abi.abi_ver0 = __cpu_to_le32(WMI_TLV_ABI_VER0);
1892	cmd->abi.abi_ver1 = __cpu_to_le32(WMI_TLV_ABI_VER1);
1893	cmd->abi.abi_ver_ns0 = __cpu_to_le32(WMI_TLV_ABI_VER_NS0);
1894	cmd->abi.abi_ver_ns1 = __cpu_to_le32(WMI_TLV_ABI_VER_NS1);
1895	cmd->abi.abi_ver_ns2 = __cpu_to_le32(WMI_TLV_ABI_VER_NS2);
1896	cmd->abi.abi_ver_ns3 = __cpu_to_le32(WMI_TLV_ABI_VER_NS3);
1897	cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
1898
1899	cfg->num_vdevs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
1900
1901	if (ar->hw_params.num_peers)
1902		cfg->num_peers = __cpu_to_le32(ar->hw_params.num_peers);
1903	else
1904		cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS);
1905	cfg->ast_skid_limit = __cpu_to_le32(ar->hw_params.ast_skid_limit);
1906	cfg->num_wds_entries = __cpu_to_le32(ar->hw_params.num_wds_entries);
1907
1908	if (test_bit(WMI_SERVICE_RX_FULL_REORDER, ar->wmi.svc_map)) {
1909		cfg->num_offload_peers = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
1910		cfg->num_offload_reorder_bufs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
1911	} else {
1912		cfg->num_offload_peers = __cpu_to_le32(0);
1913		cfg->num_offload_reorder_bufs = __cpu_to_le32(0);
1914	}
1915
1916	cfg->num_peer_keys = __cpu_to_le32(2);
1917	if (ar->hw_params.num_peers)
1918		cfg->num_tids = __cpu_to_le32(ar->hw_params.num_peers * 2);
1919	else
1920		cfg->num_tids = __cpu_to_le32(TARGET_TLV_NUM_TIDS);
1921	cfg->tx_chain_mask = __cpu_to_le32(0x7);
1922	cfg->rx_chain_mask = __cpu_to_le32(0x7);
1923	cfg->rx_timeout_pri[0] = __cpu_to_le32(0x64);
1924	cfg->rx_timeout_pri[1] = __cpu_to_le32(0x64);
1925	cfg->rx_timeout_pri[2] = __cpu_to_le32(0x64);
1926	cfg->rx_timeout_pri[3] = __cpu_to_le32(0x28);
1927	cfg->rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
1928	cfg->scan_max_pending_reqs = __cpu_to_le32(4);
1929	cfg->bmiss_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
1930	cfg->roam_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
1931	cfg->roam_offload_max_ap_profiles = __cpu_to_le32(8);
1932	cfg->num_mcast_groups = __cpu_to_le32(0);
1933	cfg->num_mcast_table_elems = __cpu_to_le32(0);
1934	cfg->mcast2ucast_mode = __cpu_to_le32(0);
1935	cfg->tx_dbg_log_size = __cpu_to_le32(0x400);
1936	cfg->dma_burst_size = __cpu_to_le32(0);
1937	cfg->mac_aggr_delim = __cpu_to_le32(0);
1938	cfg->rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(0);
1939	cfg->vow_config = __cpu_to_le32(0);
1940	cfg->gtk_offload_max_vdev = __cpu_to_le32(2);
1941	cfg->num_msdu_desc = __cpu_to_le32(ar->htt.max_num_pending_tx);
1942	cfg->max_frag_entries = __cpu_to_le32(2);
1943	cfg->num_tdls_vdevs = __cpu_to_le32(TARGET_TLV_NUM_TDLS_VDEVS);
1944	cfg->num_tdls_conn_table_entries = __cpu_to_le32(0x20);
1945	cfg->beacon_tx_offload_max_vdev = __cpu_to_le32(2);
1946	cfg->num_multicast_filter_entries = __cpu_to_le32(5);
1947	cfg->num_wow_filters = __cpu_to_le32(ar->wow.max_num_patterns);
1948	cfg->num_keep_alive_pattern = __cpu_to_le32(6);
1949	cfg->keep_alive_pattern_size = __cpu_to_le32(0);
1950	cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1);
1951	cfg->max_tdls_concurrent_buffer_sta = __cpu_to_le32(1);
1952	cfg->wmi_send_separate = __cpu_to_le32(0);
1953	cfg->num_ocb_vdevs = __cpu_to_le32(0);
1954	cfg->num_ocb_channels = __cpu_to_le32(0);
1955	cfg->num_ocb_schedules = __cpu_to_le32(0);
1956	cfg->host_capab = __cpu_to_le32(WMI_TLV_FLAG_MGMT_BUNDLE_TX_COMPL);
1957
1958	if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
1959		cfg->host_capab |= __cpu_to_le32(WMI_RSRC_CFG_FLAG_TX_ACK_RSSI);
1960
1961	ath10k_wmi_tlv_put_host_mem_chunks(ar, chunks);
1962
1963	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv init\n");
1964	return skb;
1965}
1966
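/*
 * Build the TLV start scan command: the fixed command struct is followed
 * by a UINT32 array of channel frequencies, a fixed-struct array of SSIDs,
 * a fixed-struct array of BSSIDs and a byte array with the extra IEs
 * (padded up to a 4-byte boundary).
 */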
1967static struct sk_buff *
1968ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar,
1969				 const struct wmi_start_scan_arg *arg)
1970{
1971	struct wmi_tlv_start_scan_cmd *cmd;
1972	struct wmi_tlv *tlv;
1973	struct sk_buff *skb;
1974	size_t len, chan_len, ssid_len, bssid_len, ie_len;
1975	__le32 *chans;
1976	struct wmi_ssid *ssids;
1977	struct wmi_mac_addr *addrs;
1978	void *ptr;
1979	int i, ret;
1980
1981	ret = ath10k_wmi_start_scan_verify(arg);
1982	if (ret)
1983		return ERR_PTR(ret);
1984
1985	chan_len = arg->n_channels * sizeof(__le32);
1986	ssid_len = arg->n_ssids * sizeof(struct wmi_ssid);
1987	bssid_len = arg->n_bssids * sizeof(struct wmi_mac_addr);
1988	ie_len = roundup(arg->ie_len, 4);
1989	len = (sizeof(*tlv) + sizeof(*cmd)) +
1990	      sizeof(*tlv) + chan_len +
1991	      sizeof(*tlv) + ssid_len +
1992	      sizeof(*tlv) + bssid_len +
1993	      sizeof(*tlv) + ie_len;
1994
1995	skb = ath10k_wmi_alloc_skb(ar, len);
1996	if (!skb)
1997		return ERR_PTR(-ENOMEM);
1998
1999	ptr = (void *)skb->data;
2000	tlv = ptr;
2001	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_START_SCAN_CMD);
2002	tlv->len = __cpu_to_le16(sizeof(*cmd));
2003	cmd = (void *)tlv->value;
2004
2005	ath10k_wmi_put_start_scan_common(&cmd->common, arg);
2006	cmd->burst_duration_ms = __cpu_to_le32(arg->burst_duration_ms);
2007	cmd->num_channels = __cpu_to_le32(arg->n_channels);
2008	cmd->num_ssids = __cpu_to_le32(arg->n_ssids);
2009	cmd->num_bssids = __cpu_to_le32(arg->n_bssids);
2010	cmd->ie_len = __cpu_to_le32(arg->ie_len);
2011	cmd->num_probes = __cpu_to_le32(3);
2012	ether_addr_copy(cmd->mac_addr.addr, arg->mac_addr.addr);
2013	ether_addr_copy(cmd->mac_mask.addr, arg->mac_mask.addr);
2014
2015	/* FIXME: There are some scan flag inconsistencies across firmwares,
2016	 * e.g. WMI-TLV inverts the logic behind the following flag.
2017	 */
2018	cmd->common.scan_ctrl_flags ^= __cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ);
2019
2020	ptr += sizeof(*tlv);
2021	ptr += sizeof(*cmd);
2022
2023	tlv = ptr;
2024	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
2025	tlv->len = __cpu_to_le16(chan_len);
2026	chans = (void *)tlv->value;
2027	for (i = 0; i < arg->n_channels; i++)
2028		chans[i] = __cpu_to_le32(arg->channels[i]);
2029
2030	ptr += sizeof(*tlv);
2031	ptr += chan_len;
2032
2033	tlv = ptr;
2034	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_FIXED_STRUCT);
2035	tlv->len = __cpu_to_le16(ssid_len);
2036	ssids = (void *)tlv->value;
2037	for (i = 0; i < arg->n_ssids; i++) {
2038		ssids[i].ssid_len = __cpu_to_le32(arg->ssids[i].len);
2039		memcpy(ssids[i].ssid, arg->ssids[i].ssid, arg->ssids[i].len);
2040	}
2041
2042	ptr += sizeof(*tlv);
2043	ptr += ssid_len;
2044
2045	tlv = ptr;
2046	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_FIXED_STRUCT);
2047	tlv->len = __cpu_to_le16(bssid_len);
2048	addrs = (void *)tlv->value;
2049	for (i = 0; i < arg->n_bssids; i++)
2050		ether_addr_copy(addrs[i].addr, arg->bssids[i].bssid);
2051
2052	ptr += sizeof(*tlv);
2053	ptr += bssid_len;
2054
2055	tlv = ptr;
2056	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
2057	tlv->len = __cpu_to_le16(ie_len);
2058	memcpy(tlv->value, arg->ie, arg->ie_len);
2059
2060	ptr += sizeof(*tlv);
2061	ptr += ie_len;
2062
2063	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv start scan\n");
2064	return skb;
2065}
2066
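/*
 * Stop an ongoing scan. The scan and requestor ids are limited to 12 bits
 * here; the WMI_HOST_SCAN_* prefixes are OR'ed in before the command is
 * sent to the target.
 */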
2067static struct sk_buff *
2068ath10k_wmi_tlv_op_gen_stop_scan(struct ath10k *ar,
2069				const struct wmi_stop_scan_arg *arg)
2070{
2071	struct wmi_stop_scan_cmd *cmd;
2072	struct wmi_tlv *tlv;
2073	struct sk_buff *skb;
2074	u32 scan_id;
2075	u32 req_id;
2076
2077	if (arg->req_id > 0xFFF)
2078		return ERR_PTR(-EINVAL);
2079	if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
2080		return ERR_PTR(-EINVAL);
2081
2082	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2083	if (!skb)
2084		return ERR_PTR(-ENOMEM);
2085
2086	scan_id = arg->u.scan_id;
2087	scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;
2088
2089	req_id = arg->req_id;
2090	req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
2091
2092	tlv = (void *)skb->data;
2093	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STOP_SCAN_CMD);
2094	tlv->len = __cpu_to_le16(sizeof(*cmd));
2095	cmd = (void *)tlv->value;
2096	cmd->req_type = __cpu_to_le32(arg->req_type);
2097	cmd->vdev_id = __cpu_to_le32(arg->u.vdev_id);
2098	cmd->scan_id = __cpu_to_le32(scan_id);
2099	cmd->scan_req_id = __cpu_to_le32(req_id);
2100
2101	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv stop scan\n");
2102	return skb;
2103}
2104
2105static int ath10k_wmi_tlv_op_get_vdev_subtype(struct ath10k *ar,
2106					      enum wmi_vdev_subtype subtype)
2107{
2108	switch (subtype) {
2109	case WMI_VDEV_SUBTYPE_NONE:
2110		return WMI_TLV_VDEV_SUBTYPE_NONE;
2111	case WMI_VDEV_SUBTYPE_P2P_DEVICE:
2112		return WMI_TLV_VDEV_SUBTYPE_P2P_DEV;
2113	case WMI_VDEV_SUBTYPE_P2P_CLIENT:
2114		return WMI_TLV_VDEV_SUBTYPE_P2P_CLI;
2115	case WMI_VDEV_SUBTYPE_P2P_GO:
2116		return WMI_TLV_VDEV_SUBTYPE_P2P_GO;
2117	case WMI_VDEV_SUBTYPE_PROXY_STA:
2118		return WMI_TLV_VDEV_SUBTYPE_PROXY_STA;
2119	case WMI_VDEV_SUBTYPE_MESH_11S:
2120		return WMI_TLV_VDEV_SUBTYPE_MESH_11S;
2121	case WMI_VDEV_SUBTYPE_MESH_NON_11S:
2122		return -ENOTSUPP;
2123	}
2124	return -ENOTSUPP;
2125}
2126
2127static struct sk_buff *
2128ath10k_wmi_tlv_op_gen_vdev_create(struct ath10k *ar,
2129				  u32 vdev_id,
2130				  enum wmi_vdev_type vdev_type,
2131				  enum wmi_vdev_subtype vdev_subtype,
2132				  const u8 mac_addr[ETH_ALEN])
2133{
2134	struct wmi_vdev_create_cmd *cmd;
2135	struct wmi_tlv *tlv;
2136	struct sk_buff *skb;
2137
2138	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2139	if (!skb)
2140		return ERR_PTR(-ENOMEM);
2141
2142	tlv = (void *)skb->data;
2143	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_CREATE_CMD);
2144	tlv->len = __cpu_to_le16(sizeof(*cmd));
2145	cmd = (void *)tlv->value;
2146	cmd->vdev_id = __cpu_to_le32(vdev_id);
2147	cmd->vdev_type = __cpu_to_le32(vdev_type);
2148	cmd->vdev_subtype = __cpu_to_le32(vdev_subtype);
2149	ether_addr_copy(cmd->vdev_macaddr.addr, mac_addr);
2150
2151	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev create\n");
2152	return skb;
2153}
2154
2155static struct sk_buff *
2156ath10k_wmi_tlv_op_gen_vdev_delete(struct ath10k *ar, u32 vdev_id)
2157{
2158	struct wmi_vdev_delete_cmd *cmd;
2159	struct wmi_tlv *tlv;
2160	struct sk_buff *skb;
2161
2162	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2163	if (!skb)
2164		return ERR_PTR(-ENOMEM);
2165
2166	tlv = (void *)skb->data;
2167	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_DELETE_CMD);
2168	tlv->len = __cpu_to_le16(sizeof(*cmd));
2169	cmd = (void *)tlv->value;
2170	cmd->vdev_id = __cpu_to_le32(vdev_id);
2171
2172	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev delete\n");
2173	return skb;
2174}
2175
2176static struct sk_buff *
2177ath10k_wmi_tlv_op_gen_vdev_start(struct ath10k *ar,
2178				 const struct wmi_vdev_start_request_arg *arg,
2179				 bool restart)
2180{
2181	struct wmi_tlv_vdev_start_cmd *cmd;
2182	struct wmi_channel *ch;
2183	struct wmi_tlv *tlv;
2184	struct sk_buff *skb;
2185	size_t len;
2186	void *ptr;
2187	u32 flags = 0;
2188
2189	if (WARN_ON(arg->hidden_ssid && !arg->ssid))
2190		return ERR_PTR(-EINVAL);
2191	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
2192		return ERR_PTR(-EINVAL);
2193
2194	len = (sizeof(*tlv) + sizeof(*cmd)) +
2195	      (sizeof(*tlv) + sizeof(*ch)) +
2196	      (sizeof(*tlv) + 0);
2197	skb = ath10k_wmi_alloc_skb(ar, len);
2198	if (!skb)
2199		return ERR_PTR(-ENOMEM);
2200
2201	if (arg->hidden_ssid)
2202		flags |= WMI_VDEV_START_HIDDEN_SSID;
2203	if (arg->pmf_enabled)
2204		flags |= WMI_VDEV_START_PMF_ENABLED;
2205
2206	ptr = (void *)skb->data;
2207
2208	tlv = ptr;
2209	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_START_REQUEST_CMD);
2210	tlv->len = __cpu_to_le16(sizeof(*cmd));
2211	cmd = (void *)tlv->value;
2212	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
2213	cmd->bcn_intval = __cpu_to_le32(arg->bcn_intval);
2214	cmd->dtim_period = __cpu_to_le32(arg->dtim_period);
2215	cmd->flags = __cpu_to_le32(flags);
2216	cmd->bcn_tx_rate = __cpu_to_le32(arg->bcn_tx_rate);
2217	cmd->bcn_tx_power = __cpu_to_le32(arg->bcn_tx_power);
2218	cmd->disable_hw_ack = __cpu_to_le32(arg->disable_hw_ack);
2219
2220	if (arg->ssid) {
2221		cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len);
2222		memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
2223	}
2224
2225	ptr += sizeof(*tlv);
2226	ptr += sizeof(*cmd);
2227
2228	tlv = ptr;
2229	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
2230	tlv->len = __cpu_to_le16(sizeof(*ch));
2231	ch = (void *)tlv->value;
2232	ath10k_wmi_put_wmi_channel(ar, ch, &arg->channel);
2233
2234	ptr += sizeof(*tlv);
2235	ptr += sizeof(*ch);
2236
2237	tlv = ptr;
2238	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
2239	tlv->len = 0;
2240
2241	/* Note: This is a nested TLV containing:
2242	 * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
2243	 */
2244
2245	ptr += sizeof(*tlv);
2246	ptr += 0;
2247
2248	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev start\n");
2249	return skb;
2250}
2251
2252static struct sk_buff *
2253ath10k_wmi_tlv_op_gen_vdev_stop(struct ath10k *ar, u32 vdev_id)
2254{
2255	struct wmi_vdev_stop_cmd *cmd;
2256	struct wmi_tlv *tlv;
2257	struct sk_buff *skb;
2258
2259	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2260	if (!skb)
2261		return ERR_PTR(-ENOMEM);
2262
2263	tlv = (void *)skb->data;
2264	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_STOP_CMD);
2265	tlv->len = __cpu_to_le16(sizeof(*cmd));
2266	cmd = (void *)tlv->value;
2267	cmd->vdev_id = __cpu_to_le32(vdev_id);
2268
2269	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev stop\n");
2270	return skb;
2271}
2272
2273static struct sk_buff *
2274ath10k_wmi_tlv_op_gen_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
2275			      const u8 *bssid)
2277{
2278	struct wmi_vdev_up_cmd *cmd;
2279	struct wmi_tlv *tlv;
2280	struct sk_buff *skb;
2281
2282	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2283	if (!skb)
2284		return ERR_PTR(-ENOMEM);
2285
2286	tlv = (void *)skb->data;
2287	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_UP_CMD);
2288	tlv->len = __cpu_to_le16(sizeof(*cmd));
2289	cmd = (void *)tlv->value;
2290	cmd->vdev_id = __cpu_to_le32(vdev_id);
2291	cmd->vdev_assoc_id = __cpu_to_le32(aid);
2292	ether_addr_copy(cmd->vdev_bssid.addr, bssid);
2293
2294	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev up\n");
2295	return skb;
2296}
2297
2298static struct sk_buff *
2299ath10k_wmi_tlv_op_gen_vdev_down(struct ath10k *ar, u32 vdev_id)
2300{
2301	struct wmi_vdev_down_cmd *cmd;
2302	struct wmi_tlv *tlv;
2303	struct sk_buff *skb;
2304
2305	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2306	if (!skb)
2307		return ERR_PTR(-ENOMEM);
2308
2309	tlv = (void *)skb->data;
2310	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_DOWN_CMD);
2311	tlv->len = __cpu_to_le16(sizeof(*cmd));
2312	cmd = (void *)tlv->value;
2313	cmd->vdev_id = __cpu_to_le32(vdev_id);
2314
2315	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev down\n");
2316	return skb;
2317}
2318
2319static struct sk_buff *
2320ath10k_wmi_tlv_op_gen_vdev_set_param(struct ath10k *ar, u32 vdev_id,
2321				     u32 param_id, u32 param_value)
2322{
2323	struct wmi_vdev_set_param_cmd *cmd;
2324	struct wmi_tlv *tlv;
2325	struct sk_buff *skb;
2326
2327	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2328	if (!skb)
2329		return ERR_PTR(-ENOMEM);
2330
2331	tlv = (void *)skb->data;
2332	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SET_PARAM_CMD);
2333	tlv->len = __cpu_to_le16(sizeof(*cmd));
2334	cmd = (void *)tlv->value;
2335	cmd->vdev_id = __cpu_to_le32(vdev_id);
2336	cmd->param_id = __cpu_to_le32(param_id);
2337	cmd->param_value = __cpu_to_le32(param_value);
2338
2339	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev %d set param %d value 0x%x\n",
2340		   vdev_id, param_id, param_value);
2341	return skb;
2342}
2343
2344static struct sk_buff *
2345ath10k_wmi_tlv_op_gen_vdev_install_key(struct ath10k *ar,
2346				       const struct wmi_vdev_install_key_arg *arg)
2347{
2348	struct wmi_vdev_install_key_cmd *cmd;
2349	struct wmi_tlv *tlv;
2350	struct sk_buff *skb;
2351	size_t len;
2352	void *ptr;
2353
2354	if (arg->key_cipher == ar->wmi_key_cipher[WMI_CIPHER_NONE] &&
2355	    arg->key_data)
2356		return ERR_PTR(-EINVAL);
2357	if (arg->key_cipher != ar->wmi_key_cipher[WMI_CIPHER_NONE] &&
2358	    !arg->key_data)
2359		return ERR_PTR(-EINVAL);
2360
2361	len = sizeof(*tlv) + sizeof(*cmd) +
2362	      sizeof(*tlv) + roundup(arg->key_len, sizeof(__le32));
2363	skb = ath10k_wmi_alloc_skb(ar, len);
2364	if (!skb)
2365		return ERR_PTR(-ENOMEM);
2366
2367	ptr = (void *)skb->data;
2368	tlv = ptr;
2369	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_INSTALL_KEY_CMD);
2370	tlv->len = __cpu_to_le16(sizeof(*cmd));
2371	cmd = (void *)tlv->value;
2372	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
2373	cmd->key_idx = __cpu_to_le32(arg->key_idx);
2374	cmd->key_flags = __cpu_to_le32(arg->key_flags);
2375	cmd->key_cipher = __cpu_to_le32(arg->key_cipher);
2376	cmd->key_len = __cpu_to_le32(arg->key_len);
2377	cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len);
2378	cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len);
2379
2380	if (arg->macaddr)
2381		ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
2382
2383	ptr += sizeof(*tlv);
2384	ptr += sizeof(*cmd);
2385
2386	tlv = ptr;
2387	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
2388	tlv->len = __cpu_to_le16(roundup(arg->key_len, sizeof(__le32)));
2389	if (arg->key_data)
2390		memcpy(tlv->value, arg->key_data, arg->key_len);
2391
2392	ptr += sizeof(*tlv);
2393	ptr += roundup(arg->key_len, sizeof(__le32));
2394
2395	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev install key\n");
2396	return skb;
2397}
2398
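/*
 * Write a single STA_UAPSD_AUTO_TRIG_PARAM TLV for one access category at
 * @ptr and return the pointer advanced past it. Used by
 * ath10k_wmi_tlv_op_gen_vdev_sta_uapsd() below to pack one TLV per AC.
 */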
2399static void *ath10k_wmi_tlv_put_uapsd_ac(struct ath10k *ar, void *ptr,
2400					 const struct wmi_sta_uapsd_auto_trig_arg *arg)
2401{
2402	struct wmi_sta_uapsd_auto_trig_param *ac;
2403	struct wmi_tlv *tlv;
2404
2405	tlv = ptr;
2406	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_PARAM);
2407	tlv->len = __cpu_to_le16(sizeof(*ac));
2408	ac = (void *)tlv->value;
2409
2410	ac->wmm_ac = __cpu_to_le32(arg->wmm_ac);
2411	ac->user_priority = __cpu_to_le32(arg->user_priority);
2412	ac->service_interval = __cpu_to_le32(arg->service_interval);
2413	ac->suspend_interval = __cpu_to_le32(arg->suspend_interval);
2414	ac->delay_interval = __cpu_to_le32(arg->delay_interval);
2415
2416	ath10k_dbg(ar, ATH10K_DBG_WMI,
2417		   "wmi tlv vdev sta uapsd auto trigger ac %d prio %d svc int %d susp int %d delay int %d\n",
2418		   ac->wmm_ac, ac->user_priority, ac->service_interval,
2419		   ac->suspend_interval, ac->delay_interval);
2420
2421	return ptr + sizeof(*tlv) + sizeof(*ac);
2422}
2423
2424static struct sk_buff *
2425ath10k_wmi_tlv_op_gen_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
2426				     const u8 peer_addr[ETH_ALEN],
2427				     const struct wmi_sta_uapsd_auto_trig_arg *args,
2428				     u32 num_ac)
2429{
2430	struct wmi_sta_uapsd_auto_trig_cmd_fixed_param *cmd;
2431	struct wmi_sta_uapsd_auto_trig_param *ac;
2432	struct wmi_tlv *tlv;
2433	struct sk_buff *skb;
2434	size_t len;
2435	size_t ac_tlv_len;
2436	void *ptr;
2437	int i;
2438
2439	ac_tlv_len = num_ac * (sizeof(*tlv) + sizeof(*ac));
2440	len = sizeof(*tlv) + sizeof(*cmd) +
2441	      sizeof(*tlv) + ac_tlv_len;
2442	skb = ath10k_wmi_alloc_skb(ar, len);
2443	if (!skb)
2444		return ERR_PTR(-ENOMEM);
2445
2446	ptr = (void *)skb->data;
2447	tlv = ptr;
2448	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_CMD);
2449	tlv->len = __cpu_to_le16(sizeof(*cmd));
2450	cmd = (void *)tlv->value;
2451	cmd->vdev_id = __cpu_to_le32(vdev_id);
2452	cmd->num_ac = __cpu_to_le32(num_ac);
2453	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
2454
2455	ptr += sizeof(*tlv);
2456	ptr += sizeof(*cmd);
2457
2458	tlv = ptr;
2459	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
2460	tlv->len = __cpu_to_le16(ac_tlv_len);
2461	ac = (void *)tlv->value;
2462
2463	ptr += sizeof(*tlv);
2464	for (i = 0; i < num_ac; i++)
2465		ptr = ath10k_wmi_tlv_put_uapsd_ac(ar, ptr, &args[i]);
2466
2467	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev sta uapsd auto trigger\n");
2468	return skb;
2469}
2470
2471static void *ath10k_wmi_tlv_put_wmm(void *ptr,
2472				    const struct wmi_wmm_params_arg *arg)
2473{
2474	struct wmi_wmm_params *wmm;
2475	struct wmi_tlv *tlv;
2476
2477	tlv = ptr;
2478	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WMM_PARAMS);
2479	tlv->len = __cpu_to_le16(sizeof(*wmm));
2480	wmm = (void *)tlv->value;
2481	ath10k_wmi_set_wmm_param(wmm, arg);
2482
2483	return ptr + sizeof(*tlv) + sizeof(*wmm);
2484}
2485
2486static struct sk_buff *
2487ath10k_wmi_tlv_op_gen_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
2488				    const struct wmi_wmm_params_all_arg *arg)
2489{
2490	struct wmi_tlv_vdev_set_wmm_cmd *cmd;
2491	struct wmi_tlv *tlv;
2492	struct sk_buff *skb;
2493	size_t len;
2494	void *ptr;
2495
2496	len = sizeof(*tlv) + sizeof(*cmd);
2497	skb = ath10k_wmi_alloc_skb(ar, len);
2498	if (!skb)
2499		return ERR_PTR(-ENOMEM);
2500
2501	ptr = (void *)skb->data;
2502	tlv = ptr;
2503	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SET_WMM_PARAMS_CMD);
2504	tlv->len = __cpu_to_le16(sizeof(*cmd));
2505	cmd = (void *)tlv->value;
2506	cmd->vdev_id = __cpu_to_le32(vdev_id);
2507
2508	ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[0].params, &arg->ac_be);
2509	ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[1].params, &arg->ac_bk);
2510	ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[2].params, &arg->ac_vi);
2511	ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[3].params, &arg->ac_vo);
2512
2513	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev wmm conf\n");
2514	return skb;
2515}
2516
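/*
 * Build the STA keepalive command: the fixed command TLV is followed by an
 * ARP response TLV. The IPv4 addresses are copied through unchanged, i.e.
 * the caller is expected to pass them already in on-wire byte order.
 */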
2517static struct sk_buff *
2518ath10k_wmi_tlv_op_gen_sta_keepalive(struct ath10k *ar,
2519				    const struct wmi_sta_keepalive_arg *arg)
2520{
2521	struct wmi_tlv_sta_keepalive_cmd *cmd;
2522	struct wmi_sta_keepalive_arp_resp *arp;
2523	struct sk_buff *skb;
2524	struct wmi_tlv *tlv;
2525	void *ptr;
2526	size_t len;
2527
2528	len = sizeof(*tlv) + sizeof(*cmd) +
2529	      sizeof(*tlv) + sizeof(*arp);
2530	skb = ath10k_wmi_alloc_skb(ar, len);
2531	if (!skb)
2532		return ERR_PTR(-ENOMEM);
2533
2534	ptr = (void *)skb->data;
2535	tlv = ptr;
2536	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALIVE_CMD);
2537	tlv->len = __cpu_to_le16(sizeof(*cmd));
2538	cmd = (void *)tlv->value;
2539	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
2540	cmd->enabled = __cpu_to_le32(arg->enabled);
2541	cmd->method = __cpu_to_le32(arg->method);
2542	cmd->interval = __cpu_to_le32(arg->interval);
2543
2544	ptr += sizeof(*tlv);
2545	ptr += sizeof(*cmd);
2546
2547	tlv = ptr;
2548	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALVE_ARP_RESPONSE);
2549	tlv->len = __cpu_to_le16(sizeof(*arp));
2550	arp = (void *)tlv->value;
2551
2552	arp->src_ip4_addr = arg->src_ip4_addr;
2553	arp->dest_ip4_addr = arg->dest_ip4_addr;
2554	ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
2555
2556	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv sta keepalive vdev %d enabled %d method %d interval %d\n",
2557		   arg->vdev_id, arg->enabled, arg->method, arg->interval);
2558	return skb;
2559}
2560
2561static struct sk_buff *
2562ath10k_wmi_tlv_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
2563				  const u8 peer_addr[ETH_ALEN],
2564				  enum wmi_peer_type peer_type)
2565{
2566	struct wmi_tlv_peer_create_cmd *cmd;
2567	struct wmi_tlv *tlv;
2568	struct sk_buff *skb;
2569
2570	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2571	if (!skb)
2572		return ERR_PTR(-ENOMEM);
2573
2574	tlv = (void *)skb->data;
2575	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_CREATE_CMD);
2576	tlv->len = __cpu_to_le16(sizeof(*cmd));
2577	cmd = (void *)tlv->value;
2578	cmd->vdev_id = __cpu_to_le32(vdev_id);
2579	cmd->peer_type = __cpu_to_le32(peer_type);
2580	ether_addr_copy(cmd->peer_addr.addr, peer_addr);
2581
2582	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer create\n");
2583	return skb;
2584}
2585
2586static struct sk_buff *
2587ath10k_wmi_tlv_op_gen_peer_delete(struct ath10k *ar, u32 vdev_id,
2588				  const u8 peer_addr[ETH_ALEN])
2589{
2590	struct wmi_peer_delete_cmd *cmd;
2591	struct wmi_tlv *tlv;
2592	struct sk_buff *skb;
2593
2594	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2595	if (!skb)
2596		return ERR_PTR(-ENOMEM);
2597
2598	tlv = (void *)skb->data;
2599	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_DELETE_CMD);
2600	tlv->len = __cpu_to_le16(sizeof(*cmd));
2601	cmd = (void *)tlv->value;
2602	cmd->vdev_id = __cpu_to_le32(vdev_id);
2603	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
2604
2605	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer delete\n");
2606	return skb;
2607}
2608
2609static struct sk_buff *
2610ath10k_wmi_tlv_op_gen_peer_flush(struct ath10k *ar, u32 vdev_id,
2611				 const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
2612{
2613	struct wmi_peer_flush_tids_cmd *cmd;
2614	struct wmi_tlv *tlv;
2615	struct sk_buff *skb;
2616
2617	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2618	if (!skb)
2619		return ERR_PTR(-ENOMEM);
2620
2621	tlv = (void *)skb->data;
2622	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_FLUSH_TIDS_CMD);
2623	tlv->len = __cpu_to_le16(sizeof(*cmd));
2624	cmd = (void *)tlv->value;
2625	cmd->vdev_id = __cpu_to_le32(vdev_id);
2626	cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
2627	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
2628
2629	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer flush\n");
2630	return skb;
2631}
2632
2633static struct sk_buff *
2634ath10k_wmi_tlv_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id,
2635				     const u8 *peer_addr,
2636				     enum wmi_peer_param param_id,
2637				     u32 param_value)
2638{
2639	struct wmi_peer_set_param_cmd *cmd;
2640	struct wmi_tlv *tlv;
2641	struct sk_buff *skb;
2642
2643	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2644	if (!skb)
2645		return ERR_PTR(-ENOMEM);
2646
2647	tlv = (void *)skb->data;
2648	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_SET_PARAM_CMD);
2649	tlv->len = __cpu_to_le16(sizeof(*cmd));
2650	cmd = (void *)tlv->value;
2651	cmd->vdev_id = __cpu_to_le32(vdev_id);
2652	cmd->param_id = __cpu_to_le32(param_id);
2653	cmd->param_value = __cpu_to_le32(param_value);
2654	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
2655
2656	ath10k_dbg(ar, ATH10K_DBG_WMI,
2657		   "wmi tlv vdev %d peer %pM set param %d value 0x%x\n",
2658		   vdev_id, peer_addr, param_id, param_value);
2659	return skb;
2660}
2661
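/*
 * Build the peer assoc command: the fixed command TLV is followed by byte
 * arrays with the legacy and HT rate sets (each padded to a 4-byte
 * boundary) and a VHT rate set struct TLV.
 */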
2662static struct sk_buff *
2663ath10k_wmi_tlv_op_gen_peer_assoc(struct ath10k *ar,
2664				 const struct wmi_peer_assoc_complete_arg *arg)
2665{
2666	struct wmi_tlv_peer_assoc_cmd *cmd;
2667	struct wmi_vht_rate_set *vht_rate;
2668	struct wmi_tlv *tlv;
2669	struct sk_buff *skb;
2670	size_t len, legacy_rate_len, ht_rate_len;
2671	void *ptr;
2672
2673	if (arg->peer_mpdu_density > 16)
2674		return ERR_PTR(-EINVAL);
2675	if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
2676		return ERR_PTR(-EINVAL);
2677	if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
2678		return ERR_PTR(-EINVAL);
2679
2680	legacy_rate_len = roundup(arg->peer_legacy_rates.num_rates,
2681				  sizeof(__le32));
2682	ht_rate_len = roundup(arg->peer_ht_rates.num_rates, sizeof(__le32));
2683	len = (sizeof(*tlv) + sizeof(*cmd)) +
2684	      (sizeof(*tlv) + legacy_rate_len) +
2685	      (sizeof(*tlv) + ht_rate_len) +
2686	      (sizeof(*tlv) + sizeof(*vht_rate));
2687	skb = ath10k_wmi_alloc_skb(ar, len);
2688	if (!skb)
2689		return ERR_PTR(-ENOMEM);
2690
2691	ptr = (void *)skb->data;
2692	tlv = ptr;
2693	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_ASSOC_COMPLETE_CMD);
2694	tlv->len = __cpu_to_le16(sizeof(*cmd));
2695	cmd = (void *)tlv->value;
2696
2697	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
2698	cmd->new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
2699	cmd->assoc_id = __cpu_to_le32(arg->peer_aid);
2700	cmd->flags = __cpu_to_le32(arg->peer_flags);
2701	cmd->caps = __cpu_to_le32(arg->peer_caps);
2702	cmd->listen_intval = __cpu_to_le32(arg->peer_listen_intval);
2703	cmd->ht_caps = __cpu_to_le32(arg->peer_ht_caps);
2704	cmd->max_mpdu = __cpu_to_le32(arg->peer_max_mpdu);
2705	cmd->mpdu_density = __cpu_to_le32(arg->peer_mpdu_density);
2706	cmd->rate_caps = __cpu_to_le32(arg->peer_rate_caps);
2707	cmd->nss = __cpu_to_le32(arg->peer_num_spatial_streams);
2708	cmd->vht_caps = __cpu_to_le32(arg->peer_vht_caps);
2709	cmd->phy_mode = __cpu_to_le32(arg->peer_phymode);
2710	cmd->num_legacy_rates = __cpu_to_le32(arg->peer_legacy_rates.num_rates);
2711	cmd->num_ht_rates = __cpu_to_le32(arg->peer_ht_rates.num_rates);
2712	ether_addr_copy(cmd->mac_addr.addr, arg->addr);
2713
2714	ptr += sizeof(*tlv);
2715	ptr += sizeof(*cmd);
2716
2717	tlv = ptr;
2718	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
2719	tlv->len = __cpu_to_le16(legacy_rate_len);
2720	memcpy(tlv->value, arg->peer_legacy_rates.rates,
2721	       arg->peer_legacy_rates.num_rates);
2722
2723	ptr += sizeof(*tlv);
2724	ptr += legacy_rate_len;
2725
2726	tlv = ptr;
2727	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
2728	tlv->len = __cpu_to_le16(ht_rate_len);
2729	memcpy(tlv->value, arg->peer_ht_rates.rates,
2730	       arg->peer_ht_rates.num_rates);
2731
2732	ptr += sizeof(*tlv);
2733	ptr += ht_rate_len;
2734
2735	tlv = ptr;
2736	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VHT_RATE_SET);
2737	tlv->len = __cpu_to_le16(sizeof(*vht_rate));
2738	vht_rate = (void *)tlv->value;
2739
2740	vht_rate->rx_max_rate = __cpu_to_le32(arg->peer_vht_rates.rx_max_rate);
2741	vht_rate->rx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.rx_mcs_set);
2742	vht_rate->tx_max_rate = __cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
2743	vht_rate->tx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
2744
2745	ptr += sizeof(*tlv);
2746	ptr += sizeof(*vht_rate);
2747
2748	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer assoc\n");
2749	return skb;
2750}
2751
2752static struct sk_buff *
2753ath10k_wmi_tlv_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id,
2754				 enum wmi_sta_ps_mode psmode)
2755{
2756	struct wmi_sta_powersave_mode_cmd *cmd;
2757	struct wmi_tlv *tlv;
2758	struct sk_buff *skb;
2759
2760	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2761	if (!skb)
2762		return ERR_PTR(-ENOMEM);
2763
2764	tlv = (void *)skb->data;
2765	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_POWERSAVE_MODE_CMD);
2766	tlv->len = __cpu_to_le16(sizeof(*cmd));
2767	cmd = (void *)tlv->value;
2768	cmd->vdev_id = __cpu_to_le32(vdev_id);
2769	cmd->sta_ps_mode = __cpu_to_le32(psmode);
2770
2771	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set psmode\n");
2772	return skb;
2773}
2774
2775static struct sk_buff *
2776ath10k_wmi_tlv_op_gen_set_sta_ps(struct ath10k *ar, u32 vdev_id,
2777				 enum wmi_sta_powersave_param param_id,
2778				 u32 param_value)
2779{
2780	struct wmi_sta_powersave_param_cmd *cmd;
2781	struct wmi_tlv *tlv;
2782	struct sk_buff *skb;
2783
2784	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2785	if (!skb)
2786		return ERR_PTR(-ENOMEM);
2787
2788	tlv = (void *)skb->data;
2789	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_POWERSAVE_PARAM_CMD);
2790	tlv->len = __cpu_to_le16(sizeof(*cmd));
2791	cmd = (void *)tlv->value;
2792	cmd->vdev_id = __cpu_to_le32(vdev_id);
2793	cmd->param_id = __cpu_to_le32(param_id);
2794	cmd->param_value = __cpu_to_le32(param_value);
2795
2796	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set sta ps\n");
2797	return skb;
2798}
2799
2800static struct sk_buff *
2801ath10k_wmi_tlv_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac,
2802				enum wmi_ap_ps_peer_param param_id, u32 value)
2803{
2804	struct wmi_ap_ps_peer_cmd *cmd;
2805	struct wmi_tlv *tlv;
2806	struct sk_buff *skb;
2807
2808	if (!mac)
2809		return ERR_PTR(-EINVAL);
2810
2811	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2812	if (!skb)
2813		return ERR_PTR(-ENOMEM);
2814
2815	tlv = (void *)skb->data;
2816	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_AP_PS_PEER_CMD);
2817	tlv->len = __cpu_to_le16(sizeof(*cmd));
2818	cmd = (void *)tlv->value;
2819	cmd->vdev_id = __cpu_to_le32(vdev_id);
2820	cmd->param_id = __cpu_to_le32(param_id);
2821	cmd->param_value = __cpu_to_le32(value);
2822	ether_addr_copy(cmd->peer_macaddr.addr, mac);
2823
2824	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv ap ps param\n");
2825	return skb;
2826}
2827
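/*
 * Build the scan channel list: a fixed command TLV followed by an
 * ARRAY_STRUCT TLV which in turn carries one CHANNEL TLV per channel.
 */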
2828static struct sk_buff *
2829ath10k_wmi_tlv_op_gen_scan_chan_list(struct ath10k *ar,
2830				     const struct wmi_scan_chan_list_arg *arg)
2831{
2832	struct wmi_tlv_scan_chan_list_cmd *cmd;
2833	struct wmi_channel *ci;
2834	struct wmi_channel_arg *ch;
2835	struct wmi_tlv *tlv;
2836	struct sk_buff *skb;
2837	size_t chans_len, len;
2838	int i;
2839	void *ptr, *chans;
2840
2841	chans_len = arg->n_channels * (sizeof(*tlv) + sizeof(*ci));
2842	len = (sizeof(*tlv) + sizeof(*cmd)) +
2843	      (sizeof(*tlv) + chans_len);
2844
2845	skb = ath10k_wmi_alloc_skb(ar, len);
2846	if (!skb)
2847		return ERR_PTR(-ENOMEM);
2848
2849	ptr = (void *)skb->data;
2850	tlv = ptr;
2851	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_SCAN_CHAN_LIST_CMD);
2852	tlv->len = __cpu_to_le16(sizeof(*cmd));
2853	cmd = (void *)tlv->value;
2854	cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);
2855
2856	ptr += sizeof(*tlv);
2857	ptr += sizeof(*cmd);
2858
2859	tlv = ptr;
2860	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
2861	tlv->len = __cpu_to_le16(chans_len);
2862	chans = (void *)tlv->value;
2863
2864	for (i = 0; i < arg->n_channels; i++) {
2865		ch = &arg->channels[i];
2866
2867		tlv = chans;
2868		tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
2869		tlv->len = __cpu_to_le16(sizeof(*ci));
2870		ci = (void *)tlv->value;
2871
2872		ath10k_wmi_put_wmi_channel(ar, ci, ch);
2873
2874		chans += sizeof(*tlv);
2875		chans += sizeof(*ci);
2876	}
2877
2878	ptr += sizeof(*tlv);
2879	ptr += chans_len;
2880
2881	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv scan chan list\n");
2882	return skb;
2883}
2884
2885static struct sk_buff *
2886ath10k_wmi_tlv_op_gen_scan_prob_req_oui(struct ath10k *ar, u32 prob_req_oui)
2887{
2888	struct wmi_scan_prob_req_oui_cmd *cmd;
2889	struct wmi_tlv *tlv;
2890	struct sk_buff *skb;
2891
2892	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2893	if (!skb)
2894		return ERR_PTR(-ENOMEM);
2895
2896	tlv = (void *)skb->data;
2897	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_SCAN_PROB_REQ_OUI_CMD);
2898	tlv->len = __cpu_to_le16(sizeof(*cmd));
2899	cmd = (void *)tlv->value;
2900	cmd->prob_req_oui = __cpu_to_le32(prob_req_oui);
2901
2902	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv scan prob req oui\n");
2903	return skb;
2904}
2905
2906static struct sk_buff *
2907ath10k_wmi_tlv_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id,
2908				 const void *bcn, size_t bcn_len,
2909				 u32 bcn_paddr, bool dtim_zero,
2910				 bool deliver_cab)
2912{
2913	struct wmi_bcn_tx_ref_cmd *cmd;
2914	struct wmi_tlv *tlv;
2915	struct sk_buff *skb;
2916	struct ieee80211_hdr *hdr;
2917	u16 fc;
2918
2919	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2920	if (!skb)
2921		return ERR_PTR(-ENOMEM);
2922
2923	hdr = (struct ieee80211_hdr *)bcn;
2924	fc = le16_to_cpu(hdr->frame_control);
2925
2926	tlv = (void *)skb->data;
2927	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_SEND_FROM_HOST_CMD);
2928	tlv->len = __cpu_to_le16(sizeof(*cmd));
2929	cmd = (void *)tlv->value;
2930	cmd->vdev_id = __cpu_to_le32(vdev_id);
2931	cmd->data_len = __cpu_to_le32(bcn_len);
2932	cmd->data_ptr = __cpu_to_le32(bcn_paddr);
2933	cmd->msdu_id = 0;
2934	cmd->frame_control = __cpu_to_le32(fc);
2935	cmd->flags = 0;
2936
2937	if (dtim_zero)
2938		cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);
2939
2940	if (deliver_cab)
2941		cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);
2942
2943	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv beacon dma\n");
2944	return skb;
2945}
2946
2947static struct sk_buff *
2948ath10k_wmi_tlv_op_gen_pdev_set_wmm(struct ath10k *ar,
2949				   const struct wmi_wmm_params_all_arg *arg)
2950{
2951	struct wmi_tlv_pdev_set_wmm_cmd *cmd;
2952	struct wmi_wmm_params *wmm;
2953	struct wmi_tlv *tlv;
2954	struct sk_buff *skb;
2955	size_t len;
2956	void *ptr;
2957
2958	len = (sizeof(*tlv) + sizeof(*cmd)) +
2959	      (4 * (sizeof(*tlv) + sizeof(*wmm)));
2960	skb = ath10k_wmi_alloc_skb(ar, len);
2961	if (!skb)
2962		return ERR_PTR(-ENOMEM);
2963
2964	ptr = (void *)skb->data;
2965
2966	tlv = ptr;
2967	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_WMM_PARAMS_CMD);
2968	tlv->len = __cpu_to_le16(sizeof(*cmd));
2969	cmd = (void *)tlv->value;
2970
2971	/* nothing to set here */
2972
2973	ptr += sizeof(*tlv);
2974	ptr += sizeof(*cmd);
2975
2976	ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_be);
2977	ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_bk);
2978	ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vi);
2979	ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vo);
2980
2981	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set wmm\n");
2982	return skb;
2983}
2984
2985static struct sk_buff *
2986ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
2987{
2988	struct wmi_request_stats_cmd *cmd;
2989	struct wmi_tlv *tlv;
2990	struct sk_buff *skb;
2991
2992	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2993	if (!skb)
2994		return ERR_PTR(-ENOMEM);
2995
2996	tlv = (void *)skb->data;
2997	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_REQUEST_STATS_CMD);
2998	tlv->len = __cpu_to_le16(sizeof(*cmd));
2999	cmd = (void *)tlv->value;
3000	cmd->stats_id = __cpu_to_le32(stats_mask);
3001
3002	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv request stats\n");
3003	return skb;
3004}
3005
3006static struct sk_buff *
3007ath10k_wmi_tlv_op_gen_request_peer_stats_info(struct ath10k *ar,
3008					      u32 vdev_id,
3009					      enum wmi_peer_stats_info_request_type type,
3010					      u8 *addr,
3011					      u32 reset)
3012{
3013	struct wmi_tlv_request_peer_stats_info *cmd;
3014	struct wmi_tlv *tlv;
3015	struct sk_buff *skb;
3016
3017	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
3018	if (!skb)
3019		return ERR_PTR(-ENOMEM);
3020
3021	tlv = (void *)skb->data;
3022	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_REQUEST_PEER_STATS_INFO_CMD);
3023	tlv->len = __cpu_to_le16(sizeof(*cmd));
3024	cmd = (void *)tlv->value;
3025	cmd->vdev_id = __cpu_to_le32(vdev_id);
3026	cmd->request_type = __cpu_to_le32(type);
3027
3028	if (type == WMI_REQUEST_ONE_PEER_STATS_INFO)
3029		ether_addr_copy(cmd->peer_macaddr.addr, addr);
3030
3031	cmd->reset_after_request = __cpu_to_le32(reset);
3032	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv request peer stats info\n");
3033	return skb;
3034}
3035
3036static int
3037ath10k_wmi_tlv_op_cleanup_mgmt_tx_send(struct ath10k *ar,
3038				       struct sk_buff *msdu)
3039{
3040	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
3041	struct ath10k_wmi *wmi = &ar->wmi;
3042
3043	idr_remove(&wmi->mgmt_pending_tx, cb->msdu_id);
3044
3045	return 0;
3046}
3047
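/*
 * Allocate an msdu_id for a pending management frame and remember its
 * skb and DMA address in the mgmt_pending_tx IDR. The returned id is
 * placed into the mgmt tx command as desc_id so the frame can be looked
 * up again on tx completion.
 */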
3048static int
3049ath10k_wmi_mgmt_tx_alloc_msdu_id(struct ath10k *ar, struct sk_buff *skb,
3050				 dma_addr_t paddr)
3051{
3052	struct ath10k_wmi *wmi = &ar->wmi;
3053	struct ath10k_mgmt_tx_pkt_addr *pkt_addr;
3054	int ret;
3055
3056	pkt_addr = kmalloc(sizeof(*pkt_addr), GFP_ATOMIC);
3057	if (!pkt_addr)
3058		return -ENOMEM;
3059
3060	pkt_addr->vaddr = skb;
3061	pkt_addr->paddr = paddr;
3062
3063	spin_lock_bh(&ar->data_lock);
3064	ret = idr_alloc(&wmi->mgmt_pending_tx, pkt_addr, 0,
3065			wmi->mgmt_max_num_pending_tx, GFP_ATOMIC);
3066	spin_unlock_bh(&ar->data_lock);
3067
3068	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx alloc msdu_id ret %d\n", ret);
3069	return ret;
3070}
3071
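/*
 * Build the management tx-by-reference command. The command TLV carries
 * the DMA address and length of @msdu and is followed by a byte-array TLV
 * holding a copy of (at most WMI_TLV_MGMT_TX_FRAME_MAX_LEN bytes of) the
 * frame. Protected action/deauth/disassoc frames get extra room reserved
 * for the CCMP MIC.
 */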
3072static struct sk_buff *
3073ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
3074				   dma_addr_t paddr)
3075{
3076	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
3077	struct wmi_tlv_mgmt_tx_cmd *cmd;
3078	struct ieee80211_hdr *hdr;
3079	struct ath10k_vif *arvif;
3080	u32 buf_len = msdu->len;
3081	struct wmi_tlv *tlv;
3082	struct sk_buff *skb;
3083	int len, desc_id;
3084	u32 vdev_id;
3085	void *ptr;
3086
3087	if (!cb->vif)
3088		return ERR_PTR(-EINVAL);
3089
3090	hdr = (struct ieee80211_hdr *)msdu->data;
3091	arvif = (void *)cb->vif->drv_priv;
3092	vdev_id = arvif->vdev_id;
3093
3094	if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control) &&
3095			 (!(ieee80211_is_nullfunc(hdr->frame_control) ||
3096			 ieee80211_is_qos_nullfunc(hdr->frame_control)))))
3097		return ERR_PTR(-EINVAL);
3098
3099	len = sizeof(*cmd) + 2 * sizeof(*tlv);
3100
3101	if ((ieee80211_is_action(hdr->frame_control) ||
3102	     ieee80211_is_deauth(hdr->frame_control) ||
3103	     ieee80211_is_disassoc(hdr->frame_control)) &&
3104	     ieee80211_has_protected(hdr->frame_control)) {
3105		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
3106		buf_len += IEEE80211_CCMP_MIC_LEN;
3107	}
3108
3109	buf_len = min_t(u32, buf_len, WMI_TLV_MGMT_TX_FRAME_MAX_LEN);
3110	buf_len = round_up(buf_len, 4);
3111
3112	len += buf_len;
3113	len = round_up(len, 4);
3114	skb = ath10k_wmi_alloc_skb(ar, len);
3115	if (!skb)
3116		return ERR_PTR(-ENOMEM);
3117
3118	desc_id = ath10k_wmi_mgmt_tx_alloc_msdu_id(ar, msdu, paddr);
3119	if (desc_id < 0)
3120		goto err_free_skb;
3121
3122	cb->msdu_id = desc_id;
3123
3124	ptr = (void *)skb->data;
3125	tlv = ptr;
3126	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_MGMT_TX_CMD);
3127	tlv->len = __cpu_to_le16(sizeof(*cmd));
3128	cmd = (void *)tlv->value;
3129	cmd->vdev_id = __cpu_to_le32(vdev_id);
3130	cmd->desc_id = __cpu_to_le32(desc_id);
3131	cmd->chanfreq = 0;
3132	cmd->buf_len = __cpu_to_le32(buf_len);
3133	cmd->frame_len = __cpu_to_le32(msdu->len);
3134	cmd->paddr = __cpu_to_le64(paddr);
3135
3136	ptr += sizeof(*tlv);
3137	ptr += sizeof(*cmd);
3138
3139	tlv = ptr;
3140	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
3141	tlv->len = __cpu_to_le16(buf_len);
3142
3143	ptr += sizeof(*tlv);
3144	memcpy(ptr, msdu->data, buf_len);
3145
3146	return skb;
3147
3148err_free_skb:
3149	dev_kfree_skb(skb);
3150	return ERR_PTR(desc_id);
3151}
3152
3153static struct sk_buff *
3154ath10k_wmi_tlv_op_gen_force_fw_hang(struct ath10k *ar,
3155				    enum wmi_force_fw_hang_type type,
3156				    u32 delay_ms)
3157{
3158	struct wmi_force_fw_hang_cmd *cmd;
3159	struct wmi_tlv *tlv;
3160	struct sk_buff *skb;
3161
3162	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
3163	if (!skb)
3164		return ERR_PTR(-ENOMEM);
3165
3166	tlv = (void *)skb->data;
3167	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_FORCE_FW_HANG_CMD);
3168	tlv->len = __cpu_to_le16(sizeof(*cmd));
3169	cmd = (void *)tlv->value;
3170	cmd->type = __cpu_to_le32(type);
3171	cmd->delay_ms = __cpu_to_le32(delay_ms);
3172
3173	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv force fw hang\n");
3174	return skb;
3175}
3176
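/*
 * Configure the firmware debug log level. A non-zero module mask requests
 * verbose logging for those modules, otherwise all modules are set to
 * warning level. The trailing UINT32 module bitmap TLV is left empty.
 */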
3177static struct sk_buff *
3178ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
3179				 u32 log_level)
3180{
3181	struct wmi_tlv_dbglog_cmd *cmd;
3182	struct wmi_tlv *tlv;
3183	struct sk_buff *skb;
3184	size_t len, bmap_len;
3185	u32 value;
3186	void *ptr;
3187
3188	if (module_enable) {
3189		value = WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(
3190				module_enable,
3191				WMI_TLV_DBGLOG_LOG_LEVEL_VERBOSE);
3192	} else {
3193		value = WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(
3194				WMI_TLV_DBGLOG_ALL_MODULES,
3195				WMI_TLV_DBGLOG_LOG_LEVEL_WARN);
3196	}
3197
3198	bmap_len = 0;
3199	len = sizeof(*tlv) + sizeof(*cmd) + sizeof(*tlv) + bmap_len;
3200	skb = ath10k_wmi_alloc_skb(ar, len);
3201	if (!skb)
3202		return ERR_PTR(-ENOMEM);
3203
3204	ptr = (void *)skb->data;
3205
3206	tlv = ptr;
3207	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_DEBUG_LOG_CONFIG_CMD);
3208	tlv->len = __cpu_to_le16(sizeof(*cmd));
3209	cmd = (void *)tlv->value;
3210	cmd->param = __cpu_to_le32(WMI_TLV_DBGLOG_PARAM_LOG_LEVEL);
3211	cmd->value = __cpu_to_le32(value);
3212
3213	ptr += sizeof(*tlv);
3214	ptr += sizeof(*cmd);
3215
3216	tlv = ptr;
3217	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
3218	tlv->len = __cpu_to_le16(bmap_len);
3219
3220	/* nothing to do here */
3221
3222	ptr += sizeof(*tlv);
3223	ptr += sizeof(bmap_len);
3224
3225	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv dbglog value 0x%08x\n", value);
3226	return skb;
3227}
3228
3229static struct sk_buff *
3230ath10k_wmi_tlv_op_gen_pktlog_enable(struct ath10k *ar, u32 filter)
3231{
3232	struct wmi_tlv_pktlog_enable *cmd;
3233	struct wmi_tlv *tlv;
3234	struct sk_buff *skb;
3235	void *ptr;
3236	size_t len;
3237
3238	len = sizeof(*tlv) + sizeof(*cmd);
3239	skb = ath10k_wmi_alloc_skb(ar, len);
3240	if (!skb)
3241		return ERR_PTR(-ENOMEM);
3242
3243	ptr = (void *)skb->data;
3244	tlv = ptr;
3245	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_ENABLE_CMD);
3246	tlv->len = __cpu_to_le16(sizeof(*cmd));
3247	cmd = (void *)tlv->value;
3248	cmd->filter = __cpu_to_le32(filter);
3249
3250	ptr += sizeof(*tlv);
3251	ptr += sizeof(*cmd);
3252
3253	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pktlog enable filter 0x%08x\n",
3254		   filter);
3255	return skb;
3256}
3257
3258static struct sk_buff *
3259ath10k_wmi_tlv_op_gen_pdev_get_temperature(struct ath10k *ar)
3260{
3261	struct wmi_tlv_pdev_get_temp_cmd *cmd;
3262	struct wmi_tlv *tlv;
3263	struct sk_buff *skb;
3264
3265	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
3266	if (!skb)
3267		return ERR_PTR(-ENOMEM);
3268
3269	tlv = (void *)skb->data;
3270	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_GET_TEMPERATURE_CMD);
3271	tlv->len = __cpu_to_le16(sizeof(*cmd));
3272	cmd = (void *)tlv->value;
3273	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev get temperature tlv\n");
3274	return skb;
3275}
3276
3277static struct sk_buff *
3278ath10k_wmi_tlv_op_gen_pktlog_disable(struct ath10k *ar)
3279{
3280	struct wmi_tlv_pktlog_disable *cmd;
3281	struct wmi_tlv *tlv;
3282	struct sk_buff *skb;
3283	void *ptr;
3284	size_t len;
3285
3286	len = sizeof(*tlv) + sizeof(*cmd);
3287	skb = ath10k_wmi_alloc_skb(ar, len);
3288	if (!skb)
3289		return ERR_PTR(-ENOMEM);
3290
3291	ptr = (void *)skb->data;
3292	tlv = ptr;
3293	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_DISABLE_CMD);
3294	tlv->len = __cpu_to_le16(sizeof(*cmd));
3295	cmd = (void *)tlv->value;
3296
3297	ptr += sizeof(*tlv);
3298	ptr += sizeof(*cmd);
3299
3300	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pktlog disable\n");
3301	return skb;
3302}
3303
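/*
 * Upload a beacon template: a fixed command TLV, a probe response info TLV
 * (caps/ERP plus optional IEs) and a byte array with the beacon itself,
 * padded to a 4-byte boundary.
 */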
3304static struct sk_buff *
3305ath10k_wmi_tlv_op_gen_bcn_tmpl(struct ath10k *ar, u32 vdev_id,
3306			       u32 tim_ie_offset, struct sk_buff *bcn,
3307			       u32 prb_caps, u32 prb_erp, void *prb_ies,
3308			       size_t prb_ies_len)
3309{
3310	struct wmi_tlv_bcn_tmpl_cmd *cmd;
3311	struct wmi_tlv_bcn_prb_info *info;
3312	struct wmi_tlv *tlv;
3313	struct sk_buff *skb;
3314	void *ptr;
3315	size_t len;
3316
3317	if (WARN_ON(prb_ies_len > 0 && !prb_ies))
3318		return ERR_PTR(-EINVAL);
3319
3320	len = sizeof(*tlv) + sizeof(*cmd) +
3321	      sizeof(*tlv) + sizeof(*info) + prb_ies_len +
3322	      sizeof(*tlv) + roundup(bcn->len, 4);
3323	skb = ath10k_wmi_alloc_skb(ar, len);
3324	if (!skb)
3325		return ERR_PTR(-ENOMEM);
3326
3327	ptr = (void *)skb->data;
3328	tlv = ptr;
3329	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_TMPL_CMD);
3330	tlv->len = __cpu_to_le16(sizeof(*cmd));
3331	cmd = (void *)tlv->value;
3332	cmd->vdev_id = __cpu_to_le32(vdev_id);
3333	cmd->tim_ie_offset = __cpu_to_le32(tim_ie_offset);
3334	cmd->buf_len = __cpu_to_le32(bcn->len);
3335
3336	ptr += sizeof(*tlv);
3337	ptr += sizeof(*cmd);
3338
3339	/* FIXME: prb_ies_len should probably be aligned to a 4-byte boundary,
3340	 * but then it would be impossible to pass the original IE length.
3341	 * This chunk is not used yet, so if setting the probe response template
3342	 * causes problems with beaconing or crashes the firmware, look here.
3343	 */
3344	tlv = ptr;
3345	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
3346	tlv->len = __cpu_to_le16(sizeof(*info) + prb_ies_len);
3347	info = (void *)tlv->value;
3348	info->caps = __cpu_to_le32(prb_caps);
3349	info->erp = __cpu_to_le32(prb_erp);
3350	memcpy(info->ies, prb_ies, prb_ies_len);
3351
3352	ptr += sizeof(*tlv);
3353	ptr += sizeof(*info);
3354	ptr += prb_ies_len;
3355
3356	tlv = ptr;
3357	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
3358	tlv->len = __cpu_to_le16(roundup(bcn->len, 4));
3359	memcpy(tlv->value, bcn->data, bcn->len);
3360
3361	/* FIXME: Adjust TSF? */
3362
3363	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv bcn tmpl vdev_id %i\n",
3364		   vdev_id);
3365	return skb;
3366}
3367
3368static struct sk_buff *
3369ath10k_wmi_tlv_op_gen_prb_tmpl(struct ath10k *ar, u32 vdev_id,
3370			       struct sk_buff *prb)
3371{
3372	struct wmi_tlv_prb_tmpl_cmd *cmd;
3373	struct wmi_tlv_bcn_prb_info *info;
3374	struct wmi_tlv *tlv;
3375	struct sk_buff *skb;
3376	void *ptr;
3377	size_t len;
3378
3379	len = sizeof(*tlv) + sizeof(*cmd) +
3380	      sizeof(*tlv) + sizeof(*info) +
3381	      sizeof(*tlv) + roundup(prb->len, 4);
3382	skb = ath10k_wmi_alloc_skb(ar, len);
3383	if (!skb)
3384		return ERR_PTR(-ENOMEM);
3385
3386	ptr = (void *)skb->data;
3387	tlv = ptr;
3388	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PRB_TMPL_CMD);
3389	tlv->len = __cpu_to_le16(sizeof(*cmd));
3390	cmd = (void *)tlv->value;
3391	cmd->vdev_id = __cpu_to_le32(vdev_id);
3392	cmd->buf_len = __cpu_to_le32(prb->len);
3393
3394	ptr += sizeof(*tlv);
3395	ptr += sizeof(*cmd);
3396
3397	tlv = ptr;
3398	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
3399	tlv->len = __cpu_to_le16(sizeof(*info));
3400	info = (void *)tlv->value;
3401	info->caps = 0;
3402	info->erp = 0;
3403
3404	ptr += sizeof(*tlv);
3405	ptr += sizeof(*info);
3406
3407	tlv = ptr;
3408	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
3409	tlv->len = __cpu_to_le16(roundup(prb->len, 4));
3410	memcpy(tlv->value, prb->data, prb->len);
3411
3412	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv prb tmpl vdev_id %i\n",
3413		   vdev_id);
3414	return skb;
3415}
3416
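/*
 * Install the P2P IE for a GO beacon. @p2p_ie points at a raw IE, so
 * p2p_ie[1] is the IE payload length and p2p_ie[1] + 2 also covers the id
 * and length octets; the copy is padded to a 4-byte boundary.
 */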
3417static struct sk_buff *
3418ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id,
3419				    const u8 *p2p_ie)
3420{
3421	struct wmi_tlv_p2p_go_bcn_ie *cmd;
3422	struct wmi_tlv *tlv;
3423	struct sk_buff *skb;
3424	void *ptr;
3425	size_t len;
3426
3427	len = sizeof(*tlv) + sizeof(*cmd) +
3428	      sizeof(*tlv) + roundup(p2p_ie[1] + 2, 4);
3429	skb = ath10k_wmi_alloc_skb(ar, len);
3430	if (!skb)
3431		return ERR_PTR(-ENOMEM);
3432
3433	ptr = (void *)skb->data;
3434	tlv = ptr;
3435	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_P2P_GO_SET_BEACON_IE);
3436	tlv->len = __cpu_to_le16(sizeof(*cmd));
3437	cmd = (void *)tlv->value;
3438	cmd->vdev_id = __cpu_to_le32(vdev_id);
3439	cmd->ie_len = __cpu_to_le32(p2p_ie[1] + 2);
3440
3441	ptr += sizeof(*tlv);
3442	ptr += sizeof(*cmd);
3443
3444	tlv = ptr;
3445	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
3446	tlv->len = __cpu_to_le16(roundup(p2p_ie[1] + 2, 4));
3447	memcpy(tlv->value, p2p_ie, p2p_ie[1] + 2);
3448
3449	ptr += sizeof(*tlv);
3450	ptr += roundup(p2p_ie[1] + 2, 4);
3451
3452	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv p2p go bcn ie for vdev %i\n",
3453		   vdev_id);
3454	return skb;
3455}
3456
3457static struct sk_buff *
3458ath10k_wmi_tlv_op_gen_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
3459					   enum wmi_tdls_state state)
3460{
3461	struct wmi_tdls_set_state_cmd *cmd;
3462	struct wmi_tlv *tlv;
3463	struct sk_buff *skb;
3464	void *ptr;
3465	size_t len;
3466	/* Bitmap of wmi_tlv_tdls_options flags;
3467	 * for now none of them are enabled.
3468	 */
3469	u32 options = 0;
3470
3471	if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, ar->wmi.svc_map))
3472		options |=  WMI_TLV_TDLS_BUFFER_STA_EN;
3473
3474	/* WMI_TDLS_ENABLE_ACTIVE_EXTERNAL_CONTROL means the firmware will
3475	 * handle the TDLS link inactivity detection logic.
3476	 */
3477	if (state == WMI_TDLS_ENABLE_ACTIVE)
3478		state = WMI_TDLS_ENABLE_ACTIVE_EXTERNAL_CONTROL;
3479
3480	len = sizeof(*tlv) + sizeof(*cmd);
3481	skb = ath10k_wmi_alloc_skb(ar, len);
3482	if (!skb)
3483		return ERR_PTR(-ENOMEM);
3484
3485	ptr = (void *)skb->data;
3486	tlv = ptr;
3487	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_SET_STATE_CMD);
3488	tlv->len = __cpu_to_le16(sizeof(*cmd));
3489
3490	cmd = (void *)tlv->value;
3491	cmd->vdev_id = __cpu_to_le32(vdev_id);
3492	cmd->state = __cpu_to_le32(state);
3493	cmd->notification_interval_ms = __cpu_to_le32(5000);
3494	cmd->tx_discovery_threshold = __cpu_to_le32(100);
3495	cmd->tx_teardown_threshold = __cpu_to_le32(5);
3496	cmd->rssi_teardown_threshold = __cpu_to_le32(-75);
3497	cmd->rssi_delta = __cpu_to_le32(-20);
3498	cmd->tdls_options = __cpu_to_le32(options);
3499	cmd->tdls_peer_traffic_ind_window = __cpu_to_le32(2);
3500	cmd->tdls_peer_traffic_response_timeout_ms = __cpu_to_le32(5000);
3501	cmd->tdls_puapsd_mask = __cpu_to_le32(0xf);
3502	cmd->tdls_puapsd_inactivity_time_ms = __cpu_to_le32(0);
3503	cmd->tdls_puapsd_rx_frame_threshold = __cpu_to_le32(10);
3504
3505	ptr += sizeof(*tlv);
3506	ptr += sizeof(*cmd);
3507
3508	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv update fw tdls state %d for vdev %i\n",
3509		   state, vdev_id);
3510	return skb;
3511}
3512
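/*
 * Translate the mac80211 U-APSD queue flags and max service period into
 * the peer_qos bitmask carried in wmi_tdls_peer_capab.
 */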
3513static u32 ath10k_wmi_tlv_prepare_peer_qos(u8 uapsd_queues, u8 sp)
3514{
3515	u32 peer_qos = 0;
3516
3517	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
3518		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VO;
3519	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
3520		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VI;
3521	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
3522		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BK;
3523	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
3524		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BE;
3525
3526	peer_qos |= SM(sp, WMI_TLV_TDLS_PEER_SP);
3527
3528	return peer_qos;
3529}
3530
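/*
 * Build the TDLS peer update command: the fixed command TLV is followed by
 * a peer capabilities TLV and an ARRAY_STRUCT TLV carrying one CHANNEL TLV
 * per peer channel.
 */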
3531static struct sk_buff *
3532ath10k_wmi_tlv_op_gen_tdls_peer_update(struct ath10k *ar,
3533				       const struct wmi_tdls_peer_update_cmd_arg *arg,
3534				       const struct wmi_tdls_peer_capab_arg *cap,
3535				       const struct wmi_channel_arg *chan_arg)
3536{
3537	struct wmi_tdls_peer_update_cmd *cmd;
3538	struct wmi_tdls_peer_capab *peer_cap;
3539	struct wmi_channel *chan;
3540	struct wmi_tlv *tlv;
3541	struct sk_buff *skb;
3542	u32 peer_qos;
3543	void *ptr;
3544	int len;
3545	int i;
3546
3547	len = sizeof(*tlv) + sizeof(*cmd) +
3548	      sizeof(*tlv) + sizeof(*peer_cap) +
3549	      sizeof(*tlv) + cap->peer_chan_len * sizeof(*chan);
3550
3551	skb = ath10k_wmi_alloc_skb(ar, len);
3552	if (!skb)
3553		return ERR_PTR(-ENOMEM);
3554
3555	ptr = (void *)skb->data;
3556	tlv = ptr;
3557	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_UPDATE_CMD);
3558	tlv->len = __cpu_to_le16(sizeof(*cmd));
3559
3560	cmd = (void *)tlv->value;
3561	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
3562	ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
3563	cmd->peer_state = __cpu_to_le32(arg->peer_state);
3564
3565	ptr += sizeof(*tlv);
3566	ptr += sizeof(*cmd);
3567
3568	tlv = ptr;
3569	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_CAPABILITIES);
3570	tlv->len = __cpu_to_le16(sizeof(*peer_cap));
3571	peer_cap = (void *)tlv->value;
3572	peer_qos = ath10k_wmi_tlv_prepare_peer_qos(cap->peer_uapsd_queues,
3573						   cap->peer_max_sp);
3574	peer_cap->peer_qos = __cpu_to_le32(peer_qos);
3575	peer_cap->buff_sta_support = __cpu_to_le32(cap->buff_sta_support);
3576	peer_cap->off_chan_support = __cpu_to_le32(cap->off_chan_support);
3577	peer_cap->peer_curr_operclass = __cpu_to_le32(cap->peer_curr_operclass);
3578	peer_cap->self_curr_operclass = __cpu_to_le32(cap->self_curr_operclass);
3579	peer_cap->peer_chan_len = __cpu_to_le32(cap->peer_chan_len);
3580	peer_cap->peer_operclass_len = __cpu_to_le32(cap->peer_operclass_len);
3581
3582	for (i = 0; i < WMI_TDLS_MAX_SUPP_OPER_CLASSES; i++)
3583		peer_cap->peer_operclass[i] = cap->peer_operclass[i];
3584
3585	peer_cap->is_peer_responder = __cpu_to_le32(cap->is_peer_responder);
3586	peer_cap->pref_offchan_num = __cpu_to_le32(cap->pref_offchan_num);
3587	peer_cap->pref_offchan_bw = __cpu_to_le32(cap->pref_offchan_bw);
3588
3589	ptr += sizeof(*tlv);
3590	ptr += sizeof(*peer_cap);
3591
3592	tlv = ptr;
3593	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
3594	tlv->len = __cpu_to_le16(cap->peer_chan_len * sizeof(*chan));
3595
3596	ptr += sizeof(*tlv);
3597
3598	for (i = 0; i < cap->peer_chan_len; i++) {
3599		tlv = ptr;
3600		tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
3601		tlv->len = __cpu_to_le16(sizeof(*chan));
3602		chan = (void *)tlv->value;
3603		ath10k_wmi_put_wmi_channel(ar, chan, &chan_arg[i]);
3604
3605		ptr += sizeof(*tlv);
3606		ptr += sizeof(*chan);
3607	}
3608
3609	ath10k_dbg(ar, ATH10K_DBG_WMI,
3610		   "wmi tlv tdls peer update vdev %i state %d n_chans %u\n",
3611		   arg->vdev_id, arg->peer_state, cap->peer_chan_len);
3612	return skb;
3613}
3614
3615static struct sk_buff *
3616ath10k_wmi_tlv_op_gen_pdev_set_quiet_mode(struct ath10k *ar, u32 period,
3617					  u32 duration, u32 next_offset,
3618					  u32 enabled)
3619{
3620	struct wmi_tlv_set_quiet_cmd *cmd;
3621	struct wmi_tlv *tlv;
3622	struct sk_buff *skb;
3623
3624	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
3625	if (!skb)
3626		return ERR_PTR(-ENOMEM);
3627
3628	tlv = (void *)skb->data;
3629	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_QUIET_CMD);
3630	tlv->len = __cpu_to_le16(sizeof(*cmd));
3631	cmd = (void *)tlv->value;
3632
3633	/* vdev_id is not in use, set to 0 */
3634	cmd->vdev_id = __cpu_to_le32(0);
3635	cmd->period = __cpu_to_le32(period);
3636	cmd->duration = __cpu_to_le32(duration);
3637	cmd->next_start = __cpu_to_le32(next_offset);
3638	cmd->enabled = __cpu_to_le32(enabled);
3639
3640	ath10k_dbg(ar, ATH10K_DBG_WMI,
3641		   "wmi tlv quiet param: period %u duration %u enabled %d\n",
3642		   period, duration, enabled);
3643	return skb;
3644}
3645
3646static struct sk_buff *
3647ath10k_wmi_tlv_op_gen_wow_enable(struct ath10k *ar)
3648{
3649	struct wmi_tlv_wow_enable_cmd *cmd;
3650	struct wmi_tlv *tlv;
3651	struct sk_buff *skb;
3652	size_t len;
3653
3654	len = sizeof(*tlv) + sizeof(*cmd);
3655	skb = ath10k_wmi_alloc_skb(ar, len);
3656	if (!skb)
3657		return ERR_PTR(-ENOMEM);
3658
3659	tlv = (struct wmi_tlv *)skb->data;
3660	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ENABLE_CMD);
3661	tlv->len = __cpu_to_le16(sizeof(*cmd));
3662	cmd = (void *)tlv->value;
3663
3664	cmd->enable = __cpu_to_le32(1);
3665	if (!ar->bus_param.link_can_suspend)
3666		cmd->pause_iface_config = __cpu_to_le32(WOW_IFACE_PAUSE_DISABLED);
3667
3668	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow enable\n");
3669	return skb;
3670}
3671
3672static struct sk_buff *
3673ath10k_wmi_tlv_op_gen_wow_add_wakeup_event(struct ath10k *ar,
3674					   u32 vdev_id,
3675					   enum wmi_wow_wakeup_event event,
3676					   u32 enable)
3677{
3678	struct wmi_tlv_wow_add_del_event_cmd *cmd;
3679	struct wmi_tlv *tlv;
3680	struct sk_buff *skb;
3681	size_t len;
3682
3683	len = sizeof(*tlv) + sizeof(*cmd);
3684	skb = ath10k_wmi_alloc_skb(ar, len);
3685	if (!skb)
3686		return ERR_PTR(-ENOMEM);
3687
3688	tlv = (struct wmi_tlv *)skb->data;
3689	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_DEL_EVT_CMD);
3690	tlv->len = __cpu_to_le16(sizeof(*cmd));
3691	cmd = (void *)tlv->value;
3692
3693	cmd->vdev_id = __cpu_to_le32(vdev_id);
3694	cmd->is_add = __cpu_to_le32(enable);
3695	cmd->event_bitmap = __cpu_to_le32(1 << event);
3696
3697	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
3698		   wow_wakeup_event(event), enable, vdev_id);
3699	return skb;
3700}
3701
3702static struct sk_buff *
3703ath10k_wmi_tlv_gen_wow_host_wakeup_ind(struct ath10k *ar)
3704{
3705	struct wmi_tlv_wow_host_wakeup_ind *cmd;
3706	struct wmi_tlv *tlv;
3707	struct sk_buff *skb;
3708	size_t len;
3709
3710	len = sizeof(*tlv) + sizeof(*cmd);
3711	skb = ath10k_wmi_alloc_skb(ar, len);
3712	if (!skb)
3713		return ERR_PTR(-ENOMEM);
3714
3715	tlv = (struct wmi_tlv *)skb->data;
3716	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_HOSTWAKEUP_FROM_SLEEP_CMD);
3717	tlv->len = __cpu_to_le16(sizeof(*cmd));
3718	cmd = (void *)tlv->value;
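	/* The host wakeup indication carries no parameters; cmd is left
	 * untouched and the zero-initialized command body is sent as-is.
	 */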
3719
3720	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow host wakeup ind\n");
3721	return skb;
3722}
3723
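/* Build the WoW add-pattern command: a bitmap pattern TLV followed by the
 * (here empty) IPv4/IPv6 sync, magic-pattern and info-timeout arrays and a
 * ratelimit interval, all of which are emitted even when they carry no
 * payload.
 */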
3724static struct sk_buff *
3725ath10k_wmi_tlv_op_gen_wow_add_pattern(struct ath10k *ar, u32 vdev_id,
3726				      u32 pattern_id, const u8 *pattern,
3727				      const u8 *bitmask, int pattern_len,
3728				      int pattern_offset)
3729{
3730	struct wmi_tlv_wow_add_pattern_cmd *cmd;
3731	struct wmi_tlv_wow_bitmap_pattern *bitmap;
3732	struct wmi_tlv *tlv;
3733	struct sk_buff *skb;
3734	void *ptr;
3735	size_t len;
3736
3737	len = sizeof(*tlv) + sizeof(*cmd) +
3738	      sizeof(*tlv) +			/* array struct */
3739	      sizeof(*tlv) + sizeof(*bitmap) +  /* bitmap */
3740	      sizeof(*tlv) +			/* empty ipv4 sync */
3741	      sizeof(*tlv) +			/* empty ipv6 sync */
3742	      sizeof(*tlv) +			/* empty magic */
3743	      sizeof(*tlv) +			/* empty info timeout */
3744	      sizeof(*tlv) + sizeof(u32);	/* ratelimit interval */
3745
3746	skb = ath10k_wmi_alloc_skb(ar, len);
3747	if (!skb)
3748		return ERR_PTR(-ENOMEM);
3749
3750	/* cmd */
3751	ptr = (void *)skb->data;
3752	tlv = ptr;
3753	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_PATTERN_CMD);
3754	tlv->len = __cpu_to_le16(sizeof(*cmd));
3755	cmd = (void *)tlv->value;
3756
3757	cmd->vdev_id = __cpu_to_le32(vdev_id);
3758	cmd->pattern_id = __cpu_to_le32(pattern_id);
3759	cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN);
3760
3761	ptr += sizeof(*tlv);
3762	ptr += sizeof(*cmd);
3763
3764	/* bitmap */
3765	tlv = ptr;
3766	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
3767	tlv->len = __cpu_to_le16(sizeof(*tlv) + sizeof(*bitmap));
3768
3769	ptr += sizeof(*tlv);
3770
3771	tlv = ptr;
3772	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_BITMAP_PATTERN_T);
3773	tlv->len = __cpu_to_le16(sizeof(*bitmap));
3774	bitmap = (void *)tlv->value;
3775
3776	memcpy(bitmap->patternbuf, pattern, pattern_len);
3777	memcpy(bitmap->bitmaskbuf, bitmask, pattern_len);
3778	bitmap->pattern_offset = __cpu_to_le32(pattern_offset);
3779	bitmap->pattern_len = __cpu_to_le32(pattern_len);
3780	bitmap->bitmask_len = __cpu_to_le32(pattern_len);
3781	bitmap->pattern_id = __cpu_to_le32(pattern_id);
3782
3783	ptr += sizeof(*tlv);
3784	ptr += sizeof(*bitmap);
3785
3786	/* ipv4 sync */
3787	tlv = ptr;
3788	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
3789	tlv->len = __cpu_to_le16(0);
3790
3791	ptr += sizeof(*tlv);
3792
3793	/* ipv6 sync */
3794	tlv = ptr;
3795	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
3796	tlv->len = __cpu_to_le16(0);
3797
3798	ptr += sizeof(*tlv);
3799
3800	/* magic */
3801	tlv = ptr;
3802	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
3803	tlv->len = __cpu_to_le16(0);
3804
3805	ptr += sizeof(*tlv);
3806
3807	/* pattern info timeout */
3808	tlv = ptr;
3809	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
3810	tlv->len = __cpu_to_le16(0);
3811
3812	ptr += sizeof(*tlv);
3813
3814	/* ratelimit interval */
3815	tlv = ptr;
3816	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
3817	tlv->len = __cpu_to_le16(sizeof(u32));
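	/* The interval value itself is not written; the buffer is
	 * zero-initialized, so a zero interval is reported to firmware.
	 */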
3818
3819	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d, pattern_offset %d\n",
3820		   vdev_id, pattern_id, pattern_offset);
3821	return skb;
3822}
3823
3824static struct sk_buff *
3825ath10k_wmi_tlv_op_gen_wow_del_pattern(struct ath10k *ar, u32 vdev_id,
3826				      u32 pattern_id)
3827{
3828	struct wmi_tlv_wow_del_pattern_cmd *cmd;
3829	struct wmi_tlv *tlv;
3830	struct sk_buff *skb;
3831	size_t len;
3832
3833	len = sizeof(*tlv) + sizeof(*cmd);
3834	skb = ath10k_wmi_alloc_skb(ar, len);
3835	if (!skb)
3836		return ERR_PTR(-ENOMEM);
3837
3838	tlv = (struct wmi_tlv *)skb->data;
3839	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_DEL_PATTERN_CMD);
3840	tlv->len = __cpu_to_le16(sizeof(*cmd));
3841	cmd = (void *)tlv->value;
3842
3843	cmd->vdev_id = __cpu_to_le32(vdev_id);
3844	cmd->pattern_id = __cpu_to_le32(pattern_id);
3845	cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN);
3846
3847	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
3848		   vdev_id, pattern_id);
3849	return skb;
3850}
3851
3852/* Request FW to start PNO operation */
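/* The command consists of the fixed wmi_tlv_wow_nlo_config_cmd struct, an
 * array-of-structs TLV with one nlo_configured_parameters entry per matched
 * SSID and an array-of-uint32 TLV carrying the channel list.
 */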
3853static struct sk_buff *
3854ath10k_wmi_tlv_op_gen_config_pno_start(struct ath10k *ar,
3855				       u32 vdev_id,
3856				       struct wmi_pno_scan_req *pno)
3857{
3858	struct nlo_configured_parameters *nlo_list;
3859	struct wmi_tlv_wow_nlo_config_cmd *cmd;
3860	struct wmi_tlv *tlv;
3861	struct sk_buff *skb;
3862	__le32 *channel_list;
3863	u16 tlv_len;
3864	size_t len;
3865	void *ptr;
3866	u32 i;
3867
3868	len = sizeof(*tlv) + sizeof(*cmd) +
3869	      sizeof(*tlv) +
3870	      /* TLV placeholder for array of structures
3871	       * nlo_configured_parameters(nlo_list)
3872	       */
3873	      sizeof(*tlv);
3874	      /* TLV placeholder for array of uint32 channel_list */
3875
3876	len += sizeof(u32) * min_t(u8, pno->a_networks[0].channel_count,
3877				   WMI_NLO_MAX_CHAN);
3878	len += sizeof(struct nlo_configured_parameters) *
3879				min_t(u8, pno->uc_networks_count, WMI_NLO_MAX_SSIDS);
3880
3881	skb = ath10k_wmi_alloc_skb(ar, len);
3882	if (!skb)
3883		return ERR_PTR(-ENOMEM);
3884
3885	ptr = (void *)skb->data;
3886	tlv = ptr;
3887	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_NLO_CONFIG_CMD);
3888	tlv->len = __cpu_to_le16(sizeof(*cmd));
3889	cmd = (void *)tlv->value;
3890
3891	/* wmi_tlv_wow_nlo_config_cmd parameters */
3892	cmd->vdev_id = __cpu_to_le32(pno->vdev_id);
3893	cmd->flags = __cpu_to_le32(WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN);
3894
3895	/* current FW does not support min-max range for dwell time */
3896	cmd->active_dwell_time = __cpu_to_le32(pno->active_max_time);
3897	cmd->passive_dwell_time = __cpu_to_le32(pno->passive_max_time);
3898
3899	if (pno->do_passive_scan)
3900		cmd->flags |= __cpu_to_le32(WMI_NLO_CONFIG_SCAN_PASSIVE);
3901
3902	/* copy scan interval */
3903	cmd->fast_scan_period = __cpu_to_le32(pno->fast_scan_period);
3904	cmd->slow_scan_period = __cpu_to_le32(pno->slow_scan_period);
3905	cmd->fast_scan_max_cycles = __cpu_to_le32(pno->fast_scan_max_cycles);
3906	cmd->delay_start_time = __cpu_to_le32(pno->delay_start_time);
3907
3908	if (pno->enable_pno_scan_randomization) {
3909		cmd->flags |= __cpu_to_le32(WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ |
3910				WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ);
3911		ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr);
3912		ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask);
3913	}
3914
3915	ptr += sizeof(*tlv);
3916	ptr += sizeof(*cmd);
3917
3918	/* nlo_configured_parameters(nlo_list) */
3919	cmd->no_of_ssids = __cpu_to_le32(min_t(u8, pno->uc_networks_count,
3920					       WMI_NLO_MAX_SSIDS));
3921	tlv_len = __le32_to_cpu(cmd->no_of_ssids) *
3922		sizeof(struct nlo_configured_parameters);
3923
3924	tlv = ptr;
3925	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
3926	tlv->len = __cpu_to_le16(tlv_len);
3927
3928	ptr += sizeof(*tlv);
3929	nlo_list = ptr;
3930	for (i = 0; i < __le32_to_cpu(cmd->no_of_ssids); i++) {
3931		tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header);
3932		tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
3933		tlv->len = __cpu_to_le16(sizeof(struct nlo_configured_parameters) -
3934					 sizeof(*tlv));
3935
3936		/* copy the SSID and its length */
3937		nlo_list[i].ssid.valid = __cpu_to_le32(true);
3938		nlo_list[i].ssid.ssid.ssid_len = pno->a_networks[i].ssid.ssid_len;
3939		memcpy(nlo_list[i].ssid.ssid.ssid,
3940		       pno->a_networks[i].ssid.ssid,
3941		       __le32_to_cpu(nlo_list[i].ssid.ssid.ssid_len));
3942
3943		/* copy rssi threshold */
3944		if (pno->a_networks[i].rssi_threshold &&
3945		    pno->a_networks[i].rssi_threshold > -300) {
3946			nlo_list[i].rssi_cond.valid = __cpu_to_le32(true);
3947			nlo_list[i].rssi_cond.rssi =
3948				__cpu_to_le32(pno->a_networks[i].rssi_threshold);
3949		}
3950
3951		nlo_list[i].bcast_nw_type.valid = __cpu_to_le32(true);
3952		nlo_list[i].bcast_nw_type.bcast_nw_type =
3953			__cpu_to_le32(pno->a_networks[i].bcast_nw_type);
3954	}
3955
3956	ptr += __le32_to_cpu(cmd->no_of_ssids) * sizeof(struct nlo_configured_parameters);
3957
3958	/* copy channel info */
3959	cmd->num_of_channels = __cpu_to_le32(min_t(u8,
3960						   pno->a_networks[0].channel_count,
3961						   WMI_NLO_MAX_CHAN));
3962
3963	tlv = ptr;
3964	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
3965	tlv->len = __cpu_to_le16(__le32_to_cpu(cmd->num_of_channels) *
3966				 sizeof(u32));
3967	ptr += sizeof(*tlv);
3968
3969	channel_list = (__le32 *)ptr;
3970	for (i = 0; i < __le32_to_cpu(cmd->num_of_channels); i++)
3971		channel_list[i] = __cpu_to_le32(pno->a_networks[0].channels[i]);
3972
3973	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n",
3974		   vdev_id);
3975
3976	return skb;
3977}
3978
3979/* Request FW to stop ongoing PNO operation */
3980static struct sk_buff *ath10k_wmi_tlv_op_gen_config_pno_stop(struct ath10k *ar,
3981							     u32 vdev_id)
3982{
3983	struct wmi_tlv_wow_nlo_config_cmd *cmd;
3984	struct wmi_tlv *tlv;
3985	struct sk_buff *skb;
3986	void *ptr;
3987	size_t len;
3988
3989	len = sizeof(*tlv) + sizeof(*cmd) +
3990	      sizeof(*tlv) +
3991	      /* TLV placeholder for array of structures
3992	       * nlo_configured_parameters(nlo_list)
3993	       */
3994	      sizeof(*tlv);
3995	      /* TLV placeholder for array of uint32 channel_list */
3996	skb = ath10k_wmi_alloc_skb(ar, len);
3997	if (!skb)
3998		return ERR_PTR(-ENOMEM);
3999
4000	ptr = (void *)skb->data;
4001	tlv = ptr;
4002	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_NLO_CONFIG_CMD);
4003	tlv->len = __cpu_to_le16(sizeof(*cmd));
4004	cmd = (void *)tlv->value;
4005
4006	cmd->vdev_id = __cpu_to_le32(vdev_id);
4007	cmd->flags = __cpu_to_le32(WMI_NLO_CONFIG_STOP);
4008
4009	ptr += sizeof(*tlv);
4010	ptr += sizeof(*cmd);
4011
4012	/* nlo_configured_parameters(nlo_list) */
4013	tlv = ptr;
4014	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
4015	tlv->len = __cpu_to_le16(0);
4016
4017	ptr += sizeof(*tlv);
4018
4019	/* channel list */
4020	tlv = ptr;
4021	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
4022	tlv->len = __cpu_to_le16(0);
4023
4024	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv stop pno config vdev_id %d\n", vdev_id);
4025	return skb;
4026}
4027
4028static struct sk_buff *
4029ath10k_wmi_tlv_op_gen_config_pno(struct ath10k *ar, u32 vdev_id,
4030				 struct wmi_pno_scan_req *pno_scan)
4031{
4032	if (pno_scan->enable)
4033		return ath10k_wmi_tlv_op_gen_config_pno_start(ar, vdev_id, pno_scan);
4034	else
4035		return ath10k_wmi_tlv_op_gen_config_pno_stop(ar, vdev_id);
4036}
4037
4038static struct sk_buff *
4039ath10k_wmi_tlv_op_gen_adaptive_qcs(struct ath10k *ar, bool enable)
4040{
4041	struct wmi_tlv_adaptive_qcs *cmd;
4042	struct wmi_tlv *tlv;
4043	struct sk_buff *skb;
4044	void *ptr;
4045	size_t len;
4046
4047	len = sizeof(*tlv) + sizeof(*cmd);
4048	skb = ath10k_wmi_alloc_skb(ar, len);
4049	if (!skb)
4050		return ERR_PTR(-ENOMEM);
4051
4052	ptr = (void *)skb->data;
4053	tlv = ptr;
4054	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_RESMGR_ADAPTIVE_OCS_CMD);
4055	tlv->len = __cpu_to_le16(sizeof(*cmd));
4056	cmd = (void *)tlv->value;
4057	cmd->enable = __cpu_to_le32(enable ? 1 : 0);
4058
4059	ptr += sizeof(*tlv);
4060	ptr += sizeof(*cmd);
4061
4062	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv adaptive qcs %d\n", enable);
4063	return skb;
4064}
4065
4066static struct sk_buff *
4067ath10k_wmi_tlv_op_gen_echo(struct ath10k *ar, u32 value)
4068{
4069	struct wmi_echo_cmd *cmd;
4070	struct wmi_tlv *tlv;
4071	struct sk_buff *skb;
4072	void *ptr;
4073	size_t len;
4074
4075	len = sizeof(*tlv) + sizeof(*cmd);
4076	skb = ath10k_wmi_alloc_skb(ar, len);
4077	if (!skb)
4078		return ERR_PTR(-ENOMEM);
4079
4080	ptr = (void *)skb->data;
4081	tlv = ptr;
4082	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_ECHO_CMD);
4083	tlv->len = __cpu_to_le16(sizeof(*cmd));
4084	cmd = (void *)tlv->value;
4085	cmd->value = cpu_to_le32(value);
4086
4087	ptr += sizeof(*tlv);
4088	ptr += sizeof(*cmd);
4089
4090	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv echo value 0x%08x\n", value);
4091	return skb;
4092}
4093
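/* Configure spectral scan parameters for a vdev. Starting or stopping the
 * scan itself is requested separately via the spectral enable command below.
 */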
4094static struct sk_buff *
4095ath10k_wmi_tlv_op_gen_vdev_spectral_conf(struct ath10k *ar,
4096					 const struct wmi_vdev_spectral_conf_arg *arg)
4097{
4098	struct wmi_vdev_spectral_conf_cmd *cmd;
4099	struct sk_buff *skb;
4100	struct wmi_tlv *tlv;
4101	void *ptr;
4102	size_t len;
4103
4104	len = sizeof(*tlv) + sizeof(*cmd);
4105	skb = ath10k_wmi_alloc_skb(ar, len);
4106	if (!skb)
4107		return ERR_PTR(-ENOMEM);
4108
4109	ptr = (void *)skb->data;
4110	tlv = ptr;
4111	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_CONFIGURE_CMD);
4112	tlv->len = __cpu_to_le16(sizeof(*cmd));
4113	cmd = (void *)tlv->value;
4114	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
4115	cmd->scan_count = __cpu_to_le32(arg->scan_count);
4116	cmd->scan_period = __cpu_to_le32(arg->scan_period);
4117	cmd->scan_priority = __cpu_to_le32(arg->scan_priority);
4118	cmd->scan_fft_size = __cpu_to_le32(arg->scan_fft_size);
4119	cmd->scan_gc_ena = __cpu_to_le32(arg->scan_gc_ena);
4120	cmd->scan_restart_ena = __cpu_to_le32(arg->scan_restart_ena);
4121	cmd->scan_noise_floor_ref = __cpu_to_le32(arg->scan_noise_floor_ref);
4122	cmd->scan_init_delay = __cpu_to_le32(arg->scan_init_delay);
4123	cmd->scan_nb_tone_thr = __cpu_to_le32(arg->scan_nb_tone_thr);
4124	cmd->scan_str_bin_thr = __cpu_to_le32(arg->scan_str_bin_thr);
4125	cmd->scan_wb_rpt_mode = __cpu_to_le32(arg->scan_wb_rpt_mode);
4126	cmd->scan_rssi_rpt_mode = __cpu_to_le32(arg->scan_rssi_rpt_mode);
4127	cmd->scan_rssi_thr = __cpu_to_le32(arg->scan_rssi_thr);
4128	cmd->scan_pwr_format = __cpu_to_le32(arg->scan_pwr_format);
4129	cmd->scan_rpt_mode = __cpu_to_le32(arg->scan_rpt_mode);
4130	cmd->scan_bin_scale = __cpu_to_le32(arg->scan_bin_scale);
4131	cmd->scan_dbm_adj = __cpu_to_le32(arg->scan_dbm_adj);
4132	cmd->scan_chn_mask = __cpu_to_le32(arg->scan_chn_mask);
4133
4134	return skb;
4135}
4136
4137static struct sk_buff *
4138ath10k_wmi_tlv_op_gen_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id,
4139					   u32 trigger, u32 enable)
4140{
4141	struct wmi_vdev_spectral_enable_cmd *cmd;
4142	struct sk_buff *skb;
4143	struct wmi_tlv *tlv;
4144	void *ptr;
4145	size_t len;
4146
4147	len = sizeof(*tlv) + sizeof(*cmd);
4148	skb = ath10k_wmi_alloc_skb(ar, len);
4149	if (!skb)
4150		return ERR_PTR(-ENOMEM);
4151
4152	ptr = (void *)skb->data;
4153	tlv = ptr;
4154	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_ENABLE_CMD);
4155	tlv->len = __cpu_to_le16(sizeof(*cmd));
4156	cmd = (void *)tlv->value;
4157	cmd->vdev_id = __cpu_to_le32(vdev_id);
4158	cmd->trigger_cmd = __cpu_to_le32(trigger);
4159	cmd->enable_cmd = __cpu_to_le32(enable);
4160
4161	return skb;
4162}
4163
4164/****************/
4165/* TLV mappings */
4166/****************/
4167
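/* Command IDs and parameters that the TLV firmware interface does not
 * implement are mapped to WMI_CMD_UNSUPPORTED and *_PARAM_UNSUPPORTED so the
 * generic WMI code can refuse them instead of sending bogus IDs to firmware.
 */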
4168static struct wmi_cmd_map wmi_tlv_cmd_map = {
4169	.init_cmdid = WMI_TLV_INIT_CMDID,
4170	.start_scan_cmdid = WMI_TLV_START_SCAN_CMDID,
4171	.stop_scan_cmdid = WMI_TLV_STOP_SCAN_CMDID,
4172	.scan_chan_list_cmdid = WMI_TLV_SCAN_CHAN_LIST_CMDID,
4173	.scan_sch_prio_tbl_cmdid = WMI_TLV_SCAN_SCH_PRIO_TBL_CMDID,
4174	.scan_prob_req_oui_cmdid = WMI_TLV_SCAN_PROB_REQ_OUI_CMDID,
4175	.pdev_set_regdomain_cmdid = WMI_TLV_PDEV_SET_REGDOMAIN_CMDID,
4176	.pdev_set_channel_cmdid = WMI_TLV_PDEV_SET_CHANNEL_CMDID,
4177	.pdev_set_param_cmdid = WMI_TLV_PDEV_SET_PARAM_CMDID,
4178	.pdev_pktlog_enable_cmdid = WMI_TLV_PDEV_PKTLOG_ENABLE_CMDID,
4179	.pdev_pktlog_disable_cmdid = WMI_TLV_PDEV_PKTLOG_DISABLE_CMDID,
4180	.pdev_set_wmm_params_cmdid = WMI_TLV_PDEV_SET_WMM_PARAMS_CMDID,
4181	.pdev_set_ht_cap_ie_cmdid = WMI_TLV_PDEV_SET_HT_CAP_IE_CMDID,
4182	.pdev_set_vht_cap_ie_cmdid = WMI_TLV_PDEV_SET_VHT_CAP_IE_CMDID,
4183	.pdev_set_dscp_tid_map_cmdid = WMI_TLV_PDEV_SET_DSCP_TID_MAP_CMDID,
4184	.pdev_set_quiet_mode_cmdid = WMI_TLV_PDEV_SET_QUIET_MODE_CMDID,
4185	.pdev_green_ap_ps_enable_cmdid = WMI_TLV_PDEV_GREEN_AP_PS_ENABLE_CMDID,
4186	.pdev_get_tpc_config_cmdid = WMI_TLV_PDEV_GET_TPC_CONFIG_CMDID,
4187	.pdev_set_base_macaddr_cmdid = WMI_TLV_PDEV_SET_BASE_MACADDR_CMDID,
4188	.vdev_create_cmdid = WMI_TLV_VDEV_CREATE_CMDID,
4189	.vdev_delete_cmdid = WMI_TLV_VDEV_DELETE_CMDID,
4190	.vdev_start_request_cmdid = WMI_TLV_VDEV_START_REQUEST_CMDID,
4191	.vdev_restart_request_cmdid = WMI_TLV_VDEV_RESTART_REQUEST_CMDID,
4192	.vdev_up_cmdid = WMI_TLV_VDEV_UP_CMDID,
4193	.vdev_stop_cmdid = WMI_TLV_VDEV_STOP_CMDID,
4194	.vdev_down_cmdid = WMI_TLV_VDEV_DOWN_CMDID,
4195	.vdev_set_param_cmdid = WMI_TLV_VDEV_SET_PARAM_CMDID,
4196	.vdev_install_key_cmdid = WMI_TLV_VDEV_INSTALL_KEY_CMDID,
4197	.peer_create_cmdid = WMI_TLV_PEER_CREATE_CMDID,
4198	.peer_delete_cmdid = WMI_TLV_PEER_DELETE_CMDID,
4199	.peer_flush_tids_cmdid = WMI_TLV_PEER_FLUSH_TIDS_CMDID,
4200	.peer_set_param_cmdid = WMI_TLV_PEER_SET_PARAM_CMDID,
4201	.peer_assoc_cmdid = WMI_TLV_PEER_ASSOC_CMDID,
4202	.peer_add_wds_entry_cmdid = WMI_TLV_PEER_ADD_WDS_ENTRY_CMDID,
4203	.peer_remove_wds_entry_cmdid = WMI_TLV_PEER_REMOVE_WDS_ENTRY_CMDID,
4204	.peer_mcast_group_cmdid = WMI_TLV_PEER_MCAST_GROUP_CMDID,
4205	.bcn_tx_cmdid = WMI_TLV_BCN_TX_CMDID,
4206	.pdev_send_bcn_cmdid = WMI_TLV_PDEV_SEND_BCN_CMDID,
4207	.bcn_tmpl_cmdid = WMI_TLV_BCN_TMPL_CMDID,
4208	.bcn_filter_rx_cmdid = WMI_TLV_BCN_FILTER_RX_CMDID,
4209	.prb_req_filter_rx_cmdid = WMI_TLV_PRB_REQ_FILTER_RX_CMDID,
4210	.mgmt_tx_cmdid = WMI_TLV_MGMT_TX_CMDID,
4211	.mgmt_tx_send_cmdid = WMI_TLV_MGMT_TX_SEND_CMD,
4212	.prb_tmpl_cmdid = WMI_TLV_PRB_TMPL_CMDID,
4213	.addba_clear_resp_cmdid = WMI_TLV_ADDBA_CLEAR_RESP_CMDID,
4214	.addba_send_cmdid = WMI_TLV_ADDBA_SEND_CMDID,
4215	.addba_status_cmdid = WMI_TLV_ADDBA_STATUS_CMDID,
4216	.delba_send_cmdid = WMI_TLV_DELBA_SEND_CMDID,
4217	.addba_set_resp_cmdid = WMI_TLV_ADDBA_SET_RESP_CMDID,
4218	.send_singleamsdu_cmdid = WMI_TLV_SEND_SINGLEAMSDU_CMDID,
4219	.sta_powersave_mode_cmdid = WMI_TLV_STA_POWERSAVE_MODE_CMDID,
4220	.sta_powersave_param_cmdid = WMI_TLV_STA_POWERSAVE_PARAM_CMDID,
4221	.sta_mimo_ps_mode_cmdid = WMI_TLV_STA_MIMO_PS_MODE_CMDID,
4222	.pdev_dfs_enable_cmdid = WMI_TLV_PDEV_DFS_ENABLE_CMDID,
4223	.pdev_dfs_disable_cmdid = WMI_TLV_PDEV_DFS_DISABLE_CMDID,
4224	.roam_scan_mode = WMI_TLV_ROAM_SCAN_MODE,
4225	.roam_scan_rssi_threshold = WMI_TLV_ROAM_SCAN_RSSI_THRESHOLD,
4226	.roam_scan_period = WMI_TLV_ROAM_SCAN_PERIOD,
4227	.roam_scan_rssi_change_threshold =
4228				WMI_TLV_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
4229	.roam_ap_profile = WMI_TLV_ROAM_AP_PROFILE,
4230	.ofl_scan_add_ap_profile = WMI_TLV_ROAM_AP_PROFILE,
4231	.ofl_scan_remove_ap_profile = WMI_TLV_OFL_SCAN_REMOVE_AP_PROFILE,
4232	.ofl_scan_period = WMI_TLV_OFL_SCAN_PERIOD,
4233	.p2p_dev_set_device_info = WMI_TLV_P2P_DEV_SET_DEVICE_INFO,
4234	.p2p_dev_set_discoverability = WMI_TLV_P2P_DEV_SET_DISCOVERABILITY,
4235	.p2p_go_set_beacon_ie = WMI_TLV_P2P_GO_SET_BEACON_IE,
4236	.p2p_go_set_probe_resp_ie = WMI_TLV_P2P_GO_SET_PROBE_RESP_IE,
4237	.p2p_set_vendor_ie_data_cmdid = WMI_TLV_P2P_SET_VENDOR_IE_DATA_CMDID,
4238	.ap_ps_peer_param_cmdid = WMI_TLV_AP_PS_PEER_PARAM_CMDID,
4239	.ap_ps_peer_uapsd_coex_cmdid = WMI_TLV_AP_PS_PEER_UAPSD_COEX_CMDID,
4240	.peer_rate_retry_sched_cmdid = WMI_TLV_PEER_RATE_RETRY_SCHED_CMDID,
4241	.wlan_profile_trigger_cmdid = WMI_TLV_WLAN_PROFILE_TRIGGER_CMDID,
4242	.wlan_profile_set_hist_intvl_cmdid =
4243				WMI_TLV_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
4244	.wlan_profile_get_profile_data_cmdid =
4245				WMI_TLV_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
4246	.wlan_profile_enable_profile_id_cmdid =
4247				WMI_TLV_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
4248	.wlan_profile_list_profile_id_cmdid =
4249				WMI_TLV_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
4250	.pdev_suspend_cmdid = WMI_TLV_PDEV_SUSPEND_CMDID,
4251	.pdev_resume_cmdid = WMI_TLV_PDEV_RESUME_CMDID,
4252	.add_bcn_filter_cmdid = WMI_TLV_ADD_BCN_FILTER_CMDID,
4253	.rmv_bcn_filter_cmdid = WMI_TLV_RMV_BCN_FILTER_CMDID,
4254	.wow_add_wake_pattern_cmdid = WMI_TLV_WOW_ADD_WAKE_PATTERN_CMDID,
4255	.wow_del_wake_pattern_cmdid = WMI_TLV_WOW_DEL_WAKE_PATTERN_CMDID,
4256	.wow_enable_disable_wake_event_cmdid =
4257				WMI_TLV_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
4258	.wow_enable_cmdid = WMI_TLV_WOW_ENABLE_CMDID,
4259	.wow_hostwakeup_from_sleep_cmdid =
4260				WMI_TLV_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
4261	.rtt_measreq_cmdid = WMI_TLV_RTT_MEASREQ_CMDID,
4262	.rtt_tsf_cmdid = WMI_TLV_RTT_TSF_CMDID,
4263	.vdev_spectral_scan_configure_cmdid = WMI_TLV_SPECTRAL_SCAN_CONF_CMDID,
4264	.vdev_spectral_scan_enable_cmdid = WMI_TLV_SPECTRAL_SCAN_ENABLE_CMDID,
4265	.request_stats_cmdid = WMI_TLV_REQUEST_STATS_CMDID,
4266	.request_peer_stats_info_cmdid = WMI_TLV_REQUEST_PEER_STATS_INFO_CMDID,
4267	.set_arp_ns_offload_cmdid = WMI_TLV_SET_ARP_NS_OFFLOAD_CMDID,
4268	.network_list_offload_config_cmdid =
4269				WMI_TLV_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
4270	.gtk_offload_cmdid = WMI_TLV_GTK_OFFLOAD_CMDID,
4271	.csa_offload_enable_cmdid = WMI_TLV_CSA_OFFLOAD_ENABLE_CMDID,
4272	.csa_offload_chanswitch_cmdid = WMI_TLV_CSA_OFFLOAD_CHANSWITCH_CMDID,
4273	.chatter_set_mode_cmdid = WMI_TLV_CHATTER_SET_MODE_CMDID,
4274	.peer_tid_addba_cmdid = WMI_TLV_PEER_TID_ADDBA_CMDID,
4275	.peer_tid_delba_cmdid = WMI_TLV_PEER_TID_DELBA_CMDID,
4276	.sta_dtim_ps_method_cmdid = WMI_TLV_STA_DTIM_PS_METHOD_CMDID,
4277	.sta_uapsd_auto_trig_cmdid = WMI_TLV_STA_UAPSD_AUTO_TRIG_CMDID,
4278	.sta_keepalive_cmd = WMI_TLV_STA_KEEPALIVE_CMDID,
4279	.echo_cmdid = WMI_TLV_ECHO_CMDID,
4280	.pdev_utf_cmdid = WMI_TLV_PDEV_UTF_CMDID,
4281	.dbglog_cfg_cmdid = WMI_TLV_DBGLOG_CFG_CMDID,
4282	.pdev_qvit_cmdid = WMI_TLV_PDEV_QVIT_CMDID,
4283	.pdev_ftm_intg_cmdid = WMI_TLV_PDEV_FTM_INTG_CMDID,
4284	.vdev_set_keepalive_cmdid = WMI_TLV_VDEV_SET_KEEPALIVE_CMDID,
4285	.vdev_get_keepalive_cmdid = WMI_TLV_VDEV_GET_KEEPALIVE_CMDID,
4286	.force_fw_hang_cmdid = WMI_TLV_FORCE_FW_HANG_CMDID,
4287	.gpio_config_cmdid = WMI_TLV_GPIO_CONFIG_CMDID,
4288	.gpio_output_cmdid = WMI_TLV_GPIO_OUTPUT_CMDID,
4289	.pdev_get_temperature_cmdid = WMI_TLV_PDEV_GET_TEMPERATURE_CMDID,
4290	.vdev_set_wmm_params_cmdid = WMI_TLV_VDEV_SET_WMM_PARAMS_CMDID,
4291	.tdls_set_state_cmdid = WMI_TLV_TDLS_SET_STATE_CMDID,
4292	.tdls_peer_update_cmdid = WMI_TLV_TDLS_PEER_UPDATE_CMDID,
4293	.adaptive_qcs_cmdid = WMI_TLV_RESMGR_ADAPTIVE_OCS_CMDID,
4294	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
4295	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
4296	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
4297	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
4298	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
4299	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
4300	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
4301	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
4302	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
4303	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
4304	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
4305	.nan_cmdid = WMI_CMD_UNSUPPORTED,
4306	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
4307	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
4308	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
4309	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
4310	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
4311	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
4312	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
4313	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
4314	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
4315	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
4316	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
4317	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
4318	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
4319	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
4320	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
4321	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
4322	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
4323	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
4324	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
4325	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
4326};
4327
4328static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
4329	.tx_chain_mask = WMI_TLV_PDEV_PARAM_TX_CHAIN_MASK,
4330	.rx_chain_mask = WMI_TLV_PDEV_PARAM_RX_CHAIN_MASK,
4331	.txpower_limit2g = WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT2G,
4332	.txpower_limit5g = WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT5G,
4333	.txpower_scale = WMI_TLV_PDEV_PARAM_TXPOWER_SCALE,
4334	.beacon_gen_mode = WMI_TLV_PDEV_PARAM_BEACON_GEN_MODE,
4335	.beacon_tx_mode = WMI_TLV_PDEV_PARAM_BEACON_TX_MODE,
4336	.resmgr_offchan_mode = WMI_TLV_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
4337	.protection_mode = WMI_TLV_PDEV_PARAM_PROTECTION_MODE,
4338	.dynamic_bw = WMI_TLV_PDEV_PARAM_DYNAMIC_BW,
4339	.non_agg_sw_retry_th = WMI_TLV_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
4340	.agg_sw_retry_th = WMI_TLV_PDEV_PARAM_AGG_SW_RETRY_TH,
4341	.sta_kickout_th = WMI_TLV_PDEV_PARAM_STA_KICKOUT_TH,
4342	.ac_aggrsize_scaling = WMI_TLV_PDEV_PARAM_AC_AGGRSIZE_SCALING,
4343	.ltr_enable = WMI_TLV_PDEV_PARAM_LTR_ENABLE,
4344	.ltr_ac_latency_be = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BE,
4345	.ltr_ac_latency_bk = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BK,
4346	.ltr_ac_latency_vi = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VI,
4347	.ltr_ac_latency_vo = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VO,
4348	.ltr_ac_latency_timeout = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
4349	.ltr_sleep_override = WMI_TLV_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
4350	.ltr_rx_override = WMI_TLV_PDEV_PARAM_LTR_RX_OVERRIDE,
4351	.ltr_tx_activity_timeout = WMI_TLV_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
4352	.l1ss_enable = WMI_TLV_PDEV_PARAM_L1SS_ENABLE,
4353	.dsleep_enable = WMI_TLV_PDEV_PARAM_DSLEEP_ENABLE,
4354	.pcielp_txbuf_flush = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
4355	.pcielp_txbuf_watermark = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
4356	.pcielp_txbuf_tmo_en = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
4357	.pcielp_txbuf_tmo_value = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
4358	.pdev_stats_update_period = WMI_TLV_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
4359	.vdev_stats_update_period = WMI_TLV_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
4360	.peer_stats_update_period = WMI_TLV_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
4361	.bcnflt_stats_update_period =
4362				WMI_TLV_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
4363	.pmf_qos = WMI_TLV_PDEV_PARAM_PMF_QOS,
4364	.arp_ac_override = WMI_TLV_PDEV_PARAM_ARP_AC_OVERRIDE,
4365	.dcs = WMI_TLV_PDEV_PARAM_DCS,
4366	.ani_enable = WMI_TLV_PDEV_PARAM_ANI_ENABLE,
4367	.ani_poll_period = WMI_TLV_PDEV_PARAM_ANI_POLL_PERIOD,
4368	.ani_listen_period = WMI_TLV_PDEV_PARAM_ANI_LISTEN_PERIOD,
4369	.ani_ofdm_level = WMI_TLV_PDEV_PARAM_ANI_OFDM_LEVEL,
4370	.ani_cck_level = WMI_TLV_PDEV_PARAM_ANI_CCK_LEVEL,
4371	.dyntxchain = WMI_TLV_PDEV_PARAM_DYNTXCHAIN,
4372	.proxy_sta = WMI_TLV_PDEV_PARAM_PROXY_STA,
4373	.idle_ps_config = WMI_TLV_PDEV_PARAM_IDLE_PS_CONFIG,
4374	.power_gating_sleep = WMI_TLV_PDEV_PARAM_POWER_GATING_SLEEP,
4375	.fast_channel_reset = WMI_TLV_PDEV_PARAM_UNSUPPORTED,
4376	.burst_dur = WMI_TLV_PDEV_PARAM_BURST_DUR,
4377	.burst_enable = WMI_TLV_PDEV_PARAM_BURST_ENABLE,
4378	.cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
4379	.aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
4380	.rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
4381	.smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
4382	.igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
4383	.igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
4384	.antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
4385	.rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
4386	.set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
4387	.proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
4388	.set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
4389	.set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
4390	.remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
4391	.peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
4392	.igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
4393	.block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
4394	.set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
4395	.set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
4396	.set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
4397	.txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
4398	.set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
4399	.set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
4400	.en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
4401	.mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
4402	.noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
4403	.noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
4404	.dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
4405	.set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
4406	.atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
4407	.atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
4408	.ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
4409	.mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
4410	.sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
4411	.signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
4412	.signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
4413	.enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
4414	.enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
4415	.cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
4416	.rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
4417	.pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
4418	.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
4419	.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
4420	.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
4421	.rfkill_config = WMI_TLV_PDEV_PARAM_HW_RFKILL_CONFIG,
4422	.rfkill_enable = WMI_TLV_PDEV_PARAM_RFKILL_ENABLE,
4423	.peer_stats_info_enable = WMI_TLV_PDEV_PARAM_PEER_STATS_INFO_ENABLE,
4424};
4425
4426static struct wmi_peer_param_map wmi_tlv_peer_param_map = {
4427	.smps_state = WMI_TLV_PEER_SMPS_STATE,
4428	.ampdu = WMI_TLV_PEER_AMPDU,
4429	.authorize = WMI_TLV_PEER_AUTHORIZE,
4430	.chan_width = WMI_TLV_PEER_CHAN_WIDTH,
4431	.nss = WMI_TLV_PEER_NSS,
4432	.use_4addr = WMI_TLV_PEER_USE_4ADDR,
4433	.membership = WMI_TLV_PEER_MEMBERSHIP,
4434	.user_pos = WMI_TLV_PEER_USERPOS,
4435	.crit_proto_hint_enabled = WMI_TLV_PEER_CRIT_PROTO_HINT_ENABLED,
4436	.tx_fail_cnt_thr = WMI_TLV_PEER_TX_FAIL_CNT_THR,
4437	.set_hw_retry_cts2s = WMI_TLV_PEER_SET_HW_RETRY_CTS2S,
4438	.ibss_atim_win_len = WMI_TLV_PEER_IBSS_ATIM_WINDOW_LENGTH,
4439	.phymode = WMI_TLV_PEER_PHYMODE,
4440	.use_fixed_power = WMI_TLV_PEER_USE_FIXED_PWR,
4441	.dummy_var = WMI_TLV_PEER_DUMMY_VAR,
4442};
4443
4444static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = {
4445	.rts_threshold = WMI_TLV_VDEV_PARAM_RTS_THRESHOLD,
4446	.fragmentation_threshold = WMI_TLV_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
4447	.beacon_interval = WMI_TLV_VDEV_PARAM_BEACON_INTERVAL,
4448	.listen_interval = WMI_TLV_VDEV_PARAM_LISTEN_INTERVAL,
4449	.multicast_rate = WMI_TLV_VDEV_PARAM_MULTICAST_RATE,
4450	.mgmt_tx_rate = WMI_TLV_VDEV_PARAM_MGMT_TX_RATE,
4451	.slot_time = WMI_TLV_VDEV_PARAM_SLOT_TIME,
4452	.preamble = WMI_TLV_VDEV_PARAM_PREAMBLE,
4453	.swba_time = WMI_TLV_VDEV_PARAM_SWBA_TIME,
4454	.wmi_vdev_stats_update_period = WMI_TLV_VDEV_STATS_UPDATE_PERIOD,
4455	.wmi_vdev_pwrsave_ageout_time = WMI_TLV_VDEV_PWRSAVE_AGEOUT_TIME,
4456	.wmi_vdev_host_swba_interval = WMI_TLV_VDEV_HOST_SWBA_INTERVAL,
4457	.dtim_period = WMI_TLV_VDEV_PARAM_DTIM_PERIOD,
4458	.wmi_vdev_oc_scheduler_air_time_limit =
4459				WMI_TLV_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
4460	.wds = WMI_TLV_VDEV_PARAM_WDS,
4461	.atim_window = WMI_TLV_VDEV_PARAM_ATIM_WINDOW,
4462	.bmiss_count_max = WMI_TLV_VDEV_PARAM_BMISS_COUNT_MAX,
4463	.bmiss_first_bcnt = WMI_TLV_VDEV_PARAM_BMISS_FIRST_BCNT,
4464	.bmiss_final_bcnt = WMI_TLV_VDEV_PARAM_BMISS_FINAL_BCNT,
4465	.feature_wmm = WMI_TLV_VDEV_PARAM_FEATURE_WMM,
4466	.chwidth = WMI_TLV_VDEV_PARAM_CHWIDTH,
4467	.chextoffset = WMI_TLV_VDEV_PARAM_CHEXTOFFSET,
4468	.disable_htprotection =	WMI_TLV_VDEV_PARAM_DISABLE_HTPROTECTION,
4469	.sta_quickkickout = WMI_TLV_VDEV_PARAM_STA_QUICKKICKOUT,
4470	.mgmt_rate = WMI_TLV_VDEV_PARAM_MGMT_RATE,
4471	.protection_mode = WMI_TLV_VDEV_PARAM_PROTECTION_MODE,
4472	.fixed_rate = WMI_TLV_VDEV_PARAM_FIXED_RATE,
4473	.sgi = WMI_TLV_VDEV_PARAM_SGI,
4474	.ldpc = WMI_TLV_VDEV_PARAM_LDPC,
4475	.tx_stbc = WMI_TLV_VDEV_PARAM_TX_STBC,
4476	.rx_stbc = WMI_TLV_VDEV_PARAM_RX_STBC,
4477	.intra_bss_fwd = WMI_TLV_VDEV_PARAM_INTRA_BSS_FWD,
4478	.def_keyid = WMI_TLV_VDEV_PARAM_DEF_KEYID,
4479	.nss = WMI_TLV_VDEV_PARAM_NSS,
4480	.bcast_data_rate = WMI_TLV_VDEV_PARAM_BCAST_DATA_RATE,
4481	.mcast_data_rate = WMI_TLV_VDEV_PARAM_MCAST_DATA_RATE,
4482	.mcast_indicate = WMI_TLV_VDEV_PARAM_MCAST_INDICATE,
4483	.dhcp_indicate = WMI_TLV_VDEV_PARAM_DHCP_INDICATE,
4484	.unknown_dest_indicate = WMI_TLV_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
4485	.ap_keepalive_min_idle_inactive_time_secs =
4486		WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
4487	.ap_keepalive_max_idle_inactive_time_secs =
4488		WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
4489	.ap_keepalive_max_unresponsive_time_secs =
4490		WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
4491	.ap_enable_nawds = WMI_TLV_VDEV_PARAM_AP_ENABLE_NAWDS,
4492	.mcast2ucast_set = WMI_TLV_VDEV_PARAM_UNSUPPORTED,
4493	.enable_rtscts = WMI_TLV_VDEV_PARAM_ENABLE_RTSCTS,
4494	.txbf = WMI_TLV_VDEV_PARAM_TXBF,
4495	.packet_powersave = WMI_TLV_VDEV_PARAM_PACKET_POWERSAVE,
4496	.drop_unencry = WMI_TLV_VDEV_PARAM_DROP_UNENCRY,
4497	.tx_encap_type = WMI_TLV_VDEV_PARAM_TX_ENCAP_TYPE,
4498	.ap_detect_out_of_sync_sleeping_sta_time_secs =
4499					WMI_TLV_VDEV_PARAM_UNSUPPORTED,
4500	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
4501	.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
4502	.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
4503	.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
4504	.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
4505	.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
4506	.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
4507	.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
4508	.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
4509	.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
4510	.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
4511	.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
4512	.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
4513	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
4514	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
4515	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
4516};
4517
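/* Ops that the TLV interface does not provide are left unset; the wrappers
 * in wmi-ops.h treat a missing op as unsupported (typically -EOPNOTSUPP).
 */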
4518static const struct wmi_ops wmi_tlv_ops = {
4519	.rx = ath10k_wmi_tlv_op_rx,
4520	.map_svc = wmi_tlv_svc_map,
4521	.map_svc_ext = wmi_tlv_svc_map_ext,
4522
4523	.pull_scan = ath10k_wmi_tlv_op_pull_scan_ev,
4524	.pull_mgmt_rx = ath10k_wmi_tlv_op_pull_mgmt_rx_ev,
4525	.pull_mgmt_tx_compl = ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev,
4526	.pull_mgmt_tx_bundle_compl = ath10k_wmi_tlv_op_pull_mgmt_tx_bundle_compl_ev,
4527	.pull_ch_info = ath10k_wmi_tlv_op_pull_ch_info_ev,
4528	.pull_vdev_start = ath10k_wmi_tlv_op_pull_vdev_start_ev,
4529	.pull_peer_kick = ath10k_wmi_tlv_op_pull_peer_kick_ev,
4530	.pull_swba = ath10k_wmi_tlv_op_pull_swba_ev,
4531	.pull_phyerr_hdr = ath10k_wmi_tlv_op_pull_phyerr_ev_hdr,
4532	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
4533	.pull_svc_rdy = ath10k_wmi_tlv_op_pull_svc_rdy_ev,
4534	.pull_rdy = ath10k_wmi_tlv_op_pull_rdy_ev,
4535	.pull_svc_avail = ath10k_wmi_tlv_op_pull_svc_avail,
4536	.pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats,
4537	.pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev,
4538	.pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev,
4539	.pull_echo_ev = ath10k_wmi_tlv_op_pull_echo_ev,
4540	.get_txbf_conf_scheme = ath10k_wmi_tlv_txbf_conf_scheme,
4541
4542	.gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend,
4543	.gen_pdev_resume = ath10k_wmi_tlv_op_gen_pdev_resume,
4544	.gen_pdev_set_rd = ath10k_wmi_tlv_op_gen_pdev_set_rd,
4545	.gen_pdev_set_param = ath10k_wmi_tlv_op_gen_pdev_set_param,
4546	.gen_init = ath10k_wmi_tlv_op_gen_init,
4547	.gen_start_scan = ath10k_wmi_tlv_op_gen_start_scan,
4548	.gen_stop_scan = ath10k_wmi_tlv_op_gen_stop_scan,
4549	.gen_vdev_create = ath10k_wmi_tlv_op_gen_vdev_create,
4550	.gen_vdev_delete = ath10k_wmi_tlv_op_gen_vdev_delete,
4551	.gen_vdev_start = ath10k_wmi_tlv_op_gen_vdev_start,
4552	.gen_vdev_stop = ath10k_wmi_tlv_op_gen_vdev_stop,
4553	.gen_vdev_up = ath10k_wmi_tlv_op_gen_vdev_up,
4554	.gen_vdev_down = ath10k_wmi_tlv_op_gen_vdev_down,
4555	.gen_vdev_set_param = ath10k_wmi_tlv_op_gen_vdev_set_param,
4556	.gen_vdev_install_key = ath10k_wmi_tlv_op_gen_vdev_install_key,
4557	.gen_vdev_wmm_conf = ath10k_wmi_tlv_op_gen_vdev_wmm_conf,
4558	.gen_peer_create = ath10k_wmi_tlv_op_gen_peer_create,
4559	.gen_peer_delete = ath10k_wmi_tlv_op_gen_peer_delete,
4560	.gen_peer_flush = ath10k_wmi_tlv_op_gen_peer_flush,
4561	.gen_peer_set_param = ath10k_wmi_tlv_op_gen_peer_set_param,
4562	.gen_peer_assoc = ath10k_wmi_tlv_op_gen_peer_assoc,
4563	.gen_set_psmode = ath10k_wmi_tlv_op_gen_set_psmode,
4564	.gen_set_sta_ps = ath10k_wmi_tlv_op_gen_set_sta_ps,
4565	.gen_set_ap_ps = ath10k_wmi_tlv_op_gen_set_ap_ps,
4566	.gen_scan_chan_list = ath10k_wmi_tlv_op_gen_scan_chan_list,
4567	.gen_scan_prob_req_oui = ath10k_wmi_tlv_op_gen_scan_prob_req_oui,
4568	.gen_beacon_dma = ath10k_wmi_tlv_op_gen_beacon_dma,
4569	.gen_pdev_set_wmm = ath10k_wmi_tlv_op_gen_pdev_set_wmm,
4570	.gen_request_stats = ath10k_wmi_tlv_op_gen_request_stats,
4571	.gen_request_peer_stats_info = ath10k_wmi_tlv_op_gen_request_peer_stats_info,
4572	.gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang,
4573	/* .gen_mgmt_tx = not implemented; HTT is used */
4574	.gen_mgmt_tx_send = ath10k_wmi_tlv_op_gen_mgmt_tx_send,
4575	.cleanup_mgmt_tx_send = ath10k_wmi_tlv_op_cleanup_mgmt_tx_send,
4576	.gen_dbglog_cfg = ath10k_wmi_tlv_op_gen_dbglog_cfg,
4577	.gen_pktlog_enable = ath10k_wmi_tlv_op_gen_pktlog_enable,
4578	.gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable,
4579	.gen_pdev_set_quiet_mode = ath10k_wmi_tlv_op_gen_pdev_set_quiet_mode,
4580	.gen_pdev_get_temperature = ath10k_wmi_tlv_op_gen_pdev_get_temperature,
4581	/* .gen_addba_clear_resp not implemented */
4582	/* .gen_addba_send not implemented */
4583	/* .gen_addba_set_resp not implemented */
4584	/* .gen_delba_send not implemented */
4585	.gen_bcn_tmpl = ath10k_wmi_tlv_op_gen_bcn_tmpl,
4586	.gen_prb_tmpl = ath10k_wmi_tlv_op_gen_prb_tmpl,
4587	.gen_p2p_go_bcn_ie = ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie,
4588	.gen_vdev_sta_uapsd = ath10k_wmi_tlv_op_gen_vdev_sta_uapsd,
4589	.gen_sta_keepalive = ath10k_wmi_tlv_op_gen_sta_keepalive,
4590	.gen_wow_enable = ath10k_wmi_tlv_op_gen_wow_enable,
4591	.gen_wow_add_wakeup_event = ath10k_wmi_tlv_op_gen_wow_add_wakeup_event,
4592	.gen_wow_host_wakeup_ind = ath10k_wmi_tlv_gen_wow_host_wakeup_ind,
4593	.gen_wow_add_pattern = ath10k_wmi_tlv_op_gen_wow_add_pattern,
4594	.gen_wow_del_pattern = ath10k_wmi_tlv_op_gen_wow_del_pattern,
4595	.gen_wow_config_pno = ath10k_wmi_tlv_op_gen_config_pno,
4596	.gen_update_fw_tdls_state = ath10k_wmi_tlv_op_gen_update_fw_tdls_state,
4597	.gen_tdls_peer_update = ath10k_wmi_tlv_op_gen_tdls_peer_update,
4598	.gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs,
4599	.fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
4600	.get_vdev_subtype = ath10k_wmi_tlv_op_get_vdev_subtype,
4601	.gen_echo = ath10k_wmi_tlv_op_gen_echo,
4602	.gen_vdev_spectral_conf = ath10k_wmi_tlv_op_gen_vdev_spectral_conf,
4603	.gen_vdev_spectral_enable = ath10k_wmi_tlv_op_gen_vdev_spectral_enable,
4604};
4605
4606static const struct wmi_peer_flags_map wmi_tlv_peer_flags_map = {
4607	.auth = WMI_TLV_PEER_AUTH,
4608	.qos = WMI_TLV_PEER_QOS,
4609	.need_ptk_4_way = WMI_TLV_PEER_NEED_PTK_4_WAY,
4610	.need_gtk_2_way = WMI_TLV_PEER_NEED_GTK_2_WAY,
4611	.apsd = WMI_TLV_PEER_APSD,
4612	.ht = WMI_TLV_PEER_HT,
4613	.bw40 = WMI_TLV_PEER_40MHZ,
4614	.stbc = WMI_TLV_PEER_STBC,
4615	.ldbc = WMI_TLV_PEER_LDPC,
4616	.dyn_mimops = WMI_TLV_PEER_DYN_MIMOPS,
4617	.static_mimops = WMI_TLV_PEER_STATIC_MIMOPS,
4618	.spatial_mux = WMI_TLV_PEER_SPATIAL_MUX,
4619	.vht = WMI_TLV_PEER_VHT,
4620	.bw80 = WMI_TLV_PEER_80MHZ,
4621	.pmf = WMI_TLV_PEER_PMF,
4622	.bw160 = WMI_TLV_PEER_160MHZ,
4623};
4624
4625/************/
4626/* TLV init */
4627/************/
4628
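/* Hook the TLV command/parameter maps and op table into the core WMI layer
 * so that all WMI traffic for TLV firmware goes through the encoders above.
 */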
4629void ath10k_wmi_tlv_attach(struct ath10k *ar)
4630{
4631	ar->wmi.cmd = &wmi_tlv_cmd_map;
4632	ar->wmi.vdev_param = &wmi_tlv_vdev_param_map;
4633	ar->wmi.pdev_param = &wmi_tlv_pdev_param_map;
4634	ar->wmi.peer_param = &wmi_tlv_peer_param_map;
4635	ar->wmi.ops = &wmi_tlv_ops;
4636	ar->wmi.peer_flags = &wmi_tlv_peer_flags_map;
4637}