   1// SPDX-License-Identifier: ISC
   2/*
   3 * Copyright (c) 2005-2011 Atheros Communications Inc.
   4 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
   5 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
   6 */
   7#include "core.h"
   8#include "debug.h"
   9#include "mac.h"
  10#include "hw.h"
  11#include "wmi.h"
  12#include "wmi-ops.h"
  13#include "wmi-tlv.h"
  14#include "p2p.h"
  15#include "testmode.h"
  16#include <linux/bitfield.h>
  17
  18/***************/
  19/* TLV helpers */
   20/***************/
  21
  22struct wmi_tlv_policy {
  23	size_t min_len;
  24};
  25
  26static const struct wmi_tlv_policy wmi_tlv_policies[] = {
  27	[WMI_TLV_TAG_ARRAY_BYTE]
  28		= { .min_len = 0 },
  29	[WMI_TLV_TAG_ARRAY_UINT32]
  30		= { .min_len = 0 },
  31	[WMI_TLV_TAG_STRUCT_SCAN_EVENT]
  32		= { .min_len = sizeof(struct wmi_scan_event) },
  33	[WMI_TLV_TAG_STRUCT_MGMT_RX_HDR]
  34		= { .min_len = sizeof(struct wmi_tlv_mgmt_rx_ev) },
  35	[WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT]
  36		= { .min_len = sizeof(struct wmi_chan_info_event) },
  37	[WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT]
  38		= { .min_len = sizeof(struct wmi_vdev_start_response_event) },
  39	[WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT]
  40		= { .min_len = sizeof(struct wmi_peer_sta_kickout_event) },
  41	[WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT]
  42		= { .min_len = sizeof(struct wmi_host_swba_event) },
  43	[WMI_TLV_TAG_STRUCT_TIM_INFO]
  44		= { .min_len = sizeof(struct wmi_tim_info) },
  45	[WMI_TLV_TAG_STRUCT_P2P_NOA_INFO]
  46		= { .min_len = sizeof(struct wmi_p2p_noa_info) },
  47	[WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT]
  48		= { .min_len = sizeof(struct wmi_tlv_svc_rdy_ev) },
  49	[WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES]
  50		= { .min_len = sizeof(struct hal_reg_capabilities) },
  51	[WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ]
  52		= { .min_len = sizeof(struct wlan_host_mem_req) },
  53	[WMI_TLV_TAG_STRUCT_READY_EVENT]
  54		= { .min_len = sizeof(struct wmi_tlv_rdy_ev) },
  55	[WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT]
  56		= { .min_len = sizeof(struct wmi_tlv_bcn_tx_status_ev) },
  57	[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT]
  58		= { .min_len = sizeof(struct wmi_tlv_diag_data_ev) },
  59	[WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT]
  60		= { .min_len = sizeof(struct wmi_tlv_p2p_noa_ev) },
  61	[WMI_TLV_TAG_STRUCT_ROAM_EVENT]
  62		= { .min_len = sizeof(struct wmi_tlv_roam_ev) },
  63	[WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO]
  64		= { .min_len = sizeof(struct wmi_tlv_wow_event_info) },
  65	[WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT]
  66		= { .min_len = sizeof(struct wmi_tlv_tx_pause_ev) },
  67};
  68
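/* Walk a buffer of WMI TLVs: validate each header against the bytes that
 * remain and against the per-tag minimum length in wmi_tlv_policies[], then
 * hand tag, length and payload to the iter callback. Iteration stops at the
 * first error returned by the callback.
 */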
  69static int
  70ath10k_wmi_tlv_iter(struct ath10k *ar, const void *ptr, size_t len,
  71		    int (*iter)(struct ath10k *ar, u16 tag, u16 len,
  72				const void *ptr, void *data),
  73		    void *data)
  74{
  75	const void *begin = ptr;
  76	const struct wmi_tlv *tlv;
  77	u16 tlv_tag, tlv_len;
  78	int ret;
  79
  80	while (len > 0) {
  81		if (len < sizeof(*tlv)) {
  82			ath10k_dbg(ar, ATH10K_DBG_WMI,
  83				   "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
  84				   ptr - begin, len, sizeof(*tlv));
  85			return -EINVAL;
  86		}
  87
  88		tlv = ptr;
  89		tlv_tag = __le16_to_cpu(tlv->tag);
  90		tlv_len = __le16_to_cpu(tlv->len);
  91		ptr += sizeof(*tlv);
  92		len -= sizeof(*tlv);
  93
  94		if (tlv_len > len) {
  95			ath10k_dbg(ar, ATH10K_DBG_WMI,
  96				   "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
  97				   tlv_tag, ptr - begin, len, tlv_len);
  98			return -EINVAL;
  99		}
 100
 101		if (tlv_tag < ARRAY_SIZE(wmi_tlv_policies) &&
 102		    wmi_tlv_policies[tlv_tag].min_len &&
 103		    wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
 104			ath10k_dbg(ar, ATH10K_DBG_WMI,
 105				   "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n",
 106				   tlv_tag, ptr - begin, tlv_len,
 107				   wmi_tlv_policies[tlv_tag].min_len);
 108			return -EINVAL;
 109		}
 110
 111		ret = iter(ar, tlv_tag, tlv_len, ptr, data);
 112		if (ret)
 113			return ret;
 114
 115		ptr += tlv_len;
 116		len -= tlv_len;
 117	}
 118
 119	return 0;
 120}
 121
 122static int ath10k_wmi_tlv_iter_parse(struct ath10k *ar, u16 tag, u16 len,
 123				     const void *ptr, void *data)
 124{
 125	const void **tb = data;
 126
 127	if (tag < WMI_TLV_TAG_MAX)
 128		tb[tag] = ptr;
 129
 130	return 0;
 131}
 132
 133static int ath10k_wmi_tlv_parse(struct ath10k *ar, const void **tb,
 134				const void *ptr, size_t len)
 135{
 136	return ath10k_wmi_tlv_iter(ar, ptr, len, ath10k_wmi_tlv_iter_parse,
 137				   (void *)tb);
 138}
 139
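/* Build a tag-indexed table of payload pointers for a single WMI message.
 * Returns an ERR_PTR() on failure; on success the caller owns the table and
 * must kfree() it when done.
 */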
 140static const void **
 141ath10k_wmi_tlv_parse_alloc(struct ath10k *ar, const void *ptr,
 142			   size_t len, gfp_t gfp)
 143{
 144	const void **tb;
 145	int ret;
 146
 147	tb = kcalloc(WMI_TLV_TAG_MAX, sizeof(*tb), gfp);
 148	if (!tb)
 149		return ERR_PTR(-ENOMEM);
 150
 151	ret = ath10k_wmi_tlv_parse(ar, tb, ptr, len);
 152	if (ret) {
 153		kfree(tb);
 154		return ERR_PTR(ret);
 155	}
 156
 157	return tb;
 158}
 159
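/* Pointers stored by the parser reference TLV payloads, so the length is
 * read from the wmi_tlv header sitting just before the payload.
 */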
 160static u16 ath10k_wmi_tlv_len(const void *ptr)
 161{
 162	return __le16_to_cpu((((const struct wmi_tlv *)ptr) - 1)->len);
 163}
 164
 165/**************/
 166/* TLV events */
 167/**************/
 168static int ath10k_wmi_tlv_event_bcn_tx_status(struct ath10k *ar,
 169					      struct sk_buff *skb)
 170{
 171	const void **tb;
 172	const struct wmi_tlv_bcn_tx_status_ev *ev;
 173	struct ath10k_vif *arvif;
 174	u32 vdev_id, tx_status;
 175	int ret;
 176
 177	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
 178	if (IS_ERR(tb)) {
 179		ret = PTR_ERR(tb);
 180		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
 181		return ret;
 182	}
 183
 184	ev = tb[WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT];
 185	if (!ev) {
 186		kfree(tb);
 187		return -EPROTO;
 188	}
 189
 190	tx_status = __le32_to_cpu(ev->tx_status);
 191	vdev_id = __le32_to_cpu(ev->vdev_id);
 192
 193	switch (tx_status) {
 194	case WMI_TLV_BCN_TX_STATUS_OK:
 195		break;
 196	case WMI_TLV_BCN_TX_STATUS_XRETRY:
 197	case WMI_TLV_BCN_TX_STATUS_DROP:
 198	case WMI_TLV_BCN_TX_STATUS_FILTERED:
 199		/* FIXME: It's probably worth telling mac80211 to stop the
 200		 * interface as it is crippled.
 201		 */
 202		ath10k_warn(ar, "received bcn tmpl tx status on vdev %i: %d",
 203			    vdev_id, tx_status);
 204		break;
 205	}
 206
 207	arvif = ath10k_get_arvif(ar, vdev_id);
 208	if (arvif && arvif->is_up && arvif->vif->bss_conf.csa_active)
 209		ieee80211_queue_work(ar->hw, &arvif->ap_csa_work);
 210
 211	kfree(tb);
 212	return 0;
 213}
 214
 215static void ath10k_wmi_tlv_event_vdev_delete_resp(struct ath10k *ar,
 216						  struct sk_buff *skb)
 217{
 218	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_DELETE_RESP_EVENTID\n");
 219	complete(&ar->vdev_delete_done);
 220}
 221
 222static int ath10k_wmi_tlv_parse_peer_stats_info(struct ath10k *ar, u16 tag, u16 len,
 223						const void *ptr, void *data)
 224{
 225	const struct wmi_tlv_peer_stats_info *stat = ptr;
 226	struct ieee80211_sta *sta;
 227	struct ath10k_sta *arsta;
 228
 229	if (tag != WMI_TLV_TAG_STRUCT_PEER_STATS_INFO)
 230		return -EPROTO;
 231
 232	ath10k_dbg(ar, ATH10K_DBG_WMI,
 233		   "wmi tlv stats peer addr %pMF rx rate code 0x%x bit rate %d kbps\n",
 234		   stat->peer_macaddr.addr,
 235		   __le32_to_cpu(stat->last_rx_rate_code),
 236		   __le32_to_cpu(stat->last_rx_bitrate_kbps));
 237
 238	ath10k_dbg(ar, ATH10K_DBG_WMI,
 239		   "wmi tlv stats tx rate code 0x%x bit rate %d kbps\n",
 240		   __le32_to_cpu(stat->last_tx_rate_code),
 241		   __le32_to_cpu(stat->last_tx_bitrate_kbps));
 242
 243	rcu_read_lock();
 244	sta = ieee80211_find_sta_by_ifaddr(ar->hw, stat->peer_macaddr.addr, NULL);
 245	if (!sta) {
 246		rcu_read_unlock();
 247		ath10k_warn(ar, "not found station for peer stats\n");
 248		return -EINVAL;
 249	}
 250
 251	arsta = (struct ath10k_sta *)sta->drv_priv;
 252	arsta->rx_rate_code = __le32_to_cpu(stat->last_rx_rate_code);
 253	arsta->rx_bitrate_kbps = __le32_to_cpu(stat->last_rx_bitrate_kbps);
 254	arsta->tx_rate_code = __le32_to_cpu(stat->last_tx_rate_code);
 255	arsta->tx_bitrate_kbps = __le32_to_cpu(stat->last_tx_bitrate_kbps);
 256	rcu_read_unlock();
 257
 258	return 0;
 259}
 260
 261static int ath10k_wmi_tlv_op_pull_peer_stats_info(struct ath10k *ar,
 262						  struct sk_buff *skb)
 263{
 264	const void **tb;
 265	const struct wmi_tlv_peer_stats_info_ev *ev;
 266	const void *data;
 267	u32 num_peer_stats;
 268	int ret;
 269
 270	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
 271	if (IS_ERR(tb)) {
 272		ret = PTR_ERR(tb);
 273		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
 274		return ret;
 275	}
 276
 277	ev = tb[WMI_TLV_TAG_STRUCT_PEER_STATS_INFO_EVENT];
 278	data = tb[WMI_TLV_TAG_ARRAY_STRUCT];
 279
 280	if (!ev || !data) {
 281		kfree(tb);
 282		return -EPROTO;
 283	}
 284
 285	num_peer_stats = __le32_to_cpu(ev->num_peers);
 286
 287	ath10k_dbg(ar, ATH10K_DBG_WMI,
 288		   "wmi tlv peer stats info update peer vdev id %d peers %i more data %d\n",
 289		   __le32_to_cpu(ev->vdev_id),
 290		   num_peer_stats,
 291		   __le32_to_cpu(ev->more_data));
 292
 293	ret = ath10k_wmi_tlv_iter(ar, data, ath10k_wmi_tlv_len(data),
 294				  ath10k_wmi_tlv_parse_peer_stats_info, NULL);
 295	if (ret)
 296		ath10k_warn(ar, "failed to parse stats info tlv: %d\n", ret);
 297
 298	kfree(tb);
 299	return 0;
 300}
 301
 302static void ath10k_wmi_tlv_event_peer_stats_info(struct ath10k *ar,
 303						 struct sk_buff *skb)
 304{
 305	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PEER_STATS_INFO_EVENTID\n");
 306	ath10k_wmi_tlv_op_pull_peer_stats_info(ar, skb);
 307	complete(&ar->peer_stats_info_complete);
 308}
 309
 310static int ath10k_wmi_tlv_event_diag_data(struct ath10k *ar,
 311					  struct sk_buff *skb)
 312{
 313	const void **tb;
 314	const struct wmi_tlv_diag_data_ev *ev;
 315	const struct wmi_tlv_diag_item *item;
 316	const void *data;
 317	int ret, num_items, len;
 318
 319	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
 320	if (IS_ERR(tb)) {
 321		ret = PTR_ERR(tb);
 322		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
 323		return ret;
 324	}
 325
 326	ev = tb[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT];
 327	data = tb[WMI_TLV_TAG_ARRAY_BYTE];
 328	if (!ev || !data) {
 329		kfree(tb);
 330		return -EPROTO;
 331	}
 332
 333	num_items = __le32_to_cpu(ev->num_items);
 334	len = ath10k_wmi_tlv_len(data);
 335
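	/* Each diag item is a wmi_tlv_diag_item header followed by a payload
	 * padded to a 4-byte boundary.
	 */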
 336	while (num_items--) {
 337		if (len == 0)
 338			break;
 339		if (len < sizeof(*item)) {
 340			ath10k_warn(ar, "failed to parse diag data: can't fit item header\n");
 341			break;
 342		}
 343
 344		item = data;
 345
 346		if (len < sizeof(*item) + __le16_to_cpu(item->len)) {
 347			ath10k_warn(ar, "failed to parse diag data: item is too long\n");
 348			break;
 349		}
 350
 351		trace_ath10k_wmi_diag_container(ar,
 352						item->type,
 353						__le32_to_cpu(item->timestamp),
 354						__le32_to_cpu(item->code),
 355						__le16_to_cpu(item->len),
 356						item->payload);
 357
 358		len -= sizeof(*item);
 359		len -= roundup(__le16_to_cpu(item->len), 4);
 360
 361		data += sizeof(*item);
 362		data += roundup(__le16_to_cpu(item->len), 4);
 363	}
 364
 365	if (num_items != -1 || len != 0)
 366		ath10k_warn(ar, "failed to parse diag data event: num_items %d len %d\n",
 367			    num_items, len);
 368
 369	kfree(tb);
 370	return 0;
 371}
 372
 373static int ath10k_wmi_tlv_event_diag(struct ath10k *ar,
 374				     struct sk_buff *skb)
 375{
 376	const void **tb;
 377	const void *data;
 378	int ret, len;
 379
 380	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
 381	if (IS_ERR(tb)) {
 382		ret = PTR_ERR(tb);
 383		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
 384		return ret;
 385	}
 386
 387	data = tb[WMI_TLV_TAG_ARRAY_BYTE];
 388	if (!data) {
 389		kfree(tb);
 390		return -EPROTO;
 391	}
 392	len = ath10k_wmi_tlv_len(data);
 393
 394	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv diag event len %d\n", len);
 395	trace_ath10k_wmi_diag(ar, data, len);
 396
 397	kfree(tb);
 398	return 0;
 399}
 400
 401static int ath10k_wmi_tlv_event_p2p_noa(struct ath10k *ar,
 402					struct sk_buff *skb)
 403{
 404	const void **tb;
 405	const struct wmi_tlv_p2p_noa_ev *ev;
 406	const struct wmi_p2p_noa_info *noa;
 407	int ret, vdev_id;
 408
 409	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
 410	if (IS_ERR(tb)) {
 411		ret = PTR_ERR(tb);
 412		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
 413		return ret;
 414	}
 415
 416	ev = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT];
 417	noa = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_INFO];
 418
 419	if (!ev || !noa) {
 420		kfree(tb);
 421		return -EPROTO;
 422	}
 423
 424	vdev_id = __le32_to_cpu(ev->vdev_id);
 425
 426	ath10k_dbg(ar, ATH10K_DBG_WMI,
 427		   "wmi tlv p2p noa vdev_id %i descriptors %u\n",
 428		   vdev_id, noa->num_descriptors);
 429
 430	ath10k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa);
 431	kfree(tb);
 432	return 0;
 433}
 434
 435static int ath10k_wmi_tlv_event_tx_pause(struct ath10k *ar,
 436					 struct sk_buff *skb)
 437{
 438	const void **tb;
 439	const struct wmi_tlv_tx_pause_ev *ev;
 440	int ret, vdev_id;
 441	u32 pause_id, action, vdev_map, peer_id, tid_map;
 442
 443	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
 444	if (IS_ERR(tb)) {
 445		ret = PTR_ERR(tb);
 446		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
 447		return ret;
 448	}
 449
 450	ev = tb[WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT];
 451	if (!ev) {
 452		kfree(tb);
 453		return -EPROTO;
 454	}
 455
 456	pause_id = __le32_to_cpu(ev->pause_id);
 457	action = __le32_to_cpu(ev->action);
 458	vdev_map = __le32_to_cpu(ev->vdev_map);
 459	peer_id = __le32_to_cpu(ev->peer_id);
 460	tid_map = __le32_to_cpu(ev->tid_map);
 461
 462	ath10k_dbg(ar, ATH10K_DBG_WMI,
 463		   "wmi tlv tx pause pause_id %u action %u vdev_map 0x%08x peer_id %u tid_map 0x%08x\n",
 464		   pause_id, action, vdev_map, peer_id, tid_map);
 465
 466	switch (pause_id) {
 467	case WMI_TLV_TX_PAUSE_ID_MCC:
 468	case WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA:
 469	case WMI_TLV_TX_PAUSE_ID_P2P_GO_PS:
 470	case WMI_TLV_TX_PAUSE_ID_AP_PS:
 471	case WMI_TLV_TX_PAUSE_ID_IBSS_PS:
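		/* vdev_map is a bitmap of affected vdevs; handle each vdev
		 * whose bit is set.
		 */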
 472		for (vdev_id = 0; vdev_map; vdev_id++) {
 473			if (!(vdev_map & BIT(vdev_id)))
 474				continue;
 475
 476			vdev_map &= ~BIT(vdev_id);
 477			ath10k_mac_handle_tx_pause_vdev(ar, vdev_id, pause_id,
 478							action);
 479		}
 480		break;
 481	case WMI_TLV_TX_PAUSE_ID_AP_PEER_PS:
 482	case WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD:
 483	case WMI_TLV_TX_PAUSE_ID_STA_ADD_BA:
 484	case WMI_TLV_TX_PAUSE_ID_HOST:
 485		ath10k_dbg(ar, ATH10K_DBG_MAC,
 486			   "mac ignoring unsupported tx pause id %d\n",
 487			   pause_id);
 488		break;
 489	default:
 490		ath10k_dbg(ar, ATH10K_DBG_MAC,
 491			   "mac ignoring unknown tx pause vdev %d\n",
 492			   pause_id);
 493		break;
 494	}
 495
 496	kfree(tb);
 497	return 0;
 498}
 499
 500static void ath10k_wmi_tlv_event_rfkill_state_change(struct ath10k *ar,
 501						     struct sk_buff *skb)
 502{
 503	const struct wmi_tlv_rfkill_state_change_ev *ev;
 504	const void **tb;
 505	bool radio;
 506	int ret;
 507
 508	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
 509	if (IS_ERR(tb)) {
 510		ret = PTR_ERR(tb);
 511		ath10k_warn(ar,
 512			    "failed to parse rfkill state change event: %d\n",
 513			    ret);
 514		return;
 515	}
 516
 517	ev = tb[WMI_TLV_TAG_STRUCT_RFKILL_EVENT];
 518	if (!ev) {
 519		kfree(tb);
 520		return;
 521	}
 522
 523	ath10k_dbg(ar, ATH10K_DBG_MAC,
 524		   "wmi tlv rfkill state change gpio %d type %d radio_state %d\n",
 525		   __le32_to_cpu(ev->gpio_pin_num),
 526		   __le32_to_cpu(ev->int_type),
 527		   __le32_to_cpu(ev->radio_state));
 528
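	/* Any state other than RADIO_STATE_ON is treated as rfkill being
	 * asserted in hardware.
	 */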
 529	radio = (__le32_to_cpu(ev->radio_state) == WMI_TLV_RFKILL_RADIO_STATE_ON);
 530
 531	spin_lock_bh(&ar->data_lock);
 532
 533	if (!radio)
 534		ar->hw_rfkill_on = true;
 535
 536	spin_unlock_bh(&ar->data_lock);
 537
 538	/* notify cfg80211 radio state change */
 539	ath10k_mac_rfkill_enable_radio(ar, radio);
 540	wiphy_rfkill_set_hw_state(ar->hw->wiphy, !radio);
 541}
 542
 543static int ath10k_wmi_tlv_event_temperature(struct ath10k *ar,
 544					    struct sk_buff *skb)
 545{
 546	const struct wmi_tlv_pdev_temperature_event *ev;
 547
 548	ev = (struct wmi_tlv_pdev_temperature_event *)skb->data;
 549	if (WARN_ON(skb->len < sizeof(*ev)))
 550		return -EPROTO;
 551
 552	ath10k_thermal_event_temperature(ar, __le32_to_cpu(ev->temperature));
 553	return 0;
 554}
 555
 556static void ath10k_wmi_event_tdls_peer(struct ath10k *ar, struct sk_buff *skb)
 557{
 558	struct ieee80211_sta *station;
 559	const struct wmi_tlv_tdls_peer_event *ev;
 560	const void **tb;
 561	struct ath10k_vif *arvif;
 562
 563	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
 564	if (IS_ERR(tb)) {
 565		ath10k_warn(ar, "tdls peer failed to parse tlv");
 566		return;
 567	}
 568	ev = tb[WMI_TLV_TAG_STRUCT_TDLS_PEER_EVENT];
 569	if (!ev) {
 570		kfree(tb);
 571		ath10k_warn(ar, "tdls peer NULL event");
 572		return;
 573	}
 574
 575	switch (__le32_to_cpu(ev->peer_reason)) {
 576	case WMI_TDLS_TEARDOWN_REASON_TX:
 577	case WMI_TDLS_TEARDOWN_REASON_RSSI:
 578	case WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT:
 579		rcu_read_lock();
 580		station = ieee80211_find_sta_by_ifaddr(ar->hw,
 581						       ev->peer_macaddr.addr,
 582						       NULL);
 583		if (!station) {
 584			ath10k_warn(ar, "did not find station from tdls peer event");
 585			goto exit;
 586		}
 587
 588		arvif = ath10k_get_arvif(ar, __le32_to_cpu(ev->vdev_id));
 589		if (!arvif) {
 590			ath10k_warn(ar, "no vif for vdev_id %d found",
 591				    __le32_to_cpu(ev->vdev_id));
 592			goto exit;
 593		}
 594
 595		ieee80211_tdls_oper_request(
 596					arvif->vif, station->addr,
 597					NL80211_TDLS_TEARDOWN,
 598					WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE,
 599					GFP_ATOMIC
 600					);
 601		break;
 602	default:
 603		kfree(tb);
 604		return;
 605	}
 606
 607exit:
 608	rcu_read_unlock();
 609	kfree(tb);
 610}
 611
 612static int ath10k_wmi_tlv_event_peer_delete_resp(struct ath10k *ar,
 613						 struct sk_buff *skb)
 614{
 615	struct wmi_peer_delete_resp_ev_arg *arg;
 616	struct wmi_tlv *tlv_hdr;
 617
 618	tlv_hdr = (struct wmi_tlv *)skb->data;
 619	arg = (struct wmi_peer_delete_resp_ev_arg *)tlv_hdr->value;
 620
 621	ath10k_dbg(ar, ATH10K_DBG_WMI, "vdev id %d", arg->vdev_id);
 622	ath10k_dbg(ar, ATH10K_DBG_WMI, "peer mac addr %pM", &arg->peer_addr);
 623	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer delete response\n");
 624
 625	complete(&ar->peer_delete_done);
 626
 627	return 0;
 628}
 629
 630/***********/
 631/* TLV ops */
 632/***********/
 633
 634static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
 635{
 636	struct wmi_cmd_hdr *cmd_hdr;
 637	enum wmi_tlv_event_id id;
 638	bool consumed;
 639
 640	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
 641	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
 642
 643	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
 644		goto out;
 645
 646	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
 647
 648	consumed = ath10k_tm_event_wmi(ar, id, skb);
 649
  650	/* The ready event must be handled normally even in UTF mode so that
  651	 * we know the UTF firmware has booted; all other consumed events are
  652	 * simply handed off to testmode.
 653	 */
 654	if (consumed && id != WMI_TLV_READY_EVENTID) {
 655		ath10k_dbg(ar, ATH10K_DBG_WMI,
 656			   "wmi tlv testmode consumed 0x%x\n", id);
 657		goto out;
 658	}
 659
 660	switch (id) {
 661	case WMI_TLV_MGMT_RX_EVENTID:
 662		ath10k_wmi_event_mgmt_rx(ar, skb);
 663		/* mgmt_rx() owns the skb now! */
 664		return;
 665	case WMI_TLV_SCAN_EVENTID:
 666		ath10k_wmi_event_scan(ar, skb);
 667		break;
 668	case WMI_TLV_CHAN_INFO_EVENTID:
 669		ath10k_wmi_event_chan_info(ar, skb);
 670		break;
 671	case WMI_TLV_ECHO_EVENTID:
 672		ath10k_wmi_event_echo(ar, skb);
 673		break;
 674	case WMI_TLV_DEBUG_MESG_EVENTID:
 675		ath10k_wmi_event_debug_mesg(ar, skb);
 676		break;
 677	case WMI_TLV_UPDATE_STATS_EVENTID:
 678		ath10k_wmi_event_update_stats(ar, skb);
 679		break;
 680	case WMI_TLV_PEER_STATS_INFO_EVENTID:
 681		ath10k_wmi_tlv_event_peer_stats_info(ar, skb);
 682		break;
 683	case WMI_TLV_VDEV_START_RESP_EVENTID:
 684		ath10k_wmi_event_vdev_start_resp(ar, skb);
 685		break;
 686	case WMI_TLV_VDEV_STOPPED_EVENTID:
 687		ath10k_wmi_event_vdev_stopped(ar, skb);
 688		break;
 689	case WMI_TLV_VDEV_DELETE_RESP_EVENTID:
 690		ath10k_wmi_tlv_event_vdev_delete_resp(ar, skb);
 691		break;
 692	case WMI_TLV_PEER_STA_KICKOUT_EVENTID:
 693		ath10k_wmi_event_peer_sta_kickout(ar, skb);
 694		break;
 695	case WMI_TLV_HOST_SWBA_EVENTID:
 696		ath10k_wmi_event_host_swba(ar, skb);
 697		break;
 698	case WMI_TLV_TBTTOFFSET_UPDATE_EVENTID:
 699		ath10k_wmi_event_tbttoffset_update(ar, skb);
 700		break;
 701	case WMI_TLV_PHYERR_EVENTID:
 702		ath10k_wmi_event_phyerr(ar, skb);
 703		break;
 704	case WMI_TLV_ROAM_EVENTID:
 705		ath10k_wmi_event_roam(ar, skb);
 706		break;
 707	case WMI_TLV_PROFILE_MATCH:
 708		ath10k_wmi_event_profile_match(ar, skb);
 709		break;
 710	case WMI_TLV_DEBUG_PRINT_EVENTID:
 711		ath10k_wmi_event_debug_print(ar, skb);
 712		break;
 713	case WMI_TLV_PDEV_QVIT_EVENTID:
 714		ath10k_wmi_event_pdev_qvit(ar, skb);
 715		break;
 716	case WMI_TLV_WLAN_PROFILE_DATA_EVENTID:
 717		ath10k_wmi_event_wlan_profile_data(ar, skb);
 718		break;
 719	case WMI_TLV_RTT_MEASUREMENT_REPORT_EVENTID:
 720		ath10k_wmi_event_rtt_measurement_report(ar, skb);
 721		break;
 722	case WMI_TLV_TSF_MEASUREMENT_REPORT_EVENTID:
 723		ath10k_wmi_event_tsf_measurement_report(ar, skb);
 724		break;
 725	case WMI_TLV_RTT_ERROR_REPORT_EVENTID:
 726		ath10k_wmi_event_rtt_error_report(ar, skb);
 727		break;
 728	case WMI_TLV_WOW_WAKEUP_HOST_EVENTID:
 729		ath10k_wmi_event_wow_wakeup_host(ar, skb);
 730		break;
 731	case WMI_TLV_DCS_INTERFERENCE_EVENTID:
 732		ath10k_wmi_event_dcs_interference(ar, skb);
 733		break;
 734	case WMI_TLV_PDEV_TPC_CONFIG_EVENTID:
 735		ath10k_wmi_event_pdev_tpc_config(ar, skb);
 736		break;
 737	case WMI_TLV_PDEV_FTM_INTG_EVENTID:
 738		ath10k_wmi_event_pdev_ftm_intg(ar, skb);
 739		break;
 740	case WMI_TLV_GTK_OFFLOAD_STATUS_EVENTID:
 741		ath10k_wmi_event_gtk_offload_status(ar, skb);
 742		break;
 743	case WMI_TLV_GTK_REKEY_FAIL_EVENTID:
 744		ath10k_wmi_event_gtk_rekey_fail(ar, skb);
 745		break;
 746	case WMI_TLV_TX_DELBA_COMPLETE_EVENTID:
 747		ath10k_wmi_event_delba_complete(ar, skb);
 748		break;
 749	case WMI_TLV_TX_ADDBA_COMPLETE_EVENTID:
 750		ath10k_wmi_event_addba_complete(ar, skb);
 751		break;
 752	case WMI_TLV_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
 753		ath10k_wmi_event_vdev_install_key_complete(ar, skb);
 754		break;
 755	case WMI_TLV_SERVICE_READY_EVENTID:
 756		ath10k_wmi_event_service_ready(ar, skb);
 757		return;
 758	case WMI_TLV_READY_EVENTID:
 759		ath10k_wmi_event_ready(ar, skb);
 760		break;
 761	case WMI_TLV_SERVICE_AVAILABLE_EVENTID:
 762		ath10k_wmi_event_service_available(ar, skb);
 763		break;
 764	case WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID:
 765		ath10k_wmi_tlv_event_bcn_tx_status(ar, skb);
 766		break;
 767	case WMI_TLV_DIAG_DATA_CONTAINER_EVENTID:
 768		ath10k_wmi_tlv_event_diag_data(ar, skb);
 769		break;
 770	case WMI_TLV_DIAG_EVENTID:
 771		ath10k_wmi_tlv_event_diag(ar, skb);
 772		break;
 773	case WMI_TLV_P2P_NOA_EVENTID:
 774		ath10k_wmi_tlv_event_p2p_noa(ar, skb);
 775		break;
 776	case WMI_TLV_TX_PAUSE_EVENTID:
 777		ath10k_wmi_tlv_event_tx_pause(ar, skb);
 778		break;
 779	case WMI_TLV_RFKILL_STATE_CHANGE_EVENTID:
 780		ath10k_wmi_tlv_event_rfkill_state_change(ar, skb);
 781		break;
 782	case WMI_TLV_PDEV_TEMPERATURE_EVENTID:
 783		ath10k_wmi_tlv_event_temperature(ar, skb);
 784		break;
 785	case WMI_TLV_TDLS_PEER_EVENTID:
 786		ath10k_wmi_event_tdls_peer(ar, skb);
 787		break;
 788	case WMI_TLV_PEER_DELETE_RESP_EVENTID:
 789		ath10k_wmi_tlv_event_peer_delete_resp(ar, skb);
 790		break;
 791	case WMI_TLV_MGMT_TX_COMPLETION_EVENTID:
 792		ath10k_wmi_event_mgmt_tx_compl(ar, skb);
 793		break;
 794	case WMI_TLV_MGMT_TX_BUNDLE_COMPLETION_EVENTID:
 795		ath10k_wmi_event_mgmt_tx_bundle_compl(ar, skb);
 796		break;
 797	default:
 798		ath10k_dbg(ar, ATH10K_DBG_WMI, "Unknown eventid: %d\n", id);
 799		break;
 800	}
 801
 802out:
 803	dev_kfree_skb(skb);
 804}
 805
 806static int ath10k_wmi_tlv_op_pull_scan_ev(struct ath10k *ar,
 807					  struct sk_buff *skb,
 808					  struct wmi_scan_ev_arg *arg)
 809{
 810	const void **tb;
 811	const struct wmi_scan_event *ev;
 812	int ret;
 813
 814	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
 815	if (IS_ERR(tb)) {
 816		ret = PTR_ERR(tb);
 817		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
 818		return ret;
 819	}
 820
 821	ev = tb[WMI_TLV_TAG_STRUCT_SCAN_EVENT];
 822	if (!ev) {
 823		kfree(tb);
 824		return -EPROTO;
 825	}
 826
 827	arg->event_type = ev->event_type;
 828	arg->reason = ev->reason;
 829	arg->channel_freq = ev->channel_freq;
 830	arg->scan_req_id = ev->scan_req_id;
 831	arg->scan_id = ev->scan_id;
 832	arg->vdev_id = ev->vdev_id;
 833
 834	kfree(tb);
 835	return 0;
 836}
 837
 838static int
 839ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev(struct ath10k *ar, struct sk_buff *skb,
 840					struct wmi_tlv_mgmt_tx_compl_ev_arg *arg)
 841{
 842	const void **tb;
 843	const struct wmi_tlv_mgmt_tx_compl_ev *ev;
 844	int ret;
 845
 846	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
 847	if (IS_ERR(tb)) {
 848		ret = PTR_ERR(tb);
 849		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
 850		return ret;
 851	}
 852
  853	ev = tb[WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_EVENT];
	if (!ev) {
		kfree(tb);
		return -EPROTO;
	}
  854
 855	arg->desc_id = ev->desc_id;
 856	arg->status = ev->status;
 857	arg->pdev_id = ev->pdev_id;
 858	arg->ppdu_id = ev->ppdu_id;
 859
 860	if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
 861		arg->ack_rssi = ev->ack_rssi;
 862
 863	kfree(tb);
 864	return 0;
 865}
 866
 867struct wmi_tlv_tx_bundle_compl_parse {
 868	const __le32 *num_reports;
 869	const __le32 *desc_ids;
 870	const __le32 *status;
 871	const __le32 *ppdu_ids;
 872	const __le32 *ack_rssi;
 873	bool desc_ids_done;
 874	bool status_done;
 875	bool ppdu_ids_done;
 876	bool ack_rssi_done;
 877};
 878
 879static int
 880ath10k_wmi_tlv_mgmt_tx_bundle_compl_parse(struct ath10k *ar, u16 tag, u16 len,
 881					  const void *ptr, void *data)
 882{
 883	struct wmi_tlv_tx_bundle_compl_parse *bundle_tx_compl = data;
 884
 885	switch (tag) {
 886	case WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_BUNDLE_EVENT:
 887		bundle_tx_compl->num_reports = ptr;
 888		break;
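	/* The UINT32 arrays arrive in a fixed order: descriptor ids, then
	 * statuses, then ppdu ids, then (optionally) ack RSSI values.
	 */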
 889	case WMI_TLV_TAG_ARRAY_UINT32:
 890		if (!bundle_tx_compl->desc_ids_done) {
 891			bundle_tx_compl->desc_ids_done = true;
 892			bundle_tx_compl->desc_ids = ptr;
 893		} else if (!bundle_tx_compl->status_done) {
 894			bundle_tx_compl->status_done = true;
 895			bundle_tx_compl->status = ptr;
 896		} else if (!bundle_tx_compl->ppdu_ids_done) {
 897			bundle_tx_compl->ppdu_ids_done = true;
 898			bundle_tx_compl->ppdu_ids = ptr;
 899		} else if (!bundle_tx_compl->ack_rssi_done) {
 900			bundle_tx_compl->ack_rssi_done = true;
 901			bundle_tx_compl->ack_rssi = ptr;
 902		}
 903		break;
 904	default:
 905		break;
 906	}
 907	return 0;
 908}
 909
 910static int ath10k_wmi_tlv_op_pull_mgmt_tx_bundle_compl_ev(
 911				struct ath10k *ar, struct sk_buff *skb,
 912				struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg *arg)
 913{
 914	struct wmi_tlv_tx_bundle_compl_parse bundle_tx_compl = { };
 915	int ret;
 916
 917	ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
 918				  ath10k_wmi_tlv_mgmt_tx_bundle_compl_parse,
 919				  &bundle_tx_compl);
 920	if (ret) {
 921		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
 922		return ret;
 923	}
 924
 925	if (!bundle_tx_compl.num_reports || !bundle_tx_compl.desc_ids ||
 926	    !bundle_tx_compl.status)
 927		return -EPROTO;
 928
 929	arg->num_reports = *bundle_tx_compl.num_reports;
 930	arg->desc_ids = bundle_tx_compl.desc_ids;
 931	arg->status = bundle_tx_compl.status;
 932	arg->ppdu_ids = bundle_tx_compl.ppdu_ids;
 933
 934	if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
 935		arg->ack_rssi = bundle_tx_compl.ack_rssi;
 936
 937	return 0;
 938}
 939
 940static int ath10k_wmi_tlv_op_pull_mgmt_rx_ev(struct ath10k *ar,
 941					     struct sk_buff *skb,
 942					     struct wmi_mgmt_rx_ev_arg *arg)
 943{
 944	const void **tb;
 945	const struct wmi_tlv_mgmt_rx_ev *ev;
 946	const u8 *frame;
 947	u32 msdu_len;
 948	int ret, i;
 949
 950	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
 951	if (IS_ERR(tb)) {
 952		ret = PTR_ERR(tb);
 953		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
 954		return ret;
 955	}
 956
 957	ev = tb[WMI_TLV_TAG_STRUCT_MGMT_RX_HDR];
 958	frame = tb[WMI_TLV_TAG_ARRAY_BYTE];
 959
 960	if (!ev || !frame) {
 961		kfree(tb);
 962		return -EPROTO;
 963	}
 964
 965	arg->channel = ev->channel;
 966	arg->buf_len = ev->buf_len;
 967	arg->status = ev->status;
 968	arg->snr = ev->snr;
 969	arg->phy_mode = ev->phy_mode;
 970	arg->rate = ev->rate;
 971
 972	for (i = 0; i < ARRAY_SIZE(ev->rssi); i++)
 973		arg->rssi[i] = ev->rssi[i];
 974
 975	msdu_len = __le32_to_cpu(arg->buf_len);
 976
 977	if (skb->len < (frame - skb->data) + msdu_len) {
 978		kfree(tb);
 979		return -EPROTO;
 980	}
 981
 982	/* shift the sk_buff to point to `frame` */
 983	skb_trim(skb, 0);
 984	skb_put(skb, frame - skb->data);
 985	skb_pull(skb, frame - skb->data);
 986	skb_put(skb, msdu_len);
 987
 988	kfree(tb);
 989	return 0;
 990}
 991
 992static int ath10k_wmi_tlv_op_pull_ch_info_ev(struct ath10k *ar,
 993					     struct sk_buff *skb,
 994					     struct wmi_ch_info_ev_arg *arg)
 995{
 996	const void **tb;
 997	const struct wmi_tlv_chan_info_event *ev;
 998	int ret;
 999
1000	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1001	if (IS_ERR(tb)) {
1002		ret = PTR_ERR(tb);
1003		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1004		return ret;
1005	}
1006
1007	ev = tb[WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT];
1008	if (!ev) {
1009		kfree(tb);
1010		return -EPROTO;
1011	}
1012
1013	arg->err_code = ev->err_code;
1014	arg->freq = ev->freq;
1015	arg->cmd_flags = ev->cmd_flags;
1016	arg->noise_floor = ev->noise_floor;
1017	arg->rx_clear_count = ev->rx_clear_count;
1018	arg->cycle_count = ev->cycle_count;
1019	if (test_bit(ATH10K_FW_FEATURE_SINGLE_CHAN_INFO_PER_CHANNEL,
1020		     ar->running_fw->fw_file.fw_features))
1021		arg->mac_clk_mhz = ev->mac_clk_mhz;
1022
1023	kfree(tb);
1024	return 0;
1025}
1026
1027static int
1028ath10k_wmi_tlv_op_pull_vdev_start_ev(struct ath10k *ar, struct sk_buff *skb,
1029				     struct wmi_vdev_start_ev_arg *arg)
1030{
1031	const void **tb;
1032	const struct wmi_vdev_start_response_event *ev;
1033	int ret;
1034
1035	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1036	if (IS_ERR(tb)) {
1037		ret = PTR_ERR(tb);
1038		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1039		return ret;
1040	}
1041
1042	ev = tb[WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT];
1043	if (!ev) {
1044		kfree(tb);
1045		return -EPROTO;
1046	}
1047
1048	skb_pull(skb, sizeof(*ev));
1049	arg->vdev_id = ev->vdev_id;
1050	arg->req_id = ev->req_id;
1051	arg->resp_type = ev->resp_type;
1052	arg->status = ev->status;
1053
1054	kfree(tb);
1055	return 0;
1056}
1057
1058static int ath10k_wmi_tlv_op_pull_peer_kick_ev(struct ath10k *ar,
1059					       struct sk_buff *skb,
1060					       struct wmi_peer_kick_ev_arg *arg)
1061{
1062	const void **tb;
1063	const struct wmi_peer_sta_kickout_event *ev;
1064	int ret;
1065
1066	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1067	if (IS_ERR(tb)) {
1068		ret = PTR_ERR(tb);
1069		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1070		return ret;
1071	}
1072
1073	ev = tb[WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT];
1074	if (!ev) {
1075		kfree(tb);
1076		return -EPROTO;
1077	}
1078
1079	arg->mac_addr = ev->peer_macaddr.addr;
1080
1081	kfree(tb);
1082	return 0;
1083}
1084
1085struct wmi_tlv_swba_parse {
1086	const struct wmi_host_swba_event *ev;
1087	bool tim_done;
1088	bool noa_done;
1089	size_t n_tim;
1090	size_t n_noa;
1091	struct wmi_swba_ev_arg *arg;
1092};
1093
1094static int ath10k_wmi_tlv_swba_tim_parse(struct ath10k *ar, u16 tag, u16 len,
1095					 const void *ptr, void *data)
1096{
1097	struct wmi_tlv_swba_parse *swba = data;
1098	struct wmi_tim_info_arg *tim_info_arg;
1099	const struct wmi_tim_info *tim_info_ev = ptr;
1100
1101	if (tag != WMI_TLV_TAG_STRUCT_TIM_INFO)
1102		return -EPROTO;
1103
1104	if (swba->n_tim >= ARRAY_SIZE(swba->arg->tim_info))
1105		return -ENOBUFS;
1106
1107	if (__le32_to_cpu(tim_info_ev->tim_len) >
1108	     sizeof(tim_info_ev->tim_bitmap)) {
1109		ath10k_warn(ar, "refusing to parse invalid swba structure\n");
1110		return -EPROTO;
1111	}
1112
1113	tim_info_arg = &swba->arg->tim_info[swba->n_tim];
1114	tim_info_arg->tim_len = tim_info_ev->tim_len;
1115	tim_info_arg->tim_mcast = tim_info_ev->tim_mcast;
1116	tim_info_arg->tim_bitmap = tim_info_ev->tim_bitmap;
1117	tim_info_arg->tim_changed = tim_info_ev->tim_changed;
1118	tim_info_arg->tim_num_ps_pending = tim_info_ev->tim_num_ps_pending;
1119
1120	swba->n_tim++;
1121
1122	return 0;
1123}
1124
1125static int ath10k_wmi_tlv_swba_noa_parse(struct ath10k *ar, u16 tag, u16 len,
1126					 const void *ptr, void *data)
1127{
1128	struct wmi_tlv_swba_parse *swba = data;
1129
1130	if (tag != WMI_TLV_TAG_STRUCT_P2P_NOA_INFO)
1131		return -EPROTO;
1132
1133	if (swba->n_noa >= ARRAY_SIZE(swba->arg->noa_info))
1134		return -ENOBUFS;
1135
1136	swba->arg->noa_info[swba->n_noa++] = ptr;
1137	return 0;
1138}
1139
1140static int ath10k_wmi_tlv_swba_parse(struct ath10k *ar, u16 tag, u16 len,
1141				     const void *ptr, void *data)
1142{
1143	struct wmi_tlv_swba_parse *swba = data;
1144	int ret;
1145
1146	switch (tag) {
1147	case WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT:
1148		swba->ev = ptr;
1149		break;
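	/* Two ARRAY_STRUCT TLVs are expected in a fixed order: per-vdev TIM
	 * info first, then per-vdev P2P NoA info.
	 */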
1150	case WMI_TLV_TAG_ARRAY_STRUCT:
1151		if (!swba->tim_done) {
1152			swba->tim_done = true;
1153			ret = ath10k_wmi_tlv_iter(ar, ptr, len,
1154						  ath10k_wmi_tlv_swba_tim_parse,
1155						  swba);
1156			if (ret)
1157				return ret;
1158		} else if (!swba->noa_done) {
1159			swba->noa_done = true;
1160			ret = ath10k_wmi_tlv_iter(ar, ptr, len,
1161						  ath10k_wmi_tlv_swba_noa_parse,
1162						  swba);
1163			if (ret)
1164				return ret;
1165		}
1166		break;
1167	default:
1168		break;
1169	}
1170	return 0;
1171}
1172
1173static int ath10k_wmi_tlv_op_pull_swba_ev(struct ath10k *ar,
1174					  struct sk_buff *skb,
1175					  struct wmi_swba_ev_arg *arg)
1176{
1177	struct wmi_tlv_swba_parse swba = { .arg = arg };
1178	u32 map;
1179	size_t n_vdevs;
1180	int ret;
1181
1182	ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
1183				  ath10k_wmi_tlv_swba_parse, &swba);
1184	if (ret) {
1185		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1186		return ret;
1187	}
1188
1189	if (!swba.ev)
1190		return -EPROTO;
1191
1192	arg->vdev_map = swba.ev->vdev_map;
1193
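	/* Count the vdevs flagged in the beacon map; the firmware is expected
	 * to provide exactly one TIM and one NoA entry per flagged vdev.
	 */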
1194	for (map = __le32_to_cpu(arg->vdev_map), n_vdevs = 0; map; map >>= 1)
1195		if (map & BIT(0))
1196			n_vdevs++;
1197
1198	if (n_vdevs != swba.n_tim ||
1199	    n_vdevs != swba.n_noa)
1200		return -EPROTO;
1201
1202	return 0;
1203}
1204
1205static int ath10k_wmi_tlv_op_pull_phyerr_ev_hdr(struct ath10k *ar,
1206						struct sk_buff *skb,
1207						struct wmi_phyerr_hdr_arg *arg)
1208{
1209	const void **tb;
1210	const struct wmi_tlv_phyerr_ev *ev;
1211	const void *phyerrs;
1212	int ret;
1213
1214	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1215	if (IS_ERR(tb)) {
1216		ret = PTR_ERR(tb);
1217		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1218		return ret;
1219	}
1220
1221	ev = tb[WMI_TLV_TAG_STRUCT_COMB_PHYERR_RX_HDR];
1222	phyerrs = tb[WMI_TLV_TAG_ARRAY_BYTE];
1223
1224	if (!ev || !phyerrs) {
1225		kfree(tb);
1226		return -EPROTO;
1227	}
1228
1229	arg->num_phyerrs  = __le32_to_cpu(ev->num_phyerrs);
1230	arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32);
1231	arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32);
1232	arg->buf_len = __le32_to_cpu(ev->buf_len);
1233	arg->phyerrs = phyerrs;
1234
1235	kfree(tb);
1236	return 0;
1237}
1238
1239#define WMI_TLV_ABI_VER_NS0 0x5F414351
1240#define WMI_TLV_ABI_VER_NS1 0x00004C4D
1241#define WMI_TLV_ABI_VER_NS2 0x00000000
1242#define WMI_TLV_ABI_VER_NS3 0x00000000
1243
1244#define WMI_TLV_ABI_VER0_MAJOR 1
1245#define WMI_TLV_ABI_VER0_MINOR 0
1246#define WMI_TLV_ABI_VER0 ((((WMI_TLV_ABI_VER0_MAJOR) << 24) & 0xFF000000) | \
1247			  (((WMI_TLV_ABI_VER0_MINOR) <<  0) & 0x00FFFFFF))
1248#define WMI_TLV_ABI_VER1 53
1249
1250static int
1251ath10k_wmi_tlv_parse_mem_reqs(struct ath10k *ar, u16 tag, u16 len,
1252			      const void *ptr, void *data)
1253{
1254	struct wmi_svc_rdy_ev_arg *arg = data;
1255	int i;
1256
1257	if (tag != WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ)
1258		return -EPROTO;
1259
1260	for (i = 0; i < ARRAY_SIZE(arg->mem_reqs); i++) {
1261		if (!arg->mem_reqs[i]) {
1262			arg->mem_reqs[i] = ptr;
1263			return 0;
1264		}
1265	}
1266
1267	return -ENOMEM;
1268}
1269
1270struct wmi_tlv_svc_rdy_parse {
1271	const struct hal_reg_capabilities *reg;
1272	const struct wmi_tlv_svc_rdy_ev *ev;
1273	const __le32 *svc_bmap;
1274	const struct wlan_host_mem_req *mem_reqs;
1275	bool svc_bmap_done;
1276	bool dbs_hw_mode_done;
1277};
1278
1279static int ath10k_wmi_tlv_svc_rdy_parse(struct ath10k *ar, u16 tag, u16 len,
1280					const void *ptr, void *data)
1281{
1282	struct wmi_tlv_svc_rdy_parse *svc_rdy = data;
1283
1284	switch (tag) {
1285	case WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT:
1286		svc_rdy->ev = ptr;
1287		break;
1288	case WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES:
1289		svc_rdy->reg = ptr;
1290		break;
1291	case WMI_TLV_TAG_ARRAY_STRUCT:
1292		svc_rdy->mem_reqs = ptr;
1293		break;
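	/* Two UINT32 arrays are expected: the service bitmap first, then the
	 * DBS hw-mode list, which is noted but not stored.
	 */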
1294	case WMI_TLV_TAG_ARRAY_UINT32:
1295		if (!svc_rdy->svc_bmap_done) {
1296			svc_rdy->svc_bmap_done = true;
1297			svc_rdy->svc_bmap = ptr;
1298		} else if (!svc_rdy->dbs_hw_mode_done) {
1299			svc_rdy->dbs_hw_mode_done = true;
1300		}
1301		break;
1302	default:
1303		break;
1304	}
1305	return 0;
1306}
1307
1308static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
1309					     struct sk_buff *skb,
1310					     struct wmi_svc_rdy_ev_arg *arg)
1311{
1312	const struct hal_reg_capabilities *reg;
1313	const struct wmi_tlv_svc_rdy_ev *ev;
1314	const __le32 *svc_bmap;
1315	const struct wlan_host_mem_req *mem_reqs;
1316	struct wmi_tlv_svc_rdy_parse svc_rdy = { };
1317	int ret;
1318
1319	ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
1320				  ath10k_wmi_tlv_svc_rdy_parse, &svc_rdy);
1321	if (ret) {
1322		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1323		return ret;
1324	}
1325
1326	ev = svc_rdy.ev;
1327	reg = svc_rdy.reg;
1328	svc_bmap = svc_rdy.svc_bmap;
1329	mem_reqs = svc_rdy.mem_reqs;
1330
1331	if (!ev || !reg || !svc_bmap || !mem_reqs)
1332		return -EPROTO;
1333
1334	/* This is an internal ABI compatibility check for WMI TLV so check it
1335	 * here instead of the generic WMI code.
1336	 */
1337	ath10k_dbg(ar, ATH10K_DBG_WMI,
1338		   "wmi tlv abi 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x\n",
1339		   __le32_to_cpu(ev->abi.abi_ver0), WMI_TLV_ABI_VER0,
1340		   __le32_to_cpu(ev->abi.abi_ver_ns0), WMI_TLV_ABI_VER_NS0,
1341		   __le32_to_cpu(ev->abi.abi_ver_ns1), WMI_TLV_ABI_VER_NS1,
1342		   __le32_to_cpu(ev->abi.abi_ver_ns2), WMI_TLV_ABI_VER_NS2,
1343		   __le32_to_cpu(ev->abi.abi_ver_ns3), WMI_TLV_ABI_VER_NS3);
1344
1345	if (__le32_to_cpu(ev->abi.abi_ver0) != WMI_TLV_ABI_VER0 ||
1346	    __le32_to_cpu(ev->abi.abi_ver_ns0) != WMI_TLV_ABI_VER_NS0 ||
1347	    __le32_to_cpu(ev->abi.abi_ver_ns1) != WMI_TLV_ABI_VER_NS1 ||
1348	    __le32_to_cpu(ev->abi.abi_ver_ns2) != WMI_TLV_ABI_VER_NS2 ||
1349	    __le32_to_cpu(ev->abi.abi_ver_ns3) != WMI_TLV_ABI_VER_NS3) {
1350		return -ENOTSUPP;
1351	}
1352
1353	arg->min_tx_power = ev->hw_min_tx_power;
1354	arg->max_tx_power = ev->hw_max_tx_power;
1355	arg->ht_cap = ev->ht_cap_info;
1356	arg->vht_cap = ev->vht_cap_info;
1357	arg->vht_supp_mcs = ev->vht_supp_mcs;
1358	arg->sw_ver0 = ev->abi.abi_ver0;
1359	arg->sw_ver1 = ev->abi.abi_ver1;
1360	arg->fw_build = ev->fw_build_vers;
1361	arg->phy_capab = ev->phy_capability;
1362	arg->num_rf_chains = ev->num_rf_chains;
1363	arg->eeprom_rd = reg->eeprom_rd;
1364	arg->low_2ghz_chan = reg->low_2ghz_chan;
1365	arg->high_2ghz_chan = reg->high_2ghz_chan;
1366	arg->low_5ghz_chan = reg->low_5ghz_chan;
1367	arg->high_5ghz_chan = reg->high_5ghz_chan;
1368	arg->num_mem_reqs = ev->num_mem_reqs;
1369	arg->service_map = svc_bmap;
1370	arg->service_map_len = ath10k_wmi_tlv_len(svc_bmap);
1371	arg->sys_cap_info = ev->sys_cap_info;
1372
1373	ret = ath10k_wmi_tlv_iter(ar, mem_reqs, ath10k_wmi_tlv_len(mem_reqs),
1374				  ath10k_wmi_tlv_parse_mem_reqs, arg);
1375	if (ret) {
1376		ath10k_warn(ar, "failed to parse mem_reqs tlv: %d\n", ret);
1377		return ret;
1378	}
1379
1380	return 0;
1381}
1382
1383static int ath10k_wmi_tlv_op_pull_rdy_ev(struct ath10k *ar,
1384					 struct sk_buff *skb,
1385					 struct wmi_rdy_ev_arg *arg)
1386{
1387	const void **tb;
1388	const struct wmi_tlv_rdy_ev *ev;
1389	int ret;
1390
1391	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1392	if (IS_ERR(tb)) {
1393		ret = PTR_ERR(tb);
1394		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1395		return ret;
1396	}
1397
1398	ev = tb[WMI_TLV_TAG_STRUCT_READY_EVENT];
1399	if (!ev) {
1400		kfree(tb);
1401		return -EPROTO;
1402	}
1403
1404	arg->sw_version = ev->abi.abi_ver0;
1405	arg->abi_version = ev->abi.abi_ver1;
1406	arg->status = ev->status;
1407	arg->mac_addr = ev->mac_addr.addr;
1408
1409	kfree(tb);
1410	return 0;
1411}
1412
1413static int ath10k_wmi_tlv_svc_avail_parse(struct ath10k *ar, u16 tag, u16 len,
1414					  const void *ptr, void *data)
1415{
1416	struct wmi_svc_avail_ev_arg *arg = data;
1417
1418	switch (tag) {
1419	case WMI_TLV_TAG_STRUCT_SERVICE_AVAILABLE_EVENT:
1420		arg->service_map_ext_valid = true;
1421		arg->service_map_ext_len = *(__le32 *)ptr;
1422		arg->service_map_ext = ptr + sizeof(__le32);
1423		return 0;
1424	default:
1425		break;
1426	}
1427
1428	return 0;
1429}
1430
1431static int ath10k_wmi_tlv_op_pull_svc_avail(struct ath10k *ar,
1432					    struct sk_buff *skb,
1433					    struct wmi_svc_avail_ev_arg *arg)
1434{
1435	int ret;
1436
1437	ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
1438				  ath10k_wmi_tlv_svc_avail_parse, arg);
1439
1440	if (ret) {
1441		ath10k_warn(ar, "failed to parse svc_avail tlv: %d\n", ret);
1442		return ret;
1443	}
1444
1445	return 0;
1446}
1447
1448static void ath10k_wmi_tlv_pull_vdev_stats(const struct wmi_tlv_vdev_stats *src,
1449					   struct ath10k_fw_stats_vdev *dst)
1450{
1451	int i;
1452
1453	dst->vdev_id = __le32_to_cpu(src->vdev_id);
1454	dst->beacon_snr = __le32_to_cpu(src->beacon_snr);
1455	dst->data_snr = __le32_to_cpu(src->data_snr);
1456	dst->num_rx_frames = __le32_to_cpu(src->num_rx_frames);
1457	dst->num_rts_fail = __le32_to_cpu(src->num_rts_fail);
1458	dst->num_rts_success = __le32_to_cpu(src->num_rts_success);
1459	dst->num_rx_err = __le32_to_cpu(src->num_rx_err);
1460	dst->num_rx_discard = __le32_to_cpu(src->num_rx_discard);
1461	dst->num_tx_not_acked = __le32_to_cpu(src->num_tx_not_acked);
1462
1463	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames); i++)
1464		dst->num_tx_frames[i] =
1465			__le32_to_cpu(src->num_tx_frames[i]);
1466
1467	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_retries); i++)
1468		dst->num_tx_frames_retries[i] =
1469			__le32_to_cpu(src->num_tx_frames_retries[i]);
1470
1471	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_failures); i++)
1472		dst->num_tx_frames_failures[i] =
1473			__le32_to_cpu(src->num_tx_frames_failures[i]);
1474
1475	for (i = 0; i < ARRAY_SIZE(src->tx_rate_history); i++)
1476		dst->tx_rate_history[i] =
1477			__le32_to_cpu(src->tx_rate_history[i]);
1478
1479	for (i = 0; i < ARRAY_SIZE(src->beacon_rssi_history); i++)
1480		dst->beacon_rssi_history[i] =
1481			__le32_to_cpu(src->beacon_rssi_history[i]);
1482}
1483
1484static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
1485					   struct sk_buff *skb,
1486					   struct ath10k_fw_stats *stats)
1487{
1488	const void **tb;
1489	const struct wmi_tlv_stats_ev *ev;
1490	u32 num_peer_stats_extd;
1491	const void *data;
1492	u32 num_pdev_stats;
1493	u32 num_vdev_stats;
1494	u32 num_peer_stats;
1495	u32 num_bcnflt_stats;
1496	u32 num_chan_stats;
1497	size_t data_len;
1498	u32 stats_id;
1499	int ret;
1500	int i;
1501
1502	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1503	if (IS_ERR(tb)) {
1504		ret = PTR_ERR(tb);
1505		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1506		return ret;
1507	}
1508
1509	ev = tb[WMI_TLV_TAG_STRUCT_STATS_EVENT];
1510	data = tb[WMI_TLV_TAG_ARRAY_BYTE];
1511
1512	if (!ev || !data) {
1513		kfree(tb);
1514		return -EPROTO;
1515	}
1516
1517	data_len = ath10k_wmi_tlv_len(data);
1518	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
1519	num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
1520	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
1521	num_bcnflt_stats = __le32_to_cpu(ev->num_bcnflt_stats);
1522	num_chan_stats = __le32_to_cpu(ev->num_chan_stats);
1523	stats_id = __le32_to_cpu(ev->stats_id);
1524	num_peer_stats_extd = __le32_to_cpu(ev->num_peer_stats_extd);
1525
1526	ath10k_dbg(ar, ATH10K_DBG_WMI,
1527		   "wmi tlv stats update pdev %i vdev %i peer %i bcnflt %i chan %i peer_extd %i\n",
1528		   num_pdev_stats, num_vdev_stats, num_peer_stats,
1529		   num_bcnflt_stats, num_chan_stats, num_peer_stats_extd);
1530
1531	for (i = 0; i < num_pdev_stats; i++) {
1532		const struct wmi_pdev_stats *src;
1533		struct ath10k_fw_stats_pdev *dst;
1534
1535		src = data;
1536		if (data_len < sizeof(*src)) {
1537			kfree(tb);
1538			return -EPROTO;
1539		}
1540
1541		data += sizeof(*src);
1542		data_len -= sizeof(*src);
1543
1544		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
1545		if (!dst)
1546			continue;
1547
1548		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
1549		ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
1550		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
1551		list_add_tail(&dst->list, &stats->pdevs);
1552	}
1553
1554	for (i = 0; i < num_vdev_stats; i++) {
1555		const struct wmi_tlv_vdev_stats *src;
1556		struct ath10k_fw_stats_vdev *dst;
1557
1558		src = data;
1559		if (data_len < sizeof(*src)) {
1560			kfree(tb);
1561			return -EPROTO;
1562		}
1563
1564		data += sizeof(*src);
1565		data_len -= sizeof(*src);
1566
1567		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
1568		if (!dst)
1569			continue;
1570
1571		ath10k_wmi_tlv_pull_vdev_stats(src, dst);
1572		list_add_tail(&dst->list, &stats->vdevs);
1573	}
1574
1575	for (i = 0; i < num_peer_stats; i++) {
1576		const struct wmi_10x_peer_stats *src;
1577		struct ath10k_fw_stats_peer *dst;
1578
1579		src = data;
1580		if (data_len < sizeof(*src)) {
1581			kfree(tb);
1582			return -EPROTO;
1583		}
1584
1585		data += sizeof(*src);
1586		data_len -= sizeof(*src);
1587
1588		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
1589		if (!dst)
1590			continue;
1591
1592		ath10k_wmi_pull_peer_stats(&src->old, dst);
1593		dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
1594
1595		if (stats_id & WMI_TLV_STAT_PEER_EXTD) {
1596			const struct wmi_tlv_peer_stats_extd *extd;
1597			unsigned long rx_duration_high;
1598
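			/* Extended peer stats follow all of the base peer
			 * stats records, one extd record per peer.
			 */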
1599			extd = data + sizeof(*src) * (num_peer_stats - i - 1)
1600			       + sizeof(*extd) * i;
1601
1602			dst->rx_duration = __le32_to_cpu(extd->rx_duration);
1603			rx_duration_high = __le32_to_cpu
1604						(extd->rx_duration_high);
1605
1606			if (test_bit(WMI_TLV_PEER_RX_DURATION_HIGH_VALID_BIT,
1607				     &rx_duration_high)) {
1608				rx_duration_high =
1609					FIELD_GET(WMI_TLV_PEER_RX_DURATION_HIGH_MASK,
1610						  rx_duration_high);
1611				dst->rx_duration |= (u64)rx_duration_high <<
1612						    WMI_TLV_PEER_RX_DURATION_SHIFT;
1613			}
1614		}
1615
1616		list_add_tail(&dst->list, &stats->peers);
1617	}
1618
1619	kfree(tb);
1620	return 0;
1621}
1622
1623static int ath10k_wmi_tlv_op_pull_roam_ev(struct ath10k *ar,
1624					  struct sk_buff *skb,
1625					  struct wmi_roam_ev_arg *arg)
1626{
1627	const void **tb;
1628	const struct wmi_tlv_roam_ev *ev;
1629	int ret;
1630
1631	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1632	if (IS_ERR(tb)) {
1633		ret = PTR_ERR(tb);
1634		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1635		return ret;
1636	}
1637
1638	ev = tb[WMI_TLV_TAG_STRUCT_ROAM_EVENT];
1639	if (!ev) {
1640		kfree(tb);
1641		return -EPROTO;
1642	}
1643
1644	arg->vdev_id = ev->vdev_id;
1645	arg->reason = ev->reason;
1646	arg->rssi = ev->rssi;
1647
1648	kfree(tb);
1649	return 0;
1650}
1651
1652static int
1653ath10k_wmi_tlv_op_pull_wow_ev(struct ath10k *ar, struct sk_buff *skb,
1654			      struct wmi_wow_ev_arg *arg)
1655{
1656	const void **tb;
1657	const struct wmi_tlv_wow_event_info *ev;
1658	int ret;
1659
1660	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1661	if (IS_ERR(tb)) {
1662		ret = PTR_ERR(tb);
1663		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1664		return ret;
1665	}
1666
1667	ev = tb[WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO];
1668	if (!ev) {
1669		kfree(tb);
1670		return -EPROTO;
1671	}
1672
1673	arg->vdev_id = __le32_to_cpu(ev->vdev_id);
1674	arg->flag = __le32_to_cpu(ev->flag);
1675	arg->wake_reason = __le32_to_cpu(ev->wake_reason);
1676	arg->data_len = __le32_to_cpu(ev->data_len);
1677
1678	kfree(tb);
1679	return 0;
1680}
1681
1682static int ath10k_wmi_tlv_op_pull_echo_ev(struct ath10k *ar,
1683					  struct sk_buff *skb,
1684					  struct wmi_echo_ev_arg *arg)
1685{
1686	const void **tb;
1687	const struct wmi_echo_event *ev;
1688	int ret;
1689
1690	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1691	if (IS_ERR(tb)) {
1692		ret = PTR_ERR(tb);
1693		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1694		return ret;
1695	}
1696
1697	ev = tb[WMI_TLV_TAG_STRUCT_ECHO_EVENT];
1698	if (!ev) {
1699		kfree(tb);
1700		return -EPROTO;
1701	}
1702
1703	arg->value = ev->value;
1704
1705	kfree(tb);
1706	return 0;
1707}
1708
1709static struct sk_buff *
1710ath10k_wmi_tlv_op_gen_pdev_suspend(struct ath10k *ar, u32 opt)
1711{
1712	struct wmi_tlv_pdev_suspend *cmd;
1713	struct wmi_tlv *tlv;
1714	struct sk_buff *skb;
1715
1716	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1717	if (!skb)
1718		return ERR_PTR(-ENOMEM);
1719
1720	tlv = (void *)skb->data;
1721	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SUSPEND_CMD);
1722	tlv->len = __cpu_to_le16(sizeof(*cmd));
1723	cmd = (void *)tlv->value;
1724	cmd->opt = __cpu_to_le32(opt);
1725
1726	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev suspend\n");
1727	return skb;
1728}
1729
1730static struct sk_buff *
1731ath10k_wmi_tlv_op_gen_pdev_resume(struct ath10k *ar)
1732{
1733	struct wmi_tlv_resume_cmd *cmd;
1734	struct wmi_tlv *tlv;
1735	struct sk_buff *skb;
1736
1737	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1738	if (!skb)
1739		return ERR_PTR(-ENOMEM);
1740
1741	tlv = (void *)skb->data;
1742	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_RESUME_CMD);
1743	tlv->len = __cpu_to_le16(sizeof(*cmd));
1744	cmd = (void *)tlv->value;
1745	cmd->reserved = __cpu_to_le32(0);
1746
1747	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev resume\n");
1748	return skb;
1749}
1750
1751static struct sk_buff *
1752ath10k_wmi_tlv_op_gen_pdev_set_rd(struct ath10k *ar,
1753				  u16 rd, u16 rd2g, u16 rd5g,
1754				  u16 ctl2g, u16 ctl5g,
1755				  enum wmi_dfs_region dfs_reg)
1756{
1757	struct wmi_tlv_pdev_set_rd_cmd *cmd;
1758	struct wmi_tlv *tlv;
1759	struct sk_buff *skb;
1760
1761	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1762	if (!skb)
1763		return ERR_PTR(-ENOMEM);
1764
1765	tlv = (void *)skb->data;
1766	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_REGDOMAIN_CMD);
1767	tlv->len = __cpu_to_le16(sizeof(*cmd));
1768	cmd = (void *)tlv->value;
1769	cmd->regd = __cpu_to_le32(rd);
1770	cmd->regd_2ghz = __cpu_to_le32(rd2g);
1771	cmd->regd_5ghz = __cpu_to_le32(rd5g);
1772	cmd->conform_limit_2ghz = __cpu_to_le32(ctl2g);
1773	cmd->conform_limit_5ghz = __cpu_to_le32(ctl5g);
1774
1775	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set rd\n");
1776	return skb;
1777}
1778
1779static enum wmi_txbf_conf ath10k_wmi_tlv_txbf_conf_scheme(struct ath10k *ar)
1780{
1781	return WMI_TXBF_CONF_AFTER_ASSOC;
1782}
1783
1784static struct sk_buff *
1785ath10k_wmi_tlv_op_gen_pdev_set_param(struct ath10k *ar, u32 param_id,
1786				     u32 param_value)
1787{
1788	struct wmi_tlv_pdev_set_param_cmd *cmd;
1789	struct wmi_tlv *tlv;
1790	struct sk_buff *skb;
1791
1792	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1793	if (!skb)
1794		return ERR_PTR(-ENOMEM);
1795
1796	tlv = (void *)skb->data;
1797	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_PARAM_CMD);
1798	tlv->len = __cpu_to_le16(sizeof(*cmd));
1799	cmd = (void *)tlv->value;
1800	cmd->param_id = __cpu_to_le32(param_id);
1801	cmd->param_value = __cpu_to_le32(param_value);
1802
1803	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set param %d value 0x%x\n",
1804		   param_id, param_value);
1805	return skb;
1806}
1807
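/* Emit one WLAN_HOST_MEMORY_CHUNK TLV for every host memory chunk that was
 * allocated for the firmware. The upper 32 bits of the DMA address are only
 * filled in when the firmware advertises extended-address support.
 */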
1808static void
1809ath10k_wmi_tlv_put_host_mem_chunks(struct ath10k *ar, void *host_mem_chunks)
1810{
1811	struct host_memory_chunk_tlv *chunk;
1812	struct wmi_tlv *tlv;
1813	dma_addr_t paddr;
1814	int i;
1815	__le16 tlv_len, tlv_tag;
1816
1817	tlv_tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WLAN_HOST_MEMORY_CHUNK);
1818	tlv_len = __cpu_to_le16(sizeof(*chunk));
1819	for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
1820		tlv = host_mem_chunks;
1821		tlv->tag = tlv_tag;
1822		tlv->len = tlv_len;
1823		chunk = (void *)tlv->value;
1824
1825		chunk->ptr = __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
1826		chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len);
1827		chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
1828
1829		if (test_bit(WMI_SERVICE_SUPPORT_EXTEND_ADDRESS,
1830			     ar->wmi.svc_map)) {
1831			paddr = ar->wmi.mem_chunks[i].paddr;
1832			chunk->ptr_high = __cpu_to_le32(upper_32_bits(paddr));
1833		}
1834
1835		ath10k_dbg(ar, ATH10K_DBG_WMI,
1836			   "wmi-tlv chunk %d len %d, addr 0x%llx, id 0x%x\n",
1837			   i,
1838			   ar->wmi.mem_chunks[i].len,
1839			   (unsigned long long)ar->wmi.mem_chunks[i].paddr,
1840			   ar->wmi.mem_chunks[i].req_id);
1841
1842		host_mem_chunks += sizeof(*tlv);
1843		host_mem_chunks += sizeof(*chunk);
1844	}
1845}
1846
1847static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
1848{
1849	struct sk_buff *skb;
1850	struct wmi_tlv *tlv;
1851	struct wmi_tlv_init_cmd *cmd;
1852	struct wmi_tlv_resource_config *cfg;
1853	void *chunks;
1854	size_t len, chunks_len;
1855	void *ptr;
1856
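	/* The init command is three TLVs back to back: the init cmd itself,
	 * the resource config, and an ARRAY_STRUCT carrying the host memory
	 * chunk TLVs.
	 */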
1857	chunks_len = ar->wmi.num_mem_chunks *
1858		     (sizeof(struct host_memory_chunk_tlv) + sizeof(*tlv));
1859	len = (sizeof(*tlv) + sizeof(*cmd)) +
1860	      (sizeof(*tlv) + sizeof(*cfg)) +
1861	      (sizeof(*tlv) + chunks_len);
1862
1863	skb = ath10k_wmi_alloc_skb(ar, len);
1864	if (!skb)
1865		return ERR_PTR(-ENOMEM);
1866
1867	ptr = skb->data;
1868
1869	tlv = ptr;
1870	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_INIT_CMD);
1871	tlv->len = __cpu_to_le16(sizeof(*cmd));
1872	cmd = (void *)tlv->value;
1873	ptr += sizeof(*tlv);
1874	ptr += sizeof(*cmd);
1875
1876	tlv = ptr;
1877	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_RESOURCE_CONFIG);
1878	tlv->len = __cpu_to_le16(sizeof(*cfg));
1879	cfg = (void *)tlv->value;
1880	ptr += sizeof(*tlv);
1881	ptr += sizeof(*cfg);
1882
1883	tlv = ptr;
1884	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
1885	tlv->len = __cpu_to_le16(chunks_len);
1886	chunks = (void *)tlv->value;
1887
1888	ptr += sizeof(*tlv);
1889	ptr += chunks_len;
1890
1891	cmd->abi.abi_ver0 = __cpu_to_le32(WMI_TLV_ABI_VER0);
1892	cmd->abi.abi_ver1 = __cpu_to_le32(WMI_TLV_ABI_VER1);
1893	cmd->abi.abi_ver_ns0 = __cpu_to_le32(WMI_TLV_ABI_VER_NS0);
1894	cmd->abi.abi_ver_ns1 = __cpu_to_le32(WMI_TLV_ABI_VER_NS1);
1895	cmd->abi.abi_ver_ns2 = __cpu_to_le32(WMI_TLV_ABI_VER_NS2);
1896	cmd->abi.abi_ver_ns3 = __cpu_to_le32(WMI_TLV_ABI_VER_NS3);
1897	cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
1898
1899	cfg->num_vdevs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
1900
1901	if (ar->hw_params.num_peers)
1902		cfg->num_peers = __cpu_to_le32(ar->hw_params.num_peers);
1903	else
1904		cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS);
1905	cfg->ast_skid_limit = __cpu_to_le32(ar->hw_params.ast_skid_limit);
1906	cfg->num_wds_entries = __cpu_to_le32(ar->hw_params.num_wds_entries);
1907
1908	if (test_bit(WMI_SERVICE_RX_FULL_REORDER, ar->wmi.svc_map)) {
1909		cfg->num_offload_peers = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
1910		cfg->num_offload_reorder_bufs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
1911	} else {
1912		cfg->num_offload_peers = __cpu_to_le32(0);
1913		cfg->num_offload_reorder_bufs = __cpu_to_le32(0);
1914	}
1915
1916	cfg->num_peer_keys = __cpu_to_le32(2);
1917	if (ar->hw_params.num_peers)
1918		cfg->num_tids = __cpu_to_le32(ar->hw_params.num_peers * 2);
1919	else
1920		cfg->num_tids = __cpu_to_le32(TARGET_TLV_NUM_TIDS);
1921	cfg->tx_chain_mask = __cpu_to_le32(0x7);
1922	cfg->rx_chain_mask = __cpu_to_le32(0x7);
1923	cfg->rx_timeout_pri[0] = __cpu_to_le32(0x64);
1924	cfg->rx_timeout_pri[1] = __cpu_to_le32(0x64);
1925	cfg->rx_timeout_pri[2] = __cpu_to_le32(0x64);
1926	cfg->rx_timeout_pri[3] = __cpu_to_le32(0x28);
1927	cfg->rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
1928	cfg->scan_max_pending_reqs = __cpu_to_le32(4);
1929	cfg->bmiss_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
1930	cfg->roam_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
1931	cfg->roam_offload_max_ap_profiles = __cpu_to_le32(8);
1932	cfg->num_mcast_groups = __cpu_to_le32(0);
1933	cfg->num_mcast_table_elems = __cpu_to_le32(0);
1934	cfg->mcast2ucast_mode = __cpu_to_le32(0);
1935	cfg->tx_dbg_log_size = __cpu_to_le32(0x400);
1936	cfg->dma_burst_size = __cpu_to_le32(0);
1937	cfg->mac_aggr_delim = __cpu_to_le32(0);
1938	cfg->rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(0);
1939	cfg->vow_config = __cpu_to_le32(0);
1940	cfg->gtk_offload_max_vdev = __cpu_to_le32(2);
1941	cfg->num_msdu_desc = __cpu_to_le32(ar->htt.max_num_pending_tx);
1942	cfg->max_frag_entries = __cpu_to_le32(2);
1943	cfg->num_tdls_vdevs = __cpu_to_le32(TARGET_TLV_NUM_TDLS_VDEVS);
1944	cfg->num_tdls_conn_table_entries = __cpu_to_le32(0x20);
1945	cfg->beacon_tx_offload_max_vdev = __cpu_to_le32(2);
1946	cfg->num_multicast_filter_entries = __cpu_to_le32(5);
1947	cfg->num_wow_filters = __cpu_to_le32(ar->wow.max_num_patterns);
1948	cfg->num_keep_alive_pattern = __cpu_to_le32(6);
1949	cfg->keep_alive_pattern_size = __cpu_to_le32(0);
1950	cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1);
1951	cfg->max_tdls_concurrent_buffer_sta = __cpu_to_le32(1);
1952	cfg->wmi_send_separate = __cpu_to_le32(0);
1953	cfg->num_ocb_vdevs = __cpu_to_le32(0);
1954	cfg->num_ocb_channels = __cpu_to_le32(0);
1955	cfg->num_ocb_schedules = __cpu_to_le32(0);
1956	cfg->host_capab = __cpu_to_le32(WMI_TLV_FLAG_MGMT_BUNDLE_TX_COMPL);
1957
1958	if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
1959		cfg->host_capab |= __cpu_to_le32(WMI_RSRC_CFG_FLAG_TX_ACK_RSSI);
1960
1961	ath10k_wmi_tlv_put_host_mem_chunks(ar, chunks);
1962
1963	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv init\n");
1964	return skb;
1965}
1966
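/* The start scan command is a fixed cmd TLV followed by four variable
 * TLVs: the channel list (ARRAY_UINT32), SSIDs and BSSIDs
 * (ARRAY_FIXED_STRUCT) and the scan IEs (ARRAY_BYTE, padded to a 4 byte
 * boundary).
 */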
1967static struct sk_buff *
1968ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar,
1969				 const struct wmi_start_scan_arg *arg)
1970{
1971	struct wmi_tlv_start_scan_cmd *cmd;
1972	struct wmi_tlv *tlv;
1973	struct sk_buff *skb;
1974	size_t len, chan_len, ssid_len, bssid_len, ie_len;
1975	__le32 *chans;
1976	struct wmi_ssid *ssids;
1977	struct wmi_mac_addr *addrs;
1978	void *ptr;
1979	int i, ret;
1980
1981	ret = ath10k_wmi_start_scan_verify(arg);
1982	if (ret)
1983		return ERR_PTR(ret);
1984
1985	chan_len = arg->n_channels * sizeof(__le32);
1986	ssid_len = arg->n_ssids * sizeof(struct wmi_ssid);
1987	bssid_len = arg->n_bssids * sizeof(struct wmi_mac_addr);
1988	ie_len = roundup(arg->ie_len, 4);
1989	len = (sizeof(*tlv) + sizeof(*cmd)) +
1990	      sizeof(*tlv) + chan_len +
1991	      sizeof(*tlv) + ssid_len +
1992	      sizeof(*tlv) + bssid_len +
1993	      sizeof(*tlv) + ie_len;
1994
1995	skb = ath10k_wmi_alloc_skb(ar, len);
1996	if (!skb)
1997		return ERR_PTR(-ENOMEM);
1998
1999	ptr = (void *)skb->data;
2000	tlv = ptr;
2001	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_START_SCAN_CMD);
2002	tlv->len = __cpu_to_le16(sizeof(*cmd));
2003	cmd = (void *)tlv->value;
2004
2005	ath10k_wmi_put_start_scan_common(&cmd->common, arg);
2006	cmd->burst_duration_ms = __cpu_to_le32(arg->burst_duration_ms);
2007	cmd->num_channels = __cpu_to_le32(arg->n_channels);
2008	cmd->num_ssids = __cpu_to_le32(arg->n_ssids);
2009	cmd->num_bssids = __cpu_to_le32(arg->n_bssids);
2010	cmd->ie_len = __cpu_to_le32(arg->ie_len);
2011	cmd->num_probes = __cpu_to_le32(3);
2012	ether_addr_copy(cmd->mac_addr.addr, arg->mac_addr.addr);
2013	ether_addr_copy(cmd->mac_mask.addr, arg->mac_mask.addr);
2014
2015	/* FIXME: There are some scan flag inconsistencies across firmwares,
2016	 * e.g. WMI-TLV inverts the logic behind the following flag.
2017	 */
2018	cmd->common.scan_ctrl_flags ^= __cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ);
2019
2020	ptr += sizeof(*tlv);
2021	ptr += sizeof(*cmd);
2022
2023	tlv = ptr;
2024	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
2025	tlv->len = __cpu_to_le16(chan_len);
2026	chans = (void *)tlv->value;
2027	for (i = 0; i < arg->n_channels; i++)
2028		chans[i] = __cpu_to_le32(arg->channels[i]);
2029
2030	ptr += sizeof(*tlv);
2031	ptr += chan_len;
2032
2033	tlv = ptr;
2034	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_FIXED_STRUCT);
2035	tlv->len = __cpu_to_le16(ssid_len);
2036	ssids = (void *)tlv->value;
2037	for (i = 0; i < arg->n_ssids; i++) {
2038		ssids[i].ssid_len = __cpu_to_le32(arg->ssids[i].len);
2039		memcpy(ssids[i].ssid, arg->ssids[i].ssid, arg->ssids[i].len);
2040	}
2041
2042	ptr += sizeof(*tlv);
2043	ptr += ssid_len;
2044
2045	tlv = ptr;
2046	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_FIXED_STRUCT);
2047	tlv->len = __cpu_to_le16(bssid_len);
2048	addrs = (void *)tlv->value;
2049	for (i = 0; i < arg->n_bssids; i++)
2050		ether_addr_copy(addrs[i].addr, arg->bssids[i].bssid);
2051
2052	ptr += sizeof(*tlv);
2053	ptr += bssid_len;
2054
2055	tlv = ptr;
2056	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
2057	tlv->len = __cpu_to_le16(ie_len);
2058	memcpy(tlv->value, arg->ie, arg->ie_len);
2059
2060	ptr += sizeof(*tlv);
2061	ptr += ie_len;
2062
2063	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv start scan\n");
2064	return skb;
2065}
2066
2067static struct sk_buff *
2068ath10k_wmi_tlv_op_gen_stop_scan(struct ath10k *ar,
2069				const struct wmi_stop_scan_arg *arg)
2070{
2071	struct wmi_stop_scan_cmd *cmd;
2072	struct wmi_tlv *tlv;
2073	struct sk_buff *skb;
2074	u32 scan_id;
2075	u32 req_id;
2076
2077	if (arg->req_id > 0xFFF)
2078		return ERR_PTR(-EINVAL);
2079	if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
2080		return ERR_PTR(-EINVAL);
2081
2082	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2083	if (!skb)
2084		return ERR_PTR(-ENOMEM);
2085
2086	scan_id = arg->u.scan_id;
2087	scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;
2088
2089	req_id = arg->req_id;
2090	req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
2091
2092	tlv = (void *)skb->data;
2093	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STOP_SCAN_CMD);
2094	tlv->len = __cpu_to_le16(sizeof(*cmd));
2095	cmd = (void *)tlv->value;
2096	cmd->req_type = __cpu_to_le32(arg->req_type);
2097	cmd->vdev_id = __cpu_to_le32(arg->u.vdev_id);
2098	cmd->scan_id = __cpu_to_le32(scan_id);
2099	cmd->scan_req_id = __cpu_to_le32(req_id);
2100
2101	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv stop scan\n");
2102	return skb;
2103}
2104
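/* Map the generic WMI vdev subtype onto the TLV firmware encoding;
 * non-802.11s mesh has no TLV equivalent and is rejected.
 */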
2105static int ath10k_wmi_tlv_op_get_vdev_subtype(struct ath10k *ar,
2106					      enum wmi_vdev_subtype subtype)
2107{
2108	switch (subtype) {
2109	case WMI_VDEV_SUBTYPE_NONE:
2110		return WMI_TLV_VDEV_SUBTYPE_NONE;
2111	case WMI_VDEV_SUBTYPE_P2P_DEVICE:
2112		return WMI_TLV_VDEV_SUBTYPE_P2P_DEV;
2113	case WMI_VDEV_SUBTYPE_P2P_CLIENT:
2114		return WMI_TLV_VDEV_SUBTYPE_P2P_CLI;
2115	case WMI_VDEV_SUBTYPE_P2P_GO:
2116		return WMI_TLV_VDEV_SUBTYPE_P2P_GO;
2117	case WMI_VDEV_SUBTYPE_PROXY_STA:
2118		return WMI_TLV_VDEV_SUBTYPE_PROXY_STA;
2119	case WMI_VDEV_SUBTYPE_MESH_11S:
2120		return WMI_TLV_VDEV_SUBTYPE_MESH_11S;
2121	case WMI_VDEV_SUBTYPE_MESH_NON_11S:
2122		return -ENOTSUPP;
2123	}
2124	return -ENOTSUPP;
2125}
2126
2127static struct sk_buff *
2128ath10k_wmi_tlv_op_gen_vdev_create(struct ath10k *ar,
2129				  u32 vdev_id,
2130				  enum wmi_vdev_type vdev_type,
2131				  enum wmi_vdev_subtype vdev_subtype,
2132				  const u8 mac_addr[ETH_ALEN])
2133{
2134	struct wmi_vdev_create_cmd *cmd;
2135	struct wmi_tlv *tlv;
2136	struct sk_buff *skb;
2137
2138	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2139	if (!skb)
2140		return ERR_PTR(-ENOMEM);
2141
2142	tlv = (void *)skb->data;
2143	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_CREATE_CMD);
2144	tlv->len = __cpu_to_le16(sizeof(*cmd));
2145	cmd = (void *)tlv->value;
2146	cmd->vdev_id = __cpu_to_le32(vdev_id);
2147	cmd->vdev_type = __cpu_to_le32(vdev_type);
2148	cmd->vdev_subtype = __cpu_to_le32(vdev_subtype);
2149	ether_addr_copy(cmd->vdev_macaddr.addr, mac_addr);
2150
2151	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev create\n");
2152	return skb;
2153}
2154
2155static struct sk_buff *
2156ath10k_wmi_tlv_op_gen_vdev_delete(struct ath10k *ar, u32 vdev_id)
2157{
2158	struct wmi_vdev_delete_cmd *cmd;
2159	struct wmi_tlv *tlv;
2160	struct sk_buff *skb;
2161
2162	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2163	if (!skb)
2164		return ERR_PTR(-ENOMEM);
2165
2166	tlv = (void *)skb->data;
2167	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_DELETE_CMD);
2168	tlv->len = __cpu_to_le16(sizeof(*cmd));
2169	cmd = (void *)tlv->value;
2170	cmd->vdev_id = __cpu_to_le32(vdev_id);
2171
2172	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev delete\n");
2173	return skb;
2174}
2175
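/* vdev (re)start: a fixed request TLV, a CHANNEL TLV describing the
 * operating channel and a zero-length ARRAY_STRUCT that would otherwise
 * hold P2P NoA descriptors.
 */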
2176static struct sk_buff *
2177ath10k_wmi_tlv_op_gen_vdev_start(struct ath10k *ar,
2178				 const struct wmi_vdev_start_request_arg *arg,
2179				 bool restart)
2180{
2181	struct wmi_tlv_vdev_start_cmd *cmd;
2182	struct wmi_channel *ch;
2183	struct wmi_tlv *tlv;
2184	struct sk_buff *skb;
2185	size_t len;
2186	void *ptr;
2187	u32 flags = 0;
2188
2189	if (WARN_ON(arg->hidden_ssid && !arg->ssid))
2190		return ERR_PTR(-EINVAL);
2191	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
2192		return ERR_PTR(-EINVAL);
2193
2194	len = (sizeof(*tlv) + sizeof(*cmd)) +
2195	      (sizeof(*tlv) + sizeof(*ch)) +
2196	      (sizeof(*tlv) + 0);
2197	skb = ath10k_wmi_alloc_skb(ar, len);
2198	if (!skb)
2199		return ERR_PTR(-ENOMEM);
2200
2201	if (arg->hidden_ssid)
2202		flags |= WMI_VDEV_START_HIDDEN_SSID;
2203	if (arg->pmf_enabled)
2204		flags |= WMI_VDEV_START_PMF_ENABLED;
2205
2206	ptr = (void *)skb->data;
2207
2208	tlv = ptr;
2209	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_START_REQUEST_CMD);
2210	tlv->len = __cpu_to_le16(sizeof(*cmd));
2211	cmd = (void *)tlv->value;
2212	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
2213	cmd->bcn_intval = __cpu_to_le32(arg->bcn_intval);
2214	cmd->dtim_period = __cpu_to_le32(arg->dtim_period);
2215	cmd->flags = __cpu_to_le32(flags);
2216	cmd->bcn_tx_rate = __cpu_to_le32(arg->bcn_tx_rate);
2217	cmd->bcn_tx_power = __cpu_to_le32(arg->bcn_tx_power);
2218	cmd->disable_hw_ack = __cpu_to_le32(arg->disable_hw_ack);
2219
2220	if (arg->ssid) {
2221		cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len);
2222		memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
2223	}
2224
2225	ptr += sizeof(*tlv);
2226	ptr += sizeof(*cmd);
2227
2228	tlv = ptr;
2229	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
2230	tlv->len = __cpu_to_le16(sizeof(*ch));
2231	ch = (void *)tlv->value;
2232	ath10k_wmi_put_wmi_channel(ar, ch, &arg->channel);
2233
2234	ptr += sizeof(*tlv);
2235	ptr += sizeof(*ch);
2236
2237	tlv = ptr;
2238	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
2239	tlv->len = 0;
2240
2241	/* Note: This is a nested TLV containing:
2242	 * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
2243	 */
2244
2245	ptr += sizeof(*tlv);
2246	ptr += 0;
2247
2248	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev start\n");
2249	return skb;
2250}
2251
2252static struct sk_buff *
2253ath10k_wmi_tlv_op_gen_vdev_stop(struct ath10k *ar, u32 vdev_id)
2254{
2255	struct wmi_vdev_stop_cmd *cmd;
2256	struct wmi_tlv *tlv;
2257	struct sk_buff *skb;
2258
2259	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2260	if (!skb)
2261		return ERR_PTR(-ENOMEM);
2262
2263	tlv = (void *)skb->data;
2264	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_STOP_CMD);
2265	tlv->len = __cpu_to_le16(sizeof(*cmd));
2266	cmd = (void *)tlv->value;
2267	cmd->vdev_id = __cpu_to_le32(vdev_id);
2268
2269	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev stop\n");
2270	return skb;
2271}
2272
2273static struct sk_buff *
2274ath10k_wmi_tlv_op_gen_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
2275			      const u8 *bssid)
2276
2277{
2278	struct wmi_vdev_up_cmd *cmd;
2279	struct wmi_tlv *tlv;
2280	struct sk_buff *skb;
2281
2282	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2283	if (!skb)
2284		return ERR_PTR(-ENOMEM);
2285
2286	tlv = (void *)skb->data;
2287	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_UP_CMD);
2288	tlv->len = __cpu_to_le16(sizeof(*cmd));
2289	cmd = (void *)tlv->value;
2290	cmd->vdev_id = __cpu_to_le32(vdev_id);
2291	cmd->vdev_assoc_id = __cpu_to_le32(aid);
2292	ether_addr_copy(cmd->vdev_bssid.addr, bssid);
2293
2294	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev up\n");
2295	return skb;
2296}
2297
2298static struct sk_buff *
2299ath10k_wmi_tlv_op_gen_vdev_down(struct ath10k *ar, u32 vdev_id)
2300{
2301	struct wmi_vdev_down_cmd *cmd;
2302	struct wmi_tlv *tlv;
2303	struct sk_buff *skb;
2304
2305	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2306	if (!skb)
2307		return ERR_PTR(-ENOMEM);
2308
2309	tlv = (void *)skb->data;
2310	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_DOWN_CMD);
2311	tlv->len = __cpu_to_le16(sizeof(*cmd));
2312	cmd = (void *)tlv->value;
2313	cmd->vdev_id = __cpu_to_le32(vdev_id);
2314
2315	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev down\n");
2316	return skb;
2317}
2318
2319static struct sk_buff *
2320ath10k_wmi_tlv_op_gen_vdev_set_param(struct ath10k *ar, u32 vdev_id,
2321				     u32 param_id, u32 param_value)
2322{
2323	struct wmi_vdev_set_param_cmd *cmd;
2324	struct wmi_tlv *tlv;
2325	struct sk_buff *skb;
2326
2327	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2328	if (!skb)
2329		return ERR_PTR(-ENOMEM);
2330
2331	tlv = (void *)skb->data;
2332	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SET_PARAM_CMD);
2333	tlv->len = __cpu_to_le16(sizeof(*cmd));
2334	cmd = (void *)tlv->value;
2335	cmd->vdev_id = __cpu_to_le32(vdev_id);
2336	cmd->param_id = __cpu_to_le32(param_id);
2337	cmd->param_value = __cpu_to_le32(param_value);
2338
2339	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev %d set param %d value 0x%x\n",
2340		   vdev_id, param_id, param_value);
2341	return skb;
2342}
2343
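/* Install a peer/group key: the key material follows the fixed command
 * in an ARRAY_BYTE TLV rounded up to 4 bytes. A NONE cipher must not
 * carry key data and any other cipher must, hence the checks below.
 */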
2344static struct sk_buff *
2345ath10k_wmi_tlv_op_gen_vdev_install_key(struct ath10k *ar,
2346				       const struct wmi_vdev_install_key_arg *arg)
2347{
2348	struct wmi_vdev_install_key_cmd *cmd;
2349	struct wmi_tlv *tlv;
2350	struct sk_buff *skb;
2351	size_t len;
2352	void *ptr;
2353
2354	if (arg->key_cipher == ar->wmi_key_cipher[WMI_CIPHER_NONE] &&
2355	    arg->key_data)
2356		return ERR_PTR(-EINVAL);
2357	if (arg->key_cipher != ar->wmi_key_cipher[WMI_CIPHER_NONE] &&
2358	    !arg->key_data)
2359		return ERR_PTR(-EINVAL);
2360
2361	len = sizeof(*tlv) + sizeof(*cmd) +
2362	      sizeof(*tlv) + roundup(arg->key_len, sizeof(__le32));
2363	skb = ath10k_wmi_alloc_skb(ar, len);
2364	if (!skb)
2365		return ERR_PTR(-ENOMEM);
2366
2367	ptr = (void *)skb->data;
2368	tlv = ptr;
2369	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_INSTALL_KEY_CMD);
2370	tlv->len = __cpu_to_le16(sizeof(*cmd));
2371	cmd = (void *)tlv->value;
2372	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
2373	cmd->key_idx = __cpu_to_le32(arg->key_idx);
2374	cmd->key_flags = __cpu_to_le32(arg->key_flags);
2375	cmd->key_cipher = __cpu_to_le32(arg->key_cipher);
2376	cmd->key_len = __cpu_to_le32(arg->key_len);
2377	cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len);
2378	cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len);
2379
2380	if (arg->macaddr)
2381		ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
2382
2383	ptr += sizeof(*tlv);
2384	ptr += sizeof(*cmd);
2385
2386	tlv = ptr;
2387	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
2388	tlv->len = __cpu_to_le16(roundup(arg->key_len, sizeof(__le32)));
2389	if (arg->key_data)
2390		memcpy(tlv->value, arg->key_data, arg->key_len);
2391
2392	ptr += sizeof(*tlv);
2393	ptr += roundup(arg->key_len, sizeof(__le32));
2394
2395	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev install key\n");
2396	return skb;
2397}
2398
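/* Append a single STA_UAPSD_AUTO_TRIG_PARAM TLV and return the advanced
 * write pointer so callers can chain several access categories.
 */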
2399static void *ath10k_wmi_tlv_put_uapsd_ac(struct ath10k *ar, void *ptr,
2400					 const struct wmi_sta_uapsd_auto_trig_arg *arg)
2401{
2402	struct wmi_sta_uapsd_auto_trig_param *ac;
2403	struct wmi_tlv *tlv;
2404
2405	tlv = ptr;
2406	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_PARAM);
2407	tlv->len = __cpu_to_le16(sizeof(*ac));
2408	ac = (void *)tlv->value;
2409
2410	ac->wmm_ac = __cpu_to_le32(arg->wmm_ac);
2411	ac->user_priority = __cpu_to_le32(arg->user_priority);
2412	ac->service_interval = __cpu_to_le32(arg->service_interval);
2413	ac->suspend_interval = __cpu_to_le32(arg->suspend_interval);
2414	ac->delay_interval = __cpu_to_le32(arg->delay_interval);
2415
2416	ath10k_dbg(ar, ATH10K_DBG_WMI,
2417		   "wmi tlv vdev sta uapsd auto trigger ac %d prio %d svc int %d susp int %d delay int %d\n",
2418		   arg->wmm_ac, arg->user_priority, arg->service_interval,
2419		   arg->suspend_interval, arg->delay_interval);
2420
2421	return ptr + sizeof(*tlv) + sizeof(*ac);
2422}
2423
2424static struct sk_buff *
2425ath10k_wmi_tlv_op_gen_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
2426				     const u8 peer_addr[ETH_ALEN],
2427				     const struct wmi_sta_uapsd_auto_trig_arg *args,
2428				     u32 num_ac)
2429{
2430	struct wmi_sta_uapsd_auto_trig_cmd_fixed_param *cmd;
2431	struct wmi_sta_uapsd_auto_trig_param *ac;
2432	struct wmi_tlv *tlv;
2433	struct sk_buff *skb;
2434	size_t len;
2435	size_t ac_tlv_len;
2436	void *ptr;
2437	int i;
2438
2439	ac_tlv_len = num_ac * (sizeof(*tlv) + sizeof(*ac));
2440	len = sizeof(*tlv) + sizeof(*cmd) +
2441	      sizeof(*tlv) + ac_tlv_len;
2442	skb = ath10k_wmi_alloc_skb(ar, len);
2443	if (!skb)
2444		return ERR_PTR(-ENOMEM);
2445
2446	ptr = (void *)skb->data;
2447	tlv = ptr;
2448	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_CMD);
2449	tlv->len = __cpu_to_le16(sizeof(*cmd));
2450	cmd = (void *)tlv->value;
2451	cmd->vdev_id = __cpu_to_le32(vdev_id);
2452	cmd->num_ac = __cpu_to_le32(num_ac);
2453	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
2454
2455	ptr += sizeof(*tlv);
2456	ptr += sizeof(*cmd);
2457
2458	tlv = ptr;
2459	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
2460	tlv->len = __cpu_to_le16(ac_tlv_len);
2461	ac = (void *)tlv->value;
2462
2463	ptr += sizeof(*tlv);
2464	for (i = 0; i < num_ac; i++)
2465		ptr = ath10k_wmi_tlv_put_uapsd_ac(ar, ptr, &args[i]);
2466
2467	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev sta uapsd auto trigger\n");
2468	return skb;
2469}
2470
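/* Append one WMM_PARAMS TLV for a single access category and return the
 * advanced write pointer.
 */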
2471static void *ath10k_wmi_tlv_put_wmm(void *ptr,
2472				    const struct wmi_wmm_params_arg *arg)
2473{
2474	struct wmi_wmm_params *wmm;
2475	struct wmi_tlv *tlv;
2476
2477	tlv = ptr;
2478	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WMM_PARAMS);
2479	tlv->len = __cpu_to_le16(sizeof(*wmm));
2480	wmm = (void *)tlv->value;
2481	ath10k_wmi_set_wmm_param(wmm, arg);
2482
2483	return ptr + sizeof(*tlv) + sizeof(*wmm);
2484}
2485
2486static struct sk_buff *
2487ath10k_wmi_tlv_op_gen_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
2488				    const struct wmi_wmm_params_all_arg *arg)
2489{
2490	struct wmi_tlv_vdev_set_wmm_cmd *cmd;
2491	struct wmi_tlv *tlv;
2492	struct sk_buff *skb;
2493	size_t len;
2494	void *ptr;
2495
2496	len = sizeof(*tlv) + sizeof(*cmd);
2497	skb = ath10k_wmi_alloc_skb(ar, len);
2498	if (!skb)
2499		return ERR_PTR(-ENOMEM);
2500
2501	ptr = (void *)skb->data;
2502	tlv = ptr;
2503	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SET_WMM_PARAMS_CMD);
2504	tlv->len = __cpu_to_le16(sizeof(*cmd));
2505	cmd = (void *)tlv->value;
2506	cmd->vdev_id = __cpu_to_le32(vdev_id);
2507
2508	ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[0].params, &arg->ac_be);
2509	ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[1].params, &arg->ac_bk);
2510	ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[2].params, &arg->ac_vi);
2511	ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[3].params, &arg->ac_vo);
2512
2513	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev wmm conf\n");
2514	return skb;
2515}
2516
2517static struct sk_buff *
2518ath10k_wmi_tlv_op_gen_sta_keepalive(struct ath10k *ar,
2519				    const struct wmi_sta_keepalive_arg *arg)
2520{
2521	struct wmi_tlv_sta_keepalive_cmd *cmd;
2522	struct wmi_sta_keepalive_arp_resp *arp;
2523	struct sk_buff *skb;
2524	struct wmi_tlv *tlv;
2525	void *ptr;
2526	size_t len;
2527
2528	len = sizeof(*tlv) + sizeof(*cmd) +
2529	      sizeof(*tlv) + sizeof(*arp);
2530	skb = ath10k_wmi_alloc_skb(ar, len);
2531	if (!skb)
2532		return ERR_PTR(-ENOMEM);
2533
2534	ptr = (void *)skb->data;
2535	tlv = ptr;
2536	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALIVE_CMD);
2537	tlv->len = __cpu_to_le16(sizeof(*cmd));
2538	cmd = (void *)tlv->value;
2539	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
2540	cmd->enabled = __cpu_to_le32(arg->enabled);
2541	cmd->method = __cpu_to_le32(arg->method);
2542	cmd->interval = __cpu_to_le32(arg->interval);
2543
2544	ptr += sizeof(*tlv);
2545	ptr += sizeof(*cmd);
2546
2547	tlv = ptr;
2548	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALVE_ARP_RESPONSE);
2549	tlv->len = __cpu_to_le16(sizeof(*arp));
2550	arp = (void *)tlv->value;
2551
2552	arp->src_ip4_addr = arg->src_ip4_addr;
2553	arp->dest_ip4_addr = arg->dest_ip4_addr;
2554	ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
2555
2556	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv sta keepalive vdev %d enabled %d method %d interval %d\n",
2557		   arg->vdev_id, arg->enabled, arg->method, arg->interval);
2558	return skb;
2559}
2560
2561static struct sk_buff *
2562ath10k_wmi_tlv_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
2563				  const u8 peer_addr[ETH_ALEN],
2564				  enum wmi_peer_type peer_type)
2565{
2566	struct wmi_tlv_peer_create_cmd *cmd;
2567	struct wmi_tlv *tlv;
2568	struct sk_buff *skb;
2569
2570	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2571	if (!skb)
2572		return ERR_PTR(-ENOMEM);
2573
2574	tlv = (void *)skb->data;
2575	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_CREATE_CMD);
2576	tlv->len = __cpu_to_le16(sizeof(*cmd));
2577	cmd = (void *)tlv->value;
2578	cmd->vdev_id = __cpu_to_le32(vdev_id);
2579	cmd->peer_type = __cpu_to_le32(peer_type);
2580	ether_addr_copy(cmd->peer_addr.addr, peer_addr);
2581
2582	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer create\n");
2583	return skb;
2584}
2585
2586static struct sk_buff *
2587ath10k_wmi_tlv_op_gen_peer_delete(struct ath10k *ar, u32 vdev_id,
2588				  const u8 peer_addr[ETH_ALEN])
2589{
2590	struct wmi_peer_delete_cmd *cmd;
2591	struct wmi_tlv *tlv;
2592	struct sk_buff *skb;
2593
2594	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2595	if (!skb)
2596		return ERR_PTR(-ENOMEM);
2597
2598	tlv = (void *)skb->data;
2599	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_DELETE_CMD);
2600	tlv->len = __cpu_to_le16(sizeof(*cmd));
2601	cmd = (void *)tlv->value;
2602	cmd->vdev_id = __cpu_to_le32(vdev_id);
2603	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
2604
2605	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer delete\n");
2606	return skb;
2607}
2608
2609static struct sk_buff *
2610ath10k_wmi_tlv_op_gen_peer_flush(struct ath10k *ar, u32 vdev_id,
2611				 const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
2612{
2613	struct wmi_peer_flush_tids_cmd *cmd;
2614	struct wmi_tlv *tlv;
2615	struct sk_buff *skb;
2616
2617	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2618	if (!skb)
2619		return ERR_PTR(-ENOMEM);
2620
2621	tlv = (void *)skb->data;
2622	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_FLUSH_TIDS_CMD);
2623	tlv->len = __cpu_to_le16(sizeof(*cmd));
2624	cmd = (void *)tlv->value;
2625	cmd->vdev_id = __cpu_to_le32(vdev_id);
2626	cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
2627	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
2628
2629	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer flush\n");
2630	return skb;
2631}
2632
2633static struct sk_buff *
2634ath10k_wmi_tlv_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id,
2635				     const u8 *peer_addr,
2636				     enum wmi_peer_param param_id,
2637				     u32 param_value)
2638{
2639	struct wmi_peer_set_param_cmd *cmd;
2640	struct wmi_tlv *tlv;
2641	struct sk_buff *skb;
2642
2643	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2644	if (!skb)
2645		return ERR_PTR(-ENOMEM);
2646
2647	tlv = (void *)skb->data;
2648	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_SET_PARAM_CMD);
2649	tlv->len = __cpu_to_le16(sizeof(*cmd));
2650	cmd = (void *)tlv->value;
2651	cmd->vdev_id = __cpu_to_le32(vdev_id);
2652	cmd->param_id = __cpu_to_le32(param_id);
2653	cmd->param_value = __cpu_to_le32(param_value);
2654	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
2655
2656	ath10k_dbg(ar, ATH10K_DBG_WMI,
2657		   "wmi tlv vdev %d peer %pM set param %d value 0x%x\n",
2658		   vdev_id, peer_addr, param_id, param_value);
2659	return skb;
2660}
2661
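/* Peer assoc: a fixed command TLV, the legacy and HT rate sets as
 * ARRAY_BYTE TLVs (lengths rounded up to 4 bytes) and a fixed
 * VHT_RATE_SET TLV.
 */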
2662static struct sk_buff *
2663ath10k_wmi_tlv_op_gen_peer_assoc(struct ath10k *ar,
2664				 const struct wmi_peer_assoc_complete_arg *arg)
2665{
2666	struct wmi_tlv_peer_assoc_cmd *cmd;
2667	struct wmi_vht_rate_set *vht_rate;
2668	struct wmi_tlv *tlv;
2669	struct sk_buff *skb;
2670	size_t len, legacy_rate_len, ht_rate_len;
2671	void *ptr;
2672
2673	if (arg->peer_mpdu_density > 16)
2674		return ERR_PTR(-EINVAL);
2675	if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
2676		return ERR_PTR(-EINVAL);
2677	if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
2678		return ERR_PTR(-EINVAL);
2679
2680	legacy_rate_len = roundup(arg->peer_legacy_rates.num_rates,
2681				  sizeof(__le32));
2682	ht_rate_len = roundup(arg->peer_ht_rates.num_rates, sizeof(__le32));
2683	len = (sizeof(*tlv) + sizeof(*cmd)) +
2684	      (sizeof(*tlv) + legacy_rate_len) +
2685	      (sizeof(*tlv) + ht_rate_len) +
2686	      (sizeof(*tlv) + sizeof(*vht_rate));
2687	skb = ath10k_wmi_alloc_skb(ar, len);
2688	if (!skb)
2689		return ERR_PTR(-ENOMEM);
2690
2691	ptr = (void *)skb->data;
2692	tlv = ptr;
2693	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_ASSOC_COMPLETE_CMD);
2694	tlv->len = __cpu_to_le16(sizeof(*cmd));
2695	cmd = (void *)tlv->value;
2696
2697	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
2698	cmd->new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
2699	cmd->assoc_id = __cpu_to_le32(arg->peer_aid);
2700	cmd->flags = __cpu_to_le32(arg->peer_flags);
2701	cmd->caps = __cpu_to_le32(arg->peer_caps);
2702	cmd->listen_intval = __cpu_to_le32(arg->peer_listen_intval);
2703	cmd->ht_caps = __cpu_to_le32(arg->peer_ht_caps);
2704	cmd->max_mpdu = __cpu_to_le32(arg->peer_max_mpdu);
2705	cmd->mpdu_density = __cpu_to_le32(arg->peer_mpdu_density);
2706	cmd->rate_caps = __cpu_to_le32(arg->peer_rate_caps);
2707	cmd->nss = __cpu_to_le32(arg->peer_num_spatial_streams);
2708	cmd->vht_caps = __cpu_to_le32(arg->peer_vht_caps);
2709	cmd->phy_mode = __cpu_to_le32(arg->peer_phymode);
2710	cmd->num_legacy_rates = __cpu_to_le32(arg->peer_legacy_rates.num_rates);
2711	cmd->num_ht_rates = __cpu_to_le32(arg->peer_ht_rates.num_rates);
2712	ether_addr_copy(cmd->mac_addr.addr, arg->addr);
2713
2714	ptr += sizeof(*tlv);
2715	ptr += sizeof(*cmd);
2716
2717	tlv = ptr;
2718	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
2719	tlv->len = __cpu_to_le16(legacy_rate_len);
2720	memcpy(tlv->value, arg->peer_legacy_rates.rates,
2721	       arg->peer_legacy_rates.num_rates);
2722
2723	ptr += sizeof(*tlv);
2724	ptr += legacy_rate_len;
2725
2726	tlv = ptr;
2727	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
2728	tlv->len = __cpu_to_le16(ht_rate_len);
2729	memcpy(tlv->value, arg->peer_ht_rates.rates,
2730	       arg->peer_ht_rates.num_rates);
2731
2732	ptr += sizeof(*tlv);
2733	ptr += ht_rate_len;
2734
2735	tlv = ptr;
2736	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VHT_RATE_SET);
2737	tlv->len = __cpu_to_le16(sizeof(*vht_rate));
2738	vht_rate = (void *)tlv->value;
2739
2740	vht_rate->rx_max_rate = __cpu_to_le32(arg->peer_vht_rates.rx_max_rate);
2741	vht_rate->rx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.rx_mcs_set);
2742	vht_rate->tx_max_rate = __cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
2743	vht_rate->tx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
2744
2745	ptr += sizeof(*tlv);
2746	ptr += sizeof(*vht_rate);
2747
2748	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer assoc\n");
2749	return skb;
2750}
2751
2752static struct sk_buff *
2753ath10k_wmi_tlv_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id,
2754				 enum wmi_sta_ps_mode psmode)
2755{
2756	struct wmi_sta_powersave_mode_cmd *cmd;
2757	struct wmi_tlv *tlv;
2758	struct sk_buff *skb;
2759
2760	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2761	if (!skb)
2762		return ERR_PTR(-ENOMEM);
2763
2764	tlv = (void *)skb->data;
2765	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_POWERSAVE_MODE_CMD);
2766	tlv->len = __cpu_to_le16(sizeof(*cmd));
2767	cmd = (void *)tlv->value;
2768	cmd->vdev_id = __cpu_to_le32(vdev_id);
2769	cmd->sta_ps_mode = __cpu_to_le32(psmode);
2770
2771	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set psmode\n");
2772	return skb;
2773}
2774
2775static struct sk_buff *
2776ath10k_wmi_tlv_op_gen_set_sta_ps(struct ath10k *ar, u32 vdev_id,
2777				 enum wmi_sta_powersave_param param_id,
2778				 u32 param_value)
2779{
2780	struct wmi_sta_powersave_param_cmd *cmd;
2781	struct wmi_tlv *tlv;
2782	struct sk_buff *skb;
2783
2784	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2785	if (!skb)
2786		return ERR_PTR(-ENOMEM);
2787
2788	tlv = (void *)skb->data;
2789	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_POWERSAVE_PARAM_CMD);
2790	tlv->len = __cpu_to_le16(sizeof(*cmd));
2791	cmd = (void *)tlv->value;
2792	cmd->vdev_id = __cpu_to_le32(vdev_id);
2793	cmd->param_id = __cpu_to_le32(param_id);
2794	cmd->param_value = __cpu_to_le32(param_value);
2795
2796	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set sta ps\n");
2797	return skb;
2798}
2799
2800static struct sk_buff *
2801ath10k_wmi_tlv_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac,
2802				enum wmi_ap_ps_peer_param param_id, u32 value)
2803{
2804	struct wmi_ap_ps_peer_cmd *cmd;
2805	struct wmi_tlv *tlv;
2806	struct sk_buff *skb;
2807
2808	if (!mac)
2809		return ERR_PTR(-EINVAL);
2810
2811	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2812	if (!skb)
2813		return ERR_PTR(-ENOMEM);
2814
2815	tlv = (void *)skb->data;
2816	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_AP_PS_PEER_CMD);
2817	tlv->len = __cpu_to_le16(sizeof(*cmd));
2818	cmd = (void *)tlv->value;
2819	cmd->vdev_id = __cpu_to_le32(vdev_id);
2820	cmd->param_id = __cpu_to_le32(param_id);
2821	cmd->param_value = __cpu_to_le32(value);
2822	ether_addr_copy(cmd->peer_macaddr.addr, mac);
2823
2824	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv ap ps param\n");
2825	return skb;
2826}
2827
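/* Scan channel list: the outer ARRAY_STRUCT TLV nests one CHANNEL TLV
 * per channel supplied by the caller.
 */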
2828static struct sk_buff *
2829ath10k_wmi_tlv_op_gen_scan_chan_list(struct ath10k *ar,
2830				     const struct wmi_scan_chan_list_arg *arg)
2831{
2832	struct wmi_tlv_scan_chan_list_cmd *cmd;
2833	struct wmi_channel *ci;
2834	struct wmi_channel_arg *ch;
2835	struct wmi_tlv *tlv;
2836	struct sk_buff *skb;
2837	size_t chans_len, len;
2838	int i;
2839	void *ptr, *chans;
2840
2841	chans_len = arg->n_channels * (sizeof(*tlv) + sizeof(*ci));
2842	len = (sizeof(*tlv) + sizeof(*cmd)) +
2843	      (sizeof(*tlv) + chans_len);
2844
2845	skb = ath10k_wmi_alloc_skb(ar, len);
2846	if (!skb)
2847		return ERR_PTR(-ENOMEM);
2848
2849	ptr = (void *)skb->data;
2850	tlv = ptr;
2851	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_SCAN_CHAN_LIST_CMD);
2852	tlv->len = __cpu_to_le16(sizeof(*cmd));
2853	cmd = (void *)tlv->value;
2854	cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);
2855
2856	ptr += sizeof(*tlv);
2857	ptr += sizeof(*cmd);
2858
2859	tlv = ptr;
2860	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
2861	tlv->len = __cpu_to_le16(chans_len);
2862	chans = (void *)tlv->value;
2863
2864	for (i = 0; i < arg->n_channels; i++) {
2865		ch = &arg->channels[i];
2866
2867		tlv = chans;
2868		tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
2869		tlv->len = __cpu_to_le16(sizeof(*ci));
2870		ci = (void *)tlv->value;
2871
2872		ath10k_wmi_put_wmi_channel(ar, ci, ch);
2873
2874		chans += sizeof(*tlv);
2875		chans += sizeof(*ci);
2876	}
2877
2878	ptr += sizeof(*tlv);
2879	ptr += chans_len;
2880
2881	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv scan chan list\n");
2882	return skb;
2883}
2884
2885static struct sk_buff *
2886ath10k_wmi_tlv_op_gen_scan_prob_req_oui(struct ath10k *ar, u32 prob_req_oui)
2887{
2888	struct wmi_scan_prob_req_oui_cmd *cmd;
2889	struct wmi_tlv *tlv;
2890	struct sk_buff *skb;
2891
2892	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2893	if (!skb)
2894		return ERR_PTR(-ENOMEM);
2895
2896	tlv = (void *)skb->data;
2897	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_SCAN_PROB_REQ_OUI_CMD);
2898	tlv->len = __cpu_to_le16(sizeof(*cmd));
2899	cmd = (void *)tlv->value;
2900	cmd->prob_req_oui = __cpu_to_le32(prob_req_oui);
2901
2902	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv scan prob req oui\n");
2903	return skb;
2904}
2905
2906static struct sk_buff *
2907ath10k_wmi_tlv_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id,
2908				 const void *bcn, size_t bcn_len,
2909				 u32 bcn_paddr, bool dtim_zero,
2910				 bool deliver_cab)
2911
2912{
2913	struct wmi_bcn_tx_ref_cmd *cmd;
2914	struct wmi_tlv *tlv;
2915	struct sk_buff *skb;
2916	struct ieee80211_hdr *hdr;
2917	u16 fc;
2918
2919	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2920	if (!skb)
2921		return ERR_PTR(-ENOMEM);
2922
2923	hdr = (struct ieee80211_hdr *)bcn;
2924	fc = le16_to_cpu(hdr->frame_control);
2925
2926	tlv = (void *)skb->data;
2927	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_SEND_FROM_HOST_CMD);
2928	tlv->len = __cpu_to_le16(sizeof(*cmd));
2929	cmd = (void *)tlv->value;
2930	cmd->vdev_id = __cpu_to_le32(vdev_id);
2931	cmd->data_len = __cpu_to_le32(bcn_len);
2932	cmd->data_ptr = __cpu_to_le32(bcn_paddr);
2933	cmd->msdu_id = 0;
2934	cmd->frame_control = __cpu_to_le32(fc);
2935	cmd->flags = 0;
2936
2937	if (dtim_zero)
2938		cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);
2939
2940	if (deliver_cab)
2941		cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);
2942
2943	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv beacon dma\n");
2944	return skb;
2945}
2946
2947static struct sk_buff *
2948ath10k_wmi_tlv_op_gen_pdev_set_wmm(struct ath10k *ar,
2949				   const struct wmi_wmm_params_all_arg *arg)
2950{
2951	struct wmi_tlv_pdev_set_wmm_cmd *cmd;
2952	struct wmi_wmm_params *wmm;
2953	struct wmi_tlv *tlv;
2954	struct sk_buff *skb;
2955	size_t len;
2956	void *ptr;
2957
2958	len = (sizeof(*tlv) + sizeof(*cmd)) +
2959	      (4 * (sizeof(*tlv) + sizeof(*wmm)));
2960	skb = ath10k_wmi_alloc_skb(ar, len);
2961	if (!skb)
2962		return ERR_PTR(-ENOMEM);
2963
2964	ptr = (void *)skb->data;
2965
2966	tlv = ptr;
2967	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_WMM_PARAMS_CMD);
2968	tlv->len = __cpu_to_le16(sizeof(*cmd));
2969	cmd = (void *)tlv->value;
2970
2971	/* nothing to set here */
2972
2973	ptr += sizeof(*tlv);
2974	ptr += sizeof(*cmd);
2975
2976	ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_be);
2977	ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_bk);
2978	ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vi);
2979	ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vo);
2980
2981	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set wmm\n");
2982	return skb;
2983}
2984
2985static struct sk_buff *
2986ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
2987{
2988	struct wmi_request_stats_cmd *cmd;
2989	struct wmi_tlv *tlv;
2990	struct sk_buff *skb;
2991
2992	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2993	if (!skb)
2994		return ERR_PTR(-ENOMEM);
2995
2996	tlv = (void *)skb->data;
2997	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_REQUEST_STATS_CMD);
2998	tlv->len = __cpu_to_le16(sizeof(*cmd));
2999	cmd = (void *)tlv->value;
3000	cmd->stats_id = __cpu_to_le32(stats_mask);
3001
3002	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv request stats\n");
3003	return skb;
3004}
3005
3006static struct sk_buff *
3007ath10k_wmi_tlv_op_gen_request_peer_stats_info(struct ath10k *ar,
3008					      u32 vdev_id,
3009					      enum wmi_peer_stats_info_request_type type,
3010					      u8 *addr,
3011					      u32 reset)
3012{
3013	struct wmi_tlv_request_peer_stats_info *cmd;
3014	struct wmi_tlv *tlv;
3015	struct sk_buff *skb;
3016
3017	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
3018	if (!skb)
3019		return ERR_PTR(-ENOMEM);
3020
3021	tlv = (void *)skb->data;
3022	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_REQUEST_PEER_STATS_INFO_CMD);
3023	tlv->len = __cpu_to_le16(sizeof(*cmd));
3024	cmd = (void *)tlv->value;
3025	cmd->vdev_id = __cpu_to_le32(vdev_id);
3026	cmd->request_type = __cpu_to_le32(type);
3027
3028	if (type == WMI_REQUEST_ONE_PEER_STATS_INFO)
3029		ether_addr_copy(cmd->peer_macaddr.addr, addr);
3030
3031	cmd->reset_after_request = __cpu_to_le32(reset);
3032	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv request peer stats info\n");
3033	return skb;
3034}
3035
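/* Drop the pending-tx idr entry that ath10k_wmi_mgmt_tx_alloc_msdu_id()
 * allocated for this msdu.
 */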
3036static int
3037ath10k_wmi_tlv_op_cleanup_mgmt_tx_send(struct ath10k *ar,
3038				       struct sk_buff *msdu)
3039{
3040	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
3041	struct ath10k_wmi *wmi = &ar->wmi;
3042
3043	idr_remove(&wmi->mgmt_pending_tx, cb->msdu_id);
3044
3045	return 0;
3046}
3047
3048static int
3049ath10k_wmi_mgmt_tx_alloc_msdu_id(struct ath10k *ar, struct sk_buff *skb,
3050				 dma_addr_t paddr)
3051{
3052	struct ath10k_wmi *wmi = &ar->wmi;
3053	struct ath10k_mgmt_tx_pkt_addr *pkt_addr;
3054	int ret;
3055
3056	pkt_addr = kmalloc(sizeof(*pkt_addr), GFP_ATOMIC);
3057	if (!pkt_addr)
3058		return -ENOMEM;
3059
3060	pkt_addr->vaddr = skb;
3061	pkt_addr->paddr = paddr;
3062
3063	spin_lock_bh(&ar->data_lock);
3064	ret = idr_alloc(&wmi->mgmt_pending_tx, pkt_addr, 0,
3065			wmi->mgmt_max_num_pending_tx, GFP_ATOMIC);
3066	spin_unlock_bh(&ar->data_lock);
3067
3068	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx alloc msdu_id ret %d\n", ret);
3069	return ret;
3070}
3071
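/* Management tx over WMI: the frame is referenced by DMA address in the
 * fixed command TLV and a bounded, 4 byte aligned copy of the frame is
 * carried in an ARRAY_BYTE TLV. Room for the CCMP MIC is reserved for
 * protected robust management frames.
 */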
3072static struct sk_buff *
3073ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
3074				   dma_addr_t paddr)
3075{
3076	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
3077	struct wmi_tlv_mgmt_tx_cmd *cmd;
3078	struct ieee80211_hdr *hdr;
3079	struct ath10k_vif *arvif;
3080	u32 buf_len = msdu->len;
3081	struct wmi_tlv *tlv;
3082	struct sk_buff *skb;
3083	int len, desc_id;
3084	u32 vdev_id;
3085	void *ptr;
3086
3087	if (!cb->vif)
3088		return ERR_PTR(-EINVAL);
3089
3090	hdr = (struct ieee80211_hdr *)msdu->data;
3091	arvif = (void *)cb->vif->drv_priv;
3092	vdev_id = arvif->vdev_id;
3093
3094	if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control) &&
3095			 (!(ieee80211_is_nullfunc(hdr->frame_control) ||
3096			 ieee80211_is_qos_nullfunc(hdr->frame_control)))))
3097		return ERR_PTR(-EINVAL);
3098
3099	len = sizeof(*cmd) + 2 * sizeof(*tlv);
3100
3101	if ((ieee80211_is_action(hdr->frame_control) ||
3102	     ieee80211_is_deauth(hdr->frame_control) ||
3103	     ieee80211_is_disassoc(hdr->frame_control)) &&
3104	     ieee80211_has_protected(hdr->frame_control)) {
3105		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
3106		buf_len += IEEE80211_CCMP_MIC_LEN;
3107	}
3108
3109	buf_len = min_t(u32, buf_len, WMI_TLV_MGMT_TX_FRAME_MAX_LEN);
3110	buf_len = round_up(buf_len, 4);
3111
3112	len += buf_len;
3113	len = round_up(len, 4);
3114	skb = ath10k_wmi_alloc_skb(ar, len);
3115	if (!skb)
3116		return ERR_PTR(-ENOMEM);
3117
3118	desc_id = ath10k_wmi_mgmt_tx_alloc_msdu_id(ar, msdu, paddr);
3119	if (desc_id < 0)
3120		goto err_free_skb;
3121
3122	cb->msdu_id = desc_id;
3123
3124	ptr = (void *)skb->data;
3125	tlv = ptr;
3126	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_MGMT_TX_CMD);
3127	tlv->len = __cpu_to_le16(sizeof(*cmd));
3128	cmd = (void *)tlv->value;
3129	cmd->vdev_id = __cpu_to_le32(vdev_id);
3130	cmd->desc_id = __cpu_to_le32(desc_id);
3131	cmd->chanfreq = 0;
3132	cmd->buf_len = __cpu_to_le32(buf_len);
3133	cmd->frame_len = __cpu_to_le32(msdu->len);
3134	cmd->paddr = __cpu_to_le64(paddr);
3135
3136	ptr += sizeof(*tlv);
3137	ptr += sizeof(*cmd);
3138
3139	tlv = ptr;
3140	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
3141	tlv->len = __cpu_to_le16(buf_len);
3142
3143	ptr += sizeof(*tlv);
3144	memcpy(ptr, msdu->data, buf_len);
3145
3146	return skb;
3147
3148err_free_skb:
3149	dev_kfree_skb(skb);
3150	return ERR_PTR(desc_id);
3151}
3152
3153static struct sk_buff *
3154ath10k_wmi_tlv_op_gen_force_fw_hang(struct ath10k *ar,
3155				    enum wmi_force_fw_hang_type type,
3156				    u32 delay_ms)
3157{
3158	struct wmi_force_fw_hang_cmd *cmd;
3159	struct wmi_tlv *tlv;
3160	struct sk_buff *skb;
3161
3162	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
3163	if (!skb)
3164		return ERR_PTR(-ENOMEM);
3165
3166	tlv = (void *)skb->data;
3167	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_FORCE_FW_HANG_CMD);
3168	tlv->len = __cpu_to_le16(sizeof(*cmd));
3169	cmd = (void *)tlv->value;
3170	cmd->type = __cpu_to_le32(type);
3171	cmd->delay_ms = __cpu_to_le32(delay_ms);
3172
3173	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv force fw hang\n");
3174	return skb;
3175}
3176
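/* Configure the firmware debug log level: verbose for the requested
 * modules, or warnings for all modules when no module mask is given.
 * The trailing module bitmap TLV is left empty.
 */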
3177static struct sk_buff *
3178ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
3179				 u32 log_level)
3180{
3181	struct wmi_tlv_dbglog_cmd *cmd;
3182	struct wmi_tlv *tlv;
3183	struct sk_buff *skb;
3184	size_t len, bmap_len;
3185	u32 value;
3186	void *ptr;
3187
3188	if (module_enable) {
3189		value = WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(
3190				module_enable,
3191				WMI_TLV_DBGLOG_LOG_LEVEL_VERBOSE);
3192	} else {
3193		value = WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(
3194				WMI_TLV_DBGLOG_ALL_MODULES,
3195				WMI_TLV_DBGLOG_LOG_LEVEL_WARN);
3196	}
3197
3198	bmap_len = 0;
3199	len = sizeof(*tlv) + sizeof(*cmd) + sizeof(*tlv) + bmap_len;
3200	skb = ath10k_wmi_alloc_skb(ar, len);
3201	if (!skb)
3202		return ERR_PTR(-ENOMEM);
3203
3204	ptr = (void *)skb->data;
3205
3206	tlv = ptr;
3207	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_DEBUG_LOG_CONFIG_CMD);
3208	tlv->len = __cpu_to_le16(sizeof(*cmd));
3209	cmd = (void *)tlv->value;
3210	cmd->param = __cpu_to_le32(WMI_TLV_DBGLOG_PARAM_LOG_LEVEL);
3211	cmd->value = __cpu_to_le32(value);
3212
3213	ptr += sizeof(*tlv);
3214	ptr += sizeof(*cmd);
3215
3216	tlv = ptr;
3217	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
3218	tlv->len = __cpu_to_le16(bmap_len);
3219
3220	/* nothing to do here */
3221
3222	ptr += sizeof(*tlv);
3223	ptr += bmap_len;
3224
3225	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv dbglog value 0x%08x\n", value);
3226	return skb;
3227}
3228
3229static struct sk_buff *
3230ath10k_wmi_tlv_op_gen_pktlog_enable(struct ath10k *ar, u32 filter)
3231{
3232	struct wmi_tlv_pktlog_enable *cmd;
3233	struct wmi_tlv *tlv;
3234	struct sk_buff *skb;
3235	void *ptr;
3236	size_t len;
3237
3238	len = sizeof(*tlv) + sizeof(*cmd);
3239	skb = ath10k_wmi_alloc_skb(ar, len);
3240	if (!skb)
3241		return ERR_PTR(-ENOMEM);
3242
3243	ptr = (void *)skb->data;
3244	tlv = ptr;
3245	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_ENABLE_CMD);
3246	tlv->len = __cpu_to_le16(sizeof(*cmd));
3247	cmd = (void *)tlv->value;
3248	cmd->filter = __cpu_to_le32(filter);
3249
3250	ptr += sizeof(*tlv);
3251	ptr += sizeof(*cmd);
3252
3253	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pktlog enable filter 0x%08x\n",
3254		   filter);
3255	return skb;
3256}
3257
3258static struct sk_buff *
3259ath10k_wmi_tlv_op_gen_pdev_get_temperature(struct ath10k *ar)
3260{
3261	struct wmi_tlv_pdev_get_temp_cmd *cmd;
3262	struct wmi_tlv *tlv;
3263	struct sk_buff *skb;
3264
3265	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
3266	if (!skb)
3267		return ERR_PTR(-ENOMEM);
3268
3269	tlv = (void *)skb->data;
3270	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_GET_TEMPERATURE_CMD);
3271	tlv->len = __cpu_to_le16(sizeof(*cmd));
3272	cmd = (void *)tlv->value;
3273	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev get temperature tlv\n");
3274	return skb;
3275}
3276
3277static struct sk_buff *
3278ath10k_wmi_tlv_op_gen_pktlog_disable(struct ath10k *ar)
3279{
3280	struct wmi_tlv_pktlog_disable *cmd;
3281	struct wmi_tlv *tlv;
3282	struct sk_buff *skb;
3283	void *ptr;
3284	size_t len;
3285
3286	len = sizeof(*tlv) + sizeof(*cmd);
3287	skb = ath10k_wmi_alloc_skb(ar, len);
3288	if (!skb)
3289		return ERR_PTR(-ENOMEM);
3290
3291	ptr = (void *)skb->data;
3292	tlv = ptr;
3293	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_DISABLE_CMD);
3294	tlv->len = __cpu_to_le16(sizeof(*cmd));
3295	cmd = (void *)tlv->value;
3296
3297	ptr += sizeof(*tlv);
3298	ptr += sizeof(*cmd);
3299
3300	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pktlog disable\n");
3301	return skb;
3302}
3303
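/* Beacon template: a fixed command TLV, a BCN_PRB_INFO TLV with optional
 * probe response IEs appended and the beacon itself in an ARRAY_BYTE TLV
 * padded to 4 bytes.
 */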
3304static struct sk_buff *
3305ath10k_wmi_tlv_op_gen_bcn_tmpl(struct ath10k *ar, u32 vdev_id,
3306			       u32 tim_ie_offset, struct sk_buff *bcn,
3307			       u32 prb_caps, u32 prb_erp, void *prb_ies,
3308			       size_t prb_ies_len)
3309{
3310	struct wmi_tlv_bcn_tmpl_cmd *cmd;
3311	struct wmi_tlv_bcn_prb_info *info;
3312	struct wmi_tlv *tlv;
3313	struct sk_buff *skb;
3314	void *ptr;
3315	size_t len;
3316
3317	if (WARN_ON(prb_ies_len > 0 && !prb_ies))
3318		return ERR_PTR(-EINVAL);
3319
3320	len = sizeof(*tlv) + sizeof(*cmd) +
3321	      sizeof(*tlv) + sizeof(*info) + prb_ies_len +
3322	      sizeof(*tlv) + roundup(bcn->len, 4);
3323	skb = ath10k_wmi_alloc_skb(ar, len);
3324	if (!skb)
3325		return ERR_PTR(-ENOMEM);
3326
3327	ptr = (void *)skb->data;
3328	tlv = ptr;
3329	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_TMPL_CMD);
3330	tlv->len = __cpu_to_le16(sizeof(*cmd));
3331	cmd = (void *)tlv->value;
3332	cmd->vdev_id = __cpu_to_le32(vdev_id);
3333	cmd->tim_ie_offset = __cpu_to_le32(tim_ie_offset);
3334	cmd->buf_len = __cpu_to_le32(bcn->len);
3335
3336	ptr += sizeof(*tlv);
3337	ptr += sizeof(*cmd);
3338
3339	/* FIXME: prb_ies_len should probably be aligned to a 4 byte boundary,
3340	 * but then it would be impossible to pass the original IE length.
3341	 * This chunk is not used yet, so if setting the probe response template
3342	 * yields problems with beaconing or crashes the firmware, look here.
3343	 */
3344	tlv = ptr;
3345	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
3346	tlv->len = __cpu_to_le16(sizeof(*info) + prb_ies_len);
3347	info = (void *)tlv->value;
3348	info->caps = __cpu_to_le32(prb_caps);
3349	info->erp = __cpu_to_le32(prb_erp);
3350	memcpy(info->ies, prb_ies, prb_ies_len);
3351
3352	ptr += sizeof(*tlv);
3353	ptr += sizeof(*info);
3354	ptr += prb_ies_len;
3355
3356	tlv = ptr;
3357	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
3358	tlv->len = __cpu_to_le16(roundup(bcn->len, 4));
3359	memcpy(tlv->value, bcn->data, bcn->len);
3360
3361	/* FIXME: Adjust TSF? */
3362
3363	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv bcn tmpl vdev_id %i\n",
3364		   vdev_id);
3365	return skb;
3366}
3367
3368static struct sk_buff *
3369ath10k_wmi_tlv_op_gen_prb_tmpl(struct ath10k *ar, u32 vdev_id,
3370			       struct sk_buff *prb)
3371{
3372	struct wmi_tlv_prb_tmpl_cmd *cmd;
3373	struct wmi_tlv_bcn_prb_info *info;
3374	struct wmi_tlv *tlv;
3375	struct sk_buff *skb;
3376	void *ptr;
3377	size_t len;
3378
3379	len = sizeof(*tlv) + sizeof(*cmd) +
3380	      sizeof(*tlv) + sizeof(*info) +
3381	      sizeof(*tlv) + roundup(prb->len, 4);
3382	skb = ath10k_wmi_alloc_skb(ar, len);
3383	if (!skb)
3384		return ERR_PTR(-ENOMEM);
3385
3386	ptr = (void *)skb->data;
3387	tlv = ptr;
3388	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PRB_TMPL_CMD);
3389	tlv->len = __cpu_to_le16(sizeof(*cmd));
3390	cmd = (void *)tlv->value;
3391	cmd->vdev_id = __cpu_to_le32(vdev_id);
3392	cmd->buf_len = __cpu_to_le32(prb->len);
3393
3394	ptr += sizeof(*tlv);
3395	ptr += sizeof(*cmd);
3396
3397	tlv = ptr;
3398	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
3399	tlv->len = __cpu_to_le16(sizeof(*info));
3400	info = (void *)tlv->value;
3401	info->caps = 0;
3402	info->erp = 0;
3403
3404	ptr += sizeof(*tlv);
3405	ptr += sizeof(*info);
3406
3407	tlv = ptr;
3408	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
3409	tlv->len = __cpu_to_le16(roundup(prb->len, 4));
3410	memcpy(tlv->value, prb->data, prb->len);
3411
3412	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv prb tmpl vdev_id %i\n",
3413		   vdev_id);
3414	return skb;
3415}
3416
3417static struct sk_buff *
3418ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id,
3419				    const u8 *p2p_ie)
3420{
3421	struct wmi_tlv_p2p_go_bcn_ie *cmd;
3422	struct wmi_tlv *tlv;
3423	struct sk_buff *skb;
3424	void *ptr;
3425	size_t len;
3426
3427	len = sizeof(*tlv) + sizeof(*cmd) +
3428	      sizeof(*tlv) + roundup(p2p_ie[1] + 2, 4);
3429	skb = ath10k_wmi_alloc_skb(ar, len);
3430	if (!skb)
3431		return ERR_PTR(-ENOMEM);
3432
3433	ptr = (void *)skb->data;
3434	tlv = ptr;
3435	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_P2P_GO_SET_BEACON_IE);
3436	tlv->len = __cpu_to_le16(sizeof(*cmd));
3437	cmd = (void *)tlv->value;
3438	cmd->vdev_id = __cpu_to_le32(vdev_id);
3439	cmd->ie_len = __cpu_to_le32(p2p_ie[1] + 2);
3440
3441	ptr += sizeof(*tlv);
3442	ptr += sizeof(*cmd);
3443
3444	tlv = ptr;
3445	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
3446	tlv->len = __cpu_to_le16(roundup(p2p_ie[1] + 2, 4));
3447	memcpy(tlv->value, p2p_ie, p2p_ie[1] + 2);
3448
3449	ptr += sizeof(*tlv);
3450	ptr += roundup(p2p_ie[1] + 2, 4);
3451
3452	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv p2p go bcn ie for vdev %i\n",
3453		   vdev_id);
3454	return skb;
3455}
3456
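/* Enable or disable TDLS in firmware with fixed detection thresholds and
 * timers; the buffer STA option is requested only when the firmware
 * advertises the corresponding service.
 */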
3457static struct sk_buff *
3458ath10k_wmi_tlv_op_gen_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
3459					   enum wmi_tdls_state state)
3460{
3461	struct wmi_tdls_set_state_cmd *cmd;
3462	struct wmi_tlv *tlv;
3463	struct sk_buff *skb;
3464	void *ptr;
3465	size_t len;
3466	/* Set options from wmi_tlv_tdls_options;
3467	 * for now none of them are enabled by default.
3468	 */
3469	u32 options = 0;
3470
3471	if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, ar->wmi.svc_map))
3472		options |=  WMI_TLV_TDLS_BUFFER_STA_EN;
3473
3474	/* WMI_TDLS_ENABLE_ACTIVE_EXTERNAL_CONTROL means the firmware will handle
3475	 * the TDLS link inactivity detection logic.
3476	 */
3477	if (state == WMI_TDLS_ENABLE_ACTIVE)
3478		state = WMI_TDLS_ENABLE_ACTIVE_EXTERNAL_CONTROL;
3479
3480	len = sizeof(*tlv) + sizeof(*cmd);
3481	skb = ath10k_wmi_alloc_skb(ar, len);
3482	if (!skb)
3483		return ERR_PTR(-ENOMEM);
3484
3485	ptr = (void *)skb->data;
3486	tlv = ptr;
3487	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_SET_STATE_CMD);
3488	tlv->len = __cpu_to_le16(sizeof(*cmd));
3489
3490	cmd = (void *)tlv->value;
3491	cmd->vdev_id = __cpu_to_le32(vdev_id);
3492	cmd->state = __cpu_to_le32(state);
3493	cmd->notification_interval_ms = __cpu_to_le32(5000);
3494	cmd->tx_discovery_threshold = __cpu_to_le32(100);
3495	cmd->tx_teardown_threshold = __cpu_to_le32(5);
3496	cmd->rssi_teardown_threshold = __cpu_to_le32(-75);
3497	cmd->rssi_delta = __cpu_to_le32(-20);
3498	cmd->tdls_options = __cpu_to_le32(options);
3499	cmd->tdls_peer_traffic_ind_window = __cpu_to_le32(2);
3500	cmd->tdls_peer_traffic_response_timeout_ms = __cpu_to_le32(5000);
3501	cmd->tdls_puapsd_mask = __cpu_to_le32(0xf);
3502	cmd->tdls_puapsd_inactivity_time_ms = __cpu_to_le32(0);
3503	cmd->tdls_puapsd_rx_frame_threshold = __cpu_to_le32(10);
3504
3505	ptr += sizeof(*tlv);
3506	ptr += sizeof(*cmd);
3507
3508	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv update fw tdls state %d for vdev %i\n",
3509		   state, vdev_id);
3510	return skb;
3511}
3512
3513static u32 ath10k_wmi_tlv_prepare_peer_qos(u8 uapsd_queues, u8 sp)
3514{
3515	u32 peer_qos = 0;
3516
3517	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
3518		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VO;
3519	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
3520		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VI;
3521	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
3522		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BK;
3523	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
3524		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BE;
3525
3526	peer_qos |= SM(sp, WMI_TLV_TDLS_PEER_SP);
3527
3528	return peer_qos;
3529}
3530
3531static struct sk_buff *
3532ath10k_wmi_tlv_op_gen_tdls_peer_update(struct ath10k *ar,
3533				       const struct wmi_tdls_peer_update_cmd_arg *arg,
3534				       const struct wmi_tdls_peer_capab_arg *cap,
3535				       const struct wmi_channel_arg *chan_arg)
3536{
3537	struct wmi_tdls_peer_update_cmd *cmd;
3538	struct wmi_tdls_peer_capab *peer_cap;
3539	struct wmi_channel *chan;
3540	struct wmi_tlv *tlv;
3541	struct sk_buff *skb;
3542	u32 peer_qos;
3543	void *ptr;
3544	int len;
3545	int i;
3546
3547	len = sizeof(*tlv) + sizeof(*cmd) +
3548	      sizeof(*tlv) + sizeof(*peer_cap) +
3549	      sizeof(*tlv) + cap->peer_chan_len * sizeof(*chan);
3550
3551	skb = ath10k_wmi_alloc_skb(ar, len);
3552	if (!skb)
3553		return ERR_PTR(-ENOMEM);
3554
3555	ptr = (void *)skb->data;
3556	tlv = ptr;
3557	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_UPDATE_CMD);
3558	tlv->len = __cpu_to_le16(sizeof(*cmd));
3559
3560	cmd = (void *)tlv->value;
3561	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
3562	ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
3563	cmd->peer_state = __cpu_to_le32(arg->peer_state);
3564
3565	ptr += sizeof(*tlv);
3566	ptr += sizeof(*cmd);
3567
3568	tlv = ptr;
3569	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_CAPABILITIES);
3570	tlv->len = __cpu_to_le16(sizeof(*peer_cap));
3571	peer_cap = (void *)tlv->value;
3572	peer_qos = ath10k_wmi_tlv_prepare_peer_qos(cap->peer_uapsd_queues,
3573						   cap->peer_max_sp);
3574	peer_cap->peer_qos = __cpu_to_le32(peer_qos);
3575	peer_cap->buff_sta_support = __cpu_to_le32(cap->buff_sta_support);
3576	peer_cap->off_chan_support = __cpu_to_le32(cap->off_chan_support);
3577	peer_cap->peer_curr_operclass = __cpu_to_le32(cap->peer_curr_operclass);
3578	peer_cap->self_curr_operclass = __cpu_to_le32(cap->self_curr_operclass);
3579	peer_cap->peer_chan_len = __cpu_to_le32(cap->peer_chan_len);
3580	peer_cap->peer_operclass_len = __cpu_to_le32(cap->peer_operclass_len);
3581
3582	for (i = 0; i < WMI_TDLS_MAX_SUPP_OPER_CLASSES; i++)
3583		peer_cap->peer_operclass[i] = cap->peer_operclass[i];
3584
3585	peer_cap->is_peer_responder = __cpu_to_le32(cap->is_peer_responder);
3586	peer_cap->pref_offchan_num = __cpu_to_le32(cap->pref_offchan_num);
3587	peer_cap->pref_offchan_bw = __cpu_to_le32(cap->pref_offchan_bw);
3588
3589	ptr += sizeof(*tlv);
3590	ptr += sizeof(*peer_cap);
3591
3592	tlv = ptr;
3593	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
3594	tlv->len = __cpu_to_le16(cap->peer_chan_len * sizeof(*chan));
3595
3596	ptr += sizeof(*tlv);
3597
3598	for (i = 0; i < cap->peer_chan_len; i++) {
3599		tlv = ptr;
3600		tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
3601		tlv->len = __cpu_to_le16(sizeof(*chan));
3602		chan = (void *)tlv->value;
3603		ath10k_wmi_put_wmi_channel(ar, chan, &chan_arg[i]);
3604
3605		ptr += sizeof(*tlv);
3606		ptr += sizeof(*chan);
3607	}
3608
3609	ath10k_dbg(ar, ATH10K_DBG_WMI,
3610		   "wmi tlv tdls peer update vdev %i state %d n_chans %u\n",
3611		   arg->vdev_id, arg->peer_state, cap->peer_chan_len);
3612	return skb;
3613}
3614
3615static struct sk_buff *
3616ath10k_wmi_tlv_op_gen_pdev_set_quiet_mode(struct ath10k *ar, u32 period,
3617					  u32 duration, u32 next_offset,
3618					  u32 enabled)
3619{
3620	struct wmi_tlv_set_quiet_cmd *cmd;
3621	struct wmi_tlv *tlv;
3622	struct sk_buff *skb;
3623
3624	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
3625	if (!skb)
3626		return ERR_PTR(-ENOMEM);
3627
3628	tlv = (void *)skb->data;
3629	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_QUIET_CMD);
3630	tlv->len = __cpu_to_le16(sizeof(*cmd));
3631	cmd = (void *)tlv->value;
3632
3633	/* vdev_id is not in use, set to 0 */
3634	cmd->vdev_id = __cpu_to_le32(0);
3635	cmd->period = __cpu_to_le32(period);
3636	cmd->duration = __cpu_to_le32(duration);
3637	cmd->next_start = __cpu_to_le32(next_offset);
3638	cmd->enabled = __cpu_to_le32(enabled);
3639
3640	ath10k_dbg(ar, ATH10K_DBG_WMI,
3641		   "wmi tlv quiet param: period %u duration %u enabled %d\n",
3642		   period, duration, enabled);
3643	return skb;
3644}
3645
3646static struct sk_buff *
3647ath10k_wmi_tlv_op_gen_wow_enable(struct ath10k *ar)
3648{
3649	struct wmi_tlv_wow_enable_cmd *cmd;
3650	struct wmi_tlv *tlv;
3651	struct sk_buff *skb;
3652	size_t len;
3653
3654	len = sizeof(*tlv) + sizeof(*cmd);
3655	skb = ath10k_wmi_alloc_skb(ar, len);
3656	if (!skb)
3657		return ERR_PTR(-ENOMEM);
3658
3659	tlv = (struct wmi_tlv *)skb->data;
3660	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ENABLE_CMD);
3661	tlv->len = __cpu_to_le16(sizeof(*cmd));
3662	cmd = (void *)tlv->value;
3663
3664	cmd->enable = __cpu_to_le32(1);
3665	if (!ar->bus_param.link_can_suspend)
3666		cmd->pause_iface_config = __cpu_to_le32(WOW_IFACE_PAUSE_DISABLED);
3667
3668	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow enable\n");
3669	return skb;
3670}
3671
3672static struct sk_buff *
3673ath10k_wmi_tlv_op_gen_wow_add_wakeup_event(struct ath10k *ar,
3674					   u32 vdev_id,
3675					   enum wmi_wow_wakeup_event event,
3676					   u32 enable)
3677{
3678	struct wmi_tlv_wow_add_del_event_cmd *cmd;
3679	struct wmi_tlv *tlv;
3680	struct sk_buff *skb;
3681	size_t len;
3682
3683	len = sizeof(*tlv) + sizeof(*cmd);
3684	skb = ath10k_wmi_alloc_skb(ar, len);
3685	if (!skb)
3686		return ERR_PTR(-ENOMEM);
3687
3688	tlv = (struct wmi_tlv *)skb->data;
3689	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_DEL_EVT_CMD);
3690	tlv->len = __cpu_to_le16(sizeof(*cmd));
3691	cmd = (void *)tlv->value;
3692
3693	cmd->vdev_id = __cpu_to_le32(vdev_id);
3694	cmd->is_add = __cpu_to_le32(enable);
3695	cmd->event_bitmap = __cpu_to_le32(1 << event);
3696
3697	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
3698		   wow_wakeup_event(event), enable, vdev_id);
3699	return skb;
3700}
3701
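/* The host-wakeup indication carries no parameters: only the TLV header
 * is filled in and the command body is left untouched.
 */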
3702static struct sk_buff *
3703ath10k_wmi_tlv_gen_wow_host_wakeup_ind(struct ath10k *ar)
3704{
3705	struct wmi_tlv_wow_host_wakeup_ind *cmd;
3706	struct wmi_tlv *tlv;
3707	struct sk_buff *skb;
3708	size_t len;
3709
3710	len = sizeof(*tlv) + sizeof(*cmd);
3711	skb = ath10k_wmi_alloc_skb(ar, len);
3712	if (!skb)
3713		return ERR_PTR(-ENOMEM);
3714
3715	tlv = (struct wmi_tlv *)skb->data;
3716	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_HOSTWAKEUP_FROM_SLEEP_CMD);
3717	tlv->len = __cpu_to_le16(sizeof(*cmd));
3718	cmd = (void *)tlv->value;
3719
3720	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow host wakeup ind\n");
3721	return skb;
3722}
3723
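/* Build a WoW bitmap-pattern command. Alongside the pattern itself the
 * command carries empty placeholder TLV arrays for the IPv4/IPv6 sync,
 * magic-pattern and pattern-timeout variants, plus a single ratelimit
 * interval value, as laid out in the length calculation below.
 */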
3724static struct sk_buff *
3725ath10k_wmi_tlv_op_gen_wow_add_pattern(struct ath10k *ar, u32 vdev_id,
3726				      u32 pattern_id, const u8 *pattern,
3727				      const u8 *bitmask, int pattern_len,
3728				      int pattern_offset)
3729{
3730	struct wmi_tlv_wow_add_pattern_cmd *cmd;
3731	struct wmi_tlv_wow_bitmap_pattern *bitmap;
3732	struct wmi_tlv *tlv;
3733	struct sk_buff *skb;
3734	void *ptr;
3735	size_t len;
3736
3737	len = sizeof(*tlv) + sizeof(*cmd) +
3738	      sizeof(*tlv) +			/* array struct */
3739	      sizeof(*tlv) + sizeof(*bitmap) +  /* bitmap */
3740	      sizeof(*tlv) +			/* empty ipv4 sync */
3741	      sizeof(*tlv) +			/* empty ipv6 sync */
3742	      sizeof(*tlv) +			/* empty magic */
3743	      sizeof(*tlv) +			/* empty info timeout */
3744	      sizeof(*tlv) + sizeof(u32);	/* ratelimit interval */
3745
3746	skb = ath10k_wmi_alloc_skb(ar, len);
3747	if (!skb)
3748		return ERR_PTR(-ENOMEM);
3749
3750	/* cmd */
3751	ptr = (void *)skb->data;
3752	tlv = ptr;
3753	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_PATTERN_CMD);
3754	tlv->len = __cpu_to_le16(sizeof(*cmd));
3755	cmd = (void *)tlv->value;
3756
3757	cmd->vdev_id = __cpu_to_le32(vdev_id);
3758	cmd->pattern_id = __cpu_to_le32(pattern_id);
3759	cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN);
3760
3761	ptr += sizeof(*tlv);
3762	ptr += sizeof(*cmd);
3763
3764	/* bitmap */
3765	tlv = ptr;
3766	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
3767	tlv->len = __cpu_to_le16(sizeof(*tlv) + sizeof(*bitmap));
3768
3769	ptr += sizeof(*tlv);
3770
3771	tlv = ptr;
3772	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_BITMAP_PATTERN_T);
3773	tlv->len = __cpu_to_le16(sizeof(*bitmap));
3774	bitmap = (void *)tlv->value;
3775
3776	memcpy(bitmap->patternbuf, pattern, pattern_len);
3777	memcpy(bitmap->bitmaskbuf, bitmask, pattern_len);
3778	bitmap->pattern_offset = __cpu_to_le32(pattern_offset);
3779	bitmap->pattern_len = __cpu_to_le32(pattern_len);
3780	bitmap->bitmask_len = __cpu_to_le32(pattern_len);
3781	bitmap->pattern_id = __cpu_to_le32(pattern_id);
3782
3783	ptr += sizeof(*tlv);
3784	ptr += sizeof(*bitmap);
3785
3786	/* ipv4 sync */
3787	tlv = ptr;
3788	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
3789	tlv->len = __cpu_to_le16(0);
3790
3791	ptr += sizeof(*tlv);
3792
3793	/* ipv6 sync */
3794	tlv = ptr;
3795	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
3796	tlv->len = __cpu_to_le16(0);
3797
3798	ptr += sizeof(*tlv);
3799
3800	/* magic */
3801	tlv = ptr;
3802	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
3803	tlv->len = __cpu_to_le16(0);
3804
3805	ptr += sizeof(*tlv);
3806
3807	/* pattern info timeout */
3808	tlv = ptr;
3809	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
3810	tlv->len = __cpu_to_le16(0);
3811
3812	ptr += sizeof(*tlv);
3813
3814	/* ratelimit interval */
3815	tlv = ptr;
3816	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
3817	tlv->len = __cpu_to_le16(sizeof(u32));
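	/* only the TLV header is filled in; the interval value itself is
	 * not written here
	 */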
3818
3819	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d, pattern_offset %d\n",
3820		   vdev_id, pattern_id, pattern_offset);
3821	return skb;
3822}
3823
3824static struct sk_buff *
3825ath10k_wmi_tlv_op_gen_wow_del_pattern(struct ath10k *ar, u32 vdev_id,
3826				      u32 pattern_id)
3827{
3828	struct wmi_tlv_wow_del_pattern_cmd *cmd;
3829	struct wmi_tlv *tlv;
3830	struct sk_buff *skb;
3831	size_t len;
3832
3833	len = sizeof(*tlv) + sizeof(*cmd);
3834	skb = ath10k_wmi_alloc_skb(ar, len);
3835	if (!skb)
3836		return ERR_PTR(-ENOMEM);
3837
3838	tlv = (struct wmi_tlv *)skb->data;
3839	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_DEL_PATTERN_CMD);
3840	tlv->len = __cpu_to_le16(sizeof(*cmd));
3841	cmd = (void *)tlv->value;
3842
3843	cmd->vdev_id = __cpu_to_le32(vdev_id);
3844	cmd->pattern_id = __cpu_to_le32(pattern_id);
3845	cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN);
3846
3847	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
3848		   vdev_id, pattern_id);
3849	return skb;
3850}
3851
3852/* Request FW to start PNO operation */
3853static struct sk_buff *
3854ath10k_wmi_tlv_op_gen_config_pno_start(struct ath10k *ar,
3855				       u32 vdev_id,
3856				       struct wmi_pno_scan_req *pno)
3857{
3858	struct nlo_configured_parameters *nlo_list;
3859	struct wmi_tlv_wow_nlo_config_cmd *cmd;
3860	struct wmi_tlv *tlv;
3861	struct sk_buff *skb;
3862	__le32 *channel_list;
3863	u16 tlv_len;
3864	size_t len;
3865	void *ptr;
3866	u32 i;
3867
3868	len = sizeof(*tlv) + sizeof(*cmd) +
3869	      sizeof(*tlv) +
3870	      /* TLV place holder for array of structures
3871	       * nlo_configured_parameters(nlo_list)
3872	       */
3873	      sizeof(*tlv);
3874	      /* TLV place holder for array of uint32 channel_list */
3875
3876	len += sizeof(u32) * min_t(u8, pno->a_networks[0].channel_count,
3877				   WMI_NLO_MAX_CHAN);
3878	len += sizeof(struct nlo_configured_parameters) *
3879				min_t(u8, pno->uc_networks_count, WMI_NLO_MAX_SSIDS);
3880
3881	skb = ath10k_wmi_alloc_skb(ar, len);
3882	if (!skb)
3883		return ERR_PTR(-ENOMEM);
3884
3885	ptr = (void *)skb->data;
3886	tlv = ptr;
3887	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_NLO_CONFIG_CMD);
3888	tlv->len = __cpu_to_le16(sizeof(*cmd));
3889	cmd = (void *)tlv->value;
3890
3891	/* wmi_tlv_wow_nlo_config_cmd parameters */
3892	cmd->vdev_id = __cpu_to_le32(pno->vdev_id);
3893	cmd->flags = __cpu_to_le32(WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN);
3894
3895	/* current FW does not support min-max range for dwell time */
3896	cmd->active_dwell_time = __cpu_to_le32(pno->active_max_time);
3897	cmd->passive_dwell_time = __cpu_to_le32(pno->passive_max_time);
3898
3899	if (pno->do_passive_scan)
3900		cmd->flags |= __cpu_to_le32(WMI_NLO_CONFIG_SCAN_PASSIVE);
3901
3902	/* copy scan interval */
3903	cmd->fast_scan_period = __cpu_to_le32(pno->fast_scan_period);
3904	cmd->slow_scan_period = __cpu_to_le32(pno->slow_scan_period);
3905	cmd->fast_scan_max_cycles = __cpu_to_le32(pno->fast_scan_max_cycles);
3906	cmd->delay_start_time = __cpu_to_le32(pno->delay_start_time);
3907
3908	if (pno->enable_pno_scan_randomization) {
3909		cmd->flags |= __cpu_to_le32(WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ |
3910				WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ);
3911		ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr);
3912		ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask);
3913	}
3914
3915	ptr += sizeof(*tlv);
3916	ptr += sizeof(*cmd);
3917
3918	/* nlo_configured_parameters(nlo_list) */
3919	cmd->no_of_ssids = __cpu_to_le32(min_t(u8, pno->uc_networks_count,
3920					       WMI_NLO_MAX_SSIDS));
3921	tlv_len = __le32_to_cpu(cmd->no_of_ssids) *
3922		sizeof(struct nlo_configured_parameters);
3923
3924	tlv = ptr;
3925	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
3926	tlv->len = __cpu_to_le16(tlv_len);
3927
3928	ptr += sizeof(*tlv);
3929	nlo_list = ptr;
3930	for (i = 0; i < __le32_to_cpu(cmd->no_of_ssids); i++) {
3931		tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header);
3932		tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
3933		tlv->len = __cpu_to_le16(sizeof(struct nlo_configured_parameters) -
3934					 sizeof(*tlv));
3935
3936		/* copy ssid and its length */
3937		nlo_list[i].ssid.valid = __cpu_to_le32(true);
3938		nlo_list[i].ssid.ssid.ssid_len = pno->a_networks[i].ssid.ssid_len;
3939		memcpy(nlo_list[i].ssid.ssid.ssid,
3940		       pno->a_networks[i].ssid.ssid,
3941		       __le32_to_cpu(nlo_list[i].ssid.ssid.ssid_len));
3942
3943		/* copy rssi threshold */
3944		if (pno->a_networks[i].rssi_threshold &&
3945		    pno->a_networks[i].rssi_threshold > -300) {
3946			nlo_list[i].rssi_cond.valid = __cpu_to_le32(true);
3947			nlo_list[i].rssi_cond.rssi =
3948				__cpu_to_le32(pno->a_networks[i].rssi_threshold);
3949		}
3950
3951		nlo_list[i].bcast_nw_type.valid = __cpu_to_le32(true);
3952		nlo_list[i].bcast_nw_type.bcast_nw_type =
3953			__cpu_to_le32(pno->a_networks[i].bcast_nw_type);
3954	}
3955
3956	ptr += __le32_to_cpu(cmd->no_of_ssids) * sizeof(struct nlo_configured_parameters);
3957
3958	/* copy channel info */
3959	cmd->num_of_channels = __cpu_to_le32(min_t(u8,
3960						   pno->a_networks[0].channel_count,
3961						   WMI_NLO_MAX_CHAN));
3962
3963	tlv = ptr;
3964	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
3965	tlv->len = __cpu_to_le16(__le32_to_cpu(cmd->num_of_channels) *
3966				 sizeof(u32));
3967	ptr += sizeof(*tlv);
3968
3969	channel_list = (__le32 *)ptr;
3970	for (i = 0; i < __le32_to_cpu(cmd->num_of_channels); i++)
3971		channel_list[i] = __cpu_to_le32(pno->a_networks[0].channels[i]);
3972
3973	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n",
3974		   vdev_id);
3975
3976	return skb;
3977}
3978
3979/* Request FW to stop ongoing PNO operation */
3980static struct sk_buff *ath10k_wmi_tlv_op_gen_config_pno_stop(struct ath10k *ar,
3981							     u32 vdev_id)
3982{
3983	struct wmi_tlv_wow_nlo_config_cmd *cmd;
3984	struct wmi_tlv *tlv;
3985	struct sk_buff *skb;
3986	void *ptr;
3987	size_t len;
3988
3989	len = sizeof(*tlv) + sizeof(*cmd) +
3990	      sizeof(*tlv) +
3991	      /* TLV place holder for array of structures
3992	       * nlo_configured_parameters(nlo_list)
3993	       */
3994	      sizeof(*tlv);
3995	      /* TLV place holder for array of uint32 channel_list */
3996	skb = ath10k_wmi_alloc_skb(ar, len);
3997	if (!skb)
3998		return ERR_PTR(-ENOMEM);
3999
4000	ptr = (void *)skb->data;
4001	tlv = ptr;
4002	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_NLO_CONFIG_CMD);
4003	tlv->len = __cpu_to_le16(sizeof(*cmd));
4004	cmd = (void *)tlv->value;
4005
4006	cmd->vdev_id = __cpu_to_le32(vdev_id);
4007	cmd->flags = __cpu_to_le32(WMI_NLO_CONFIG_STOP);
4008
4009	ptr += sizeof(*tlv);
4010	ptr += sizeof(*cmd);
4011
4012	/* nlo_configured_parameters(nlo_list) */
4013	tlv = ptr;
4014	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
4015	tlv->len = __cpu_to_le16(0);
4016
4017	ptr += sizeof(*tlv);
4018
4019	/* channel list */
4020	tlv = ptr;
4021	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
4022	tlv->len = __cpu_to_le16(0);
4023
4024	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv stop pno config vdev_id %d\n", vdev_id);
4025	return skb;
4026}
4027
4028static struct sk_buff *
4029ath10k_wmi_tlv_op_gen_config_pno(struct ath10k *ar, u32 vdev_id,
4030				 struct wmi_pno_scan_req *pno_scan)
4031{
4032	if (pno_scan->enable)
4033		return ath10k_wmi_tlv_op_gen_config_pno_start(ar, vdev_id, pno_scan);
4034	else
4035		return ath10k_wmi_tlv_op_gen_config_pno_stop(ar, vdev_id);
4036}
4037
4038static struct sk_buff *
4039ath10k_wmi_tlv_op_gen_adaptive_qcs(struct ath10k *ar, bool enable)
4040{
4041	struct wmi_tlv_adaptive_qcs *cmd;
4042	struct wmi_tlv *tlv;
4043	struct sk_buff *skb;
4044	void *ptr;
4045	size_t len;
4046
4047	len = sizeof(*tlv) + sizeof(*cmd);
4048	skb = ath10k_wmi_alloc_skb(ar, len);
4049	if (!skb)
4050		return ERR_PTR(-ENOMEM);
4051
4052	ptr = (void *)skb->data;
4053	tlv = ptr;
4054	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_RESMGR_ADAPTIVE_OCS_CMD);
4055	tlv->len = __cpu_to_le16(sizeof(*cmd));
4056	cmd = (void *)tlv->value;
4057	cmd->enable = __cpu_to_le32(enable ? 1 : 0);
4058
4059	ptr += sizeof(*tlv);
4060	ptr += sizeof(*cmd);
4061
4062	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv adaptive qcs %d\n", enable);
4063	return skb;
4064}
4065
4066static struct sk_buff *
4067ath10k_wmi_tlv_op_gen_echo(struct ath10k *ar, u32 value)
4068{
4069	struct wmi_echo_cmd *cmd;
4070	struct wmi_tlv *tlv;
4071	struct sk_buff *skb;
4072	void *ptr;
4073	size_t len;
4074
4075	len = sizeof(*tlv) + sizeof(*cmd);
4076	skb = ath10k_wmi_alloc_skb(ar, len);
4077	if (!skb)
4078		return ERR_PTR(-ENOMEM);
4079
4080	ptr = (void *)skb->data;
4081	tlv = ptr;
4082	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_ECHO_CMD);
4083	tlv->len = __cpu_to_le16(sizeof(*cmd));
4084	cmd = (void *)tlv->value;
4085	cmd->value = cpu_to_le32(value);
4086
4087	ptr += sizeof(*tlv);
4088	ptr += sizeof(*cmd);
4089
4090	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv echo value 0x%08x\n", value);
4091	return skb;
4092}
4093
4094static struct sk_buff *
4095ath10k_wmi_tlv_op_gen_vdev_spectral_conf(struct ath10k *ar,
4096					 const struct wmi_vdev_spectral_conf_arg *arg)
4097{
4098	struct wmi_vdev_spectral_conf_cmd *cmd;
4099	struct sk_buff *skb;
4100	struct wmi_tlv *tlv;
4101	void *ptr;
4102	size_t len;
4103
4104	len = sizeof(*tlv) + sizeof(*cmd);
4105	skb = ath10k_wmi_alloc_skb(ar, len);
4106	if (!skb)
4107		return ERR_PTR(-ENOMEM);
4108
4109	ptr = (void *)skb->data;
4110	tlv = ptr;
4111	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_CONFIGURE_CMD);
4112	tlv->len = __cpu_to_le16(sizeof(*cmd));
4113	cmd = (void *)tlv->value;
4114	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
4115	cmd->scan_count = __cpu_to_le32(arg->scan_count);
4116	cmd->scan_period = __cpu_to_le32(arg->scan_period);
4117	cmd->scan_priority = __cpu_to_le32(arg->scan_priority);
4118	cmd->scan_fft_size = __cpu_to_le32(arg->scan_fft_size);
4119	cmd->scan_gc_ena = __cpu_to_le32(arg->scan_gc_ena);
4120	cmd->scan_restart_ena = __cpu_to_le32(arg->scan_restart_ena);
4121	cmd->scan_noise_floor_ref = __cpu_to_le32(arg->scan_noise_floor_ref);
4122	cmd->scan_init_delay = __cpu_to_le32(arg->scan_init_delay);
4123	cmd->scan_nb_tone_thr = __cpu_to_le32(arg->scan_nb_tone_thr);
4124	cmd->scan_str_bin_thr = __cpu_to_le32(arg->scan_str_bin_thr);
4125	cmd->scan_wb_rpt_mode = __cpu_to_le32(arg->scan_wb_rpt_mode);
4126	cmd->scan_rssi_rpt_mode = __cpu_to_le32(arg->scan_rssi_rpt_mode);
4127	cmd->scan_rssi_thr = __cpu_to_le32(arg->scan_rssi_thr);
4128	cmd->scan_pwr_format = __cpu_to_le32(arg->scan_pwr_format);
4129	cmd->scan_rpt_mode = __cpu_to_le32(arg->scan_rpt_mode);
4130	cmd->scan_bin_scale = __cpu_to_le32(arg->scan_bin_scale);
4131	cmd->scan_dbm_adj = __cpu_to_le32(arg->scan_dbm_adj);
4132	cmd->scan_chn_mask = __cpu_to_le32(arg->scan_chn_mask);
4133
4134	return skb;
4135}
4136
4137static struct sk_buff *
4138ath10k_wmi_tlv_op_gen_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id,
4139					   u32 trigger, u32 enable)
4140{
4141	struct wmi_vdev_spectral_enable_cmd *cmd;
4142	struct sk_buff *skb;
4143	struct wmi_tlv *tlv;
4144	void *ptr;
4145	size_t len;
4146
4147	len = sizeof(*tlv) + sizeof(*cmd);
4148	skb = ath10k_wmi_alloc_skb(ar, len);
4149	if (!skb)
4150		return ERR_PTR(-ENOMEM);
4151
4152	ptr = (void *)skb->data;
4153	tlv = ptr;
4154	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_ENABLE_CMD);
4155	tlv->len = __cpu_to_le16(sizeof(*cmd));
4156	cmd = (void *)tlv->value;
4157	cmd->vdev_id = __cpu_to_le32(vdev_id);
4158	cmd->trigger_cmd = __cpu_to_le32(trigger);
4159	cmd->enable_cmd = __cpu_to_le32(enable);
4160
4161	return skb;
4162}
4163
4164/****************/
4165/* TLV mappings */
4166/****************/
4167
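/* Commands the TLV firmware interface does not implement are mapped to
 * WMI_CMD_UNSUPPORTED; the parameter maps below use the corresponding
 * *_PARAM_UNSUPPORTED markers in the same way.
 */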
4168static struct wmi_cmd_map wmi_tlv_cmd_map = {
4169	.init_cmdid = WMI_TLV_INIT_CMDID,
4170	.start_scan_cmdid = WMI_TLV_START_SCAN_CMDID,
4171	.stop_scan_cmdid = WMI_TLV_STOP_SCAN_CMDID,
4172	.scan_chan_list_cmdid = WMI_TLV_SCAN_CHAN_LIST_CMDID,
4173	.scan_sch_prio_tbl_cmdid = WMI_TLV_SCAN_SCH_PRIO_TBL_CMDID,
4174	.scan_prob_req_oui_cmdid = WMI_TLV_SCAN_PROB_REQ_OUI_CMDID,
4175	.pdev_set_regdomain_cmdid = WMI_TLV_PDEV_SET_REGDOMAIN_CMDID,
4176	.pdev_set_channel_cmdid = WMI_TLV_PDEV_SET_CHANNEL_CMDID,
4177	.pdev_set_param_cmdid = WMI_TLV_PDEV_SET_PARAM_CMDID,
4178	.pdev_pktlog_enable_cmdid = WMI_TLV_PDEV_PKTLOG_ENABLE_CMDID,
4179	.pdev_pktlog_disable_cmdid = WMI_TLV_PDEV_PKTLOG_DISABLE_CMDID,
4180	.pdev_set_wmm_params_cmdid = WMI_TLV_PDEV_SET_WMM_PARAMS_CMDID,
4181	.pdev_set_ht_cap_ie_cmdid = WMI_TLV_PDEV_SET_HT_CAP_IE_CMDID,
4182	.pdev_set_vht_cap_ie_cmdid = WMI_TLV_PDEV_SET_VHT_CAP_IE_CMDID,
4183	.pdev_set_dscp_tid_map_cmdid = WMI_TLV_PDEV_SET_DSCP_TID_MAP_CMDID,
4184	.pdev_set_quiet_mode_cmdid = WMI_TLV_PDEV_SET_QUIET_MODE_CMDID,
4185	.pdev_green_ap_ps_enable_cmdid = WMI_TLV_PDEV_GREEN_AP_PS_ENABLE_CMDID,
4186	.pdev_get_tpc_config_cmdid = WMI_TLV_PDEV_GET_TPC_CONFIG_CMDID,
4187	.pdev_set_base_macaddr_cmdid = WMI_TLV_PDEV_SET_BASE_MACADDR_CMDID,
4188	.vdev_create_cmdid = WMI_TLV_VDEV_CREATE_CMDID,
4189	.vdev_delete_cmdid = WMI_TLV_VDEV_DELETE_CMDID,
4190	.vdev_start_request_cmdid = WMI_TLV_VDEV_START_REQUEST_CMDID,
4191	.vdev_restart_request_cmdid = WMI_TLV_VDEV_RESTART_REQUEST_CMDID,
4192	.vdev_up_cmdid = WMI_TLV_VDEV_UP_CMDID,
4193	.vdev_stop_cmdid = WMI_TLV_VDEV_STOP_CMDID,
4194	.vdev_down_cmdid = WMI_TLV_VDEV_DOWN_CMDID,
4195	.vdev_set_param_cmdid = WMI_TLV_VDEV_SET_PARAM_CMDID,
4196	.vdev_install_key_cmdid = WMI_TLV_VDEV_INSTALL_KEY_CMDID,
4197	.peer_create_cmdid = WMI_TLV_PEER_CREATE_CMDID,
4198	.peer_delete_cmdid = WMI_TLV_PEER_DELETE_CMDID,
4199	.peer_flush_tids_cmdid = WMI_TLV_PEER_FLUSH_TIDS_CMDID,
4200	.peer_set_param_cmdid = WMI_TLV_PEER_SET_PARAM_CMDID,
4201	.peer_assoc_cmdid = WMI_TLV_PEER_ASSOC_CMDID,
4202	.peer_add_wds_entry_cmdid = WMI_TLV_PEER_ADD_WDS_ENTRY_CMDID,
4203	.peer_remove_wds_entry_cmdid = WMI_TLV_PEER_REMOVE_WDS_ENTRY_CMDID,
4204	.peer_mcast_group_cmdid = WMI_TLV_PEER_MCAST_GROUP_CMDID,
4205	.bcn_tx_cmdid = WMI_TLV_BCN_TX_CMDID,
4206	.pdev_send_bcn_cmdid = WMI_TLV_PDEV_SEND_BCN_CMDID,
4207	.bcn_tmpl_cmdid = WMI_TLV_BCN_TMPL_CMDID,
4208	.bcn_filter_rx_cmdid = WMI_TLV_BCN_FILTER_RX_CMDID,
4209	.prb_req_filter_rx_cmdid = WMI_TLV_PRB_REQ_FILTER_RX_CMDID,
4210	.mgmt_tx_cmdid = WMI_TLV_MGMT_TX_CMDID,
4211	.mgmt_tx_send_cmdid = WMI_TLV_MGMT_TX_SEND_CMD,
4212	.prb_tmpl_cmdid = WMI_TLV_PRB_TMPL_CMDID,
4213	.addba_clear_resp_cmdid = WMI_TLV_ADDBA_CLEAR_RESP_CMDID,
4214	.addba_send_cmdid = WMI_TLV_ADDBA_SEND_CMDID,
4215	.addba_status_cmdid = WMI_TLV_ADDBA_STATUS_CMDID,
4216	.delba_send_cmdid = WMI_TLV_DELBA_SEND_CMDID,
4217	.addba_set_resp_cmdid = WMI_TLV_ADDBA_SET_RESP_CMDID,
4218	.send_singleamsdu_cmdid = WMI_TLV_SEND_SINGLEAMSDU_CMDID,
4219	.sta_powersave_mode_cmdid = WMI_TLV_STA_POWERSAVE_MODE_CMDID,
4220	.sta_powersave_param_cmdid = WMI_TLV_STA_POWERSAVE_PARAM_CMDID,
4221	.sta_mimo_ps_mode_cmdid = WMI_TLV_STA_MIMO_PS_MODE_CMDID,
4222	.pdev_dfs_enable_cmdid = WMI_TLV_PDEV_DFS_ENABLE_CMDID,
4223	.pdev_dfs_disable_cmdid = WMI_TLV_PDEV_DFS_DISABLE_CMDID,
4224	.roam_scan_mode = WMI_TLV_ROAM_SCAN_MODE,
4225	.roam_scan_rssi_threshold = WMI_TLV_ROAM_SCAN_RSSI_THRESHOLD,
4226	.roam_scan_period = WMI_TLV_ROAM_SCAN_PERIOD,
4227	.roam_scan_rssi_change_threshold =
4228				WMI_TLV_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
4229	.roam_ap_profile = WMI_TLV_ROAM_AP_PROFILE,
4230	.ofl_scan_add_ap_profile = WMI_TLV_ROAM_AP_PROFILE,
4231	.ofl_scan_remove_ap_profile = WMI_TLV_OFL_SCAN_REMOVE_AP_PROFILE,
4232	.ofl_scan_period = WMI_TLV_OFL_SCAN_PERIOD,
4233	.p2p_dev_set_device_info = WMI_TLV_P2P_DEV_SET_DEVICE_INFO,
4234	.p2p_dev_set_discoverability = WMI_TLV_P2P_DEV_SET_DISCOVERABILITY,
4235	.p2p_go_set_beacon_ie = WMI_TLV_P2P_GO_SET_BEACON_IE,
4236	.p2p_go_set_probe_resp_ie = WMI_TLV_P2P_GO_SET_PROBE_RESP_IE,
4237	.p2p_set_vendor_ie_data_cmdid = WMI_TLV_P2P_SET_VENDOR_IE_DATA_CMDID,
4238	.ap_ps_peer_param_cmdid = WMI_TLV_AP_PS_PEER_PARAM_CMDID,
4239	.ap_ps_peer_uapsd_coex_cmdid = WMI_TLV_AP_PS_PEER_UAPSD_COEX_CMDID,
4240	.peer_rate_retry_sched_cmdid = WMI_TLV_PEER_RATE_RETRY_SCHED_CMDID,
4241	.wlan_profile_trigger_cmdid = WMI_TLV_WLAN_PROFILE_TRIGGER_CMDID,
4242	.wlan_profile_set_hist_intvl_cmdid =
4243				WMI_TLV_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
4244	.wlan_profile_get_profile_data_cmdid =
4245				WMI_TLV_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
4246	.wlan_profile_enable_profile_id_cmdid =
4247				WMI_TLV_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
4248	.wlan_profile_list_profile_id_cmdid =
4249				WMI_TLV_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
4250	.pdev_suspend_cmdid = WMI_TLV_PDEV_SUSPEND_CMDID,
4251	.pdev_resume_cmdid = WMI_TLV_PDEV_RESUME_CMDID,
4252	.add_bcn_filter_cmdid = WMI_TLV_ADD_BCN_FILTER_CMDID,
4253	.rmv_bcn_filter_cmdid = WMI_TLV_RMV_BCN_FILTER_CMDID,
4254	.wow_add_wake_pattern_cmdid = WMI_TLV_WOW_ADD_WAKE_PATTERN_CMDID,
4255	.wow_del_wake_pattern_cmdid = WMI_TLV_WOW_DEL_WAKE_PATTERN_CMDID,
4256	.wow_enable_disable_wake_event_cmdid =
4257				WMI_TLV_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
4258	.wow_enable_cmdid = WMI_TLV_WOW_ENABLE_CMDID,
4259	.wow_hostwakeup_from_sleep_cmdid =
4260				WMI_TLV_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
4261	.rtt_measreq_cmdid = WMI_TLV_RTT_MEASREQ_CMDID,
4262	.rtt_tsf_cmdid = WMI_TLV_RTT_TSF_CMDID,
4263	.vdev_spectral_scan_configure_cmdid = WMI_TLV_SPECTRAL_SCAN_CONF_CMDID,
4264	.vdev_spectral_scan_enable_cmdid = WMI_TLV_SPECTRAL_SCAN_ENABLE_CMDID,
4265	.request_stats_cmdid = WMI_TLV_REQUEST_STATS_CMDID,
4266	.request_peer_stats_info_cmdid = WMI_TLV_REQUEST_PEER_STATS_INFO_CMDID,
4267	.set_arp_ns_offload_cmdid = WMI_TLV_SET_ARP_NS_OFFLOAD_CMDID,
4268	.network_list_offload_config_cmdid =
4269				WMI_TLV_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
4270	.gtk_offload_cmdid = WMI_TLV_GTK_OFFLOAD_CMDID,
4271	.csa_offload_enable_cmdid = WMI_TLV_CSA_OFFLOAD_ENABLE_CMDID,
4272	.csa_offload_chanswitch_cmdid = WMI_TLV_CSA_OFFLOAD_CHANSWITCH_CMDID,
4273	.chatter_set_mode_cmdid = WMI_TLV_CHATTER_SET_MODE_CMDID,
4274	.peer_tid_addba_cmdid = WMI_TLV_PEER_TID_ADDBA_CMDID,
4275	.peer_tid_delba_cmdid = WMI_TLV_PEER_TID_DELBA_CMDID,
4276	.sta_dtim_ps_method_cmdid = WMI_TLV_STA_DTIM_PS_METHOD_CMDID,
4277	.sta_uapsd_auto_trig_cmdid = WMI_TLV_STA_UAPSD_AUTO_TRIG_CMDID,
4278	.sta_keepalive_cmd = WMI_TLV_STA_KEEPALIVE_CMDID,
4279	.echo_cmdid = WMI_TLV_ECHO_CMDID,
4280	.pdev_utf_cmdid = WMI_TLV_PDEV_UTF_CMDID,
4281	.dbglog_cfg_cmdid = WMI_TLV_DBGLOG_CFG_CMDID,
4282	.pdev_qvit_cmdid = WMI_TLV_PDEV_QVIT_CMDID,
4283	.pdev_ftm_intg_cmdid = WMI_TLV_PDEV_FTM_INTG_CMDID,
4284	.vdev_set_keepalive_cmdid = WMI_TLV_VDEV_SET_KEEPALIVE_CMDID,
4285	.vdev_get_keepalive_cmdid = WMI_TLV_VDEV_GET_KEEPALIVE_CMDID,
4286	.force_fw_hang_cmdid = WMI_TLV_FORCE_FW_HANG_CMDID,
4287	.gpio_config_cmdid = WMI_TLV_GPIO_CONFIG_CMDID,
4288	.gpio_output_cmdid = WMI_TLV_GPIO_OUTPUT_CMDID,
4289	.pdev_get_temperature_cmdid = WMI_TLV_PDEV_GET_TEMPERATURE_CMDID,
4290	.vdev_set_wmm_params_cmdid = WMI_TLV_VDEV_SET_WMM_PARAMS_CMDID,
4291	.tdls_set_state_cmdid = WMI_TLV_TDLS_SET_STATE_CMDID,
4292	.tdls_peer_update_cmdid = WMI_TLV_TDLS_PEER_UPDATE_CMDID,
4293	.adaptive_qcs_cmdid = WMI_TLV_RESMGR_ADAPTIVE_OCS_CMDID,
4294	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
4295	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
4296	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
4297	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
4298	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
4299	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
4300	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
4301	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
4302	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
4303	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
4304	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
4305	.nan_cmdid = WMI_CMD_UNSUPPORTED,
4306	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
4307	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
4308	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
4309	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
4310	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
4311	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
4312	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
4313	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
4314	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
4315	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
4316	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
4317	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
4318	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
4319	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
4320	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
4321	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
4322	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
4323	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
4324	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
4325	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
4326};
4327
4328static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
4329	.tx_chain_mask = WMI_TLV_PDEV_PARAM_TX_CHAIN_MASK,
4330	.rx_chain_mask = WMI_TLV_PDEV_PARAM_RX_CHAIN_MASK,
4331	.txpower_limit2g = WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT2G,
4332	.txpower_limit5g = WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT5G,
4333	.txpower_scale = WMI_TLV_PDEV_PARAM_TXPOWER_SCALE,
4334	.beacon_gen_mode = WMI_TLV_PDEV_PARAM_BEACON_GEN_MODE,
4335	.beacon_tx_mode = WMI_TLV_PDEV_PARAM_BEACON_TX_MODE,
4336	.resmgr_offchan_mode = WMI_TLV_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
4337	.protection_mode = WMI_TLV_PDEV_PARAM_PROTECTION_MODE,
4338	.dynamic_bw = WMI_TLV_PDEV_PARAM_DYNAMIC_BW,
4339	.non_agg_sw_retry_th = WMI_TLV_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
4340	.agg_sw_retry_th = WMI_TLV_PDEV_PARAM_AGG_SW_RETRY_TH,
4341	.sta_kickout_th = WMI_TLV_PDEV_PARAM_STA_KICKOUT_TH,
4342	.ac_aggrsize_scaling = WMI_TLV_PDEV_PARAM_AC_AGGRSIZE_SCALING,
4343	.ltr_enable = WMI_TLV_PDEV_PARAM_LTR_ENABLE,
4344	.ltr_ac_latency_be = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BE,
4345	.ltr_ac_latency_bk = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BK,
4346	.ltr_ac_latency_vi = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VI,
4347	.ltr_ac_latency_vo = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VO,
4348	.ltr_ac_latency_timeout = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
4349	.ltr_sleep_override = WMI_TLV_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
4350	.ltr_rx_override = WMI_TLV_PDEV_PARAM_LTR_RX_OVERRIDE,
4351	.ltr_tx_activity_timeout = WMI_TLV_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
4352	.l1ss_enable = WMI_TLV_PDEV_PARAM_L1SS_ENABLE,
4353	.dsleep_enable = WMI_TLV_PDEV_PARAM_DSLEEP_ENABLE,
4354	.pcielp_txbuf_flush = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
4355	.pcielp_txbuf_watermark = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
4356	.pcielp_txbuf_tmo_en = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
4357	.pcielp_txbuf_tmo_value = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
4358	.pdev_stats_update_period = WMI_TLV_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
4359	.vdev_stats_update_period = WMI_TLV_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
4360	.peer_stats_update_period = WMI_TLV_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
4361	.bcnflt_stats_update_period =
4362				WMI_TLV_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
4363	.pmf_qos = WMI_TLV_PDEV_PARAM_PMF_QOS,
4364	.arp_ac_override = WMI_TLV_PDEV_PARAM_ARP_AC_OVERRIDE,
4365	.dcs = WMI_TLV_PDEV_PARAM_DCS,
4366	.ani_enable = WMI_TLV_PDEV_PARAM_ANI_ENABLE,
4367	.ani_poll_period = WMI_TLV_PDEV_PARAM_ANI_POLL_PERIOD,
4368	.ani_listen_period = WMI_TLV_PDEV_PARAM_ANI_LISTEN_PERIOD,
4369	.ani_ofdm_level = WMI_TLV_PDEV_PARAM_ANI_OFDM_LEVEL,
4370	.ani_cck_level = WMI_TLV_PDEV_PARAM_ANI_CCK_LEVEL,
4371	.dyntxchain = WMI_TLV_PDEV_PARAM_DYNTXCHAIN,
4372	.proxy_sta = WMI_TLV_PDEV_PARAM_PROXY_STA,
4373	.idle_ps_config = WMI_TLV_PDEV_PARAM_IDLE_PS_CONFIG,
4374	.power_gating_sleep = WMI_TLV_PDEV_PARAM_POWER_GATING_SLEEP,
4375	.fast_channel_reset = WMI_TLV_PDEV_PARAM_UNSUPPORTED,
4376	.burst_dur = WMI_TLV_PDEV_PARAM_BURST_DUR,
4377	.burst_enable = WMI_TLV_PDEV_PARAM_BURST_ENABLE,
4378	.cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
4379	.aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
4380	.rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
4381	.smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
4382	.igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
4383	.igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
4384	.antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
4385	.rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
4386	.set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
4387	.proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
4388	.set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
4389	.set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
4390	.remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
4391	.peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
4392	.igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
4393	.block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
4394	.set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
4395	.set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
4396	.set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
4397	.txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
4398	.set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
4399	.set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
4400	.en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
4401	.mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
4402	.noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
4403	.noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
4404	.dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
4405	.set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
4406	.atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
4407	.atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
4408	.ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
4409	.mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
4410	.sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
4411	.signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
4412	.signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
4413	.enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
4414	.enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
4415	.cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
4416	.rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
4417	.pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
4418	.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
4419	.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
4420	.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
4421	.rfkill_config = WMI_TLV_PDEV_PARAM_HW_RFKILL_CONFIG,
4422	.rfkill_enable = WMI_TLV_PDEV_PARAM_RFKILL_ENABLE,
4423	.peer_stats_info_enable = WMI_TLV_PDEV_PARAM_PEER_STATS_INFO_ENABLE,
4424};
4425
4426static struct wmi_peer_param_map wmi_tlv_peer_param_map = {
4427	.smps_state = WMI_TLV_PEER_SMPS_STATE,
4428	.ampdu = WMI_TLV_PEER_AMPDU,
4429	.authorize = WMI_TLV_PEER_AUTHORIZE,
4430	.chan_width = WMI_TLV_PEER_CHAN_WIDTH,
4431	.nss = WMI_TLV_PEER_NSS,
4432	.use_4addr = WMI_TLV_PEER_USE_4ADDR,
4433	.membership = WMI_TLV_PEER_MEMBERSHIP,
4434	.user_pos = WMI_TLV_PEER_USERPOS,
4435	.crit_proto_hint_enabled = WMI_TLV_PEER_CRIT_PROTO_HINT_ENABLED,
4436	.tx_fail_cnt_thr = WMI_TLV_PEER_TX_FAIL_CNT_THR,
4437	.set_hw_retry_cts2s = WMI_TLV_PEER_SET_HW_RETRY_CTS2S,
4438	.ibss_atim_win_len = WMI_TLV_PEER_IBSS_ATIM_WINDOW_LENGTH,
4439	.phymode = WMI_TLV_PEER_PHYMODE,
4440	.use_fixed_power = WMI_TLV_PEER_USE_FIXED_PWR,
4441	.dummy_var = WMI_TLV_PEER_DUMMY_VAR,
4442};
4443
4444static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = {
4445	.rts_threshold = WMI_TLV_VDEV_PARAM_RTS_THRESHOLD,
4446	.fragmentation_threshold = WMI_TLV_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
4447	.beacon_interval = WMI_TLV_VDEV_PARAM_BEACON_INTERVAL,
4448	.listen_interval = WMI_TLV_VDEV_PARAM_LISTEN_INTERVAL,
4449	.multicast_rate = WMI_TLV_VDEV_PARAM_MULTICAST_RATE,
4450	.mgmt_tx_rate = WMI_TLV_VDEV_PARAM_MGMT_TX_RATE,
4451	.slot_time = WMI_TLV_VDEV_PARAM_SLOT_TIME,
4452	.preamble = WMI_TLV_VDEV_PARAM_PREAMBLE,
4453	.swba_time = WMI_TLV_VDEV_PARAM_SWBA_TIME,
4454	.wmi_vdev_stats_update_period = WMI_TLV_VDEV_STATS_UPDATE_PERIOD,
4455	.wmi_vdev_pwrsave_ageout_time = WMI_TLV_VDEV_PWRSAVE_AGEOUT_TIME,
4456	.wmi_vdev_host_swba_interval = WMI_TLV_VDEV_HOST_SWBA_INTERVAL,
4457	.dtim_period = WMI_TLV_VDEV_PARAM_DTIM_PERIOD,
4458	.wmi_vdev_oc_scheduler_air_time_limit =
4459				WMI_TLV_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
4460	.wds = WMI_TLV_VDEV_PARAM_WDS,
4461	.atim_window = WMI_TLV_VDEV_PARAM_ATIM_WINDOW,
4462	.bmiss_count_max = WMI_TLV_VDEV_PARAM_BMISS_COUNT_MAX,
4463	.bmiss_first_bcnt = WMI_TLV_VDEV_PARAM_BMISS_FIRST_BCNT,
4464	.bmiss_final_bcnt = WMI_TLV_VDEV_PARAM_BMISS_FINAL_BCNT,
4465	.feature_wmm = WMI_TLV_VDEV_PARAM_FEATURE_WMM,
4466	.chwidth = WMI_TLV_VDEV_PARAM_CHWIDTH,
4467	.chextoffset = WMI_TLV_VDEV_PARAM_CHEXTOFFSET,
4468	.disable_htprotection =	WMI_TLV_VDEV_PARAM_DISABLE_HTPROTECTION,
4469	.sta_quickkickout = WMI_TLV_VDEV_PARAM_STA_QUICKKICKOUT,
4470	.mgmt_rate = WMI_TLV_VDEV_PARAM_MGMT_RATE,
4471	.protection_mode = WMI_TLV_VDEV_PARAM_PROTECTION_MODE,
4472	.fixed_rate = WMI_TLV_VDEV_PARAM_FIXED_RATE,
4473	.sgi = WMI_TLV_VDEV_PARAM_SGI,
4474	.ldpc = WMI_TLV_VDEV_PARAM_LDPC,
4475	.tx_stbc = WMI_TLV_VDEV_PARAM_TX_STBC,
4476	.rx_stbc = WMI_TLV_VDEV_PARAM_RX_STBC,
4477	.intra_bss_fwd = WMI_TLV_VDEV_PARAM_INTRA_BSS_FWD,
4478	.def_keyid = WMI_TLV_VDEV_PARAM_DEF_KEYID,
4479	.nss = WMI_TLV_VDEV_PARAM_NSS,
4480	.bcast_data_rate = WMI_TLV_VDEV_PARAM_BCAST_DATA_RATE,
4481	.mcast_data_rate = WMI_TLV_VDEV_PARAM_MCAST_DATA_RATE,
4482	.mcast_indicate = WMI_TLV_VDEV_PARAM_MCAST_INDICATE,
4483	.dhcp_indicate = WMI_TLV_VDEV_PARAM_DHCP_INDICATE,
4484	.unknown_dest_indicate = WMI_TLV_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
4485	.ap_keepalive_min_idle_inactive_time_secs =
4486		WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
4487	.ap_keepalive_max_idle_inactive_time_secs =
4488		WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
4489	.ap_keepalive_max_unresponsive_time_secs =
4490		WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
4491	.ap_enable_nawds = WMI_TLV_VDEV_PARAM_AP_ENABLE_NAWDS,
4492	.mcast2ucast_set = WMI_TLV_VDEV_PARAM_UNSUPPORTED,
4493	.enable_rtscts = WMI_TLV_VDEV_PARAM_ENABLE_RTSCTS,
4494	.txbf = WMI_TLV_VDEV_PARAM_TXBF,
4495	.packet_powersave = WMI_TLV_VDEV_PARAM_PACKET_POWERSAVE,
4496	.drop_unencry = WMI_TLV_VDEV_PARAM_DROP_UNENCRY,
4497	.tx_encap_type = WMI_TLV_VDEV_PARAM_TX_ENCAP_TYPE,
4498	.ap_detect_out_of_sync_sleeping_sta_time_secs =
4499					WMI_TLV_VDEV_PARAM_UNSUPPORTED,
4500	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
4501	.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
4502	.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
4503	.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
4504	.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
4505	.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
4506	.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
4507	.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
4508	.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
4509	.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
4510	.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
4511	.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
4512	.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
4513	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
4514	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
4515	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
4516};
4517
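/* Op table wiring the TLV-specific pull/gen helpers into the common WMI
 * layer; ops without a TLV implementation are left unset (see the
 * commented-out placeholders below).
 */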
4518static const struct wmi_ops wmi_tlv_ops = {
4519	.rx = ath10k_wmi_tlv_op_rx,
4520	.map_svc = wmi_tlv_svc_map,
4521	.map_svc_ext = wmi_tlv_svc_map_ext,
4522
4523	.pull_scan = ath10k_wmi_tlv_op_pull_scan_ev,
4524	.pull_mgmt_rx = ath10k_wmi_tlv_op_pull_mgmt_rx_ev,
4525	.pull_mgmt_tx_compl = ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev,
4526	.pull_mgmt_tx_bundle_compl = ath10k_wmi_tlv_op_pull_mgmt_tx_bundle_compl_ev,
4527	.pull_ch_info = ath10k_wmi_tlv_op_pull_ch_info_ev,
4528	.pull_vdev_start = ath10k_wmi_tlv_op_pull_vdev_start_ev,
4529	.pull_peer_kick = ath10k_wmi_tlv_op_pull_peer_kick_ev,
4530	.pull_swba = ath10k_wmi_tlv_op_pull_swba_ev,
4531	.pull_phyerr_hdr = ath10k_wmi_tlv_op_pull_phyerr_ev_hdr,
4532	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
4533	.pull_svc_rdy = ath10k_wmi_tlv_op_pull_svc_rdy_ev,
4534	.pull_rdy = ath10k_wmi_tlv_op_pull_rdy_ev,
4535	.pull_svc_avail = ath10k_wmi_tlv_op_pull_svc_avail,
4536	.pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats,
4537	.pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev,
4538	.pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev,
4539	.pull_echo_ev = ath10k_wmi_tlv_op_pull_echo_ev,
4540	.get_txbf_conf_scheme = ath10k_wmi_tlv_txbf_conf_scheme,
4541
4542	.gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend,
4543	.gen_pdev_resume = ath10k_wmi_tlv_op_gen_pdev_resume,
4544	.gen_pdev_set_rd = ath10k_wmi_tlv_op_gen_pdev_set_rd,
4545	.gen_pdev_set_param = ath10k_wmi_tlv_op_gen_pdev_set_param,
4546	.gen_init = ath10k_wmi_tlv_op_gen_init,
4547	.gen_start_scan = ath10k_wmi_tlv_op_gen_start_scan,
4548	.gen_stop_scan = ath10k_wmi_tlv_op_gen_stop_scan,
4549	.gen_vdev_create = ath10k_wmi_tlv_op_gen_vdev_create,
4550	.gen_vdev_delete = ath10k_wmi_tlv_op_gen_vdev_delete,
4551	.gen_vdev_start = ath10k_wmi_tlv_op_gen_vdev_start,
4552	.gen_vdev_stop = ath10k_wmi_tlv_op_gen_vdev_stop,
4553	.gen_vdev_up = ath10k_wmi_tlv_op_gen_vdev_up,
4554	.gen_vdev_down = ath10k_wmi_tlv_op_gen_vdev_down,
4555	.gen_vdev_set_param = ath10k_wmi_tlv_op_gen_vdev_set_param,
4556	.gen_vdev_install_key = ath10k_wmi_tlv_op_gen_vdev_install_key,
4557	.gen_vdev_wmm_conf = ath10k_wmi_tlv_op_gen_vdev_wmm_conf,
4558	.gen_peer_create = ath10k_wmi_tlv_op_gen_peer_create,
4559	.gen_peer_delete = ath10k_wmi_tlv_op_gen_peer_delete,
4560	.gen_peer_flush = ath10k_wmi_tlv_op_gen_peer_flush,
4561	.gen_peer_set_param = ath10k_wmi_tlv_op_gen_peer_set_param,
4562	.gen_peer_assoc = ath10k_wmi_tlv_op_gen_peer_assoc,
4563	.gen_set_psmode = ath10k_wmi_tlv_op_gen_set_psmode,
4564	.gen_set_sta_ps = ath10k_wmi_tlv_op_gen_set_sta_ps,
4565	.gen_set_ap_ps = ath10k_wmi_tlv_op_gen_set_ap_ps,
4566	.gen_scan_chan_list = ath10k_wmi_tlv_op_gen_scan_chan_list,
4567	.gen_scan_prob_req_oui = ath10k_wmi_tlv_op_gen_scan_prob_req_oui,
4568	.gen_beacon_dma = ath10k_wmi_tlv_op_gen_beacon_dma,
4569	.gen_pdev_set_wmm = ath10k_wmi_tlv_op_gen_pdev_set_wmm,
4570	.gen_request_stats = ath10k_wmi_tlv_op_gen_request_stats,
4571	.gen_request_peer_stats_info = ath10k_wmi_tlv_op_gen_request_peer_stats_info,
4572	.gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang,
4573	/* .gen_mgmt_tx = not implemented; HTT is used */
4574	.gen_mgmt_tx_send = ath10k_wmi_tlv_op_gen_mgmt_tx_send,
4575	.cleanup_mgmt_tx_send = ath10k_wmi_tlv_op_cleanup_mgmt_tx_send,
4576	.gen_dbglog_cfg = ath10k_wmi_tlv_op_gen_dbglog_cfg,
4577	.gen_pktlog_enable = ath10k_wmi_tlv_op_gen_pktlog_enable,
4578	.gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable,
4579	.gen_pdev_set_quiet_mode = ath10k_wmi_tlv_op_gen_pdev_set_quiet_mode,
4580	.gen_pdev_get_temperature = ath10k_wmi_tlv_op_gen_pdev_get_temperature,
4581	/* .gen_addba_clear_resp not implemented */
4582	/* .gen_addba_send not implemented */
4583	/* .gen_addba_set_resp not implemented */
4584	/* .gen_delba_send not implemented */
4585	.gen_bcn_tmpl = ath10k_wmi_tlv_op_gen_bcn_tmpl,
4586	.gen_prb_tmpl = ath10k_wmi_tlv_op_gen_prb_tmpl,
4587	.gen_p2p_go_bcn_ie = ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie,
4588	.gen_vdev_sta_uapsd = ath10k_wmi_tlv_op_gen_vdev_sta_uapsd,
4589	.gen_sta_keepalive = ath10k_wmi_tlv_op_gen_sta_keepalive,
4590	.gen_wow_enable = ath10k_wmi_tlv_op_gen_wow_enable,
4591	.gen_wow_add_wakeup_event = ath10k_wmi_tlv_op_gen_wow_add_wakeup_event,
4592	.gen_wow_host_wakeup_ind = ath10k_wmi_tlv_gen_wow_host_wakeup_ind,
4593	.gen_wow_add_pattern = ath10k_wmi_tlv_op_gen_wow_add_pattern,
4594	.gen_wow_del_pattern = ath10k_wmi_tlv_op_gen_wow_del_pattern,
4595	.gen_wow_config_pno = ath10k_wmi_tlv_op_gen_config_pno,
4596	.gen_update_fw_tdls_state = ath10k_wmi_tlv_op_gen_update_fw_tdls_state,
4597	.gen_tdls_peer_update = ath10k_wmi_tlv_op_gen_tdls_peer_update,
4598	.gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs,
4599	.fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
4600	.get_vdev_subtype = ath10k_wmi_tlv_op_get_vdev_subtype,
4601	.gen_echo = ath10k_wmi_tlv_op_gen_echo,
4602	.gen_vdev_spectral_conf = ath10k_wmi_tlv_op_gen_vdev_spectral_conf,
4603	.gen_vdev_spectral_enable = ath10k_wmi_tlv_op_gen_vdev_spectral_enable,
4604};
4605
4606static const struct wmi_peer_flags_map wmi_tlv_peer_flags_map = {
4607	.auth = WMI_TLV_PEER_AUTH,
4608	.qos = WMI_TLV_PEER_QOS,
4609	.need_ptk_4_way = WMI_TLV_PEER_NEED_PTK_4_WAY,
4610	.need_gtk_2_way = WMI_TLV_PEER_NEED_GTK_2_WAY,
4611	.apsd = WMI_TLV_PEER_APSD,
4612	.ht = WMI_TLV_PEER_HT,
4613	.bw40 = WMI_TLV_PEER_40MHZ,
4614	.stbc = WMI_TLV_PEER_STBC,
4615	.ldbc = WMI_TLV_PEER_LDPC,
4616	.dyn_mimops = WMI_TLV_PEER_DYN_MIMOPS,
4617	.static_mimops = WMI_TLV_PEER_STATIC_MIMOPS,
4618	.spatial_mux = WMI_TLV_PEER_SPATIAL_MUX,
4619	.vht = WMI_TLV_PEER_VHT,
4620	.bw80 = WMI_TLV_PEER_80MHZ,
4621	.pmf = WMI_TLV_PEER_PMF,
4622	.bw160 = WMI_TLV_PEER_160MHZ,
4623};
4624
4625/************/
4626/* TLV init */
4627/************/
4628
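/* Install the TLV command map, the vdev/pdev/peer parameter maps, the
 * peer-flag map and the TLV op table on the ath10k instance.
 */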
4629void ath10k_wmi_tlv_attach(struct ath10k *ar)
4630{
4631	ar->wmi.cmd = &wmi_tlv_cmd_map;
4632	ar->wmi.vdev_param = &wmi_tlv_vdev_param_map;
4633	ar->wmi.pdev_param = &wmi_tlv_pdev_param_map;
4634	ar->wmi.peer_param = &wmi_tlv_peer_param_map;
4635	ar->wmi.ops = &wmi_tlv_ops;
4636	ar->wmi.peer_flags = &wmi_tlv_peer_flags_map;
4637}
v6.13.7
   1// SPDX-License-Identifier: ISC
   2/*
   3 * Copyright (c) 2005-2011 Atheros Communications Inc.
   4 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
   5 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
   6 * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
   7 */
   8#include "core.h"
   9#include "debug.h"
  10#include "mac.h"
  11#include "hw.h"
  12#include "wmi.h"
  13#include "wmi-ops.h"
  14#include "wmi-tlv.h"
  15#include "p2p.h"
  16#include "testmode.h"
  17#include <linux/bitfield.h>
  18
  19/***************/
  20/* TLV helpers */
  21/**************/
  22
  23struct wmi_tlv_policy {
  24	size_t min_len;
  25};
  26
  27static const struct wmi_tlv_policy wmi_tlv_policies[] = {
  28	[WMI_TLV_TAG_ARRAY_BYTE]
  29		= { .min_len = 0 },
  30	[WMI_TLV_TAG_ARRAY_UINT32]
  31		= { .min_len = 0 },
  32	[WMI_TLV_TAG_STRUCT_SCAN_EVENT]
  33		= { .min_len = sizeof(struct wmi_scan_event) },
  34	[WMI_TLV_TAG_STRUCT_MGMT_RX_HDR]
  35		= { .min_len = sizeof(struct wmi_tlv_mgmt_rx_ev) },
  36	[WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT]
  37		= { .min_len = sizeof(struct wmi_chan_info_event) },
  38	[WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT]
  39		= { .min_len = sizeof(struct wmi_vdev_start_response_event) },
  40	[WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT]
  41		= { .min_len = sizeof(struct wmi_peer_sta_kickout_event) },
  42	[WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT]
  43		= { .min_len = sizeof(struct wmi_host_swba_event) },
  44	[WMI_TLV_TAG_STRUCT_TIM_INFO]
  45		= { .min_len = sizeof(struct wmi_tim_info) },
  46	[WMI_TLV_TAG_STRUCT_P2P_NOA_INFO]
  47		= { .min_len = sizeof(struct wmi_p2p_noa_info) },
  48	[WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT]
  49		= { .min_len = sizeof(struct wmi_tlv_svc_rdy_ev) },
  50	[WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES]
  51		= { .min_len = sizeof(struct hal_reg_capabilities) },
  52	[WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ]
  53		= { .min_len = sizeof(struct wlan_host_mem_req) },
  54	[WMI_TLV_TAG_STRUCT_READY_EVENT]
  55		= { .min_len = sizeof(struct wmi_tlv_rdy_ev) },
  56	[WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT]
  57		= { .min_len = sizeof(struct wmi_tlv_bcn_tx_status_ev) },
  58	[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT]
  59		= { .min_len = sizeof(struct wmi_tlv_diag_data_ev) },
  60	[WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT]
  61		= { .min_len = sizeof(struct wmi_tlv_p2p_noa_ev) },
  62	[WMI_TLV_TAG_STRUCT_ROAM_EVENT]
  63		= { .min_len = sizeof(struct wmi_tlv_roam_ev) },
  64	[WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO]
  65		= { .min_len = sizeof(struct wmi_tlv_wow_event_info) },
  66	[WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT]
  67		= { .min_len = sizeof(struct wmi_tlv_tx_pause_ev) },
  68};
  69
  70static int
  71ath10k_wmi_tlv_iter(struct ath10k *ar, const void *ptr, size_t len,
  72		    int (*iter)(struct ath10k *ar, u16 tag, u16 len,
  73				const void *ptr, void *data),
  74		    void *data)
  75{
  76	const void *begin = ptr;
  77	const struct wmi_tlv *tlv;
  78	u16 tlv_tag, tlv_len;
  79	int ret;
  80
  81	while (len > 0) {
  82		if (len < sizeof(*tlv)) {
  83			ath10k_dbg(ar, ATH10K_DBG_WMI,
  84				   "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
  85				   ptr - begin, len, sizeof(*tlv));
  86			return -EINVAL;
  87		}
  88
  89		tlv = ptr;
  90		tlv_tag = __le16_to_cpu(tlv->tag);
  91		tlv_len = __le16_to_cpu(tlv->len);
  92		ptr += sizeof(*tlv);
  93		len -= sizeof(*tlv);
  94
  95		if (tlv_len > len) {
  96			ath10k_dbg(ar, ATH10K_DBG_WMI,
  97				   "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
  98				   tlv_tag, ptr - begin, len, tlv_len);
  99			return -EINVAL;
 100		}
 101
 102		if (tlv_tag < ARRAY_SIZE(wmi_tlv_policies) &&
 103		    wmi_tlv_policies[tlv_tag].min_len &&
 104		    wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
 105			ath10k_dbg(ar, ATH10K_DBG_WMI,
 106				   "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n",
 107				   tlv_tag, ptr - begin, tlv_len,
 108				   wmi_tlv_policies[tlv_tag].min_len);
 109			return -EINVAL;
 110		}
 111
 112		ret = iter(ar, tlv_tag, tlv_len, ptr, data);
 113		if (ret)
 114			return ret;
 115
 116		ptr += tlv_len;
 117		len -= tlv_len;
 118	}
 119
 120	return 0;
 121}
 122
 123static int ath10k_wmi_tlv_iter_parse(struct ath10k *ar, u16 tag, u16 len,
 124				     const void *ptr, void *data)
 125{
 126	const void **tb = data;
 127
 128	if (tag < WMI_TLV_TAG_MAX)
 129		tb[tag] = ptr;
 130
 131	return 0;
 132}
 133
 134static int ath10k_wmi_tlv_parse(struct ath10k *ar, const void **tb,
 135				const void *ptr, size_t len)
 136{
 137	return ath10k_wmi_tlv_iter(ar, ptr, len, ath10k_wmi_tlv_iter_parse,
 138				   (void *)tb);
 139}
 140
 141static const void **
 142ath10k_wmi_tlv_parse_alloc(struct ath10k *ar, const void *ptr,
 143			   size_t len, gfp_t gfp)
 144{
 145	const void **tb;
 146	int ret;
 147
 148	tb = kcalloc(WMI_TLV_TAG_MAX, sizeof(*tb), gfp);
 149	if (!tb)
 150		return ERR_PTR(-ENOMEM);
 151
 152	ret = ath10k_wmi_tlv_parse(ar, tb, ptr, len);
 153	if (ret) {
 154		kfree(tb);
 155		return ERR_PTR(ret);
 156	}
 157
 158	return tb;
 159}
 160
 161static u16 ath10k_wmi_tlv_len(const void *ptr)
 162{
 163	return __le16_to_cpu((((const struct wmi_tlv *)ptr) - 1)->len);
 164}
 165
 166/**************/
 167/* TLV events */
 168/**************/
 169static int ath10k_wmi_tlv_event_bcn_tx_status(struct ath10k *ar,
 170					      struct sk_buff *skb)
 171{
 172	const void **tb;
 173	const struct wmi_tlv_bcn_tx_status_ev *ev;
 174	struct ath10k_vif *arvif;
 175	u32 vdev_id, tx_status;
 176	int ret;
 177
 178	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
 179	if (IS_ERR(tb)) {
 180		ret = PTR_ERR(tb);
 181		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
 182		return ret;
 183	}
 184
 185	ev = tb[WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT];
 186	if (!ev) {
 187		kfree(tb);
 188		return -EPROTO;
 189	}
 190
 191	tx_status = __le32_to_cpu(ev->tx_status);
 192	vdev_id = __le32_to_cpu(ev->vdev_id);
 193
 194	switch (tx_status) {
 195	case WMI_TLV_BCN_TX_STATUS_OK:
 196		break;
 197	case WMI_TLV_BCN_TX_STATUS_XRETRY:
 198	case WMI_TLV_BCN_TX_STATUS_DROP:
 199	case WMI_TLV_BCN_TX_STATUS_FILTERED:
 200		/* FIXME: It's probably worth telling mac80211 to stop the
 201		 * interface as it is crippled.
 202		 */
 203		ath10k_warn(ar, "received bcn tmpl tx status on vdev %i: %d",
 204			    vdev_id, tx_status);
 205		break;
 206	}
 207
 208	arvif = ath10k_get_arvif(ar, vdev_id);
 209	if (arvif && arvif->is_up && arvif->vif->bss_conf.csa_active)
 210		ieee80211_queue_work(ar->hw, &arvif->ap_csa_work);
 211
 212	kfree(tb);
 213	return 0;
 214}
 215
 216static void ath10k_wmi_tlv_event_vdev_delete_resp(struct ath10k *ar,
 217						  struct sk_buff *skb)
 218{
 219	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_DELETE_RESP_EVENTID\n");
 220	complete(&ar->vdev_delete_done);
 221}
 222
 223static int ath10k_wmi_tlv_parse_peer_stats_info(struct ath10k *ar, u16 tag, u16 len,
 224						const void *ptr, void *data)
 225{
 226	const struct wmi_tlv_peer_stats_info *stat = ptr;
 227	struct ieee80211_sta *sta;
 228	struct ath10k_sta *arsta;
 229
 230	if (tag != WMI_TLV_TAG_STRUCT_PEER_STATS_INFO)
 231		return -EPROTO;
 232
 233	ath10k_dbg(ar, ATH10K_DBG_WMI,
 234		   "wmi tlv stats peer addr %pMF rx rate code 0x%x bit rate %d kbps\n",
 235		   stat->peer_macaddr.addr,
 236		   __le32_to_cpu(stat->last_rx_rate_code),
 237		   __le32_to_cpu(stat->last_rx_bitrate_kbps));
 238
 239	ath10k_dbg(ar, ATH10K_DBG_WMI,
 240		   "wmi tlv stats tx rate code 0x%x bit rate %d kbps\n",
 241		   __le32_to_cpu(stat->last_tx_rate_code),
 242		   __le32_to_cpu(stat->last_tx_bitrate_kbps));
 243
 244	rcu_read_lock();
 245	sta = ieee80211_find_sta_by_ifaddr(ar->hw, stat->peer_macaddr.addr, NULL);
 246	if (!sta) {
 247		rcu_read_unlock();
 248		ath10k_warn(ar, "not found station for peer stats\n");
 249		return -EINVAL;
 250	}
 251
 252	arsta = (struct ath10k_sta *)sta->drv_priv;
 253	arsta->rx_rate_code = __le32_to_cpu(stat->last_rx_rate_code);
 254	arsta->rx_bitrate_kbps = __le32_to_cpu(stat->last_rx_bitrate_kbps);
 255	arsta->tx_rate_code = __le32_to_cpu(stat->last_tx_rate_code);
 256	arsta->tx_bitrate_kbps = __le32_to_cpu(stat->last_tx_bitrate_kbps);
 257	rcu_read_unlock();
 258
 259	return 0;
 260}
 261
 262static int ath10k_wmi_tlv_op_pull_peer_stats_info(struct ath10k *ar,
 263						  struct sk_buff *skb)
 264{
 265	const void **tb;
 266	const struct wmi_tlv_peer_stats_info_ev *ev;
 267	const void *data;
 268	u32 num_peer_stats;
 269	int ret;
 270
 271	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
 272	if (IS_ERR(tb)) {
 273		ret = PTR_ERR(tb);
 274		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
 275		return ret;
 276	}
 277
 278	ev = tb[WMI_TLV_TAG_STRUCT_PEER_STATS_INFO_EVENT];
 279	data = tb[WMI_TLV_TAG_ARRAY_STRUCT];
 280
 281	if (!ev || !data) {
 282		kfree(tb);
 283		return -EPROTO;
 284	}
 285
 286	num_peer_stats = __le32_to_cpu(ev->num_peers);
 287
 288	ath10k_dbg(ar, ATH10K_DBG_WMI,
 289		   "wmi tlv peer stats info update peer vdev id %d peers %i more data %d\n",
 290		   __le32_to_cpu(ev->vdev_id),
 291		   num_peer_stats,
 292		   __le32_to_cpu(ev->more_data));
 293
 294	ret = ath10k_wmi_tlv_iter(ar, data, ath10k_wmi_tlv_len(data),
 295				  ath10k_wmi_tlv_parse_peer_stats_info, NULL);
 296	if (ret)
 297		ath10k_warn(ar, "failed to parse stats info tlv: %d\n", ret);
 298
 299	kfree(tb);
 300	return 0;
 301}
 302
 303static void ath10k_wmi_tlv_event_peer_stats_info(struct ath10k *ar,
 304						 struct sk_buff *skb)
 305{
 306	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PEER_STATS_INFO_EVENTID\n");
 307	ath10k_wmi_tlv_op_pull_peer_stats_info(ar, skb);
 308	complete(&ar->peer_stats_info_complete);
 309}
 310
 311static int ath10k_wmi_tlv_event_diag_data(struct ath10k *ar,
 312					  struct sk_buff *skb)
 313{
 314	const void **tb;
 315	const struct wmi_tlv_diag_data_ev *ev;
 316	const struct wmi_tlv_diag_item *item;
 317	const void *data;
 318	int ret, num_items, len;
 319
 320	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
 321	if (IS_ERR(tb)) {
 322		ret = PTR_ERR(tb);
 323		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
 324		return ret;
 325	}
 326
 327	ev = tb[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT];
 328	data = tb[WMI_TLV_TAG_ARRAY_BYTE];
 329	if (!ev || !data) {
 330		kfree(tb);
 331		return -EPROTO;
 332	}
 333
 334	num_items = __le32_to_cpu(ev->num_items);
 335	len = ath10k_wmi_tlv_len(data);
 336
 337	while (num_items--) {
 338		if (len == 0)
 339			break;
 340		if (len < sizeof(*item)) {
 341			ath10k_warn(ar, "failed to parse diag data: can't fit item header\n");
 342			break;
 343		}
 344
 345		item = data;
 346
 347		if (len < sizeof(*item) + __le16_to_cpu(item->len)) {
 348			ath10k_warn(ar, "failed to parse diag data: item is too long\n");
 349			break;
 350		}
 351
 352		trace_ath10k_wmi_diag_container(ar,
 353						item->type,
 354						__le32_to_cpu(item->timestamp),
 355						__le32_to_cpu(item->code),
 356						__le16_to_cpu(item->len),
 357						item->payload);
 358
 359		len -= sizeof(*item);
 360		len -= roundup(__le16_to_cpu(item->len), 4);
 361
 362		data += sizeof(*item);
 363		data += roundup(__le16_to_cpu(item->len), 4);
 364	}
 365
 366	if (num_items != -1 || len != 0)
 367		ath10k_warn(ar, "failed to parse diag data event: num_items %d len %d\n",
 368			    num_items, len);
 369
 370	kfree(tb);
 371	return 0;
 372}
 373
 374static int ath10k_wmi_tlv_event_diag(struct ath10k *ar,
 375				     struct sk_buff *skb)
 376{
 377	const void **tb;
 378	const void *data;
 379	int ret, len;
 380
 381	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
 382	if (IS_ERR(tb)) {
 383		ret = PTR_ERR(tb);
 384		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
 385		return ret;
 386	}
 387
 388	data = tb[WMI_TLV_TAG_ARRAY_BYTE];
 389	if (!data) {
 390		kfree(tb);
 391		return -EPROTO;
 392	}
 393	len = ath10k_wmi_tlv_len(data);
 394
 395	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv diag event len %d\n", len);
 396	trace_ath10k_wmi_diag(ar, data, len);
 397
 398	kfree(tb);
 399	return 0;
 400}
 401
 402static int ath10k_wmi_tlv_event_p2p_noa(struct ath10k *ar,
 403					struct sk_buff *skb)
 404{
 405	const void **tb;
 406	const struct wmi_tlv_p2p_noa_ev *ev;
 407	const struct wmi_p2p_noa_info *noa;
 408	int ret, vdev_id;
 409
 410	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
 411	if (IS_ERR(tb)) {
 412		ret = PTR_ERR(tb);
 413		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
 414		return ret;
 415	}
 416
 417	ev = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT];
 418	noa = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_INFO];
 419
 420	if (!ev || !noa) {
 421		kfree(tb);
 422		return -EPROTO;
 423	}
 424
 425	vdev_id = __le32_to_cpu(ev->vdev_id);
 426
 427	ath10k_dbg(ar, ATH10K_DBG_WMI,
 428		   "wmi tlv p2p noa vdev_id %i descriptors %u\n",
 429		   vdev_id, noa->num_descriptors);
 430
 431	ath10k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa);
 432	kfree(tb);
 433	return 0;
 434}
 435
 436static int ath10k_wmi_tlv_event_tx_pause(struct ath10k *ar,
 437					 struct sk_buff *skb)
 438{
 439	const void **tb;
 440	const struct wmi_tlv_tx_pause_ev *ev;
 441	int ret, vdev_id;
 442	u32 pause_id, action, vdev_map, peer_id, tid_map;
 443
 444	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
 445	if (IS_ERR(tb)) {
 446		ret = PTR_ERR(tb);
 447		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
 448		return ret;
 449	}
 450
 451	ev = tb[WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT];
 452	if (!ev) {
 453		kfree(tb);
 454		return -EPROTO;
 455	}
 456
 457	pause_id = __le32_to_cpu(ev->pause_id);
 458	action = __le32_to_cpu(ev->action);
 459	vdev_map = __le32_to_cpu(ev->vdev_map);
 460	peer_id = __le32_to_cpu(ev->peer_id);
 461	tid_map = __le32_to_cpu(ev->tid_map);
 462
 463	ath10k_dbg(ar, ATH10K_DBG_WMI,
 464		   "wmi tlv tx pause pause_id %u action %u vdev_map 0x%08x peer_id %u tid_map 0x%08x\n",
 465		   pause_id, action, vdev_map, peer_id, tid_map);
 466
 467	switch (pause_id) {
 468	case WMI_TLV_TX_PAUSE_ID_MCC:
 469	case WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA:
 470	case WMI_TLV_TX_PAUSE_ID_P2P_GO_PS:
 471	case WMI_TLV_TX_PAUSE_ID_AP_PS:
 472	case WMI_TLV_TX_PAUSE_ID_IBSS_PS:
 473		for (vdev_id = 0; vdev_map; vdev_id++) {
 474			if (!(vdev_map & BIT(vdev_id)))
 475				continue;
 476
 477			vdev_map &= ~BIT(vdev_id);
 478			ath10k_mac_handle_tx_pause_vdev(ar, vdev_id, pause_id,
 479							action);
 480		}
 481		break;
 482	case WMI_TLV_TX_PAUSE_ID_AP_PEER_PS:
 483	case WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD:
 484	case WMI_TLV_TX_PAUSE_ID_STA_ADD_BA:
 485	case WMI_TLV_TX_PAUSE_ID_HOST:
 486		ath10k_dbg(ar, ATH10K_DBG_MAC,
 487			   "mac ignoring unsupported tx pause id %d\n",
 488			   pause_id);
 489		break;
 490	default:
 491		ath10k_dbg(ar, ATH10K_DBG_MAC,
 492			   "mac ignoring unknown tx pause id %d\n",
 493			   pause_id);
 494		break;
 495	}
 496
 497	kfree(tb);
 498	return 0;
 499}
 500
 501static void ath10k_wmi_tlv_event_rfkill_state_change(struct ath10k *ar,
 502						     struct sk_buff *skb)
 503{
 504	const struct wmi_tlv_rfkill_state_change_ev *ev;
 505	const void **tb;
 506	bool radio;
 507	int ret;
 508
 509	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
 510	if (IS_ERR(tb)) {
 511		ret = PTR_ERR(tb);
 512		ath10k_warn(ar,
 513			    "failed to parse rfkill state change event: %d\n",
 514			    ret);
 515		return;
 516	}
 517
 518	ev = tb[WMI_TLV_TAG_STRUCT_RFKILL_EVENT];
 519	if (!ev) {
 520		kfree(tb);
 521		return;
 522	}
 523
 524	ath10k_dbg(ar, ATH10K_DBG_MAC,
 525		   "wmi tlv rfkill state change gpio %d type %d radio_state %d\n",
 526		   __le32_to_cpu(ev->gpio_pin_num),
 527		   __le32_to_cpu(ev->int_type),
 528		   __le32_to_cpu(ev->radio_state));
 529
 530	radio = (__le32_to_cpu(ev->radio_state) == WMI_TLV_RFKILL_RADIO_STATE_ON);
 531
 532	spin_lock_bh(&ar->data_lock);
 533
 534	if (!radio)
 535		ar->hw_rfkill_on = true;
 536
 537	spin_unlock_bh(&ar->data_lock);
 538
 539	/* notify cfg80211 radio state change */
 540	ath10k_mac_rfkill_enable_radio(ar, radio);
 541	wiphy_rfkill_set_hw_state(ar->hw->wiphy, !radio);
 542}
 543
 544static int ath10k_wmi_tlv_event_temperature(struct ath10k *ar,
 545					    struct sk_buff *skb)
 546{
 547	const struct wmi_tlv_pdev_temperature_event *ev;
 548
 549	ev = (struct wmi_tlv_pdev_temperature_event *)skb->data;
 550	if (WARN_ON(skb->len < sizeof(*ev)))
 551		return -EPROTO;
 552
 553	ath10k_thermal_event_temperature(ar, __le32_to_cpu(ev->temperature));
 554	return 0;
 555}
 556
 557static void ath10k_wmi_event_tdls_peer(struct ath10k *ar, struct sk_buff *skb)
 558{
 559	struct ieee80211_sta *station;
 560	const struct wmi_tlv_tdls_peer_event *ev;
 561	const void **tb;
 562	struct ath10k_vif *arvif;
 563
 564	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
 565	if (IS_ERR(tb)) {
 566		ath10k_warn(ar, "tdls peer failed to parse tlv\n");
 567		return;
 568	}
 569	ev = tb[WMI_TLV_TAG_STRUCT_TDLS_PEER_EVENT];
 570	if (!ev) {
 571		kfree(tb);
 572		ath10k_warn(ar, "tdls peer NULL event\n");
 573		return;
 574	}
 575
 576	switch (__le32_to_cpu(ev->peer_reason)) {
 577	case WMI_TDLS_TEARDOWN_REASON_TX:
 578	case WMI_TDLS_TEARDOWN_REASON_RSSI:
 579	case WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT:
 580		rcu_read_lock();
 581		station = ieee80211_find_sta_by_ifaddr(ar->hw,
 582						       ev->peer_macaddr.addr,
 583						       NULL);
 584		if (!station) {
 585			ath10k_warn(ar, "did not find station from tdls peer event\n");
 586			goto exit;
 587		}
 588
 589		arvif = ath10k_get_arvif(ar, __le32_to_cpu(ev->vdev_id));
 590		if (!arvif) {
 591			ath10k_warn(ar, "no vif for vdev_id %d found\n",
 592				    __le32_to_cpu(ev->vdev_id));
 593			goto exit;
 594		}
 595
 596		ieee80211_tdls_oper_request(
 597					arvif->vif, station->addr,
 598					NL80211_TDLS_TEARDOWN,
 599					WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE,
 600					GFP_ATOMIC
 601					);
 602		break;
 603	default:
 604		kfree(tb);
 605		return;
 606	}
 607
 608exit:
 609	rcu_read_unlock();
 610	kfree(tb);
 611}
 612
 613static int ath10k_wmi_tlv_event_peer_delete_resp(struct ath10k *ar,
 614						 struct sk_buff *skb)
 615{
 616	struct wmi_peer_delete_resp_ev_arg *arg;
 617	struct wmi_tlv *tlv_hdr;
 618
 619	tlv_hdr = (struct wmi_tlv *)skb->data;
 620	arg = (struct wmi_peer_delete_resp_ev_arg *)tlv_hdr->value;
 621
 622	ath10k_dbg(ar, ATH10K_DBG_WMI, "vdev id %d\n", __le32_to_cpu(arg->vdev_id));
 623	ath10k_dbg(ar, ATH10K_DBG_WMI, "peer mac addr %pM\n", &arg->peer_addr);
 624	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer delete response\n");
 625
 626	complete(&ar->peer_delete_done);
 627
 628	return 0;
 629}
 630
 631/***********/
 632/* TLV ops */
 633/***********/
 634
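/* Main WMI TLV event handler: strip the wmi_cmd_hdr, give testmode (UTF) a
 * chance to consume the event, then dispatch on the event id. Handlers that
 * take over the skb (e.g. mgmt rx, service ready) return early; every other
 * path falls through to dev_kfree_skb() at the end.
 */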
 635static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
 636{
 637	struct wmi_cmd_hdr *cmd_hdr;
 638	enum wmi_tlv_event_id id;
 639	bool consumed;
 640
 641	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
 642	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
 643
 644	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
 645		goto out;
 646
 647	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
 648
 649	consumed = ath10k_tm_event_wmi(ar, id, skb);
 650
 651	/* The ready event must be handled normally even in UTF mode so that
 652	 * we know the UTF firmware has booted; all other WMI events are
 653	 * simply passed through to testmode.
 654	 */
 655	if (consumed && id != WMI_TLV_READY_EVENTID) {
 656		ath10k_dbg(ar, ATH10K_DBG_WMI,
 657			   "wmi tlv testmode consumed 0x%x\n", id);
 658		goto out;
 659	}
 660
 661	switch (id) {
 662	case WMI_TLV_MGMT_RX_EVENTID:
 663		ath10k_wmi_event_mgmt_rx(ar, skb);
 664		/* mgmt_rx() owns the skb now! */
 665		return;
 666	case WMI_TLV_SCAN_EVENTID:
 667		ath10k_wmi_event_scan(ar, skb);
 668		break;
 669	case WMI_TLV_CHAN_INFO_EVENTID:
 670		ath10k_wmi_event_chan_info(ar, skb);
 671		break;
 672	case WMI_TLV_ECHO_EVENTID:
 673		ath10k_wmi_event_echo(ar, skb);
 674		break;
 675	case WMI_TLV_DEBUG_MESG_EVENTID:
 676		ath10k_wmi_event_debug_mesg(ar, skb);
 677		break;
 678	case WMI_TLV_UPDATE_STATS_EVENTID:
 679		ath10k_wmi_event_update_stats(ar, skb);
 680		break;
 681	case WMI_TLV_PEER_STATS_INFO_EVENTID:
 682		ath10k_wmi_tlv_event_peer_stats_info(ar, skb);
 683		break;
 684	case WMI_TLV_VDEV_START_RESP_EVENTID:
 685		ath10k_wmi_event_vdev_start_resp(ar, skb);
 686		break;
 687	case WMI_TLV_VDEV_STOPPED_EVENTID:
 688		ath10k_wmi_event_vdev_stopped(ar, skb);
 689		break;
 690	case WMI_TLV_VDEV_DELETE_RESP_EVENTID:
 691		ath10k_wmi_tlv_event_vdev_delete_resp(ar, skb);
 692		break;
 693	case WMI_TLV_PEER_STA_KICKOUT_EVENTID:
 694		ath10k_wmi_event_peer_sta_kickout(ar, skb);
 695		break;
 696	case WMI_TLV_HOST_SWBA_EVENTID:
 697		ath10k_wmi_event_host_swba(ar, skb);
 698		break;
 699	case WMI_TLV_TBTTOFFSET_UPDATE_EVENTID:
 700		ath10k_wmi_event_tbttoffset_update(ar, skb);
 701		break;
 702	case WMI_TLV_PHYERR_EVENTID:
 703		ath10k_wmi_event_phyerr(ar, skb);
 704		break;
 705	case WMI_TLV_ROAM_EVENTID:
 706		ath10k_wmi_event_roam(ar, skb);
 707		break;
 708	case WMI_TLV_PROFILE_MATCH:
 709		ath10k_wmi_event_profile_match(ar, skb);
 710		break;
 711	case WMI_TLV_DEBUG_PRINT_EVENTID:
 712		ath10k_wmi_event_debug_print(ar, skb);
 713		break;
 714	case WMI_TLV_PDEV_QVIT_EVENTID:
 715		ath10k_wmi_event_pdev_qvit(ar, skb);
 716		break;
 717	case WMI_TLV_WLAN_PROFILE_DATA_EVENTID:
 718		ath10k_wmi_event_wlan_profile_data(ar, skb);
 719		break;
 720	case WMI_TLV_RTT_MEASUREMENT_REPORT_EVENTID:
 721		ath10k_wmi_event_rtt_measurement_report(ar, skb);
 722		break;
 723	case WMI_TLV_TSF_MEASUREMENT_REPORT_EVENTID:
 724		ath10k_wmi_event_tsf_measurement_report(ar, skb);
 725		break;
 726	case WMI_TLV_RTT_ERROR_REPORT_EVENTID:
 727		ath10k_wmi_event_rtt_error_report(ar, skb);
 728		break;
 729	case WMI_TLV_WOW_WAKEUP_HOST_EVENTID:
 730		ath10k_wmi_event_wow_wakeup_host(ar, skb);
 731		break;
 732	case WMI_TLV_DCS_INTERFERENCE_EVENTID:
 733		ath10k_wmi_event_dcs_interference(ar, skb);
 734		break;
 735	case WMI_TLV_PDEV_TPC_CONFIG_EVENTID:
 736		ath10k_wmi_event_pdev_tpc_config(ar, skb);
 737		break;
 738	case WMI_TLV_PDEV_FTM_INTG_EVENTID:
 739		ath10k_wmi_event_pdev_ftm_intg(ar, skb);
 740		break;
 741	case WMI_TLV_GTK_OFFLOAD_STATUS_EVENTID:
 742		ath10k_wmi_event_gtk_offload_status(ar, skb);
 743		break;
 744	case WMI_TLV_GTK_REKEY_FAIL_EVENTID:
 745		ath10k_wmi_event_gtk_rekey_fail(ar, skb);
 746		break;
 747	case WMI_TLV_TX_DELBA_COMPLETE_EVENTID:
 748		ath10k_wmi_event_delba_complete(ar, skb);
 749		break;
 750	case WMI_TLV_TX_ADDBA_COMPLETE_EVENTID:
 751		ath10k_wmi_event_addba_complete(ar, skb);
 752		break;
 753	case WMI_TLV_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
 754		ath10k_wmi_event_vdev_install_key_complete(ar, skb);
 755		break;
 756	case WMI_TLV_SERVICE_READY_EVENTID:
 757		ath10k_wmi_event_service_ready(ar, skb);
 758		return;
 759	case WMI_TLV_READY_EVENTID:
 760		ath10k_wmi_event_ready(ar, skb);
 761		break;
 762	case WMI_TLV_SERVICE_AVAILABLE_EVENTID:
 763		ath10k_wmi_event_service_available(ar, skb);
 764		break;
 765	case WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID:
 766		ath10k_wmi_tlv_event_bcn_tx_status(ar, skb);
 767		break;
 768	case WMI_TLV_DIAG_DATA_CONTAINER_EVENTID:
 769		ath10k_wmi_tlv_event_diag_data(ar, skb);
 770		break;
 771	case WMI_TLV_DIAG_EVENTID:
 772		ath10k_wmi_tlv_event_diag(ar, skb);
 773		break;
 774	case WMI_TLV_P2P_NOA_EVENTID:
 775		ath10k_wmi_tlv_event_p2p_noa(ar, skb);
 776		break;
 777	case WMI_TLV_TX_PAUSE_EVENTID:
 778		ath10k_wmi_tlv_event_tx_pause(ar, skb);
 779		break;
 780	case WMI_TLV_RFKILL_STATE_CHANGE_EVENTID:
 781		ath10k_wmi_tlv_event_rfkill_state_change(ar, skb);
 782		break;
 783	case WMI_TLV_PDEV_TEMPERATURE_EVENTID:
 784		ath10k_wmi_tlv_event_temperature(ar, skb);
 785		break;
 786	case WMI_TLV_TDLS_PEER_EVENTID:
 787		ath10k_wmi_event_tdls_peer(ar, skb);
 788		break;
 789	case WMI_TLV_PEER_DELETE_RESP_EVENTID:
 790		ath10k_wmi_tlv_event_peer_delete_resp(ar, skb);
 791		break;
 792	case WMI_TLV_MGMT_TX_COMPLETION_EVENTID:
 793		ath10k_wmi_event_mgmt_tx_compl(ar, skb);
 794		break;
 795	case WMI_TLV_MGMT_TX_BUNDLE_COMPLETION_EVENTID:
 796		ath10k_wmi_event_mgmt_tx_bundle_compl(ar, skb);
 797		break;
 798	default:
 799		ath10k_dbg(ar, ATH10K_DBG_WMI, "Unknown eventid: %d\n", id);
 800		break;
 801	}
 802
 803out:
 804	dev_kfree_skb(skb);
 805}
 806
 807static int ath10k_wmi_tlv_op_pull_scan_ev(struct ath10k *ar,
 808					  struct sk_buff *skb,
 809					  struct wmi_scan_ev_arg *arg)
 810{
 811	const void **tb;
 812	const struct wmi_scan_event *ev;
 813	int ret;
 814
 815	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
 816	if (IS_ERR(tb)) {
 817		ret = PTR_ERR(tb);
 818		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
 819		return ret;
 820	}
 821
 822	ev = tb[WMI_TLV_TAG_STRUCT_SCAN_EVENT];
 823	if (!ev) {
 824		kfree(tb);
 825		return -EPROTO;
 826	}
 827
 828	arg->event_type = ev->event_type;
 829	arg->reason = ev->reason;
 830	arg->channel_freq = ev->channel_freq;
 831	arg->scan_req_id = ev->scan_req_id;
 832	arg->scan_id = ev->scan_id;
 833	arg->vdev_id = ev->vdev_id;
 834
 835	kfree(tb);
 836	return 0;
 837}
 838
 839static int
 840ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev(struct ath10k *ar, struct sk_buff *skb,
 841					struct wmi_tlv_mgmt_tx_compl_ev_arg *arg)
 842{
 843	const void **tb;
 844	const struct wmi_tlv_mgmt_tx_compl_ev *ev;
 845	int ret;
 846
 847	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
 848	if (IS_ERR(tb)) {
 849		ret = PTR_ERR(tb);
 850		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
 851		return ret;
 852	}
 853
 854	ev = tb[WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_EVENT];
 855	if (!ev) {
 856		kfree(tb);
 857		return -EPROTO;
 858	}
 859
 860	arg->desc_id = ev->desc_id;
 861	arg->status = ev->status;
 862	arg->pdev_id = ev->pdev_id;
 863	arg->ppdu_id = ev->ppdu_id;
 864
 865	if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
 866		arg->ack_rssi = ev->ack_rssi;
 867
 868	kfree(tb);
 869	return 0;
 870}
 871
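/* The bundle tx completion event carries several untagged UINT32 arrays whose
 * meaning is defined purely by their order (desc ids, status, ppdu ids, ack
 * rssi), so the parser below tracks which arrays it has already seen.
 */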
 872struct wmi_tlv_tx_bundle_compl_parse {
 873	const __le32 *num_reports;
 874	const __le32 *desc_ids;
 875	const __le32 *status;
 876	const __le32 *ppdu_ids;
 877	const __le32 *ack_rssi;
 878	bool desc_ids_done;
 879	bool status_done;
 880	bool ppdu_ids_done;
 881	bool ack_rssi_done;
 882};
 883
 884static int
 885ath10k_wmi_tlv_mgmt_tx_bundle_compl_parse(struct ath10k *ar, u16 tag, u16 len,
 886					  const void *ptr, void *data)
 887{
 888	struct wmi_tlv_tx_bundle_compl_parse *bundle_tx_compl = data;
 889
 890	switch (tag) {
 891	case WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_BUNDLE_EVENT:
 892		bundle_tx_compl->num_reports = ptr;
 893		break;
 894	case WMI_TLV_TAG_ARRAY_UINT32:
 895		if (!bundle_tx_compl->desc_ids_done) {
 896			bundle_tx_compl->desc_ids_done = true;
 897			bundle_tx_compl->desc_ids = ptr;
 898		} else if (!bundle_tx_compl->status_done) {
 899			bundle_tx_compl->status_done = true;
 900			bundle_tx_compl->status = ptr;
 901		} else if (!bundle_tx_compl->ppdu_ids_done) {
 902			bundle_tx_compl->ppdu_ids_done = true;
 903			bundle_tx_compl->ppdu_ids = ptr;
 904		} else if (!bundle_tx_compl->ack_rssi_done) {
 905			bundle_tx_compl->ack_rssi_done = true;
 906			bundle_tx_compl->ack_rssi = ptr;
 907		}
 908		break;
 909	default:
 910		break;
 911	}
 912	return 0;
 913}
 914
 915static int ath10k_wmi_tlv_op_pull_mgmt_tx_bundle_compl_ev(
 916				struct ath10k *ar, struct sk_buff *skb,
 917				struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg *arg)
 918{
 919	struct wmi_tlv_tx_bundle_compl_parse bundle_tx_compl = { };
 920	int ret;
 921
 922	ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
 923				  ath10k_wmi_tlv_mgmt_tx_bundle_compl_parse,
 924				  &bundle_tx_compl);
 925	if (ret) {
 926		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
 927		return ret;
 928	}
 929
 930	if (!bundle_tx_compl.num_reports || !bundle_tx_compl.desc_ids ||
 931	    !bundle_tx_compl.status)
 932		return -EPROTO;
 933
 934	arg->num_reports = *bundle_tx_compl.num_reports;
 935	arg->desc_ids = bundle_tx_compl.desc_ids;
 936	arg->status = bundle_tx_compl.status;
 937	arg->ppdu_ids = bundle_tx_compl.ppdu_ids;
 938
 939	if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
 940		arg->ack_rssi = bundle_tx_compl.ack_rssi;
 941
 942	return 0;
 943}
 944
 945static int ath10k_wmi_tlv_op_pull_mgmt_rx_ev(struct ath10k *ar,
 946					     struct sk_buff *skb,
 947					     struct wmi_mgmt_rx_ev_arg *arg)
 948{
 949	const void **tb;
 950	const struct wmi_tlv_mgmt_rx_ev *ev;
 951	const u8 *frame;
 952	u32 msdu_len;
 953	int ret, i;
 954
 955	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
 956	if (IS_ERR(tb)) {
 957		ret = PTR_ERR(tb);
 958		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
 959		return ret;
 960	}
 961
 962	ev = tb[WMI_TLV_TAG_STRUCT_MGMT_RX_HDR];
 963	frame = tb[WMI_TLV_TAG_ARRAY_BYTE];
 964
 965	if (!ev || !frame) {
 966		kfree(tb);
 967		return -EPROTO;
 968	}
 969
 970	arg->channel = ev->channel;
 971	arg->buf_len = ev->buf_len;
 972	arg->status = ev->status;
 973	arg->snr = ev->snr;
 974	arg->phy_mode = ev->phy_mode;
 975	arg->rate = ev->rate;
 976
 977	for (i = 0; i < ARRAY_SIZE(ev->rssi); i++)
 978		arg->rssi[i] = ev->rssi[i];
 979
 980	msdu_len = __le32_to_cpu(arg->buf_len);
 981
 982	if (skb->len < (frame - skb->data) + msdu_len) {
 983		kfree(tb);
 984		return -EPROTO;
 985	}
 986
 987	/* shift the sk_buff to point to `frame` */
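	/* skb_trim() first empties the buffer, the skb_put()/skb_pull() pair
	 * advances skb->data to the start of the 802.11 frame, and the final
	 * skb_put() makes the msdu_len-byte frame visible to the caller.
	 */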
 988	skb_trim(skb, 0);
 989	skb_put(skb, frame - skb->data);
 990	skb_pull(skb, frame - skb->data);
 991	skb_put(skb, msdu_len);
 992
 993	kfree(tb);
 994	return 0;
 995}
 996
 997static int ath10k_wmi_tlv_op_pull_ch_info_ev(struct ath10k *ar,
 998					     struct sk_buff *skb,
 999					     struct wmi_ch_info_ev_arg *arg)
1000{
1001	const void **tb;
1002	const struct wmi_tlv_chan_info_event *ev;
1003	int ret;
1004
1005	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1006	if (IS_ERR(tb)) {
1007		ret = PTR_ERR(tb);
1008		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1009		return ret;
1010	}
1011
1012	ev = tb[WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT];
1013	if (!ev) {
1014		kfree(tb);
1015		return -EPROTO;
1016	}
1017
1018	arg->err_code = ev->err_code;
1019	arg->freq = ev->freq;
1020	arg->cmd_flags = ev->cmd_flags;
1021	arg->noise_floor = ev->noise_floor;
1022	arg->rx_clear_count = ev->rx_clear_count;
1023	arg->cycle_count = ev->cycle_count;
1024	if (test_bit(ATH10K_FW_FEATURE_SINGLE_CHAN_INFO_PER_CHANNEL,
1025		     ar->running_fw->fw_file.fw_features))
1026		arg->mac_clk_mhz = ev->mac_clk_mhz;
1027
1028	kfree(tb);
1029	return 0;
1030}
1031
1032static int
1033ath10k_wmi_tlv_op_pull_vdev_start_ev(struct ath10k *ar, struct sk_buff *skb,
1034				     struct wmi_vdev_start_ev_arg *arg)
1035{
1036	const void **tb;
1037	const struct wmi_vdev_start_response_event *ev;
1038	int ret;
1039
1040	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1041	if (IS_ERR(tb)) {
1042		ret = PTR_ERR(tb);
1043		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1044		return ret;
1045	}
1046
1047	ev = tb[WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT];
1048	if (!ev) {
1049		kfree(tb);
1050		return -EPROTO;
1051	}
1052
1053	skb_pull(skb, sizeof(*ev));
1054	arg->vdev_id = ev->vdev_id;
1055	arg->req_id = ev->req_id;
1056	arg->resp_type = ev->resp_type;
1057	arg->status = ev->status;
1058
1059	kfree(tb);
1060	return 0;
1061}
1062
1063static int ath10k_wmi_tlv_op_pull_peer_kick_ev(struct ath10k *ar,
1064					       struct sk_buff *skb,
1065					       struct wmi_peer_kick_ev_arg *arg)
1066{
1067	const void **tb;
1068	const struct wmi_peer_sta_kickout_event *ev;
1069	int ret;
1070
1071	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1072	if (IS_ERR(tb)) {
1073		ret = PTR_ERR(tb);
1074		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1075		return ret;
1076	}
1077
1078	ev = tb[WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT];
1079	if (!ev) {
1080		kfree(tb);
1081		return -EPROTO;
1082	}
1083
1084	arg->mac_addr = ev->peer_macaddr.addr;
1085
1086	kfree(tb);
1087	return 0;
1088}
1089
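/* A host SWBA (software beacon alert) event consists of the fixed event
 * struct followed by two ARRAY_STRUCT TLVs: per-vdev TIM info first, then
 * per-vdev P2P NoA info. The nested iterators below collect both, and the
 * tim_done/noa_done flags tell the two arrays apart by their order.
 */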
1090struct wmi_tlv_swba_parse {
1091	const struct wmi_host_swba_event *ev;
1092	bool tim_done;
1093	bool noa_done;
1094	size_t n_tim;
1095	size_t n_noa;
1096	struct wmi_swba_ev_arg *arg;
1097};
1098
1099static int ath10k_wmi_tlv_swba_tim_parse(struct ath10k *ar, u16 tag, u16 len,
1100					 const void *ptr, void *data)
1101{
1102	struct wmi_tlv_swba_parse *swba = data;
1103	struct wmi_tim_info_arg *tim_info_arg;
1104	const struct wmi_tim_info *tim_info_ev = ptr;
1105
1106	if (tag != WMI_TLV_TAG_STRUCT_TIM_INFO)
1107		return -EPROTO;
1108
1109	if (swba->n_tim >= ARRAY_SIZE(swba->arg->tim_info))
1110		return -ENOBUFS;
1111
1112	if (__le32_to_cpu(tim_info_ev->tim_len) >
1113	     sizeof(tim_info_ev->tim_bitmap)) {
1114		ath10k_warn(ar, "refusing to parse invalid swba structure\n");
1115		return -EPROTO;
1116	}
1117
1118	tim_info_arg = &swba->arg->tim_info[swba->n_tim];
1119	tim_info_arg->tim_len = tim_info_ev->tim_len;
1120	tim_info_arg->tim_mcast = tim_info_ev->tim_mcast;
1121	tim_info_arg->tim_bitmap = tim_info_ev->tim_bitmap;
1122	tim_info_arg->tim_changed = tim_info_ev->tim_changed;
1123	tim_info_arg->tim_num_ps_pending = tim_info_ev->tim_num_ps_pending;
1124
1125	swba->n_tim++;
1126
1127	return 0;
1128}
1129
1130static int ath10k_wmi_tlv_swba_noa_parse(struct ath10k *ar, u16 tag, u16 len,
1131					 const void *ptr, void *data)
1132{
1133	struct wmi_tlv_swba_parse *swba = data;
1134
1135	if (tag != WMI_TLV_TAG_STRUCT_P2P_NOA_INFO)
1136		return -EPROTO;
1137
1138	if (swba->n_noa >= ARRAY_SIZE(swba->arg->noa_info))
1139		return -ENOBUFS;
1140
1141	swba->arg->noa_info[swba->n_noa++] = ptr;
1142	return 0;
1143}
1144
1145static int ath10k_wmi_tlv_swba_parse(struct ath10k *ar, u16 tag, u16 len,
1146				     const void *ptr, void *data)
1147{
1148	struct wmi_tlv_swba_parse *swba = data;
1149	int ret;
1150
1151	switch (tag) {
1152	case WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT:
1153		swba->ev = ptr;
1154		break;
1155	case WMI_TLV_TAG_ARRAY_STRUCT:
1156		if (!swba->tim_done) {
1157			swba->tim_done = true;
1158			ret = ath10k_wmi_tlv_iter(ar, ptr, len,
1159						  ath10k_wmi_tlv_swba_tim_parse,
1160						  swba);
1161			if (ret)
1162				return ret;
1163		} else if (!swba->noa_done) {
1164			swba->noa_done = true;
1165			ret = ath10k_wmi_tlv_iter(ar, ptr, len,
1166						  ath10k_wmi_tlv_swba_noa_parse,
1167						  swba);
1168			if (ret)
1169				return ret;
1170		}
1171		break;
1172	default:
1173		break;
1174	}
1175	return 0;
1176}
1177
1178static int ath10k_wmi_tlv_op_pull_swba_ev(struct ath10k *ar,
1179					  struct sk_buff *skb,
1180					  struct wmi_swba_ev_arg *arg)
1181{
1182	struct wmi_tlv_swba_parse swba = { .arg = arg };
1183	u32 map;
1184	size_t n_vdevs;
1185	int ret;
1186
1187	ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
1188				  ath10k_wmi_tlv_swba_parse, &swba);
1189	if (ret) {
1190		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1191		return ret;
1192	}
1193
1194	if (!swba.ev)
1195		return -EPROTO;
1196
1197	arg->vdev_map = swba.ev->vdev_map;
1198
1199	for (map = __le32_to_cpu(arg->vdev_map), n_vdevs = 0; map; map >>= 1)
1200		if (map & BIT(0))
1201			n_vdevs++;
1202
1203	if (n_vdevs != swba.n_tim ||
1204	    n_vdevs != swba.n_noa)
1205		return -EPROTO;
1206
1207	return 0;
1208}
1209
1210static int ath10k_wmi_tlv_op_pull_phyerr_ev_hdr(struct ath10k *ar,
1211						struct sk_buff *skb,
1212						struct wmi_phyerr_hdr_arg *arg)
1213{
1214	const void **tb;
1215	const struct wmi_tlv_phyerr_ev *ev;
1216	const void *phyerrs;
1217	int ret;
1218
1219	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1220	if (IS_ERR(tb)) {
1221		ret = PTR_ERR(tb);
1222		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1223		return ret;
1224	}
1225
1226	ev = tb[WMI_TLV_TAG_STRUCT_COMB_PHYERR_RX_HDR];
1227	phyerrs = tb[WMI_TLV_TAG_ARRAY_BYTE];
1228
1229	if (!ev || !phyerrs) {
1230		kfree(tb);
1231		return -EPROTO;
1232	}
1233
1234	arg->num_phyerrs  = __le32_to_cpu(ev->num_phyerrs);
1235	arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32);
1236	arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32);
1237	arg->buf_len = __le32_to_cpu(ev->buf_len);
1238	arg->phyerrs = phyerrs;
1239
1240	kfree(tb);
1241	return 0;
1242}
1243
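/* Host ABI version advertised in the init command; the firmware must report
 * a matching abi_ver0 and namespace words in its service ready event (checked
 * in ath10k_wmi_tlv_op_pull_svc_rdy_ev() below).
 */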
1244#define WMI_TLV_ABI_VER_NS0 0x5F414351
1245#define WMI_TLV_ABI_VER_NS1 0x00004C4D
1246#define WMI_TLV_ABI_VER_NS2 0x00000000
1247#define WMI_TLV_ABI_VER_NS3 0x00000000
1248
1249#define WMI_TLV_ABI_VER0_MAJOR 1
1250#define WMI_TLV_ABI_VER0_MINOR 0
1251#define WMI_TLV_ABI_VER0 ((((WMI_TLV_ABI_VER0_MAJOR) << 24) & 0xFF000000) | \
1252			  (((WMI_TLV_ABI_VER0_MINOR) <<  0) & 0x00FFFFFF))
1253#define WMI_TLV_ABI_VER1 53
1254
1255static int
1256ath10k_wmi_tlv_parse_mem_reqs(struct ath10k *ar, u16 tag, u16 len,
1257			      const void *ptr, void *data)
1258{
1259	struct wmi_svc_rdy_ev_arg *arg = data;
1260	int i;
1261
1262	if (tag != WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ)
1263		return -EPROTO;
1264
1265	for (i = 0; i < ARRAY_SIZE(arg->mem_reqs); i++) {
1266		if (!arg->mem_reqs[i]) {
1267			arg->mem_reqs[i] = ptr;
1268			return 0;
1269		}
1270	}
1271
1272	return -ENOMEM;
1273}
1274
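/* Service ready parsing relies on TLV ordering for the untagged ARRAY_UINT32
 * TLVs: the first one is the service bitmap, the second (the DBS hw mode
 * list) is only skipped over.
 */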
1275struct wmi_tlv_svc_rdy_parse {
1276	const struct hal_reg_capabilities *reg;
1277	const struct wmi_tlv_svc_rdy_ev *ev;
1278	const __le32 *svc_bmap;
1279	const struct wlan_host_mem_req *mem_reqs;
1280	bool svc_bmap_done;
1281	bool dbs_hw_mode_done;
1282};
1283
1284static int ath10k_wmi_tlv_svc_rdy_parse(struct ath10k *ar, u16 tag, u16 len,
1285					const void *ptr, void *data)
1286{
1287	struct wmi_tlv_svc_rdy_parse *svc_rdy = data;
1288
1289	switch (tag) {
1290	case WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT:
1291		svc_rdy->ev = ptr;
1292		break;
1293	case WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES:
1294		svc_rdy->reg = ptr;
1295		break;
1296	case WMI_TLV_TAG_ARRAY_STRUCT:
1297		svc_rdy->mem_reqs = ptr;
1298		break;
1299	case WMI_TLV_TAG_ARRAY_UINT32:
1300		if (!svc_rdy->svc_bmap_done) {
1301			svc_rdy->svc_bmap_done = true;
1302			svc_rdy->svc_bmap = ptr;
1303		} else if (!svc_rdy->dbs_hw_mode_done) {
1304			svc_rdy->dbs_hw_mode_done = true;
1305		}
1306		break;
1307	default:
1308		break;
1309	}
1310	return 0;
1311}
1312
1313static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
1314					     struct sk_buff *skb,
1315					     struct wmi_svc_rdy_ev_arg *arg)
1316{
1317	const struct hal_reg_capabilities *reg;
1318	const struct wmi_tlv_svc_rdy_ev *ev;
1319	const __le32 *svc_bmap;
1320	const struct wlan_host_mem_req *mem_reqs;
1321	struct wmi_tlv_svc_rdy_parse svc_rdy = { };
1322	int ret;
1323
1324	ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
1325				  ath10k_wmi_tlv_svc_rdy_parse, &svc_rdy);
1326	if (ret) {
1327		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1328		return ret;
1329	}
1330
1331	ev = svc_rdy.ev;
1332	reg = svc_rdy.reg;
1333	svc_bmap = svc_rdy.svc_bmap;
1334	mem_reqs = svc_rdy.mem_reqs;
1335
1336	if (!ev || !reg || !svc_bmap || !mem_reqs)
1337		return -EPROTO;
1338
 1339	/* This is an internal ABI compatibility check for WMI TLV, so check it
 1340	 * here instead of in the generic WMI code.
 1341	 */
1342	ath10k_dbg(ar, ATH10K_DBG_WMI,
1343		   "wmi tlv abi 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x\n",
1344		   __le32_to_cpu(ev->abi.abi_ver0), WMI_TLV_ABI_VER0,
1345		   __le32_to_cpu(ev->abi.abi_ver_ns0), WMI_TLV_ABI_VER_NS0,
1346		   __le32_to_cpu(ev->abi.abi_ver_ns1), WMI_TLV_ABI_VER_NS1,
1347		   __le32_to_cpu(ev->abi.abi_ver_ns2), WMI_TLV_ABI_VER_NS2,
1348		   __le32_to_cpu(ev->abi.abi_ver_ns3), WMI_TLV_ABI_VER_NS3);
1349
1350	if (__le32_to_cpu(ev->abi.abi_ver0) != WMI_TLV_ABI_VER0 ||
1351	    __le32_to_cpu(ev->abi.abi_ver_ns0) != WMI_TLV_ABI_VER_NS0 ||
1352	    __le32_to_cpu(ev->abi.abi_ver_ns1) != WMI_TLV_ABI_VER_NS1 ||
1353	    __le32_to_cpu(ev->abi.abi_ver_ns2) != WMI_TLV_ABI_VER_NS2 ||
1354	    __le32_to_cpu(ev->abi.abi_ver_ns3) != WMI_TLV_ABI_VER_NS3) {
1355		return -EOPNOTSUPP;
1356	}
1357
1358	arg->min_tx_power = ev->hw_min_tx_power;
1359	arg->max_tx_power = ev->hw_max_tx_power;
1360	arg->ht_cap = ev->ht_cap_info;
1361	arg->vht_cap = ev->vht_cap_info;
1362	arg->vht_supp_mcs = ev->vht_supp_mcs;
1363	arg->sw_ver0 = ev->abi.abi_ver0;
1364	arg->sw_ver1 = ev->abi.abi_ver1;
1365	arg->fw_build = ev->fw_build_vers;
1366	arg->phy_capab = ev->phy_capability;
1367	arg->num_rf_chains = ev->num_rf_chains;
1368	arg->eeprom_rd = reg->eeprom_rd;
1369	arg->low_2ghz_chan = reg->low_2ghz_chan;
1370	arg->high_2ghz_chan = reg->high_2ghz_chan;
1371	arg->low_5ghz_chan = reg->low_5ghz_chan;
1372	arg->high_5ghz_chan = reg->high_5ghz_chan;
1373	arg->num_mem_reqs = ev->num_mem_reqs;
1374	arg->service_map = svc_bmap;
1375	arg->service_map_len = ath10k_wmi_tlv_len(svc_bmap);
1376	arg->sys_cap_info = ev->sys_cap_info;
1377
1378	ret = ath10k_wmi_tlv_iter(ar, mem_reqs, ath10k_wmi_tlv_len(mem_reqs),
1379				  ath10k_wmi_tlv_parse_mem_reqs, arg);
1380	if (ret) {
1381		ath10k_warn(ar, "failed to parse mem_reqs tlv: %d\n", ret);
1382		return ret;
1383	}
1384
1385	return 0;
1386}
1387
1388static int ath10k_wmi_tlv_op_pull_rdy_ev(struct ath10k *ar,
1389					 struct sk_buff *skb,
1390					 struct wmi_rdy_ev_arg *arg)
1391{
1392	const void **tb;
1393	const struct wmi_tlv_rdy_ev *ev;
1394	int ret;
1395
1396	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1397	if (IS_ERR(tb)) {
1398		ret = PTR_ERR(tb);
1399		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1400		return ret;
1401	}
1402
1403	ev = tb[WMI_TLV_TAG_STRUCT_READY_EVENT];
1404	if (!ev) {
1405		kfree(tb);
1406		return -EPROTO;
1407	}
1408
1409	arg->sw_version = ev->abi.abi_ver0;
1410	arg->abi_version = ev->abi.abi_ver1;
1411	arg->status = ev->status;
1412	arg->mac_addr = ev->mac_addr.addr;
1413
1414	kfree(tb);
1415	return 0;
1416}
1417
1418static int ath10k_wmi_tlv_svc_avail_parse(struct ath10k *ar, u16 tag, u16 len,
1419					  const void *ptr, void *data)
1420{
1421	struct wmi_svc_avail_ev_arg *arg = data;
1422
1423	switch (tag) {
1424	case WMI_TLV_TAG_STRUCT_SERVICE_AVAILABLE_EVENT:
1425		arg->service_map_ext_valid = true;
1426		arg->service_map_ext_len = *(__le32 *)ptr;
1427		arg->service_map_ext = ptr + sizeof(__le32);
1428		return 0;
1429	default:
1430		break;
1431	}
1432
1433	return 0;
1434}
1435
1436static int ath10k_wmi_tlv_op_pull_svc_avail(struct ath10k *ar,
1437					    struct sk_buff *skb,
1438					    struct wmi_svc_avail_ev_arg *arg)
1439{
1440	int ret;
1441
1442	ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
1443				  ath10k_wmi_tlv_svc_avail_parse, arg);
1444
1445	if (ret) {
1446		ath10k_warn(ar, "failed to parse svc_avail tlv: %d\n", ret);
1447		return ret;
1448	}
1449
1450	return 0;
1451}
1452
1453static void ath10k_wmi_tlv_pull_vdev_stats(const struct wmi_tlv_vdev_stats *src,
1454					   struct ath10k_fw_stats_vdev *dst)
1455{
1456	int i;
1457
1458	dst->vdev_id = __le32_to_cpu(src->vdev_id);
1459	dst->beacon_snr = __le32_to_cpu(src->beacon_snr);
1460	dst->data_snr = __le32_to_cpu(src->data_snr);
1461	dst->num_rx_frames = __le32_to_cpu(src->num_rx_frames);
1462	dst->num_rts_fail = __le32_to_cpu(src->num_rts_fail);
1463	dst->num_rts_success = __le32_to_cpu(src->num_rts_success);
1464	dst->num_rx_err = __le32_to_cpu(src->num_rx_err);
1465	dst->num_rx_discard = __le32_to_cpu(src->num_rx_discard);
1466	dst->num_tx_not_acked = __le32_to_cpu(src->num_tx_not_acked);
1467
1468	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames); i++)
1469		dst->num_tx_frames[i] =
1470			__le32_to_cpu(src->num_tx_frames[i]);
1471
1472	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_retries); i++)
1473		dst->num_tx_frames_retries[i] =
1474			__le32_to_cpu(src->num_tx_frames_retries[i]);
1475
1476	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_failures); i++)
1477		dst->num_tx_frames_failures[i] =
1478			__le32_to_cpu(src->num_tx_frames_failures[i]);
1479
1480	for (i = 0; i < ARRAY_SIZE(src->tx_rate_history); i++)
1481		dst->tx_rate_history[i] =
1482			__le32_to_cpu(src->tx_rate_history[i]);
1483
1484	for (i = 0; i < ARRAY_SIZE(src->beacon_rssi_history); i++)
1485		dst->beacon_rssi_history[i] =
1486			__le32_to_cpu(src->beacon_rssi_history[i]);
1487}
1488
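/* The stats event packs pdev, vdev and peer records back to back inside a
 * single ARRAY_BYTE TLV; the counts in the fixed stats TLV tell how many of
 * each record to expect.
 */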
1489static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
1490					   struct sk_buff *skb,
1491					   struct ath10k_fw_stats *stats)
1492{
1493	const void **tb;
1494	const struct wmi_tlv_stats_ev *ev;
1495	u32 num_peer_stats_extd;
1496	const void *data;
1497	u32 num_pdev_stats;
1498	u32 num_vdev_stats;
1499	u32 num_peer_stats;
1500	u32 num_bcnflt_stats;
1501	u32 num_chan_stats;
1502	size_t data_len;
1503	u32 stats_id;
1504	int ret;
1505	int i;
1506
1507	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1508	if (IS_ERR(tb)) {
1509		ret = PTR_ERR(tb);
1510		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1511		return ret;
1512	}
1513
1514	ev = tb[WMI_TLV_TAG_STRUCT_STATS_EVENT];
1515	data = tb[WMI_TLV_TAG_ARRAY_BYTE];
1516
1517	if (!ev || !data) {
1518		kfree(tb);
1519		return -EPROTO;
1520	}
1521
1522	data_len = ath10k_wmi_tlv_len(data);
1523	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
1524	num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
1525	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
1526	num_bcnflt_stats = __le32_to_cpu(ev->num_bcnflt_stats);
1527	num_chan_stats = __le32_to_cpu(ev->num_chan_stats);
1528	stats_id = __le32_to_cpu(ev->stats_id);
1529	num_peer_stats_extd = __le32_to_cpu(ev->num_peer_stats_extd);
1530
1531	ath10k_dbg(ar, ATH10K_DBG_WMI,
1532		   "wmi tlv stats update pdev %i vdev %i peer %i bcnflt %i chan %i peer_extd %i\n",
1533		   num_pdev_stats, num_vdev_stats, num_peer_stats,
1534		   num_bcnflt_stats, num_chan_stats, num_peer_stats_extd);
1535
1536	for (i = 0; i < num_pdev_stats; i++) {
1537		const struct wmi_pdev_stats *src;
1538		struct ath10k_fw_stats_pdev *dst;
1539
1540		src = data;
1541		if (data_len < sizeof(*src)) {
1542			kfree(tb);
1543			return -EPROTO;
1544		}
1545
1546		data += sizeof(*src);
1547		data_len -= sizeof(*src);
1548
1549		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
1550		if (!dst)
1551			continue;
1552
1553		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
1554		ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
1555		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
1556		list_add_tail(&dst->list, &stats->pdevs);
1557	}
1558
1559	for (i = 0; i < num_vdev_stats; i++) {
1560		const struct wmi_tlv_vdev_stats *src;
1561		struct ath10k_fw_stats_vdev *dst;
1562
1563		src = data;
1564		if (data_len < sizeof(*src)) {
1565			kfree(tb);
1566			return -EPROTO;
1567		}
1568
1569		data += sizeof(*src);
1570		data_len -= sizeof(*src);
1571
1572		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
1573		if (!dst)
1574			continue;
1575
1576		ath10k_wmi_tlv_pull_vdev_stats(src, dst);
1577		list_add_tail(&dst->list, &stats->vdevs);
1578	}
1579
1580	for (i = 0; i < num_peer_stats; i++) {
1581		const struct wmi_10x_peer_stats *src;
1582		struct ath10k_fw_stats_peer *dst;
1583
1584		src = data;
1585		if (data_len < sizeof(*src)) {
1586			kfree(tb);
1587			return -EPROTO;
1588		}
1589
1590		data += sizeof(*src);
1591		data_len -= sizeof(*src);
1592
1593		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
1594		if (!dst)
1595			continue;
1596
1597		ath10k_wmi_pull_peer_stats(&src->old, dst);
1598		dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
1599
1600		if (stats_id & WMI_TLV_STAT_PEER_EXTD) {
1601			const struct wmi_tlv_peer_stats_extd *extd;
1602			unsigned long rx_duration_high;
1603
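			/* Extended peer stats records follow all of the base
			 * peer stats records in the byte array, so skip the
			 * remaining base records plus the preceding extd
			 * records to reach this peer's extd entry.
			 */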
1604			extd = data + sizeof(*src) * (num_peer_stats - i - 1)
1605			       + sizeof(*extd) * i;
1606
1607			dst->rx_duration = __le32_to_cpu(extd->rx_duration);
1608			rx_duration_high = __le32_to_cpu
1609						(extd->rx_duration_high);
1610
1611			if (test_bit(WMI_TLV_PEER_RX_DURATION_HIGH_VALID_BIT,
1612				     &rx_duration_high)) {
1613				rx_duration_high =
1614					FIELD_GET(WMI_TLV_PEER_RX_DURATION_HIGH_MASK,
1615						  rx_duration_high);
1616				dst->rx_duration |= (u64)rx_duration_high <<
1617						    WMI_TLV_PEER_RX_DURATION_SHIFT;
1618			}
1619		}
1620
1621		list_add_tail(&dst->list, &stats->peers);
1622	}
1623
1624	kfree(tb);
1625	return 0;
1626}
1627
1628static int ath10k_wmi_tlv_op_pull_roam_ev(struct ath10k *ar,
1629					  struct sk_buff *skb,
1630					  struct wmi_roam_ev_arg *arg)
1631{
1632	const void **tb;
1633	const struct wmi_tlv_roam_ev *ev;
1634	int ret;
1635
1636	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1637	if (IS_ERR(tb)) {
1638		ret = PTR_ERR(tb);
1639		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1640		return ret;
1641	}
1642
1643	ev = tb[WMI_TLV_TAG_STRUCT_ROAM_EVENT];
1644	if (!ev) {
1645		kfree(tb);
1646		return -EPROTO;
1647	}
1648
1649	arg->vdev_id = ev->vdev_id;
1650	arg->reason = ev->reason;
1651	arg->rssi = ev->rssi;
1652
1653	kfree(tb);
1654	return 0;
1655}
1656
1657static int
1658ath10k_wmi_tlv_op_pull_wow_ev(struct ath10k *ar, struct sk_buff *skb,
1659			      struct wmi_wow_ev_arg *arg)
1660{
1661	const void **tb;
1662	const struct wmi_tlv_wow_event_info *ev;
1663	int ret;
1664
1665	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1666	if (IS_ERR(tb)) {
1667		ret = PTR_ERR(tb);
1668		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1669		return ret;
1670	}
1671
1672	ev = tb[WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO];
1673	if (!ev) {
1674		kfree(tb);
1675		return -EPROTO;
1676	}
1677
1678	arg->vdev_id = __le32_to_cpu(ev->vdev_id);
1679	arg->flag = __le32_to_cpu(ev->flag);
1680	arg->wake_reason = __le32_to_cpu(ev->wake_reason);
1681	arg->data_len = __le32_to_cpu(ev->data_len);
1682
1683	kfree(tb);
1684	return 0;
1685}
1686
1687static int ath10k_wmi_tlv_op_pull_echo_ev(struct ath10k *ar,
1688					  struct sk_buff *skb,
1689					  struct wmi_echo_ev_arg *arg)
1690{
1691	const void **tb;
1692	const struct wmi_echo_event *ev;
1693	int ret;
1694
1695	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1696	if (IS_ERR(tb)) {
1697		ret = PTR_ERR(tb);
1698		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1699		return ret;
1700	}
1701
1702	ev = tb[WMI_TLV_TAG_STRUCT_ECHO_EVENT];
1703	if (!ev) {
1704		kfree(tb);
1705		return -EPROTO;
1706	}
1707
1708	arg->value = ev->value;
1709
1710	kfree(tb);
1711	return 0;
1712}
1713
1714static struct sk_buff *
1715ath10k_wmi_tlv_op_gen_pdev_suspend(struct ath10k *ar, u32 opt)
1716{
1717	struct wmi_tlv_pdev_suspend *cmd;
1718	struct wmi_tlv *tlv;
1719	struct sk_buff *skb;
1720
1721	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1722	if (!skb)
1723		return ERR_PTR(-ENOMEM);
1724
1725	tlv = (void *)skb->data;
1726	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SUSPEND_CMD);
1727	tlv->len = __cpu_to_le16(sizeof(*cmd));
1728	cmd = (void *)tlv->value;
1729	cmd->opt = __cpu_to_le32(opt);
1730
1731	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev suspend\n");
1732	return skb;
1733}
1734
1735static struct sk_buff *
1736ath10k_wmi_tlv_op_gen_pdev_resume(struct ath10k *ar)
1737{
1738	struct wmi_tlv_resume_cmd *cmd;
1739	struct wmi_tlv *tlv;
1740	struct sk_buff *skb;
1741
1742	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1743	if (!skb)
1744		return ERR_PTR(-ENOMEM);
1745
1746	tlv = (void *)skb->data;
1747	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_RESUME_CMD);
1748	tlv->len = __cpu_to_le16(sizeof(*cmd));
1749	cmd = (void *)tlv->value;
1750	cmd->reserved = __cpu_to_le32(0);
1751
1752	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev resume\n");
1753	return skb;
1754}
1755
1756static struct sk_buff *
1757ath10k_wmi_tlv_op_gen_pdev_set_rd(struct ath10k *ar,
1758				  u16 rd, u16 rd2g, u16 rd5g,
1759				  u16 ctl2g, u16 ctl5g,
1760				  enum wmi_dfs_region dfs_reg)
1761{
1762	struct wmi_tlv_pdev_set_rd_cmd *cmd;
1763	struct wmi_tlv *tlv;
1764	struct sk_buff *skb;
1765
1766	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1767	if (!skb)
1768		return ERR_PTR(-ENOMEM);
1769
1770	tlv = (void *)skb->data;
1771	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_REGDOMAIN_CMD);
1772	tlv->len = __cpu_to_le16(sizeof(*cmd));
1773	cmd = (void *)tlv->value;
1774	cmd->regd = __cpu_to_le32(rd);
1775	cmd->regd_2ghz = __cpu_to_le32(rd2g);
1776	cmd->regd_5ghz = __cpu_to_le32(rd5g);
1777	cmd->conform_limit_2ghz = __cpu_to_le32(ctl2g);
1778	cmd->conform_limit_5ghz = __cpu_to_le32(ctl5g);
1779
1780	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set rd\n");
1781	return skb;
1782}
1783
1784static enum wmi_txbf_conf ath10k_wmi_tlv_txbf_conf_scheme(struct ath10k *ar)
1785{
1786	return WMI_TXBF_CONF_AFTER_ASSOC;
1787}
1788
1789static struct sk_buff *
1790ath10k_wmi_tlv_op_gen_pdev_set_param(struct ath10k *ar, u32 param_id,
1791				     u32 param_value)
1792{
1793	struct wmi_tlv_pdev_set_param_cmd *cmd;
1794	struct wmi_tlv *tlv;
1795	struct sk_buff *skb;
1796
1797	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1798	if (!skb)
1799		return ERR_PTR(-ENOMEM);
1800
1801	tlv = (void *)skb->data;
1802	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_PARAM_CMD);
1803	tlv->len = __cpu_to_le16(sizeof(*cmd));
1804	cmd = (void *)tlv->value;
1805	cmd->param_id = __cpu_to_le32(param_id);
1806	cmd->param_value = __cpu_to_le32(param_value);
1807
1808	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set param %d value 0x%x\n",
1809		   param_id, param_value);
1810	return skb;
1811}
1812
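/* Serialize each host memory chunk allocated for the firmware as a
 * WLAN_HOST_MEMORY_CHUNK TLV (address, size, request id), including the
 * upper 32 address bits when the firmware supports extended addressing.
 */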
1813static void
1814ath10k_wmi_tlv_put_host_mem_chunks(struct ath10k *ar, void *host_mem_chunks)
1815{
1816	struct host_memory_chunk_tlv *chunk;
1817	struct wmi_tlv *tlv;
1818	dma_addr_t paddr;
1819	int i;
1820	__le16 tlv_len, tlv_tag;
1821
1822	tlv_tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WLAN_HOST_MEMORY_CHUNK);
1823	tlv_len = __cpu_to_le16(sizeof(*chunk));
1824	for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
1825		tlv = host_mem_chunks;
1826		tlv->tag = tlv_tag;
1827		tlv->len = tlv_len;
1828		chunk = (void *)tlv->value;
1829
1830		chunk->ptr = __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
1831		chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len);
1832		chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
1833
1834		if (test_bit(WMI_SERVICE_SUPPORT_EXTEND_ADDRESS,
1835			     ar->wmi.svc_map)) {
1836			paddr = ar->wmi.mem_chunks[i].paddr;
1837			chunk->ptr_high = __cpu_to_le32(upper_32_bits(paddr));
1838		}
1839
1840		ath10k_dbg(ar, ATH10K_DBG_WMI,
1841			   "wmi-tlv chunk %d len %d, addr 0x%llx, id 0x%x\n",
1842			   i,
1843			   ar->wmi.mem_chunks[i].len,
1844			   (unsigned long long)ar->wmi.mem_chunks[i].paddr,
1845			   ar->wmi.mem_chunks[i].req_id);
1846
1847		host_mem_chunks += sizeof(*tlv);
1848		host_mem_chunks += sizeof(*chunk);
1849	}
1850}
1851
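/* The init command is built from three consecutive TLVs: the fixed init cmd
 * (ABI version and chunk count), the resource config, and an ARRAY_STRUCT
 * holding the host memory chunk descriptors.
 */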
1852static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
1853{
1854	struct sk_buff *skb;
1855	struct wmi_tlv *tlv;
1856	struct wmi_tlv_init_cmd *cmd;
1857	struct wmi_tlv_resource_config *cfg;
1858	void *chunks;
1859	size_t len, chunks_len;
1860	void *ptr;
1861
1862	chunks_len = ar->wmi.num_mem_chunks *
1863		     (sizeof(struct host_memory_chunk_tlv) + sizeof(*tlv));
1864	len = (sizeof(*tlv) + sizeof(*cmd)) +
1865	      (sizeof(*tlv) + sizeof(*cfg)) +
1866	      (sizeof(*tlv) + chunks_len);
1867
1868	skb = ath10k_wmi_alloc_skb(ar, len);
1869	if (!skb)
1870		return ERR_PTR(-ENOMEM);
1871
1872	ptr = skb->data;
1873
1874	tlv = ptr;
1875	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_INIT_CMD);
1876	tlv->len = __cpu_to_le16(sizeof(*cmd));
1877	cmd = (void *)tlv->value;
1878	ptr += sizeof(*tlv);
1879	ptr += sizeof(*cmd);
1880
1881	tlv = ptr;
1882	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_RESOURCE_CONFIG);
1883	tlv->len = __cpu_to_le16(sizeof(*cfg));
1884	cfg = (void *)tlv->value;
1885	ptr += sizeof(*tlv);
1886	ptr += sizeof(*cfg);
1887
1888	tlv = ptr;
1889	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
1890	tlv->len = __cpu_to_le16(chunks_len);
1891	chunks = (void *)tlv->value;
1892
1893	ptr += sizeof(*tlv);
1894	ptr += chunks_len;
1895
1896	cmd->abi.abi_ver0 = __cpu_to_le32(WMI_TLV_ABI_VER0);
1897	cmd->abi.abi_ver1 = __cpu_to_le32(WMI_TLV_ABI_VER1);
1898	cmd->abi.abi_ver_ns0 = __cpu_to_le32(WMI_TLV_ABI_VER_NS0);
1899	cmd->abi.abi_ver_ns1 = __cpu_to_le32(WMI_TLV_ABI_VER_NS1);
1900	cmd->abi.abi_ver_ns2 = __cpu_to_le32(WMI_TLV_ABI_VER_NS2);
1901	cmd->abi.abi_ver_ns3 = __cpu_to_le32(WMI_TLV_ABI_VER_NS3);
1902	cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
1903
1904	cfg->num_vdevs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
1905
1906	if (ar->hw_params.num_peers)
1907		cfg->num_peers = __cpu_to_le32(ar->hw_params.num_peers);
1908	else
1909		cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS);
1910	cfg->ast_skid_limit = __cpu_to_le32(ar->hw_params.ast_skid_limit);
1911	cfg->num_wds_entries = __cpu_to_le32(ar->hw_params.num_wds_entries);
1912
1913	if (test_bit(WMI_SERVICE_RX_FULL_REORDER, ar->wmi.svc_map)) {
1914		cfg->num_offload_peers = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
1915		cfg->num_offload_reorder_bufs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
1916	} else {
1917		cfg->num_offload_peers = __cpu_to_le32(0);
1918		cfg->num_offload_reorder_bufs = __cpu_to_le32(0);
1919	}
1920
1921	cfg->num_peer_keys = __cpu_to_le32(2);
1922	if (ar->hw_params.num_peers)
1923		cfg->num_tids = __cpu_to_le32(ar->hw_params.num_peers * 2);
1924	else
1925		cfg->num_tids = __cpu_to_le32(TARGET_TLV_NUM_TIDS);
1926	cfg->tx_chain_mask = __cpu_to_le32(0x7);
1927	cfg->rx_chain_mask = __cpu_to_le32(0x7);
1928	cfg->rx_timeout_pri[0] = __cpu_to_le32(0x64);
1929	cfg->rx_timeout_pri[1] = __cpu_to_le32(0x64);
1930	cfg->rx_timeout_pri[2] = __cpu_to_le32(0x64);
1931	cfg->rx_timeout_pri[3] = __cpu_to_le32(0x28);
1932	cfg->rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
1933	cfg->scan_max_pending_reqs = __cpu_to_le32(4);
1934	cfg->bmiss_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
1935	cfg->roam_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
1936	cfg->roam_offload_max_ap_profiles = __cpu_to_le32(8);
1937	cfg->num_mcast_groups = __cpu_to_le32(0);
1938	cfg->num_mcast_table_elems = __cpu_to_le32(0);
1939	cfg->mcast2ucast_mode = __cpu_to_le32(0);
1940	cfg->tx_dbg_log_size = __cpu_to_le32(0x400);
1941	cfg->dma_burst_size = __cpu_to_le32(0);
1942	cfg->mac_aggr_delim = __cpu_to_le32(0);
1943	cfg->rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(0);
1944	cfg->vow_config = __cpu_to_le32(0);
1945	cfg->gtk_offload_max_vdev = __cpu_to_le32(2);
1946	cfg->num_msdu_desc = __cpu_to_le32(ar->htt.max_num_pending_tx);
1947	cfg->max_frag_entries = __cpu_to_le32(2);
1948	cfg->num_tdls_vdevs = __cpu_to_le32(TARGET_TLV_NUM_TDLS_VDEVS);
1949	cfg->num_tdls_conn_table_entries = __cpu_to_le32(0x20);
1950	cfg->beacon_tx_offload_max_vdev = __cpu_to_le32(2);
1951	cfg->num_multicast_filter_entries = __cpu_to_le32(5);
1952	cfg->num_wow_filters = __cpu_to_le32(ar->wow.max_num_patterns);
1953	cfg->num_keep_alive_pattern = __cpu_to_le32(6);
1954	cfg->keep_alive_pattern_size = __cpu_to_le32(0);
1955	cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1);
1956	cfg->max_tdls_concurrent_buffer_sta = __cpu_to_le32(1);
1957	cfg->wmi_send_separate = __cpu_to_le32(0);
1958	cfg->num_ocb_vdevs = __cpu_to_le32(0);
1959	cfg->num_ocb_channels = __cpu_to_le32(0);
1960	cfg->num_ocb_schedules = __cpu_to_le32(0);
1961	cfg->host_capab = __cpu_to_le32(WMI_TLV_FLAG_MGMT_BUNDLE_TX_COMPL);
1962
1963	if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
1964		cfg->host_capab |= __cpu_to_le32(WMI_RSRC_CFG_FLAG_TX_ACK_RSSI);
1965
1966	ath10k_wmi_tlv_put_host_mem_chunks(ar, chunks);
1967
1968	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv init\n");
1969	return skb;
1970}
1971
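/* The start scan command is laid out as the fixed scan cmd TLV followed by
 * four variable-length TLVs: the channel list (ARRAY_UINT32), the SSID list,
 * the BSSID list and the probe request IE blob (padded to 4 bytes).
 */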
1972static struct sk_buff *
1973ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar,
1974				 const struct wmi_start_scan_arg *arg)
1975{
1976	struct wmi_tlv_start_scan_cmd *cmd;
1977	struct wmi_tlv *tlv;
1978	struct sk_buff *skb;
1979	size_t len, chan_len, ssid_len, bssid_len, ie_len;
1980	__le32 *chans;
1981	struct wmi_ssid *ssids;
1982	struct wmi_mac_addr *addrs;
1983	void *ptr;
1984	int i, ret;
1985
1986	ret = ath10k_wmi_start_scan_verify(arg);
1987	if (ret)
1988		return ERR_PTR(ret);
1989
1990	chan_len = arg->n_channels * sizeof(__le32);
1991	ssid_len = arg->n_ssids * sizeof(struct wmi_ssid);
1992	bssid_len = arg->n_bssids * sizeof(struct wmi_mac_addr);
1993	ie_len = roundup(arg->ie_len, 4);
1994	len = (sizeof(*tlv) + sizeof(*cmd)) +
1995	      sizeof(*tlv) + chan_len +
1996	      sizeof(*tlv) + ssid_len +
1997	      sizeof(*tlv) + bssid_len +
1998	      sizeof(*tlv) + ie_len;
1999
2000	skb = ath10k_wmi_alloc_skb(ar, len);
2001	if (!skb)
2002		return ERR_PTR(-ENOMEM);
2003
2004	ptr = (void *)skb->data;
2005	tlv = ptr;
2006	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_START_SCAN_CMD);
2007	tlv->len = __cpu_to_le16(sizeof(*cmd));
2008	cmd = (void *)tlv->value;
2009
2010	ath10k_wmi_put_start_scan_common(&cmd->common, arg);
2011	cmd->burst_duration_ms = __cpu_to_le32(arg->burst_duration_ms);
2012	cmd->num_channels = __cpu_to_le32(arg->n_channels);
2013	cmd->num_ssids = __cpu_to_le32(arg->n_ssids);
2014	cmd->num_bssids = __cpu_to_le32(arg->n_bssids);
2015	cmd->ie_len = __cpu_to_le32(arg->ie_len);
2016	cmd->num_probes = __cpu_to_le32(3);
2017	ether_addr_copy(cmd->mac_addr.addr, arg->mac_addr.addr);
2018	ether_addr_copy(cmd->mac_mask.addr, arg->mac_mask.addr);
2019
2020	/* FIXME: There are some scan flag inconsistencies across firmwares,
2021	 * e.g. WMI-TLV inverts the logic behind the following flag.
2022	 */
2023	cmd->common.scan_ctrl_flags ^= __cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ);
2024
2025	ptr += sizeof(*tlv);
2026	ptr += sizeof(*cmd);
2027
2028	tlv = ptr;
2029	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
2030	tlv->len = __cpu_to_le16(chan_len);
2031	chans = (void *)tlv->value;
2032	for (i = 0; i < arg->n_channels; i++)
2033		chans[i] = __cpu_to_le32(arg->channels[i]);
2034
2035	ptr += sizeof(*tlv);
2036	ptr += chan_len;
2037
2038	tlv = ptr;
2039	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_FIXED_STRUCT);
2040	tlv->len = __cpu_to_le16(ssid_len);
2041	ssids = (void *)tlv->value;
2042	for (i = 0; i < arg->n_ssids; i++) {
2043		ssids[i].ssid_len = __cpu_to_le32(arg->ssids[i].len);
2044		memcpy(ssids[i].ssid, arg->ssids[i].ssid, arg->ssids[i].len);
2045	}
2046
2047	ptr += sizeof(*tlv);
2048	ptr += ssid_len;
2049
2050	tlv = ptr;
2051	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_FIXED_STRUCT);
2052	tlv->len = __cpu_to_le16(bssid_len);
2053	addrs = (void *)tlv->value;
2054	for (i = 0; i < arg->n_bssids; i++)
2055		ether_addr_copy(addrs[i].addr, arg->bssids[i].bssid);
2056
2057	ptr += sizeof(*tlv);
2058	ptr += bssid_len;
2059
2060	tlv = ptr;
2061	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
2062	tlv->len = __cpu_to_le16(ie_len);
2063	memcpy(tlv->value, arg->ie, arg->ie_len);
2064
2065	ptr += sizeof(*tlv);
2066	ptr += ie_len;
2067
2068	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv start scan\n");
2069	return skb;
2070}
2071
2072static struct sk_buff *
2073ath10k_wmi_tlv_op_gen_stop_scan(struct ath10k *ar,
2074				const struct wmi_stop_scan_arg *arg)
2075{
2076	struct wmi_stop_scan_cmd *cmd;
2077	struct wmi_tlv *tlv;
2078	struct sk_buff *skb;
2079	u32 scan_id;
2080	u32 req_id;
2081
2082	if (arg->req_id > 0xFFF)
2083		return ERR_PTR(-EINVAL);
2084	if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
2085		return ERR_PTR(-EINVAL);
2086
2087	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2088	if (!skb)
2089		return ERR_PTR(-ENOMEM);
2090
2091	scan_id = arg->u.scan_id;
2092	scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;
2093
2094	req_id = arg->req_id;
2095	req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
2096
2097	tlv = (void *)skb->data;
2098	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STOP_SCAN_CMD);
2099	tlv->len = __cpu_to_le16(sizeof(*cmd));
2100	cmd = (void *)tlv->value;
2101	cmd->req_type = __cpu_to_le32(arg->req_type);
2102	cmd->vdev_id = __cpu_to_le32(arg->u.vdev_id);
2103	cmd->scan_id = __cpu_to_le32(scan_id);
2104	cmd->scan_req_id = __cpu_to_le32(req_id);
2105
2106	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv stop scan\n");
2107	return skb;
2108}
2109
2110static int ath10k_wmi_tlv_op_get_vdev_subtype(struct ath10k *ar,
2111					      enum wmi_vdev_subtype subtype)
2112{
2113	switch (subtype) {
2114	case WMI_VDEV_SUBTYPE_NONE:
2115		return WMI_TLV_VDEV_SUBTYPE_NONE;
2116	case WMI_VDEV_SUBTYPE_P2P_DEVICE:
2117		return WMI_TLV_VDEV_SUBTYPE_P2P_DEV;
2118	case WMI_VDEV_SUBTYPE_P2P_CLIENT:
2119		return WMI_TLV_VDEV_SUBTYPE_P2P_CLI;
2120	case WMI_VDEV_SUBTYPE_P2P_GO:
2121		return WMI_TLV_VDEV_SUBTYPE_P2P_GO;
2122	case WMI_VDEV_SUBTYPE_PROXY_STA:
2123		return WMI_TLV_VDEV_SUBTYPE_PROXY_STA;
2124	case WMI_VDEV_SUBTYPE_MESH_11S:
2125		return WMI_TLV_VDEV_SUBTYPE_MESH_11S;
2126	case WMI_VDEV_SUBTYPE_MESH_NON_11S:
2127		return -EOPNOTSUPP;
2128	}
2129	return -EOPNOTSUPP;
2130}
2131
2132static struct sk_buff *
2133ath10k_wmi_tlv_op_gen_vdev_create(struct ath10k *ar,
2134				  u32 vdev_id,
2135				  enum wmi_vdev_type vdev_type,
2136				  enum wmi_vdev_subtype vdev_subtype,
2137				  const u8 mac_addr[ETH_ALEN])
2138{
2139	struct wmi_vdev_create_cmd *cmd;
2140	struct wmi_tlv *tlv;
2141	struct sk_buff *skb;
2142
2143	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2144	if (!skb)
2145		return ERR_PTR(-ENOMEM);
2146
2147	tlv = (void *)skb->data;
2148	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_CREATE_CMD);
2149	tlv->len = __cpu_to_le16(sizeof(*cmd));
2150	cmd = (void *)tlv->value;
2151	cmd->vdev_id = __cpu_to_le32(vdev_id);
2152	cmd->vdev_type = __cpu_to_le32(vdev_type);
2153	cmd->vdev_subtype = __cpu_to_le32(vdev_subtype);
2154	ether_addr_copy(cmd->vdev_macaddr.addr, mac_addr);
2155
2156	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev create\n");
2157	return skb;
2158}
2159
2160static struct sk_buff *
2161ath10k_wmi_tlv_op_gen_vdev_delete(struct ath10k *ar, u32 vdev_id)
2162{
2163	struct wmi_vdev_delete_cmd *cmd;
2164	struct wmi_tlv *tlv;
2165	struct sk_buff *skb;
2166
2167	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2168	if (!skb)
2169		return ERR_PTR(-ENOMEM);
2170
2171	tlv = (void *)skb->data;
2172	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_DELETE_CMD);
2173	tlv->len = __cpu_to_le16(sizeof(*cmd));
2174	cmd = (void *)tlv->value;
2175	cmd->vdev_id = __cpu_to_le32(vdev_id);
2176
2177	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev delete\n");
2178	return skb;
2179}
2180
2181static struct sk_buff *
2182ath10k_wmi_tlv_op_gen_vdev_start(struct ath10k *ar,
2183				 const struct wmi_vdev_start_request_arg *arg,
2184				 bool restart)
2185{
2186	struct wmi_tlv_vdev_start_cmd *cmd;
2187	struct wmi_channel *ch;
2188	struct wmi_tlv *tlv;
2189	struct sk_buff *skb;
2190	size_t len;
2191	void *ptr;
2192	u32 flags = 0;
2193
2194	if (WARN_ON(arg->hidden_ssid && !arg->ssid))
2195		return ERR_PTR(-EINVAL);
2196	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
2197		return ERR_PTR(-EINVAL);
2198
2199	len = (sizeof(*tlv) + sizeof(*cmd)) +
2200	      (sizeof(*tlv) + sizeof(*ch)) +
2201	      (sizeof(*tlv) + 0);
2202	skb = ath10k_wmi_alloc_skb(ar, len);
2203	if (!skb)
2204		return ERR_PTR(-ENOMEM);
2205
2206	if (arg->hidden_ssid)
2207		flags |= WMI_VDEV_START_HIDDEN_SSID;
2208	if (arg->pmf_enabled)
2209		flags |= WMI_VDEV_START_PMF_ENABLED;
2210
2211	ptr = (void *)skb->data;
2212
2213	tlv = ptr;
2214	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_START_REQUEST_CMD);
2215	tlv->len = __cpu_to_le16(sizeof(*cmd));
2216	cmd = (void *)tlv->value;
2217	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
2218	cmd->bcn_intval = __cpu_to_le32(arg->bcn_intval);
2219	cmd->dtim_period = __cpu_to_le32(arg->dtim_period);
2220	cmd->flags = __cpu_to_le32(flags);
2221	cmd->bcn_tx_rate = __cpu_to_le32(arg->bcn_tx_rate);
2222	cmd->bcn_tx_power = __cpu_to_le32(arg->bcn_tx_power);
2223	cmd->disable_hw_ack = __cpu_to_le32(arg->disable_hw_ack);
2224
2225	if (arg->ssid) {
2226		cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len);
2227		memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
2228	}
2229
2230	ptr += sizeof(*tlv);
2231	ptr += sizeof(*cmd);
2232
2233	tlv = ptr;
2234	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
2235	tlv->len = __cpu_to_le16(sizeof(*ch));
2236	ch = (void *)tlv->value;
2237	ath10k_wmi_put_wmi_channel(ar, ch, &arg->channel);
2238
2239	ptr += sizeof(*tlv);
2240	ptr += sizeof(*ch);
2241
2242	tlv = ptr;
2243	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
2244	tlv->len = 0;
2245
 2246	/* Note: this is a nested TLV containing
 2247	 * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]...
 2248	 */
2249
2250	ptr += sizeof(*tlv);
2251	ptr += 0;
2252
2253	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev start\n");
2254	return skb;
2255}
2256
2257static struct sk_buff *
2258ath10k_wmi_tlv_op_gen_vdev_stop(struct ath10k *ar, u32 vdev_id)
2259{
2260	struct wmi_vdev_stop_cmd *cmd;
2261	struct wmi_tlv *tlv;
2262	struct sk_buff *skb;
2263
2264	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2265	if (!skb)
2266		return ERR_PTR(-ENOMEM);
2267
2268	tlv = (void *)skb->data;
2269	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_STOP_CMD);
2270	tlv->len = __cpu_to_le16(sizeof(*cmd));
2271	cmd = (void *)tlv->value;
2272	cmd->vdev_id = __cpu_to_le32(vdev_id);
2273
2274	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev stop\n");
2275	return skb;
2276}
2277
2278static struct sk_buff *
2279ath10k_wmi_tlv_op_gen_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
2280			      const u8 *bssid)
2281
2282{
2283	struct wmi_vdev_up_cmd *cmd;
2284	struct wmi_tlv *tlv;
2285	struct sk_buff *skb;
2286
2287	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2288	if (!skb)
2289		return ERR_PTR(-ENOMEM);
2290
2291	tlv = (void *)skb->data;
2292	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_UP_CMD);
2293	tlv->len = __cpu_to_le16(sizeof(*cmd));
2294	cmd = (void *)tlv->value;
2295	cmd->vdev_id = __cpu_to_le32(vdev_id);
2296	cmd->vdev_assoc_id = __cpu_to_le32(aid);
2297	ether_addr_copy(cmd->vdev_bssid.addr, bssid);
2298
2299	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev up\n");
2300	return skb;
2301}
2302
2303static struct sk_buff *
2304ath10k_wmi_tlv_op_gen_vdev_down(struct ath10k *ar, u32 vdev_id)
2305{
2306	struct wmi_vdev_down_cmd *cmd;
2307	struct wmi_tlv *tlv;
2308	struct sk_buff *skb;
2309
2310	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2311	if (!skb)
2312		return ERR_PTR(-ENOMEM);
2313
2314	tlv = (void *)skb->data;
2315	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_DOWN_CMD);
2316	tlv->len = __cpu_to_le16(sizeof(*cmd));
2317	cmd = (void *)tlv->value;
2318	cmd->vdev_id = __cpu_to_le32(vdev_id);
2319
2320	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev down\n");
2321	return skb;
2322}
2323
2324static struct sk_buff *
2325ath10k_wmi_tlv_op_gen_vdev_set_param(struct ath10k *ar, u32 vdev_id,
2326				     u32 param_id, u32 param_value)
2327{
2328	struct wmi_vdev_set_param_cmd *cmd;
2329	struct wmi_tlv *tlv;
2330	struct sk_buff *skb;
2331
2332	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2333	if (!skb)
2334		return ERR_PTR(-ENOMEM);
2335
2336	tlv = (void *)skb->data;
2337	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SET_PARAM_CMD);
2338	tlv->len = __cpu_to_le16(sizeof(*cmd));
2339	cmd = (void *)tlv->value;
2340	cmd->vdev_id = __cpu_to_le32(vdev_id);
2341	cmd->param_id = __cpu_to_le32(param_id);
2342	cmd->param_value = __cpu_to_le32(param_value);
2343
2344	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev %d set param %d value 0x%x\n",
2345		   vdev_id, param_id, param_value);
2346	return skb;
2347}
2348
2349static struct sk_buff *
2350ath10k_wmi_tlv_op_gen_vdev_install_key(struct ath10k *ar,
2351				       const struct wmi_vdev_install_key_arg *arg)
2352{
2353	struct wmi_vdev_install_key_cmd *cmd;
2354	struct wmi_tlv *tlv;
2355	struct sk_buff *skb;
2356	size_t len;
2357	void *ptr;
2358
2359	if (arg->key_cipher == ar->wmi_key_cipher[WMI_CIPHER_NONE] &&
2360	    arg->key_data)
2361		return ERR_PTR(-EINVAL);
2362	if (arg->key_cipher != ar->wmi_key_cipher[WMI_CIPHER_NONE] &&
2363	    !arg->key_data)
2364		return ERR_PTR(-EINVAL);
2365
2366	len = sizeof(*tlv) + sizeof(*cmd) +
2367	      sizeof(*tlv) + roundup(arg->key_len, sizeof(__le32));
2368	skb = ath10k_wmi_alloc_skb(ar, len);
2369	if (!skb)
2370		return ERR_PTR(-ENOMEM);
2371
2372	ptr = (void *)skb->data;
2373	tlv = ptr;
2374	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_INSTALL_KEY_CMD);
2375	tlv->len = __cpu_to_le16(sizeof(*cmd));
2376	cmd = (void *)tlv->value;
2377	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
2378	cmd->key_idx = __cpu_to_le32(arg->key_idx);
2379	cmd->key_flags = __cpu_to_le32(arg->key_flags);
2380	cmd->key_cipher = __cpu_to_le32(arg->key_cipher);
2381	cmd->key_len = __cpu_to_le32(arg->key_len);
2382	cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len);
2383	cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len);
2384
2385	if (arg->macaddr)
2386		ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
2387
2388	ptr += sizeof(*tlv);
2389	ptr += sizeof(*cmd);
2390
2391	tlv = ptr;
2392	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
2393	tlv->len = __cpu_to_le16(roundup(arg->key_len, sizeof(__le32)));
2394	if (arg->key_data)
2395		memcpy(tlv->value, arg->key_data, arg->key_len);
2396
2397	ptr += sizeof(*tlv);
2398	ptr += roundup(arg->key_len, sizeof(__le32));
2399
2400	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev install key\n");
2401	return skb;
2402}
2403
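/* Append a single STA_UAPSD_AUTO_TRIG_PARAM TLV at ptr and return the
 * pointer advanced past it.
 */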
2404static void *ath10k_wmi_tlv_put_uapsd_ac(struct ath10k *ar, void *ptr,
2405					 const struct wmi_sta_uapsd_auto_trig_arg *arg)
2406{
2407	struct wmi_sta_uapsd_auto_trig_param *ac;
2408	struct wmi_tlv *tlv;
2409
2410	tlv = ptr;
2411	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_PARAM);
2412	tlv->len = __cpu_to_le16(sizeof(*ac));
2413	ac = (void *)tlv->value;
2414
2415	ac->wmm_ac = __cpu_to_le32(arg->wmm_ac);
2416	ac->user_priority = __cpu_to_le32(arg->user_priority);
2417	ac->service_interval = __cpu_to_le32(arg->service_interval);
2418	ac->suspend_interval = __cpu_to_le32(arg->suspend_interval);
2419	ac->delay_interval = __cpu_to_le32(arg->delay_interval);
2420
2421	ath10k_dbg(ar, ATH10K_DBG_WMI,
2422		   "wmi tlv vdev sta uapsd auto trigger ac %d prio %d svc int %d susp int %d delay int %d\n",
2423		   ac->wmm_ac, ac->user_priority, ac->service_interval,
2424		   ac->suspend_interval, ac->delay_interval);
2425
2426	return ptr + sizeof(*tlv) + sizeof(*ac);
2427}
2428
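/* Build a STA_UAPSD_AUTO_TRIG command: a fixed parameter TLV followed by an
 * array-of-structs TLV with one auto trigger parameter TLV per access
 * category.
 */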
2429static struct sk_buff *
2430ath10k_wmi_tlv_op_gen_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
2431				     const u8 peer_addr[ETH_ALEN],
2432				     const struct wmi_sta_uapsd_auto_trig_arg *args,
2433				     u32 num_ac)
2434{
2435	struct wmi_sta_uapsd_auto_trig_cmd_fixed_param *cmd;
2436	struct wmi_sta_uapsd_auto_trig_param *ac;
2437	struct wmi_tlv *tlv;
2438	struct sk_buff *skb;
2439	size_t len;
2440	size_t ac_tlv_len;
2441	void *ptr;
2442	int i;
2443
2444	ac_tlv_len = num_ac * (sizeof(*tlv) + sizeof(*ac));
2445	len = sizeof(*tlv) + sizeof(*cmd) +
2446	      sizeof(*tlv) + ac_tlv_len;
2447	skb = ath10k_wmi_alloc_skb(ar, len);
2448	if (!skb)
2449		return ERR_PTR(-ENOMEM);
2450
2451	ptr = (void *)skb->data;
2452	tlv = ptr;
2453	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_CMD);
2454	tlv->len = __cpu_to_le16(sizeof(*cmd));
2455	cmd = (void *)tlv->value;
2456	cmd->vdev_id = __cpu_to_le32(vdev_id);
2457	cmd->num_ac = __cpu_to_le32(num_ac);
2458	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
2459
2460	ptr += sizeof(*tlv);
2461	ptr += sizeof(*cmd);
2462
2463	tlv = ptr;
2464	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
2465	tlv->len = __cpu_to_le16(ac_tlv_len);
2466	ac = (void *)tlv->value;
2467
2468	ptr += sizeof(*tlv);
2469	for (i = 0; i < num_ac; i++)
2470		ptr = ath10k_wmi_tlv_put_uapsd_ac(ar, ptr, &args[i]);
2471
2472	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev sta uapsd auto trigger\n");
2473	return skb;
2474}
2475
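/* Append a single WMM_PARAMS TLV at ptr and return the pointer advanced
 * past it.
 */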
2476static void *ath10k_wmi_tlv_put_wmm(void *ptr,
2477				    const struct wmi_wmm_params_arg *arg)
2478{
2479	struct wmi_wmm_params *wmm;
2480	struct wmi_tlv *tlv;
2481
2482	tlv = ptr;
2483	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WMM_PARAMS);
2484	tlv->len = __cpu_to_le16(sizeof(*wmm));
2485	wmm = (void *)tlv->value;
2486	ath10k_wmi_set_wmm_param(wmm, arg);
2487
2488	return ptr + sizeof(*tlv) + sizeof(*wmm);
2489}
2490
2491static struct sk_buff *
2492ath10k_wmi_tlv_op_gen_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
2493				    const struct wmi_wmm_params_all_arg *arg)
2494{
2495	struct wmi_tlv_vdev_set_wmm_cmd *cmd;
2496	struct wmi_tlv *tlv;
2497	struct sk_buff *skb;
2498	size_t len;
2499	void *ptr;
2500
2501	len = sizeof(*tlv) + sizeof(*cmd);
2502	skb = ath10k_wmi_alloc_skb(ar, len);
2503	if (!skb)
2504		return ERR_PTR(-ENOMEM);
2505
2506	ptr = (void *)skb->data;
2507	tlv = ptr;
2508	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SET_WMM_PARAMS_CMD);
2509	tlv->len = __cpu_to_le16(sizeof(*cmd));
2510	cmd = (void *)tlv->value;
2511	cmd->vdev_id = __cpu_to_le32(vdev_id);
2512
2513	ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[0].params, &arg->ac_be);
2514	ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[1].params, &arg->ac_bk);
2515	ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[2].params, &arg->ac_vi);
2516	ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[3].params, &arg->ac_vo);
2517
2518	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev wmm conf\n");
2519	return skb;
2520}
2521
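/* Build a STA_KEEPALIVE command: a fixed parameter TLV followed by the ARP
 * response parameters TLV.
 */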
2522static struct sk_buff *
2523ath10k_wmi_tlv_op_gen_sta_keepalive(struct ath10k *ar,
2524				    const struct wmi_sta_keepalive_arg *arg)
2525{
2526	struct wmi_tlv_sta_keepalive_cmd *cmd;
2527	struct wmi_sta_keepalive_arp_resp *arp;
2528	struct sk_buff *skb;
2529	struct wmi_tlv *tlv;
2530	void *ptr;
2531	size_t len;
2532
2533	len = sizeof(*tlv) + sizeof(*cmd) +
2534	      sizeof(*tlv) + sizeof(*arp);
2535	skb = ath10k_wmi_alloc_skb(ar, len);
2536	if (!skb)
2537		return ERR_PTR(-ENOMEM);
2538
2539	ptr = (void *)skb->data;
2540	tlv = ptr;
2541	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALIVE_CMD);
2542	tlv->len = __cpu_to_le16(sizeof(*cmd));
2543	cmd = (void *)tlv->value;
2544	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
2545	cmd->enabled = __cpu_to_le32(arg->enabled);
2546	cmd->method = __cpu_to_le32(arg->method);
2547	cmd->interval = __cpu_to_le32(arg->interval);
2548
2549	ptr += sizeof(*tlv);
2550	ptr += sizeof(*cmd);
2551
2552	tlv = ptr;
2553	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALVE_ARP_RESPONSE);
2554	tlv->len = __cpu_to_le16(sizeof(*arp));
2555	arp = (void *)tlv->value;
2556
2557	arp->src_ip4_addr = arg->src_ip4_addr;
2558	arp->dest_ip4_addr = arg->dest_ip4_addr;
2559	ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
2560
2561	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv sta keepalive vdev %d enabled %d method %d interval %d\n",
2562		   arg->vdev_id, arg->enabled, arg->method, arg->interval);
2563	return skb;
2564}
2565
2566static struct sk_buff *
2567ath10k_wmi_tlv_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
2568				  const u8 peer_addr[ETH_ALEN],
2569				  enum wmi_peer_type peer_type)
2570{
2571	struct wmi_tlv_peer_create_cmd *cmd;
2572	struct wmi_tlv *tlv;
2573	struct sk_buff *skb;
2574
2575	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2576	if (!skb)
2577		return ERR_PTR(-ENOMEM);
2578
2579	tlv = (void *)skb->data;
2580	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_CREATE_CMD);
2581	tlv->len = __cpu_to_le16(sizeof(*cmd));
2582	cmd = (void *)tlv->value;
2583	cmd->vdev_id = __cpu_to_le32(vdev_id);
2584	cmd->peer_type = __cpu_to_le32(peer_type);
2585	ether_addr_copy(cmd->peer_addr.addr, peer_addr);
2586
2587	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer create\n");
2588	return skb;
2589}
2590
2591static struct sk_buff *
2592ath10k_wmi_tlv_op_gen_peer_delete(struct ath10k *ar, u32 vdev_id,
2593				  const u8 peer_addr[ETH_ALEN])
2594{
2595	struct wmi_peer_delete_cmd *cmd;
2596	struct wmi_tlv *tlv;
2597	struct sk_buff *skb;
2598
2599	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2600	if (!skb)
2601		return ERR_PTR(-ENOMEM);
2602
2603	tlv = (void *)skb->data;
2604	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_DELETE_CMD);
2605	tlv->len = __cpu_to_le16(sizeof(*cmd));
2606	cmd = (void *)tlv->value;
2607	cmd->vdev_id = __cpu_to_le32(vdev_id);
2608	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
2609
2610	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer delete\n");
2611	return skb;
2612}
2613
2614static struct sk_buff *
2615ath10k_wmi_tlv_op_gen_peer_flush(struct ath10k *ar, u32 vdev_id,
2616				 const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
2617{
2618	struct wmi_peer_flush_tids_cmd *cmd;
2619	struct wmi_tlv *tlv;
2620	struct sk_buff *skb;
2621
2622	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2623	if (!skb)
2624		return ERR_PTR(-ENOMEM);
2625
2626	tlv = (void *)skb->data;
2627	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_FLUSH_TIDS_CMD);
2628	tlv->len = __cpu_to_le16(sizeof(*cmd));
2629	cmd = (void *)tlv->value;
2630	cmd->vdev_id = __cpu_to_le32(vdev_id);
2631	cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
2632	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
2633
2634	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer flush\n");
2635	return skb;
2636}
2637
2638static struct sk_buff *
2639ath10k_wmi_tlv_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id,
2640				     const u8 *peer_addr,
2641				     enum wmi_peer_param param_id,
2642				     u32 param_value)
2643{
2644	struct wmi_peer_set_param_cmd *cmd;
2645	struct wmi_tlv *tlv;
2646	struct sk_buff *skb;
2647
2648	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2649	if (!skb)
2650		return ERR_PTR(-ENOMEM);
2651
2652	tlv = (void *)skb->data;
2653	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_SET_PARAM_CMD);
2654	tlv->len = __cpu_to_le16(sizeof(*cmd));
2655	cmd = (void *)tlv->value;
2656	cmd->vdev_id = __cpu_to_le32(vdev_id);
2657	cmd->param_id = __cpu_to_le32(param_id);
2658	cmd->param_value = __cpu_to_le32(param_value);
2659	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
2660
2661	ath10k_dbg(ar, ATH10K_DBG_WMI,
2662		   "wmi tlv vdev %d peer %pM set param %d value 0x%x\n",
2663		   vdev_id, peer_addr, param_id, param_value);
2664	return skb;
2665}
2666
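/* Build a PEER_ASSOC_COMPLETE command: fixed parameters, byte array TLVs
 * with the legacy and HT rate sets (padded to 4 bytes) and a VHT rate set
 * TLV.
 */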
2667static struct sk_buff *
2668ath10k_wmi_tlv_op_gen_peer_assoc(struct ath10k *ar,
2669				 const struct wmi_peer_assoc_complete_arg *arg)
2670{
2671	struct wmi_tlv_peer_assoc_cmd *cmd;
2672	struct wmi_vht_rate_set *vht_rate;
2673	struct wmi_tlv *tlv;
2674	struct sk_buff *skb;
2675	size_t len, legacy_rate_len, ht_rate_len;
2676	void *ptr;
2677
2678	if (arg->peer_mpdu_density > 16)
2679		return ERR_PTR(-EINVAL);
2680	if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
2681		return ERR_PTR(-EINVAL);
2682	if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
2683		return ERR_PTR(-EINVAL);
2684
2685	legacy_rate_len = roundup(arg->peer_legacy_rates.num_rates,
2686				  sizeof(__le32));
2687	ht_rate_len = roundup(arg->peer_ht_rates.num_rates, sizeof(__le32));
2688	len = (sizeof(*tlv) + sizeof(*cmd)) +
2689	      (sizeof(*tlv) + legacy_rate_len) +
2690	      (sizeof(*tlv) + ht_rate_len) +
2691	      (sizeof(*tlv) + sizeof(*vht_rate));
2692	skb = ath10k_wmi_alloc_skb(ar, len);
2693	if (!skb)
2694		return ERR_PTR(-ENOMEM);
2695
2696	ptr = (void *)skb->data;
2697	tlv = ptr;
2698	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_ASSOC_COMPLETE_CMD);
2699	tlv->len = __cpu_to_le16(sizeof(*cmd));
2700	cmd = (void *)tlv->value;
2701
2702	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
2703	cmd->new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
2704	cmd->assoc_id = __cpu_to_le32(arg->peer_aid);
2705	cmd->flags = __cpu_to_le32(arg->peer_flags);
2706	cmd->caps = __cpu_to_le32(arg->peer_caps);
2707	cmd->listen_intval = __cpu_to_le32(arg->peer_listen_intval);
2708	cmd->ht_caps = __cpu_to_le32(arg->peer_ht_caps);
2709	cmd->max_mpdu = __cpu_to_le32(arg->peer_max_mpdu);
2710	cmd->mpdu_density = __cpu_to_le32(arg->peer_mpdu_density);
2711	cmd->rate_caps = __cpu_to_le32(arg->peer_rate_caps);
2712	cmd->nss = __cpu_to_le32(arg->peer_num_spatial_streams);
2713	cmd->vht_caps = __cpu_to_le32(arg->peer_vht_caps);
2714	cmd->phy_mode = __cpu_to_le32(arg->peer_phymode);
2715	cmd->num_legacy_rates = __cpu_to_le32(arg->peer_legacy_rates.num_rates);
2716	cmd->num_ht_rates = __cpu_to_le32(arg->peer_ht_rates.num_rates);
2717	ether_addr_copy(cmd->mac_addr.addr, arg->addr);
2718
2719	ptr += sizeof(*tlv);
2720	ptr += sizeof(*cmd);
2721
2722	tlv = ptr;
2723	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
2724	tlv->len = __cpu_to_le16(legacy_rate_len);
2725	memcpy(tlv->value, arg->peer_legacy_rates.rates,
2726	       arg->peer_legacy_rates.num_rates);
2727
2728	ptr += sizeof(*tlv);
2729	ptr += legacy_rate_len;
2730
2731	tlv = ptr;
2732	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
2733	tlv->len = __cpu_to_le16(ht_rate_len);
2734	memcpy(tlv->value, arg->peer_ht_rates.rates,
2735	       arg->peer_ht_rates.num_rates);
2736
2737	ptr += sizeof(*tlv);
2738	ptr += ht_rate_len;
2739
2740	tlv = ptr;
2741	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VHT_RATE_SET);
2742	tlv->len = __cpu_to_le16(sizeof(*vht_rate));
2743	vht_rate = (void *)tlv->value;
2744
2745	vht_rate->rx_max_rate = __cpu_to_le32(arg->peer_vht_rates.rx_max_rate);
2746	vht_rate->rx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.rx_mcs_set);
2747	vht_rate->tx_max_rate = __cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
2748	vht_rate->tx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
2749
2750	ptr += sizeof(*tlv);
2751	ptr += sizeof(*vht_rate);
2752
2753	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer assoc\n");
2754	return skb;
2755}
2756
2757static struct sk_buff *
2758ath10k_wmi_tlv_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id,
2759				 enum wmi_sta_ps_mode psmode)
2760{
2761	struct wmi_sta_powersave_mode_cmd *cmd;
2762	struct wmi_tlv *tlv;
2763	struct sk_buff *skb;
2764
2765	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2766	if (!skb)
2767		return ERR_PTR(-ENOMEM);
2768
2769	tlv = (void *)skb->data;
2770	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_POWERSAVE_MODE_CMD);
2771	tlv->len = __cpu_to_le16(sizeof(*cmd));
2772	cmd = (void *)tlv->value;
2773	cmd->vdev_id = __cpu_to_le32(vdev_id);
2774	cmd->sta_ps_mode = __cpu_to_le32(psmode);
2775
2776	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set psmode\n");
2777	return skb;
2778}
2779
2780static struct sk_buff *
2781ath10k_wmi_tlv_op_gen_set_sta_ps(struct ath10k *ar, u32 vdev_id,
2782				 enum wmi_sta_powersave_param param_id,
2783				 u32 param_value)
2784{
2785	struct wmi_sta_powersave_param_cmd *cmd;
2786	struct wmi_tlv *tlv;
2787	struct sk_buff *skb;
2788
2789	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2790	if (!skb)
2791		return ERR_PTR(-ENOMEM);
2792
2793	tlv = (void *)skb->data;
2794	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_POWERSAVE_PARAM_CMD);
2795	tlv->len = __cpu_to_le16(sizeof(*cmd));
2796	cmd = (void *)tlv->value;
2797	cmd->vdev_id = __cpu_to_le32(vdev_id);
2798	cmd->param_id = __cpu_to_le32(param_id);
2799	cmd->param_value = __cpu_to_le32(param_value);
2800
2801	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set sta ps\n");
2802	return skb;
2803}
2804
2805static struct sk_buff *
2806ath10k_wmi_tlv_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac,
2807				enum wmi_ap_ps_peer_param param_id, u32 value)
2808{
2809	struct wmi_ap_ps_peer_cmd *cmd;
2810	struct wmi_tlv *tlv;
2811	struct sk_buff *skb;
2812
2813	if (!mac)
2814		return ERR_PTR(-EINVAL);
2815
2816	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2817	if (!skb)
2818		return ERR_PTR(-ENOMEM);
2819
2820	tlv = (void *)skb->data;
2821	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_AP_PS_PEER_CMD);
2822	tlv->len = __cpu_to_le16(sizeof(*cmd));
2823	cmd = (void *)tlv->value;
2824	cmd->vdev_id = __cpu_to_le32(vdev_id);
2825	cmd->param_id = __cpu_to_le32(param_id);
2826	cmd->param_value = __cpu_to_le32(value);
2827	ether_addr_copy(cmd->peer_macaddr.addr, mac);
2828
2829	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv ap ps param\n");
2830	return skb;
2831}
2832
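/* Build a SCAN_CHAN_LIST command: a fixed parameter TLV followed by an
 * array-of-structs TLV containing one CHANNEL TLV per channel.
 */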
2833static struct sk_buff *
2834ath10k_wmi_tlv_op_gen_scan_chan_list(struct ath10k *ar,
2835				     const struct wmi_scan_chan_list_arg *arg)
2836{
2837	struct wmi_tlv_scan_chan_list_cmd *cmd;
2838	struct wmi_channel *ci;
2839	struct wmi_channel_arg *ch;
2840	struct wmi_tlv *tlv;
2841	struct sk_buff *skb;
2842	size_t chans_len, len;
2843	int i;
2844	void *ptr, *chans;
2845
2846	chans_len = arg->n_channels * (sizeof(*tlv) + sizeof(*ci));
2847	len = (sizeof(*tlv) + sizeof(*cmd)) +
2848	      (sizeof(*tlv) + chans_len);
2849
2850	skb = ath10k_wmi_alloc_skb(ar, len);
2851	if (!skb)
2852		return ERR_PTR(-ENOMEM);
2853
2854	ptr = (void *)skb->data;
2855	tlv = ptr;
2856	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_SCAN_CHAN_LIST_CMD);
2857	tlv->len = __cpu_to_le16(sizeof(*cmd));
2858	cmd = (void *)tlv->value;
2859	cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);
2860
2861	ptr += sizeof(*tlv);
2862	ptr += sizeof(*cmd);
2863
2864	tlv = ptr;
2865	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
2866	tlv->len = __cpu_to_le16(chans_len);
2867	chans = (void *)tlv->value;
2868
2869	for (i = 0; i < arg->n_channels; i++) {
2870		ch = &arg->channels[i];
2871
2872		tlv = chans;
2873		tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
2874		tlv->len = __cpu_to_le16(sizeof(*ci));
2875		ci = (void *)tlv->value;
2876
2877		ath10k_wmi_put_wmi_channel(ar, ci, ch);
2878
2879		chans += sizeof(*tlv);
2880		chans += sizeof(*ci);
2881	}
2882
2883	ptr += sizeof(*tlv);
2884	ptr += chans_len;
2885
2886	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv scan chan list\n");
2887	return skb;
2888}
2889
2890static struct sk_buff *
2891ath10k_wmi_tlv_op_gen_scan_prob_req_oui(struct ath10k *ar, u32 prob_req_oui)
2892{
2893	struct wmi_scan_prob_req_oui_cmd *cmd;
2894	struct wmi_tlv *tlv;
2895	struct sk_buff *skb;
2896
2897	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2898	if (!skb)
2899		return ERR_PTR(-ENOMEM);
2900
2901	tlv = (void *)skb->data;
2902	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_SCAN_PROB_REQ_OUI_CMD);
2903	tlv->len = __cpu_to_le16(sizeof(*cmd));
2904	cmd = (void *)tlv->value;
2905	cmd->prob_req_oui = __cpu_to_le32(prob_req_oui);
2906
2907	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv scan prob req oui\n");
2908	return skb;
2909}
2910
2911static struct sk_buff *
2912ath10k_wmi_tlv_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id,
2913				 const void *bcn, size_t bcn_len,
2914				 u32 bcn_paddr, bool dtim_zero,
2915				 bool deliver_cab)
2916
2917{
2918	struct wmi_bcn_tx_ref_cmd *cmd;
2919	struct wmi_tlv *tlv;
2920	struct sk_buff *skb;
2921	struct ieee80211_hdr *hdr;
2922	u16 fc;
2923
2924	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2925	if (!skb)
2926		return ERR_PTR(-ENOMEM);
2927
2928	hdr = (struct ieee80211_hdr *)bcn;
2929	fc = le16_to_cpu(hdr->frame_control);
2930
2931	tlv = (void *)skb->data;
2932	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_SEND_FROM_HOST_CMD);
2933	tlv->len = __cpu_to_le16(sizeof(*cmd));
2934	cmd = (void *)tlv->value;
2935	cmd->vdev_id = __cpu_to_le32(vdev_id);
2936	cmd->data_len = __cpu_to_le32(bcn_len);
2937	cmd->data_ptr = __cpu_to_le32(bcn_paddr);
2938	cmd->msdu_id = 0;
2939	cmd->frame_control = __cpu_to_le32(fc);
2940	cmd->flags = 0;
2941
2942	if (dtim_zero)
2943		cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);
2944
2945	if (deliver_cab)
2946		cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);
2947
2948	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv beacon dma\n");
2949	return skb;
2950}
2951
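/* Build a PDEV_SET_WMM_PARAMS command: an (empty) fixed parameter TLV
 * followed by four WMM_PARAMS TLVs, one per access category (BE, BK, VI,
 * VO).
 */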
2952static struct sk_buff *
2953ath10k_wmi_tlv_op_gen_pdev_set_wmm(struct ath10k *ar,
2954				   const struct wmi_wmm_params_all_arg *arg)
2955{
2956	struct wmi_tlv_pdev_set_wmm_cmd *cmd;
2957	struct wmi_wmm_params *wmm;
2958	struct wmi_tlv *tlv;
2959	struct sk_buff *skb;
2960	size_t len;
2961	void *ptr;
2962
2963	len = (sizeof(*tlv) + sizeof(*cmd)) +
2964	      (4 * (sizeof(*tlv) + sizeof(*wmm)));
2965	skb = ath10k_wmi_alloc_skb(ar, len);
2966	if (!skb)
2967		return ERR_PTR(-ENOMEM);
2968
2969	ptr = (void *)skb->data;
2970
2971	tlv = ptr;
2972	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_WMM_PARAMS_CMD);
2973	tlv->len = __cpu_to_le16(sizeof(*cmd));
2974	cmd = (void *)tlv->value;
2975
2976	/* nothing to set here */
2977
2978	ptr += sizeof(*tlv);
2979	ptr += sizeof(*cmd);
2980
2981	ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_be);
2982	ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_bk);
2983	ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vi);
2984	ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vo);
2985
2986	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set wmm\n");
2987	return skb;
2988}
2989
2990static struct sk_buff *
2991ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
2992{
2993	struct wmi_request_stats_cmd *cmd;
2994	struct wmi_tlv *tlv;
2995	struct sk_buff *skb;
2996
2997	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2998	if (!skb)
2999		return ERR_PTR(-ENOMEM);
3000
3001	tlv = (void *)skb->data;
3002	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_REQUEST_STATS_CMD);
3003	tlv->len = __cpu_to_le16(sizeof(*cmd));
3004	cmd = (void *)tlv->value;
3005	cmd->stats_id = __cpu_to_le32(stats_mask);
3006
3007	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv request stats\n");
3008	return skb;
3009}
3010
3011static struct sk_buff *
3012ath10k_wmi_tlv_op_gen_request_peer_stats_info(struct ath10k *ar,
3013					      u32 vdev_id,
3014					      enum wmi_peer_stats_info_request_type type,
3015					      u8 *addr,
3016					      u32 reset)
3017{
3018	struct wmi_tlv_request_peer_stats_info *cmd;
3019	struct wmi_tlv *tlv;
3020	struct sk_buff *skb;
3021
3022	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
3023	if (!skb)
3024		return ERR_PTR(-ENOMEM);
3025
3026	tlv = (void *)skb->data;
3027	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_REQUEST_PEER_STATS_INFO_CMD);
3028	tlv->len = __cpu_to_le16(sizeof(*cmd));
3029	cmd = (void *)tlv->value;
3030	cmd->vdev_id = __cpu_to_le32(vdev_id);
3031	cmd->request_type = __cpu_to_le32(type);
3032
3033	if (type == WMI_REQUEST_ONE_PEER_STATS_INFO)
3034		ether_addr_copy(cmd->peer_macaddr.addr, addr);
3035
3036	cmd->reset_after_request = __cpu_to_le32(reset);
3037	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv request peer stats info\n");
3038	return skb;
3039}
3040
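/* Remove the pending mgmt tx entry allocated for this msdu and free the
 * stored packet address record.
 */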
3041static int
3042ath10k_wmi_tlv_op_cleanup_mgmt_tx_send(struct ath10k *ar,
3043				       struct sk_buff *msdu)
3044{
3045	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
3046	struct ath10k_mgmt_tx_pkt_addr *pkt_addr;
3047	struct ath10k_wmi *wmi = &ar->wmi;
3048
3049	spin_lock_bh(&ar->data_lock);
3050	pkt_addr = idr_remove(&wmi->mgmt_pending_tx, cb->msdu_id);
3051	spin_unlock_bh(&ar->data_lock);
3052
3053	kfree(pkt_addr);
3054
3055	return 0;
3056}
3057
3058static int
3059ath10k_wmi_mgmt_tx_alloc_msdu_id(struct ath10k *ar, struct sk_buff *skb,
3060				 dma_addr_t paddr)
3061{
3062	struct ath10k_wmi *wmi = &ar->wmi;
3063	struct ath10k_mgmt_tx_pkt_addr *pkt_addr;
3064	int ret;
3065
3066	pkt_addr = kmalloc(sizeof(*pkt_addr), GFP_ATOMIC);
3067	if (!pkt_addr)
3068		return -ENOMEM;
3069
3070	pkt_addr->vaddr = skb;
3071	pkt_addr->paddr = paddr;
3072
3073	spin_lock_bh(&ar->data_lock);
3074	ret = idr_alloc(&wmi->mgmt_pending_tx, pkt_addr, 0,
3075			wmi->mgmt_max_num_pending_tx, GFP_ATOMIC);
3076	spin_unlock_bh(&ar->data_lock);
3077
3078	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx alloc msdu_id ret %d\n", ret);
3079	return ret;
3080}
3081
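/* Build a MGMT_TX command: a fixed parameter TLV followed by a byte array
 * TLV holding a copy of the frame, capped and padded to a 4 byte multiple.
 * A descriptor id is allocated so the tx completion can be matched later.
 */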
3082static struct sk_buff *
3083ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
3084				   dma_addr_t paddr)
3085{
3086	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
3087	struct wmi_tlv_mgmt_tx_cmd *cmd;
3088	struct ieee80211_hdr *hdr;
3089	struct ath10k_vif *arvif;
3090	u32 buf_len = msdu->len;
3091	struct wmi_tlv *tlv;
3092	struct sk_buff *skb;
3093	int len, desc_id;
3094	u32 vdev_id;
3095	void *ptr;
3096
3097	if (!cb->vif)
3098		return ERR_PTR(-EINVAL);
3099
3100	hdr = (struct ieee80211_hdr *)msdu->data;
3101	arvif = (void *)cb->vif->drv_priv;
3102	vdev_id = arvif->vdev_id;
3103
3104	if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control) &&
3105			 (!(ieee80211_is_nullfunc(hdr->frame_control) ||
3106			 ieee80211_is_qos_nullfunc(hdr->frame_control)))))
3107		return ERR_PTR(-EINVAL);
3108
3109	len = sizeof(*cmd) + 2 * sizeof(*tlv);
3110
3111	if ((ieee80211_is_action(hdr->frame_control) ||
3112	     ieee80211_is_deauth(hdr->frame_control) ||
3113	     ieee80211_is_disassoc(hdr->frame_control)) &&
3114	     ieee80211_has_protected(hdr->frame_control)) {
3115		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
3116		buf_len += IEEE80211_CCMP_MIC_LEN;
3117	}
3118
3119	buf_len = min_t(u32, buf_len, WMI_TLV_MGMT_TX_FRAME_MAX_LEN);
3120	buf_len = round_up(buf_len, 4);
3121
3122	len += buf_len;
3123	len = round_up(len, 4);
3124	skb = ath10k_wmi_alloc_skb(ar, len);
3125	if (!skb)
3126		return ERR_PTR(-ENOMEM);
3127
3128	desc_id = ath10k_wmi_mgmt_tx_alloc_msdu_id(ar, msdu, paddr);
3129	if (desc_id < 0)
3130		goto err_free_skb;
3131
3132	cb->msdu_id = desc_id;
3133
3134	ptr = (void *)skb->data;
3135	tlv = ptr;
3136	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_MGMT_TX_CMD);
3137	tlv->len = __cpu_to_le16(sizeof(*cmd));
3138	cmd = (void *)tlv->value;
3139	cmd->vdev_id = __cpu_to_le32(vdev_id);
3140	cmd->desc_id = __cpu_to_le32(desc_id);
3141	cmd->chanfreq = 0;
3142	cmd->buf_len = __cpu_to_le32(buf_len);
3143	cmd->frame_len = __cpu_to_le32(msdu->len);
3144	cmd->paddr = __cpu_to_le64(paddr);
3145
3146	ptr += sizeof(*tlv);
3147	ptr += sizeof(*cmd);
3148
3149	tlv = ptr;
3150	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
3151	tlv->len = __cpu_to_le16(buf_len);
3152
3153	ptr += sizeof(*tlv);
3154	memcpy(ptr, msdu->data, buf_len);
3155
3156	return skb;
3157
3158err_free_skb:
3159	dev_kfree_skb(skb);
3160	return ERR_PTR(desc_id);
3161}
3162
3163static struct sk_buff *
3164ath10k_wmi_tlv_op_gen_force_fw_hang(struct ath10k *ar,
3165				    enum wmi_force_fw_hang_type type,
3166				    u32 delay_ms)
3167{
3168	struct wmi_force_fw_hang_cmd *cmd;
3169	struct wmi_tlv *tlv;
3170	struct sk_buff *skb;
3171
3172	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
3173	if (!skb)
3174		return ERR_PTR(-ENOMEM);
3175
3176	tlv = (void *)skb->data;
3177	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_FORCE_FW_HANG_CMD);
3178	tlv->len = __cpu_to_le16(sizeof(*cmd));
3179	cmd = (void *)tlv->value;
3180	cmd->type = __cpu_to_le32(type);
3181	cmd->delay_ms = __cpu_to_le32(delay_ms);
3182
3183	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv force fw hang\n");
3184	return skb;
3185}
3186
3187static struct sk_buff *
3188ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
3189				 u32 log_level)
3190{
3191	struct wmi_tlv_dbglog_cmd *cmd;
3192	struct wmi_tlv *tlv;
3193	struct sk_buff *skb;
3194	size_t len, bmap_len;
3195	u32 value;
3196	void *ptr;
3197
3198	if (module_enable) {
3199		value = WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(
3200				module_enable,
3201				WMI_TLV_DBGLOG_LOG_LEVEL_VERBOSE);
3202	} else {
3203		value = WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(
3204				WMI_TLV_DBGLOG_ALL_MODULES,
3205				WMI_TLV_DBGLOG_LOG_LEVEL_WARN);
3206	}
3207
3208	bmap_len = 0;
3209	len = sizeof(*tlv) + sizeof(*cmd) + sizeof(*tlv) + bmap_len;
3210	skb = ath10k_wmi_alloc_skb(ar, len);
3211	if (!skb)
3212		return ERR_PTR(-ENOMEM);
3213
3214	ptr = (void *)skb->data;
3215
3216	tlv = ptr;
3217	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_DEBUG_LOG_CONFIG_CMD);
3218	tlv->len = __cpu_to_le16(sizeof(*cmd));
3219	cmd = (void *)tlv->value;
3220	cmd->param = __cpu_to_le32(WMI_TLV_DBGLOG_PARAM_LOG_LEVEL);
3221	cmd->value = __cpu_to_le32(value);
3222
3223	ptr += sizeof(*tlv);
3224	ptr += sizeof(*cmd);
3225
3226	tlv = ptr;
3227	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
3228	tlv->len = __cpu_to_le16(bmap_len);
3229
3230	/* nothing to do here */
3231
3232	ptr += sizeof(*tlv);
3233	ptr += sizeof(bmap_len);
3234
3235	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv dbglog value 0x%08x\n", value);
3236	return skb;
3237}
3238
3239static struct sk_buff *
3240ath10k_wmi_tlv_op_gen_pktlog_enable(struct ath10k *ar, u32 filter)
3241{
3242	struct wmi_tlv_pktlog_enable *cmd;
3243	struct wmi_tlv *tlv;
3244	struct sk_buff *skb;
3245	void *ptr;
3246	size_t len;
3247
3248	len = sizeof(*tlv) + sizeof(*cmd);
3249	skb = ath10k_wmi_alloc_skb(ar, len);
3250	if (!skb)
3251		return ERR_PTR(-ENOMEM);
3252
3253	ptr = (void *)skb->data;
3254	tlv = ptr;
3255	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_ENABLE_CMD);
3256	tlv->len = __cpu_to_le16(sizeof(*cmd));
3257	cmd = (void *)tlv->value;
3258	cmd->filter = __cpu_to_le32(filter);
3259
3260	ptr += sizeof(*tlv);
3261	ptr += sizeof(*cmd);
3262
3263	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pktlog enable filter 0x%08x\n",
3264		   filter);
3265	return skb;
3266}
3267
3268static struct sk_buff *
3269ath10k_wmi_tlv_op_gen_pdev_get_temperature(struct ath10k *ar)
3270{
3271	struct wmi_tlv_pdev_get_temp_cmd *cmd;
3272	struct wmi_tlv *tlv;
3273	struct sk_buff *skb;
3274
3275	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
3276	if (!skb)
3277		return ERR_PTR(-ENOMEM);
3278
3279	tlv = (void *)skb->data;
3280	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_GET_TEMPERATURE_CMD);
3281	tlv->len = __cpu_to_le16(sizeof(*cmd));
3282	cmd = (void *)tlv->value;
3283	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev get temperature tlv\n");
3284	return skb;
3285}
3286
3287static struct sk_buff *
3288ath10k_wmi_tlv_op_gen_pktlog_disable(struct ath10k *ar)
3289{
3290	struct wmi_tlv_pktlog_disable *cmd;
3291	struct wmi_tlv *tlv;
3292	struct sk_buff *skb;
3293	void *ptr;
3294	size_t len;
3295
3296	len = sizeof(*tlv) + sizeof(*cmd);
3297	skb = ath10k_wmi_alloc_skb(ar, len);
3298	if (!skb)
3299		return ERR_PTR(-ENOMEM);
3300
3301	ptr = (void *)skb->data;
3302	tlv = ptr;
3303	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_DISABLE_CMD);
3304	tlv->len = __cpu_to_le16(sizeof(*cmd));
3305	cmd = (void *)tlv->value;
3306
3307	ptr += sizeof(*tlv);
3308	ptr += sizeof(*cmd);
3309
3310	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pktlog disable\n");
3311	return skb;
3312}
3313
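/* Build a BCN_TMPL command: fixed parameters, a BCN_PRB_INFO TLV
 * (optionally carrying probe response IEs) and a byte array TLV with the
 * beacon template padded to a 4 byte multiple.
 */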
3314static struct sk_buff *
3315ath10k_wmi_tlv_op_gen_bcn_tmpl(struct ath10k *ar, u32 vdev_id,
3316			       u32 tim_ie_offset, struct sk_buff *bcn,
3317			       u32 prb_caps, u32 prb_erp, void *prb_ies,
3318			       size_t prb_ies_len)
3319{
3320	struct wmi_tlv_bcn_tmpl_cmd *cmd;
3321	struct wmi_tlv_bcn_prb_info *info;
3322	struct wmi_tlv *tlv;
3323	struct sk_buff *skb;
3324	void *ptr;
3325	size_t len;
3326
3327	if (WARN_ON(prb_ies_len > 0 && !prb_ies))
3328		return ERR_PTR(-EINVAL);
3329
3330	len = sizeof(*tlv) + sizeof(*cmd) +
3331	      sizeof(*tlv) + sizeof(*info) + prb_ies_len +
3332	      sizeof(*tlv) + roundup(bcn->len, 4);
3333	skb = ath10k_wmi_alloc_skb(ar, len);
3334	if (!skb)
3335		return ERR_PTR(-ENOMEM);
3336
3337	ptr = (void *)skb->data;
3338	tlv = ptr;
3339	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_TMPL_CMD);
3340	tlv->len = __cpu_to_le16(sizeof(*cmd));
3341	cmd = (void *)tlv->value;
3342	cmd->vdev_id = __cpu_to_le32(vdev_id);
3343	cmd->tim_ie_offset = __cpu_to_le32(tim_ie_offset);
3344	cmd->buf_len = __cpu_to_le32(bcn->len);
3345
3346	ptr += sizeof(*tlv);
3347	ptr += sizeof(*cmd);
3348
3349	/* FIXME: prb_ies_len should probably be aligned to a 4 byte boundary, but
3350	 * then it would be impossible to pass the original IE length.
3351	 * This chunk is not used yet, so if setting the probe response template
3352	 * yields problems with beaconing or crashes the firmware, look here.
3353	 */
3354	tlv = ptr;
3355	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
3356	tlv->len = __cpu_to_le16(sizeof(*info) + prb_ies_len);
3357	info = (void *)tlv->value;
3358	info->caps = __cpu_to_le32(prb_caps);
3359	info->erp = __cpu_to_le32(prb_erp);
3360	memcpy(info->ies, prb_ies, prb_ies_len);
3361
3362	ptr += sizeof(*tlv);
3363	ptr += sizeof(*info);
3364	ptr += prb_ies_len;
3365
3366	tlv = ptr;
3367	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
3368	tlv->len = __cpu_to_le16(roundup(bcn->len, 4));
3369	memcpy(tlv->value, bcn->data, bcn->len);
3370
3371	/* FIXME: Adjust TSF? */
3372
3373	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv bcn tmpl vdev_id %i\n",
3374		   vdev_id);
3375	return skb;
3376}
3377
3378static struct sk_buff *
3379ath10k_wmi_tlv_op_gen_prb_tmpl(struct ath10k *ar, u32 vdev_id,
3380			       struct sk_buff *prb)
3381{
3382	struct wmi_tlv_prb_tmpl_cmd *cmd;
3383	struct wmi_tlv_bcn_prb_info *info;
3384	struct wmi_tlv *tlv;
3385	struct sk_buff *skb;
3386	void *ptr;
3387	size_t len;
3388
3389	len = sizeof(*tlv) + sizeof(*cmd) +
3390	      sizeof(*tlv) + sizeof(*info) +
3391	      sizeof(*tlv) + roundup(prb->len, 4);
3392	skb = ath10k_wmi_alloc_skb(ar, len);
3393	if (!skb)
3394		return ERR_PTR(-ENOMEM);
3395
3396	ptr = (void *)skb->data;
3397	tlv = ptr;
3398	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PRB_TMPL_CMD);
3399	tlv->len = __cpu_to_le16(sizeof(*cmd));
3400	cmd = (void *)tlv->value;
3401	cmd->vdev_id = __cpu_to_le32(vdev_id);
3402	cmd->buf_len = __cpu_to_le32(prb->len);
3403
3404	ptr += sizeof(*tlv);
3405	ptr += sizeof(*cmd);
3406
3407	tlv = ptr;
3408	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
3409	tlv->len = __cpu_to_le16(sizeof(*info));
3410	info = (void *)tlv->value;
3411	info->caps = 0;
3412	info->erp = 0;
3413
3414	ptr += sizeof(*tlv);
3415	ptr += sizeof(*info);
3416
3417	tlv = ptr;
3418	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
3419	tlv->len = __cpu_to_le16(roundup(prb->len, 4));
3420	memcpy(tlv->value, prb->data, prb->len);
3421
3422	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv prb tmpl vdev_id %i\n",
3423		   vdev_id);
3424	return skb;
3425}
3426
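/* Build a P2P_GO_SET_BEACON_IE command: a fixed parameter TLV followed by a
 * byte array TLV with the P2P IE, its length taken from the IE header and
 * padded to a 4 byte multiple.
 */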
3427static struct sk_buff *
3428ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id,
3429				    const u8 *p2p_ie)
3430{
3431	struct wmi_tlv_p2p_go_bcn_ie *cmd;
3432	struct wmi_tlv *tlv;
3433	struct sk_buff *skb;
3434	void *ptr;
3435	size_t len;
3436
3437	len = sizeof(*tlv) + sizeof(*cmd) +
3438	      sizeof(*tlv) + roundup(p2p_ie[1] + 2, 4);
3439	skb = ath10k_wmi_alloc_skb(ar, len);
3440	if (!skb)
3441		return ERR_PTR(-ENOMEM);
3442
3443	ptr = (void *)skb->data;
3444	tlv = ptr;
3445	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_P2P_GO_SET_BEACON_IE);
3446	tlv->len = __cpu_to_le16(sizeof(*cmd));
3447	cmd = (void *)tlv->value;
3448	cmd->vdev_id = __cpu_to_le32(vdev_id);
3449	cmd->ie_len = __cpu_to_le32(p2p_ie[1] + 2);
3450
3451	ptr += sizeof(*tlv);
3452	ptr += sizeof(*cmd);
3453
3454	tlv = ptr;
3455	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
3456	tlv->len = __cpu_to_le16(roundup(p2p_ie[1] + 2, 4));
3457	memcpy(tlv->value, p2p_ie, p2p_ie[1] + 2);
3458
3459	ptr += sizeof(*tlv);
3460	ptr += roundup(p2p_ie[1] + 2, 4);
3461
3462	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv p2p go bcn ie for vdev %i\n",
3463		   vdev_id);
3464	return skb;
3465}
3466
3467static struct sk_buff *
3468ath10k_wmi_tlv_op_gen_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
3469					   enum wmi_tdls_state state)
3470{
3471	struct wmi_tdls_set_state_cmd *cmd;
3472	struct wmi_tlv *tlv;
3473	struct sk_buff *skb;
3474	void *ptr;
3475	size_t len;
3476	/* Set to a bitmask of options from wmi_tlv_tdls_options;
3477	 * for now none of them are enabled.
3478	 */
3479	u32 options = 0;
3480
3481	if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, ar->wmi.svc_map))
3482		options |=  WMI_TLV_TDLS_BUFFER_STA_EN;
3483
3484	/* WMI_TDLS_ENABLE_ACTIVE_EXTERNAL_CONTROL means the firmware will handle
3485	 * the TDLS link inactivity detection logic.
3486	 */
3487	if (state == WMI_TDLS_ENABLE_ACTIVE)
3488		state = WMI_TDLS_ENABLE_ACTIVE_EXTERNAL_CONTROL;
3489
3490	len = sizeof(*tlv) + sizeof(*cmd);
3491	skb = ath10k_wmi_alloc_skb(ar, len);
3492	if (!skb)
3493		return ERR_PTR(-ENOMEM);
3494
3495	ptr = (void *)skb->data;
3496	tlv = ptr;
3497	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_SET_STATE_CMD);
3498	tlv->len = __cpu_to_le16(sizeof(*cmd));
3499
3500	cmd = (void *)tlv->value;
3501	cmd->vdev_id = __cpu_to_le32(vdev_id);
3502	cmd->state = __cpu_to_le32(state);
3503	cmd->notification_interval_ms = __cpu_to_le32(5000);
3504	cmd->tx_discovery_threshold = __cpu_to_le32(100);
3505	cmd->tx_teardown_threshold = __cpu_to_le32(5);
3506	cmd->rssi_teardown_threshold = __cpu_to_le32(-75);
3507	cmd->rssi_delta = __cpu_to_le32(-20);
3508	cmd->tdls_options = __cpu_to_le32(options);
3509	cmd->tdls_peer_traffic_ind_window = __cpu_to_le32(2);
3510	cmd->tdls_peer_traffic_response_timeout_ms = __cpu_to_le32(5000);
3511	cmd->tdls_puapsd_mask = __cpu_to_le32(0xf);
3512	cmd->tdls_puapsd_inactivity_time_ms = __cpu_to_le32(0);
3513	cmd->tdls_puapsd_rx_frame_threshold = __cpu_to_le32(10);
3514
3515	ptr += sizeof(*tlv);
3516	ptr += sizeof(*cmd);
3517
3518	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv update fw tdls state %d for vdev %i\n",
3519		   state, vdev_id);
3520	return skb;
3521}
3522
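/* Translate mac80211 UAPSD queue flags and the max service period into the
 * WMI TDLS peer_qos bitmap.
 */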
3523static u32 ath10k_wmi_tlv_prepare_peer_qos(u8 uapsd_queues, u8 sp)
3524{
3525	u32 peer_qos = 0;
3526
3527	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
3528		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VO;
3529	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
3530		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VI;
3531	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
3532		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BK;
3533	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
3534		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BE;
3535
3536	peer_qos |= SM(sp, WMI_TLV_TDLS_PEER_SP);
3537
3538	return peer_qos;
3539}
3540
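/* Build a TDLS_PEER_UPDATE command: fixed parameters, a peer capabilities
 * TLV and an array-of-structs TLV with one CHANNEL TLV per peer channel.
 */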
3541static struct sk_buff *
3542ath10k_wmi_tlv_op_gen_tdls_peer_update(struct ath10k *ar,
3543				       const struct wmi_tdls_peer_update_cmd_arg *arg,
3544				       const struct wmi_tdls_peer_capab_arg *cap,
3545				       const struct wmi_channel_arg *chan_arg)
3546{
3547	struct wmi_tdls_peer_update_cmd *cmd;
3548	struct wmi_tdls_peer_capab *peer_cap;
3549	struct wmi_channel *chan;
3550	struct wmi_tlv *tlv;
3551	struct sk_buff *skb;
3552	u32 peer_qos;
3553	void *ptr;
3554	int len;
3555	int i;
3556
3557	len = sizeof(*tlv) + sizeof(*cmd) +
3558	      sizeof(*tlv) + sizeof(*peer_cap) +
3559	      sizeof(*tlv) + cap->peer_chan_len * sizeof(*chan);
3560
3561	skb = ath10k_wmi_alloc_skb(ar, len);
3562	if (!skb)
3563		return ERR_PTR(-ENOMEM);
3564
3565	ptr = (void *)skb->data;
3566	tlv = ptr;
3567	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_UPDATE_CMD);
3568	tlv->len = __cpu_to_le16(sizeof(*cmd));
3569
3570	cmd = (void *)tlv->value;
3571	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
3572	ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
3573	cmd->peer_state = __cpu_to_le32(arg->peer_state);
3574
3575	ptr += sizeof(*tlv);
3576	ptr += sizeof(*cmd);
3577
3578	tlv = ptr;
3579	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_CAPABILITIES);
3580	tlv->len = __cpu_to_le16(sizeof(*peer_cap));
3581	peer_cap = (void *)tlv->value;
3582	peer_qos = ath10k_wmi_tlv_prepare_peer_qos(cap->peer_uapsd_queues,
3583						   cap->peer_max_sp);
3584	peer_cap->peer_qos = __cpu_to_le32(peer_qos);
3585	peer_cap->buff_sta_support = __cpu_to_le32(cap->buff_sta_support);
3586	peer_cap->off_chan_support = __cpu_to_le32(cap->off_chan_support);
3587	peer_cap->peer_curr_operclass = __cpu_to_le32(cap->peer_curr_operclass);
3588	peer_cap->self_curr_operclass = __cpu_to_le32(cap->self_curr_operclass);
3589	peer_cap->peer_chan_len = __cpu_to_le32(cap->peer_chan_len);
3590	peer_cap->peer_operclass_len = __cpu_to_le32(cap->peer_operclass_len);
3591
3592	for (i = 0; i < WMI_TDLS_MAX_SUPP_OPER_CLASSES; i++)
3593		peer_cap->peer_operclass[i] = cap->peer_operclass[i];
3594
3595	peer_cap->is_peer_responder = __cpu_to_le32(cap->is_peer_responder);
3596	peer_cap->pref_offchan_num = __cpu_to_le32(cap->pref_offchan_num);
3597	peer_cap->pref_offchan_bw = __cpu_to_le32(cap->pref_offchan_bw);
3598
3599	ptr += sizeof(*tlv);
3600	ptr += sizeof(*peer_cap);
3601
3602	tlv = ptr;
3603	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
3604	tlv->len = __cpu_to_le16(cap->peer_chan_len * sizeof(*chan));
3605
3606	ptr += sizeof(*tlv);
3607
3608	for (i = 0; i < cap->peer_chan_len; i++) {
3609		tlv = ptr;
3610		tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
3611		tlv->len = __cpu_to_le16(sizeof(*chan));
3612		chan = (void *)tlv->value;
3613		ath10k_wmi_put_wmi_channel(ar, chan, &chan_arg[i]);
3614
3615		ptr += sizeof(*tlv);
3616		ptr += sizeof(*chan);
3617	}
3618
3619	ath10k_dbg(ar, ATH10K_DBG_WMI,
3620		   "wmi tlv tdls peer update vdev %i state %d n_chans %u\n",
3621		   arg->vdev_id, arg->peer_state, cap->peer_chan_len);
3622	return skb;
3623}
3624
3625static struct sk_buff *
3626ath10k_wmi_tlv_op_gen_pdev_set_quiet_mode(struct ath10k *ar, u32 period,
3627					  u32 duration, u32 next_offset,
3628					  u32 enabled)
3629{
3630	struct wmi_tlv_set_quiet_cmd *cmd;
3631	struct wmi_tlv *tlv;
3632	struct sk_buff *skb;
3633
3634	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
3635	if (!skb)
3636		return ERR_PTR(-ENOMEM);
3637
3638	tlv = (void *)skb->data;
3639	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_QUIET_CMD);
3640	tlv->len = __cpu_to_le16(sizeof(*cmd));
3641	cmd = (void *)tlv->value;
3642
3643	/* vdev_id is not in use, set to 0 */
3644	cmd->vdev_id = __cpu_to_le32(0);
3645	cmd->period = __cpu_to_le32(period);
3646	cmd->duration = __cpu_to_le32(duration);
3647	cmd->next_start = __cpu_to_le32(next_offset);
3648	cmd->enabled = __cpu_to_le32(enabled);
3649
3650	ath10k_dbg(ar, ATH10K_DBG_WMI,
3651		   "wmi tlv quiet param: period %u duration %u enabled %d\n",
3652		   period, duration, enabled);
3653	return skb;
3654}
3655
3656static struct sk_buff *
3657ath10k_wmi_tlv_op_gen_wow_enable(struct ath10k *ar)
3658{
3659	struct wmi_tlv_wow_enable_cmd *cmd;
3660	struct wmi_tlv *tlv;
3661	struct sk_buff *skb;
3662	size_t len;
3663
3664	len = sizeof(*tlv) + sizeof(*cmd);
3665	skb = ath10k_wmi_alloc_skb(ar, len);
3666	if (!skb)
3667		return ERR_PTR(-ENOMEM);
3668
3669	tlv = (struct wmi_tlv *)skb->data;
3670	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ENABLE_CMD);
3671	tlv->len = __cpu_to_le16(sizeof(*cmd));
3672	cmd = (void *)tlv->value;
3673
3674	cmd->enable = __cpu_to_le32(1);
3675	if (!ar->bus_param.link_can_suspend)
3676		cmd->pause_iface_config = __cpu_to_le32(WOW_IFACE_PAUSE_DISABLED);
3677
3678	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow enable\n");
3679	return skb;
3680}
3681
3682static struct sk_buff *
3683ath10k_wmi_tlv_op_gen_wow_add_wakeup_event(struct ath10k *ar,
3684					   u32 vdev_id,
3685					   enum wmi_wow_wakeup_event event,
3686					   u32 enable)
3687{
3688	struct wmi_tlv_wow_add_del_event_cmd *cmd;
3689	struct wmi_tlv *tlv;
3690	struct sk_buff *skb;
3691	size_t len;
3692
3693	len = sizeof(*tlv) + sizeof(*cmd);
3694	skb = ath10k_wmi_alloc_skb(ar, len);
3695	if (!skb)
3696		return ERR_PTR(-ENOMEM);
3697
3698	tlv = (struct wmi_tlv *)skb->data;
3699	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_DEL_EVT_CMD);
3700	tlv->len = __cpu_to_le16(sizeof(*cmd));
3701	cmd = (void *)tlv->value;
3702
3703	cmd->vdev_id = __cpu_to_le32(vdev_id);
3704	cmd->is_add = __cpu_to_le32(enable);
3705	cmd->event_bitmap = __cpu_to_le32(1 << event);
3706
3707	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
3708		   wow_wakeup_event(event), enable, vdev_id);
3709	return skb;
3710}
3711
3712static struct sk_buff *
3713ath10k_wmi_tlv_gen_wow_host_wakeup_ind(struct ath10k *ar)
3714{
3715	struct wmi_tlv_wow_host_wakeup_ind *cmd;
3716	struct wmi_tlv *tlv;
3717	struct sk_buff *skb;
3718	size_t len;
3719
3720	len = sizeof(*tlv) + sizeof(*cmd);
3721	skb = ath10k_wmi_alloc_skb(ar, len);
3722	if (!skb)
3723		return ERR_PTR(-ENOMEM);
3724
3725	tlv = (struct wmi_tlv *)skb->data;
3726	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_HOSTWAKEUP_FROM_SLEEP_CMD);
3727	tlv->len = __cpu_to_le16(sizeof(*cmd));
3728	cmd = (void *)tlv->value;
3729
3730	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow host wakeup ind\n");
3731	return skb;
3732}
3733
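/* Build a WOW_ADD_PATTERN command: fixed parameters followed by the bitmap
 * pattern TLV plus the empty ipv4/ipv6/magic/timeout arrays and a ratelimit
 * interval array that complete the message layout.
 */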
3734static struct sk_buff *
3735ath10k_wmi_tlv_op_gen_wow_add_pattern(struct ath10k *ar, u32 vdev_id,
3736				      u32 pattern_id, const u8 *pattern,
3737				      const u8 *bitmask, int pattern_len,
3738				      int pattern_offset)
3739{
3740	struct wmi_tlv_wow_add_pattern_cmd *cmd;
3741	struct wmi_tlv_wow_bitmap_pattern *bitmap;
3742	struct wmi_tlv *tlv;
3743	struct sk_buff *skb;
3744	void *ptr;
3745	size_t len;
3746
3747	len = sizeof(*tlv) + sizeof(*cmd) +
3748	      sizeof(*tlv) +			/* array struct */
3749	      sizeof(*tlv) + sizeof(*bitmap) +  /* bitmap */
3750	      sizeof(*tlv) +			/* empty ipv4 sync */
3751	      sizeof(*tlv) +			/* empty ipv6 sync */
3752	      sizeof(*tlv) +			/* empty magic */
3753	      sizeof(*tlv) +			/* empty info timeout */
3754	      sizeof(*tlv) + sizeof(u32);	/* ratelimit interval */
3755
3756	skb = ath10k_wmi_alloc_skb(ar, len);
3757	if (!skb)
3758		return ERR_PTR(-ENOMEM);
3759
3760	/* cmd */
3761	ptr = (void *)skb->data;
3762	tlv = ptr;
3763	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_PATTERN_CMD);
3764	tlv->len = __cpu_to_le16(sizeof(*cmd));
3765	cmd = (void *)tlv->value;
3766
3767	cmd->vdev_id = __cpu_to_le32(vdev_id);
3768	cmd->pattern_id = __cpu_to_le32(pattern_id);
3769	cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN);
3770
3771	ptr += sizeof(*tlv);
3772	ptr += sizeof(*cmd);
3773
3774	/* bitmap */
3775	tlv = ptr;
3776	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
3777	tlv->len = __cpu_to_le16(sizeof(*tlv) + sizeof(*bitmap));
3778
3779	ptr += sizeof(*tlv);
3780
3781	tlv = ptr;
3782	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_BITMAP_PATTERN_T);
3783	tlv->len = __cpu_to_le16(sizeof(*bitmap));
3784	bitmap = (void *)tlv->value;
3785
3786	memcpy(bitmap->patternbuf, pattern, pattern_len);
3787	memcpy(bitmap->bitmaskbuf, bitmask, pattern_len);
3788	bitmap->pattern_offset = __cpu_to_le32(pattern_offset);
3789	bitmap->pattern_len = __cpu_to_le32(pattern_len);
3790	bitmap->bitmask_len = __cpu_to_le32(pattern_len);
3791	bitmap->pattern_id = __cpu_to_le32(pattern_id);
3792
3793	ptr += sizeof(*tlv);
3794	ptr += sizeof(*bitmap);
3795
3796	/* ipv4 sync */
3797	tlv = ptr;
3798	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
3799	tlv->len = __cpu_to_le16(0);
3800
3801	ptr += sizeof(*tlv);
3802
3803	/* ipv6 sync */
3804	tlv = ptr;
3805	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
3806	tlv->len = __cpu_to_le16(0);
3807
3808	ptr += sizeof(*tlv);
3809
3810	/* magic */
3811	tlv = ptr;
3812	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
3813	tlv->len = __cpu_to_le16(0);
3814
3815	ptr += sizeof(*tlv);
3816
3817	/* pattern info timeout */
3818	tlv = ptr;
3819	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
3820	tlv->len = __cpu_to_le16(0);
3821
3822	ptr += sizeof(*tlv);
3823
3824	/* ratelimit interval */
3825	tlv = ptr;
3826	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
3827	tlv->len = __cpu_to_le16(sizeof(u32));
3828
3829	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d, pattern_offset %d\n",
3830		   vdev_id, pattern_id, pattern_offset);
3831	return skb;
3832}
3833
3834static struct sk_buff *
3835ath10k_wmi_tlv_op_gen_wow_del_pattern(struct ath10k *ar, u32 vdev_id,
3836				      u32 pattern_id)
3837{
3838	struct wmi_tlv_wow_del_pattern_cmd *cmd;
3839	struct wmi_tlv *tlv;
3840	struct sk_buff *skb;
3841	size_t len;
3842
3843	len = sizeof(*tlv) + sizeof(*cmd);
3844	skb = ath10k_wmi_alloc_skb(ar, len);
3845	if (!skb)
3846		return ERR_PTR(-ENOMEM);
3847
3848	tlv = (struct wmi_tlv *)skb->data;
3849	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_DEL_PATTERN_CMD);
3850	tlv->len = __cpu_to_le16(sizeof(*cmd));
3851	cmd = (void *)tlv->value;
3852
3853	cmd->vdev_id = __cpu_to_le32(vdev_id);
3854	cmd->pattern_id = __cpu_to_le32(pattern_id);
3855	cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN);
3856
3857	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
3858		   vdev_id, pattern_id);
3859	return skb;
3860}
3861
3862/* Request FW to start PNO operation */
3863static struct sk_buff *
3864ath10k_wmi_tlv_op_gen_config_pno_start(struct ath10k *ar,
3865				       u32 vdev_id,
3866				       struct wmi_pno_scan_req *pno)
3867{
3868	struct nlo_configured_parameters *nlo_list;
3869	struct wmi_tlv_wow_nlo_config_cmd *cmd;
3870	struct wmi_tlv *tlv;
3871	struct sk_buff *skb;
3872	__le32 *channel_list;
3873	u16 tlv_len;
3874	size_t len;
3875	void *ptr;
3876	u32 i;
3877
3878	len = sizeof(*tlv) + sizeof(*cmd) +
3879	      sizeof(*tlv) +
3880	      /* TLV placeholder for array of structures
3881	       * nlo_configured_parameters(nlo_list)
3882	       */
3883	      sizeof(*tlv);
3884	      /* TLV placeholder for array of uint32 channel_list */
3885
3886	len += sizeof(u32) * min_t(u8, pno->a_networks[0].channel_count,
3887				   WMI_NLO_MAX_CHAN);
3888	len += sizeof(struct nlo_configured_parameters) *
3889				min_t(u8, pno->uc_networks_count, WMI_NLO_MAX_SSIDS);
3890
3891	skb = ath10k_wmi_alloc_skb(ar, len);
3892	if (!skb)
3893		return ERR_PTR(-ENOMEM);
3894
3895	ptr = (void *)skb->data;
3896	tlv = ptr;
3897	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_NLO_CONFIG_CMD);
3898	tlv->len = __cpu_to_le16(sizeof(*cmd));
3899	cmd = (void *)tlv->value;
3900
3901	/* wmi_tlv_wow_nlo_config_cmd parameters */
3902	cmd->vdev_id = __cpu_to_le32(pno->vdev_id);
3903	cmd->flags = __cpu_to_le32(WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN);
3904
3905	/* current FW does not support min-max range for dwell time */
3906	cmd->active_dwell_time = __cpu_to_le32(pno->active_max_time);
3907	cmd->passive_dwell_time = __cpu_to_le32(pno->passive_max_time);
3908
3909	if (pno->do_passive_scan)
3910		cmd->flags |= __cpu_to_le32(WMI_NLO_CONFIG_SCAN_PASSIVE);
3911
3912	/* copy scan interval */
3913	cmd->fast_scan_period = __cpu_to_le32(pno->fast_scan_period);
3914	cmd->slow_scan_period = __cpu_to_le32(pno->slow_scan_period);
3915	cmd->fast_scan_max_cycles = __cpu_to_le32(pno->fast_scan_max_cycles);
3916	cmd->delay_start_time = __cpu_to_le32(pno->delay_start_time);
3917
3918	if (pno->enable_pno_scan_randomization) {
3919		cmd->flags |= __cpu_to_le32(WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ |
3920				WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ);
3921		ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr);
3922		ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask);
3923	}
3924
3925	ptr += sizeof(*tlv);
3926	ptr += sizeof(*cmd);
3927
3928	/* nlo_configured_parameters(nlo_list) */
3929	cmd->no_of_ssids = __cpu_to_le32(min_t(u8, pno->uc_networks_count,
3930					       WMI_NLO_MAX_SSIDS));
3931	tlv_len = __le32_to_cpu(cmd->no_of_ssids) *
3932		sizeof(struct nlo_configured_parameters);
3933
3934	tlv = ptr;
3935	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
3936	tlv->len = __cpu_to_le16(tlv_len);
3937
3938	ptr += sizeof(*tlv);
3939	nlo_list = ptr;
3940	for (i = 0; i < __le32_to_cpu(cmd->no_of_ssids); i++) {
3941		tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header);
3942		tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
3943		tlv->len = __cpu_to_le16(sizeof(struct nlo_configured_parameters) -
3944					 sizeof(*tlv));
3945
3946		/* copy ssid and its length */
3947		nlo_list[i].ssid.valid = __cpu_to_le32(true);
3948		nlo_list[i].ssid.ssid.ssid_len = pno->a_networks[i].ssid.ssid_len;
3949		memcpy(nlo_list[i].ssid.ssid.ssid,
3950		       pno->a_networks[i].ssid.ssid,
3951		       __le32_to_cpu(nlo_list[i].ssid.ssid.ssid_len));
3952
3953		/* copy rssi threshold */
3954		if (pno->a_networks[i].rssi_threshold &&
3955		    pno->a_networks[i].rssi_threshold > -300) {
3956			nlo_list[i].rssi_cond.valid = __cpu_to_le32(true);
3957			nlo_list[i].rssi_cond.rssi =
3958				__cpu_to_le32(pno->a_networks[i].rssi_threshold);
3959		}
3960
3961		nlo_list[i].bcast_nw_type.valid = __cpu_to_le32(true);
3962		nlo_list[i].bcast_nw_type.bcast_nw_type =
3963			__cpu_to_le32(pno->a_networks[i].bcast_nw_type);
3964	}
3965
3966	ptr += __le32_to_cpu(cmd->no_of_ssids) * sizeof(struct nlo_configured_parameters);
3967
3968	/* copy channel info */
3969	cmd->num_of_channels = __cpu_to_le32(min_t(u8,
3970						   pno->a_networks[0].channel_count,
3971						   WMI_NLO_MAX_CHAN));
3972
3973	tlv = ptr;
3974	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
3975	tlv->len = __cpu_to_le16(__le32_to_cpu(cmd->num_of_channels) *
3976				 sizeof(u_int32_t));
3977	ptr += sizeof(*tlv);
3978
3979	channel_list = (__le32 *)ptr;
3980	for (i = 0; i < __le32_to_cpu(cmd->num_of_channels); i++)
3981		channel_list[i] = __cpu_to_le32(pno->a_networks[0].channels[i]);
3982
3983	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n",
3984		   vdev_id);
3985
3986	return skb;
3987}
3988
3989/* Request FW to stop ongoing PNO operation */
3990static struct sk_buff *ath10k_wmi_tlv_op_gen_config_pno_stop(struct ath10k *ar,
3991							     u32 vdev_id)
3992{
3993	struct wmi_tlv_wow_nlo_config_cmd *cmd;
3994	struct wmi_tlv *tlv;
3995	struct sk_buff *skb;
3996	void *ptr;
3997	size_t len;
3998
3999	len = sizeof(*tlv) + sizeof(*cmd) +
4000	      sizeof(*tlv) +
4001	      /* TLV placeholder for array of structures
4002	       * nlo_configured_parameters(nlo_list)
4003	       */
4004	      sizeof(*tlv);
4005	      /* TLV placeholder for array of uint32 channel_list */
4006	skb = ath10k_wmi_alloc_skb(ar, len);
4007	if (!skb)
4008		return ERR_PTR(-ENOMEM);
4009
4010	ptr = (void *)skb->data;
4011	tlv = ptr;
4012	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_NLO_CONFIG_CMD);
4013	tlv->len = __cpu_to_le16(sizeof(*cmd));
4014	cmd = (void *)tlv->value;
4015
4016	cmd->vdev_id = __cpu_to_le32(vdev_id);
4017	cmd->flags = __cpu_to_le32(WMI_NLO_CONFIG_STOP);
4018
4019	ptr += sizeof(*tlv);
4020	ptr += sizeof(*cmd);
4021
4022	/* nlo_configured_parameters(nlo_list) */
4023	tlv = ptr;
4024	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
4025	tlv->len = __cpu_to_le16(0);
4026
4027	ptr += sizeof(*tlv);
4028
4029	/* channel list */
4030	tlv = ptr;
4031	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
4032	tlv->len = __cpu_to_le16(0);
4033
4034	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv stop pno config vdev_id %d\n", vdev_id);
4035	return skb;
4036}
4037
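/* Start or stop PNO depending on pno_scan->enable */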
4038static struct sk_buff *
4039ath10k_wmi_tlv_op_gen_config_pno(struct ath10k *ar, u32 vdev_id,
4040				 struct wmi_pno_scan_req *pno_scan)
4041{
4042	if (pno_scan->enable)
4043		return ath10k_wmi_tlv_op_gen_config_pno_start(ar, vdev_id, pno_scan);
4044	else
4045		return ath10k_wmi_tlv_op_gen_config_pno_stop(ar, vdev_id);
4046}
4047
4048static struct sk_buff *
4049ath10k_wmi_tlv_op_gen_adaptive_qcs(struct ath10k *ar, bool enable)
4050{
4051	struct wmi_tlv_adaptive_qcs *cmd;
4052	struct wmi_tlv *tlv;
4053	struct sk_buff *skb;
4054	void *ptr;
4055	size_t len;
4056
4057	len = sizeof(*tlv) + sizeof(*cmd);
4058	skb = ath10k_wmi_alloc_skb(ar, len);
4059	if (!skb)
4060		return ERR_PTR(-ENOMEM);
4061
4062	ptr = (void *)skb->data;
4063	tlv = ptr;
4064	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_RESMGR_ADAPTIVE_OCS_CMD);
4065	tlv->len = __cpu_to_le16(sizeof(*cmd));
4066	cmd = (void *)tlv->value;
4067	cmd->enable = __cpu_to_le32(enable ? 1 : 0);
4068
4069	ptr += sizeof(*tlv);
4070	ptr += sizeof(*cmd);
4071
4072	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv adaptive qcs %d\n", enable);
4073	return skb;
4074}
4075
4076static struct sk_buff *
4077ath10k_wmi_tlv_op_gen_echo(struct ath10k *ar, u32 value)
4078{
4079	struct wmi_echo_cmd *cmd;
4080	struct wmi_tlv *tlv;
4081	struct sk_buff *skb;
4082	void *ptr;
4083	size_t len;
4084
4085	len = sizeof(*tlv) + sizeof(*cmd);
4086	skb = ath10k_wmi_alloc_skb(ar, len);
4087	if (!skb)
4088		return ERR_PTR(-ENOMEM);
4089
4090	ptr = (void *)skb->data;
4091	tlv = ptr;
4092	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_ECHO_CMD);
4093	tlv->len = __cpu_to_le16(sizeof(*cmd));
4094	cmd = (void *)tlv->value;
4095	cmd->value = cpu_to_le32(value);
4096
4097	ptr += sizeof(*tlv);
4098	ptr += sizeof(*cmd);
4099
4100	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv echo value 0x%08x\n", value);
4101	return skb;
4102}
4103
4104static struct sk_buff *
4105ath10k_wmi_tlv_op_gen_vdev_spectral_conf(struct ath10k *ar,
4106					 const struct wmi_vdev_spectral_conf_arg *arg)
4107{
4108	struct wmi_vdev_spectral_conf_cmd *cmd;
4109	struct sk_buff *skb;
4110	struct wmi_tlv *tlv;
4111	void *ptr;
4112	size_t len;
4113
4114	len = sizeof(*tlv) + sizeof(*cmd);
4115	skb = ath10k_wmi_alloc_skb(ar, len);
4116	if (!skb)
4117		return ERR_PTR(-ENOMEM);
4118
4119	ptr = (void *)skb->data;
4120	tlv = ptr;
4121	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_CONFIGURE_CMD);
4122	tlv->len = __cpu_to_le16(sizeof(*cmd));
4123	cmd = (void *)tlv->value;
4124	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
4125	cmd->scan_count = __cpu_to_le32(arg->scan_count);
4126	cmd->scan_period = __cpu_to_le32(arg->scan_period);
4127	cmd->scan_priority = __cpu_to_le32(arg->scan_priority);
4128	cmd->scan_fft_size = __cpu_to_le32(arg->scan_fft_size);
4129	cmd->scan_gc_ena = __cpu_to_le32(arg->scan_gc_ena);
4130	cmd->scan_restart_ena = __cpu_to_le32(arg->scan_restart_ena);
4131	cmd->scan_noise_floor_ref = __cpu_to_le32(arg->scan_noise_floor_ref);
4132	cmd->scan_init_delay = __cpu_to_le32(arg->scan_init_delay);
4133	cmd->scan_nb_tone_thr = __cpu_to_le32(arg->scan_nb_tone_thr);
4134	cmd->scan_str_bin_thr = __cpu_to_le32(arg->scan_str_bin_thr);
4135	cmd->scan_wb_rpt_mode = __cpu_to_le32(arg->scan_wb_rpt_mode);
4136	cmd->scan_rssi_rpt_mode = __cpu_to_le32(arg->scan_rssi_rpt_mode);
4137	cmd->scan_rssi_thr = __cpu_to_le32(arg->scan_rssi_thr);
4138	cmd->scan_pwr_format = __cpu_to_le32(arg->scan_pwr_format);
4139	cmd->scan_rpt_mode = __cpu_to_le32(arg->scan_rpt_mode);
4140	cmd->scan_bin_scale = __cpu_to_le32(arg->scan_bin_scale);
4141	cmd->scan_dbm_adj = __cpu_to_le32(arg->scan_dbm_adj);
4142	cmd->scan_chn_mask = __cpu_to_le32(arg->scan_chn_mask);
4143
4144	return skb;
4145}
4146
4147static struct sk_buff *
4148ath10k_wmi_tlv_op_gen_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id,
4149					   u32 trigger, u32 enable)
4150{
4151	struct wmi_vdev_spectral_enable_cmd *cmd;
4152	struct sk_buff *skb;
4153	struct wmi_tlv *tlv;
4154	void *ptr;
4155	size_t len;
4156
4157	len = sizeof(*tlv) + sizeof(*cmd);
4158	skb = ath10k_wmi_alloc_skb(ar, len);
4159	if (!skb)
4160		return ERR_PTR(-ENOMEM);
4161
4162	ptr = (void *)skb->data;
4163	tlv = ptr;
4164	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_ENABLE_CMD);
4165	tlv->len = __cpu_to_le16(sizeof(*cmd));
4166	cmd = (void *)tlv->value;
4167	cmd->vdev_id = __cpu_to_le32(vdev_id);
4168	cmd->trigger_cmd = __cpu_to_le32(trigger);
4169	cmd->enable_cmd = __cpu_to_le32(enable);
4170
4171	return skb;
4172}
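/*
 * Illustrative sketch, not part of the upstream file: spectral scanning is
 * assumed to be driven through the wmi-ops wrappers in two steps, first
 * programming the scan parameters and then arming the scan.  The helper
 * name example_start_spectral() is hypothetical, and trigger=1/enable=1
 * are assumed to mean "start now" and "enable reporting".
 */
static int example_start_spectral(struct ath10k *ar,
				  const struct wmi_vdev_spectral_conf_arg *arg)
{
	int ret;

	ret = ath10k_wmi_vdev_spectral_conf(ar, arg);	/* program scan params */
	if (ret)
		return ret;

	return ath10k_wmi_vdev_spectral_enable(ar, arg->vdev_id, 1, 1);
}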
4173
4174/****************/
4175/* TLV mappings */
4176/****************/
4177
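/*
 * The tables below translate the firmware-agnostic command, pdev/vdev/peer
 * parameter and peer-flag identifiers used by common ath10k code into the
 * numeric IDs of the TLV firmware ABI.  ath10k_wmi_tlv_attach() at the end
 * of this file installs them in struct ath10k, so callers never reference
 * TLV-specific IDs directly.
 */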
4178static struct wmi_cmd_map wmi_tlv_cmd_map = {
4179	.init_cmdid = WMI_TLV_INIT_CMDID,
4180	.start_scan_cmdid = WMI_TLV_START_SCAN_CMDID,
4181	.stop_scan_cmdid = WMI_TLV_STOP_SCAN_CMDID,
4182	.scan_chan_list_cmdid = WMI_TLV_SCAN_CHAN_LIST_CMDID,
4183	.scan_sch_prio_tbl_cmdid = WMI_TLV_SCAN_SCH_PRIO_TBL_CMDID,
4184	.scan_prob_req_oui_cmdid = WMI_TLV_SCAN_PROB_REQ_OUI_CMDID,
4185	.pdev_set_regdomain_cmdid = WMI_TLV_PDEV_SET_REGDOMAIN_CMDID,
4186	.pdev_set_channel_cmdid = WMI_TLV_PDEV_SET_CHANNEL_CMDID,
4187	.pdev_set_param_cmdid = WMI_TLV_PDEV_SET_PARAM_CMDID,
4188	.pdev_pktlog_enable_cmdid = WMI_TLV_PDEV_PKTLOG_ENABLE_CMDID,
4189	.pdev_pktlog_disable_cmdid = WMI_TLV_PDEV_PKTLOG_DISABLE_CMDID,
4190	.pdev_set_wmm_params_cmdid = WMI_TLV_PDEV_SET_WMM_PARAMS_CMDID,
4191	.pdev_set_ht_cap_ie_cmdid = WMI_TLV_PDEV_SET_HT_CAP_IE_CMDID,
4192	.pdev_set_vht_cap_ie_cmdid = WMI_TLV_PDEV_SET_VHT_CAP_IE_CMDID,
4193	.pdev_set_dscp_tid_map_cmdid = WMI_TLV_PDEV_SET_DSCP_TID_MAP_CMDID,
4194	.pdev_set_quiet_mode_cmdid = WMI_TLV_PDEV_SET_QUIET_MODE_CMDID,
4195	.pdev_green_ap_ps_enable_cmdid = WMI_TLV_PDEV_GREEN_AP_PS_ENABLE_CMDID,
4196	.pdev_get_tpc_config_cmdid = WMI_TLV_PDEV_GET_TPC_CONFIG_CMDID,
4197	.pdev_set_base_macaddr_cmdid = WMI_TLV_PDEV_SET_BASE_MACADDR_CMDID,
4198	.vdev_create_cmdid = WMI_TLV_VDEV_CREATE_CMDID,
4199	.vdev_delete_cmdid = WMI_TLV_VDEV_DELETE_CMDID,
4200	.vdev_start_request_cmdid = WMI_TLV_VDEV_START_REQUEST_CMDID,
4201	.vdev_restart_request_cmdid = WMI_TLV_VDEV_RESTART_REQUEST_CMDID,
4202	.vdev_up_cmdid = WMI_TLV_VDEV_UP_CMDID,
4203	.vdev_stop_cmdid = WMI_TLV_VDEV_STOP_CMDID,
4204	.vdev_down_cmdid = WMI_TLV_VDEV_DOWN_CMDID,
4205	.vdev_set_param_cmdid = WMI_TLV_VDEV_SET_PARAM_CMDID,
4206	.vdev_install_key_cmdid = WMI_TLV_VDEV_INSTALL_KEY_CMDID,
4207	.peer_create_cmdid = WMI_TLV_PEER_CREATE_CMDID,
4208	.peer_delete_cmdid = WMI_TLV_PEER_DELETE_CMDID,
4209	.peer_flush_tids_cmdid = WMI_TLV_PEER_FLUSH_TIDS_CMDID,
4210	.peer_set_param_cmdid = WMI_TLV_PEER_SET_PARAM_CMDID,
4211	.peer_assoc_cmdid = WMI_TLV_PEER_ASSOC_CMDID,
4212	.peer_add_wds_entry_cmdid = WMI_TLV_PEER_ADD_WDS_ENTRY_CMDID,
4213	.peer_remove_wds_entry_cmdid = WMI_TLV_PEER_REMOVE_WDS_ENTRY_CMDID,
4214	.peer_mcast_group_cmdid = WMI_TLV_PEER_MCAST_GROUP_CMDID,
4215	.bcn_tx_cmdid = WMI_TLV_BCN_TX_CMDID,
4216	.pdev_send_bcn_cmdid = WMI_TLV_PDEV_SEND_BCN_CMDID,
4217	.bcn_tmpl_cmdid = WMI_TLV_BCN_TMPL_CMDID,
4218	.bcn_filter_rx_cmdid = WMI_TLV_BCN_FILTER_RX_CMDID,
4219	.prb_req_filter_rx_cmdid = WMI_TLV_PRB_REQ_FILTER_RX_CMDID,
4220	.mgmt_tx_cmdid = WMI_TLV_MGMT_TX_CMDID,
4221	.mgmt_tx_send_cmdid = WMI_TLV_MGMT_TX_SEND_CMD,
4222	.prb_tmpl_cmdid = WMI_TLV_PRB_TMPL_CMDID,
4223	.addba_clear_resp_cmdid = WMI_TLV_ADDBA_CLEAR_RESP_CMDID,
4224	.addba_send_cmdid = WMI_TLV_ADDBA_SEND_CMDID,
4225	.addba_status_cmdid = WMI_TLV_ADDBA_STATUS_CMDID,
4226	.delba_send_cmdid = WMI_TLV_DELBA_SEND_CMDID,
4227	.addba_set_resp_cmdid = WMI_TLV_ADDBA_SET_RESP_CMDID,
4228	.send_singleamsdu_cmdid = WMI_TLV_SEND_SINGLEAMSDU_CMDID,
4229	.sta_powersave_mode_cmdid = WMI_TLV_STA_POWERSAVE_MODE_CMDID,
4230	.sta_powersave_param_cmdid = WMI_TLV_STA_POWERSAVE_PARAM_CMDID,
4231	.sta_mimo_ps_mode_cmdid = WMI_TLV_STA_MIMO_PS_MODE_CMDID,
4232	.pdev_dfs_enable_cmdid = WMI_TLV_PDEV_DFS_ENABLE_CMDID,
4233	.pdev_dfs_disable_cmdid = WMI_TLV_PDEV_DFS_DISABLE_CMDID,
4234	.roam_scan_mode = WMI_TLV_ROAM_SCAN_MODE,
4235	.roam_scan_rssi_threshold = WMI_TLV_ROAM_SCAN_RSSI_THRESHOLD,
4236	.roam_scan_period = WMI_TLV_ROAM_SCAN_PERIOD,
4237	.roam_scan_rssi_change_threshold =
4238				WMI_TLV_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
4239	.roam_ap_profile = WMI_TLV_ROAM_AP_PROFILE,
4240	.ofl_scan_add_ap_profile = WMI_TLV_ROAM_AP_PROFILE,
4241	.ofl_scan_remove_ap_profile = WMI_TLV_OFL_SCAN_REMOVE_AP_PROFILE,
4242	.ofl_scan_period = WMI_TLV_OFL_SCAN_PERIOD,
4243	.p2p_dev_set_device_info = WMI_TLV_P2P_DEV_SET_DEVICE_INFO,
4244	.p2p_dev_set_discoverability = WMI_TLV_P2P_DEV_SET_DISCOVERABILITY,
4245	.p2p_go_set_beacon_ie = WMI_TLV_P2P_GO_SET_BEACON_IE,
4246	.p2p_go_set_probe_resp_ie = WMI_TLV_P2P_GO_SET_PROBE_RESP_IE,
4247	.p2p_set_vendor_ie_data_cmdid = WMI_TLV_P2P_SET_VENDOR_IE_DATA_CMDID,
4248	.ap_ps_peer_param_cmdid = WMI_TLV_AP_PS_PEER_PARAM_CMDID,
4249	.ap_ps_peer_uapsd_coex_cmdid = WMI_TLV_AP_PS_PEER_UAPSD_COEX_CMDID,
4250	.peer_rate_retry_sched_cmdid = WMI_TLV_PEER_RATE_RETRY_SCHED_CMDID,
4251	.wlan_profile_trigger_cmdid = WMI_TLV_WLAN_PROFILE_TRIGGER_CMDID,
4252	.wlan_profile_set_hist_intvl_cmdid =
4253				WMI_TLV_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
4254	.wlan_profile_get_profile_data_cmdid =
4255				WMI_TLV_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
4256	.wlan_profile_enable_profile_id_cmdid =
4257				WMI_TLV_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
4258	.wlan_profile_list_profile_id_cmdid =
4259				WMI_TLV_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
4260	.pdev_suspend_cmdid = WMI_TLV_PDEV_SUSPEND_CMDID,
4261	.pdev_resume_cmdid = WMI_TLV_PDEV_RESUME_CMDID,
4262	.add_bcn_filter_cmdid = WMI_TLV_ADD_BCN_FILTER_CMDID,
4263	.rmv_bcn_filter_cmdid = WMI_TLV_RMV_BCN_FILTER_CMDID,
4264	.wow_add_wake_pattern_cmdid = WMI_TLV_WOW_ADD_WAKE_PATTERN_CMDID,
4265	.wow_del_wake_pattern_cmdid = WMI_TLV_WOW_DEL_WAKE_PATTERN_CMDID,
4266	.wow_enable_disable_wake_event_cmdid =
4267				WMI_TLV_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
4268	.wow_enable_cmdid = WMI_TLV_WOW_ENABLE_CMDID,
4269	.wow_hostwakeup_from_sleep_cmdid =
4270				WMI_TLV_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
4271	.rtt_measreq_cmdid = WMI_TLV_RTT_MEASREQ_CMDID,
4272	.rtt_tsf_cmdid = WMI_TLV_RTT_TSF_CMDID,
4273	.vdev_spectral_scan_configure_cmdid = WMI_TLV_SPECTRAL_SCAN_CONF_CMDID,
4274	.vdev_spectral_scan_enable_cmdid = WMI_TLV_SPECTRAL_SCAN_ENABLE_CMDID,
4275	.request_stats_cmdid = WMI_TLV_REQUEST_STATS_CMDID,
4276	.request_peer_stats_info_cmdid = WMI_TLV_REQUEST_PEER_STATS_INFO_CMDID,
4277	.set_arp_ns_offload_cmdid = WMI_TLV_SET_ARP_NS_OFFLOAD_CMDID,
4278	.network_list_offload_config_cmdid =
4279				WMI_TLV_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
4280	.gtk_offload_cmdid = WMI_TLV_GTK_OFFLOAD_CMDID,
4281	.csa_offload_enable_cmdid = WMI_TLV_CSA_OFFLOAD_ENABLE_CMDID,
4282	.csa_offload_chanswitch_cmdid = WMI_TLV_CSA_OFFLOAD_CHANSWITCH_CMDID,
4283	.chatter_set_mode_cmdid = WMI_TLV_CHATTER_SET_MODE_CMDID,
4284	.peer_tid_addba_cmdid = WMI_TLV_PEER_TID_ADDBA_CMDID,
4285	.peer_tid_delba_cmdid = WMI_TLV_PEER_TID_DELBA_CMDID,
4286	.sta_dtim_ps_method_cmdid = WMI_TLV_STA_DTIM_PS_METHOD_CMDID,
4287	.sta_uapsd_auto_trig_cmdid = WMI_TLV_STA_UAPSD_AUTO_TRIG_CMDID,
4288	.sta_keepalive_cmd = WMI_TLV_STA_KEEPALIVE_CMDID,
4289	.echo_cmdid = WMI_TLV_ECHO_CMDID,
4290	.pdev_utf_cmdid = WMI_TLV_PDEV_UTF_CMDID,
4291	.dbglog_cfg_cmdid = WMI_TLV_DBGLOG_CFG_CMDID,
4292	.pdev_qvit_cmdid = WMI_TLV_PDEV_QVIT_CMDID,
4293	.pdev_ftm_intg_cmdid = WMI_TLV_PDEV_FTM_INTG_CMDID,
4294	.vdev_set_keepalive_cmdid = WMI_TLV_VDEV_SET_KEEPALIVE_CMDID,
4295	.vdev_get_keepalive_cmdid = WMI_TLV_VDEV_GET_KEEPALIVE_CMDID,
4296	.force_fw_hang_cmdid = WMI_TLV_FORCE_FW_HANG_CMDID,
4297	.gpio_config_cmdid = WMI_TLV_GPIO_CONFIG_CMDID,
4298	.gpio_output_cmdid = WMI_TLV_GPIO_OUTPUT_CMDID,
4299	.pdev_get_temperature_cmdid = WMI_TLV_PDEV_GET_TEMPERATURE_CMDID,
4300	.vdev_set_wmm_params_cmdid = WMI_TLV_VDEV_SET_WMM_PARAMS_CMDID,
4301	.tdls_set_state_cmdid = WMI_TLV_TDLS_SET_STATE_CMDID,
4302	.tdls_peer_update_cmdid = WMI_TLV_TDLS_PEER_UPDATE_CMDID,
4303	.adaptive_qcs_cmdid = WMI_TLV_RESMGR_ADAPTIVE_OCS_CMDID,
4304	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
4305	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
4306	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
4307	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
4308	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
4309	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
4310	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
4311	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
4312	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
4313	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
4314	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
4315	.nan_cmdid = WMI_CMD_UNSUPPORTED,
4316	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
4317	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
4318	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
4319	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
4320	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
4321	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
4322	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
4323	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
4324	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
4325	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
4326	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
4327	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
4328	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
4329	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
4330	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
4331	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
4332	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
4333	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
4334	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
4335	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
4336};
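/*
 * Annotation (not upstream): entries left at WMI_CMD_UNSUPPORTED mark
 * commands that the TLV firmware ABI does not expose; the WMI send path
 * is expected to refuse such command IDs instead of forwarding them to
 * the firmware.
 */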
4337
4338static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
4339	.tx_chain_mask = WMI_TLV_PDEV_PARAM_TX_CHAIN_MASK,
4340	.rx_chain_mask = WMI_TLV_PDEV_PARAM_RX_CHAIN_MASK,
4341	.txpower_limit2g = WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT2G,
4342	.txpower_limit5g = WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT5G,
4343	.txpower_scale = WMI_TLV_PDEV_PARAM_TXPOWER_SCALE,
4344	.beacon_gen_mode = WMI_TLV_PDEV_PARAM_BEACON_GEN_MODE,
4345	.beacon_tx_mode = WMI_TLV_PDEV_PARAM_BEACON_TX_MODE,
4346	.resmgr_offchan_mode = WMI_TLV_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
4347	.protection_mode = WMI_TLV_PDEV_PARAM_PROTECTION_MODE,
4348	.dynamic_bw = WMI_TLV_PDEV_PARAM_DYNAMIC_BW,
4349	.non_agg_sw_retry_th = WMI_TLV_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
4350	.agg_sw_retry_th = WMI_TLV_PDEV_PARAM_AGG_SW_RETRY_TH,
4351	.sta_kickout_th = WMI_TLV_PDEV_PARAM_STA_KICKOUT_TH,
4352	.ac_aggrsize_scaling = WMI_TLV_PDEV_PARAM_AC_AGGRSIZE_SCALING,
4353	.ltr_enable = WMI_TLV_PDEV_PARAM_LTR_ENABLE,
4354	.ltr_ac_latency_be = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BE,
4355	.ltr_ac_latency_bk = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BK,
4356	.ltr_ac_latency_vi = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VI,
4357	.ltr_ac_latency_vo = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VO,
4358	.ltr_ac_latency_timeout = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
4359	.ltr_sleep_override = WMI_TLV_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
4360	.ltr_rx_override = WMI_TLV_PDEV_PARAM_LTR_RX_OVERRIDE,
4361	.ltr_tx_activity_timeout = WMI_TLV_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
4362	.l1ss_enable = WMI_TLV_PDEV_PARAM_L1SS_ENABLE,
4363	.dsleep_enable = WMI_TLV_PDEV_PARAM_DSLEEP_ENABLE,
4364	.pcielp_txbuf_flush = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
4365	.pcielp_txbuf_watermark = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
4366	.pcielp_txbuf_tmo_en = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
4367	.pcielp_txbuf_tmo_value = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
4368	.pdev_stats_update_period = WMI_TLV_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
4369	.vdev_stats_update_period = WMI_TLV_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
4370	.peer_stats_update_period = WMI_TLV_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
4371	.bcnflt_stats_update_period =
4372				WMI_TLV_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
4373	.pmf_qos = WMI_TLV_PDEV_PARAM_PMF_QOS,
4374	.arp_ac_override = WMI_TLV_PDEV_PARAM_ARP_AC_OVERRIDE,
4375	.dcs = WMI_TLV_PDEV_PARAM_DCS,
4376	.ani_enable = WMI_TLV_PDEV_PARAM_ANI_ENABLE,
4377	.ani_poll_period = WMI_TLV_PDEV_PARAM_ANI_POLL_PERIOD,
4378	.ani_listen_period = WMI_TLV_PDEV_PARAM_ANI_LISTEN_PERIOD,
4379	.ani_ofdm_level = WMI_TLV_PDEV_PARAM_ANI_OFDM_LEVEL,
4380	.ani_cck_level = WMI_TLV_PDEV_PARAM_ANI_CCK_LEVEL,
4381	.dyntxchain = WMI_TLV_PDEV_PARAM_DYNTXCHAIN,
4382	.proxy_sta = WMI_TLV_PDEV_PARAM_PROXY_STA,
4383	.idle_ps_config = WMI_TLV_PDEV_PARAM_IDLE_PS_CONFIG,
4384	.power_gating_sleep = WMI_TLV_PDEV_PARAM_POWER_GATING_SLEEP,
4385	.fast_channel_reset = WMI_TLV_PDEV_PARAM_UNSUPPORTED,
4386	.burst_dur = WMI_TLV_PDEV_PARAM_BURST_DUR,
4387	.burst_enable = WMI_TLV_PDEV_PARAM_BURST_ENABLE,
4388	.cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
4389	.aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
4390	.rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
4391	.smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
4392	.igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
4393	.igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
4394	.antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
4395	.rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
4396	.set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
4397	.proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
4398	.set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
4399	.set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
4400	.remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
4401	.peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
4402	.igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
4403	.block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
4404	.set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
4405	.set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
4406	.set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
4407	.txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
4408	.set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
4409	.set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
4410	.en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
4411	.mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
4412	.noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
4413	.noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
4414	.dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
4415	.set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
4416	.atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
4417	.atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
4418	.ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
4419	.mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
4420	.sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
4421	.signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
4422	.signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
4423	.enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
4424	.enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
4425	.cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
4426	.rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
4427	.pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
4428	.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
4429	.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
4430	.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
4431	.rfkill_config = WMI_TLV_PDEV_PARAM_HW_RFKILL_CONFIG,
4432	.rfkill_enable = WMI_TLV_PDEV_PARAM_RFKILL_ENABLE,
4433	.peer_stats_info_enable = WMI_TLV_PDEV_PARAM_PEER_STATS_INFO_ENABLE,
4434};
4435
4436static struct wmi_peer_param_map wmi_tlv_peer_param_map = {
4437	.smps_state = WMI_TLV_PEER_SMPS_STATE,
4438	.ampdu = WMI_TLV_PEER_AMPDU,
4439	.authorize = WMI_TLV_PEER_AUTHORIZE,
4440	.chan_width = WMI_TLV_PEER_CHAN_WIDTH,
4441	.nss = WMI_TLV_PEER_NSS,
4442	.use_4addr = WMI_TLV_PEER_USE_4ADDR,
4443	.membership = WMI_TLV_PEER_MEMBERSHIP,
4444	.user_pos = WMI_TLV_PEER_USERPOS,
4445	.crit_proto_hint_enabled = WMI_TLV_PEER_CRIT_PROTO_HINT_ENABLED,
4446	.tx_fail_cnt_thr = WMI_TLV_PEER_TX_FAIL_CNT_THR,
4447	.set_hw_retry_cts2s = WMI_TLV_PEER_SET_HW_RETRY_CTS2S,
4448	.ibss_atim_win_len = WMI_TLV_PEER_IBSS_ATIM_WINDOW_LENGTH,
4449	.phymode = WMI_TLV_PEER_PHYMODE,
4450	.use_fixed_power = WMI_TLV_PEER_USE_FIXED_PWR,
4451	.dummy_var = WMI_TLV_PEER_DUMMY_VAR,
4452};
4453
4454static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = {
4455	.rts_threshold = WMI_TLV_VDEV_PARAM_RTS_THRESHOLD,
4456	.fragmentation_threshold = WMI_TLV_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
4457	.beacon_interval = WMI_TLV_VDEV_PARAM_BEACON_INTERVAL,
4458	.listen_interval = WMI_TLV_VDEV_PARAM_LISTEN_INTERVAL,
4459	.multicast_rate = WMI_TLV_VDEV_PARAM_MULTICAST_RATE,
4460	.mgmt_tx_rate = WMI_TLV_VDEV_PARAM_MGMT_TX_RATE,
4461	.slot_time = WMI_TLV_VDEV_PARAM_SLOT_TIME,
4462	.preamble = WMI_TLV_VDEV_PARAM_PREAMBLE,
4463	.swba_time = WMI_TLV_VDEV_PARAM_SWBA_TIME,
4464	.wmi_vdev_stats_update_period = WMI_TLV_VDEV_STATS_UPDATE_PERIOD,
4465	.wmi_vdev_pwrsave_ageout_time = WMI_TLV_VDEV_PWRSAVE_AGEOUT_TIME,
4466	.wmi_vdev_host_swba_interval = WMI_TLV_VDEV_HOST_SWBA_INTERVAL,
4467	.dtim_period = WMI_TLV_VDEV_PARAM_DTIM_PERIOD,
4468	.wmi_vdev_oc_scheduler_air_time_limit =
4469				WMI_TLV_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
4470	.wds = WMI_TLV_VDEV_PARAM_WDS,
4471	.atim_window = WMI_TLV_VDEV_PARAM_ATIM_WINDOW,
4472	.bmiss_count_max = WMI_TLV_VDEV_PARAM_BMISS_COUNT_MAX,
4473	.bmiss_first_bcnt = WMI_TLV_VDEV_PARAM_BMISS_FIRST_BCNT,
4474	.bmiss_final_bcnt = WMI_TLV_VDEV_PARAM_BMISS_FINAL_BCNT,
4475	.feature_wmm = WMI_TLV_VDEV_PARAM_FEATURE_WMM,
4476	.chwidth = WMI_TLV_VDEV_PARAM_CHWIDTH,
4477	.chextoffset = WMI_TLV_VDEV_PARAM_CHEXTOFFSET,
4478	.disable_htprotection =	WMI_TLV_VDEV_PARAM_DISABLE_HTPROTECTION,
4479	.sta_quickkickout = WMI_TLV_VDEV_PARAM_STA_QUICKKICKOUT,
4480	.mgmt_rate = WMI_TLV_VDEV_PARAM_MGMT_RATE,
4481	.protection_mode = WMI_TLV_VDEV_PARAM_PROTECTION_MODE,
4482	.fixed_rate = WMI_TLV_VDEV_PARAM_FIXED_RATE,
4483	.sgi = WMI_TLV_VDEV_PARAM_SGI,
4484	.ldpc = WMI_TLV_VDEV_PARAM_LDPC,
4485	.tx_stbc = WMI_TLV_VDEV_PARAM_TX_STBC,
4486	.rx_stbc = WMI_TLV_VDEV_PARAM_RX_STBC,
4487	.intra_bss_fwd = WMI_TLV_VDEV_PARAM_INTRA_BSS_FWD,
4488	.def_keyid = WMI_TLV_VDEV_PARAM_DEF_KEYID,
4489	.nss = WMI_TLV_VDEV_PARAM_NSS,
4490	.bcast_data_rate = WMI_TLV_VDEV_PARAM_BCAST_DATA_RATE,
4491	.mcast_data_rate = WMI_TLV_VDEV_PARAM_MCAST_DATA_RATE,
4492	.mcast_indicate = WMI_TLV_VDEV_PARAM_MCAST_INDICATE,
4493	.dhcp_indicate = WMI_TLV_VDEV_PARAM_DHCP_INDICATE,
4494	.unknown_dest_indicate = WMI_TLV_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
4495	.ap_keepalive_min_idle_inactive_time_secs =
4496		WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
4497	.ap_keepalive_max_idle_inactive_time_secs =
4498		WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
4499	.ap_keepalive_max_unresponsive_time_secs =
4500		WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
4501	.ap_enable_nawds = WMI_TLV_VDEV_PARAM_AP_ENABLE_NAWDS,
4502	.mcast2ucast_set = WMI_TLV_VDEV_PARAM_UNSUPPORTED,
4503	.enable_rtscts = WMI_TLV_VDEV_PARAM_ENABLE_RTSCTS,
4504	.txbf = WMI_TLV_VDEV_PARAM_TXBF,
4505	.packet_powersave = WMI_TLV_VDEV_PARAM_PACKET_POWERSAVE,
4506	.drop_unencry = WMI_TLV_VDEV_PARAM_DROP_UNENCRY,
4507	.tx_encap_type = WMI_TLV_VDEV_PARAM_TX_ENCAP_TYPE,
4508	.ap_detect_out_of_sync_sleeping_sta_time_secs =
4509					WMI_TLV_VDEV_PARAM_UNSUPPORTED,
4510	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
4511	.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
4512	.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
4513	.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
4514	.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
4515	.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
4516	.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
4517	.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
4518	.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
4519	.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
4520	.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
4521	.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
4522	.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
4523	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
4524	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
4525	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
4526};
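/*
 * Illustrative sketch, not part of the upstream file: callers are assumed
 * to look parameters up through the installed map and to treat the
 * WMI_VDEV_PARAM_UNSUPPORTED sentinel as "not available on this ABI".
 * The helper name example_set_rts() is hypothetical.
 */
static int example_set_rts(struct ath10k *ar, u32 vdev_id, u32 rts)
{
	u32 param = ar->wmi.vdev_param->rts_threshold;

	if (param == WMI_VDEV_PARAM_UNSUPPORTED)	/* unmapped on this ABI */
		return -EOPNOTSUPP;

	return ath10k_wmi_vdev_set_param(ar, vdev_id, param, rts);
}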
4527
4528static const struct wmi_ops wmi_tlv_ops = {
4529	.rx = ath10k_wmi_tlv_op_rx,
4530	.map_svc = wmi_tlv_svc_map,
4531	.map_svc_ext = wmi_tlv_svc_map_ext,
4532
4533	.pull_scan = ath10k_wmi_tlv_op_pull_scan_ev,
4534	.pull_mgmt_rx = ath10k_wmi_tlv_op_pull_mgmt_rx_ev,
4535	.pull_mgmt_tx_compl = ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev,
4536	.pull_mgmt_tx_bundle_compl = ath10k_wmi_tlv_op_pull_mgmt_tx_bundle_compl_ev,
4537	.pull_ch_info = ath10k_wmi_tlv_op_pull_ch_info_ev,
4538	.pull_vdev_start = ath10k_wmi_tlv_op_pull_vdev_start_ev,
4539	.pull_peer_kick = ath10k_wmi_tlv_op_pull_peer_kick_ev,
4540	.pull_swba = ath10k_wmi_tlv_op_pull_swba_ev,
4541	.pull_phyerr_hdr = ath10k_wmi_tlv_op_pull_phyerr_ev_hdr,
4542	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
4543	.pull_svc_rdy = ath10k_wmi_tlv_op_pull_svc_rdy_ev,
4544	.pull_rdy = ath10k_wmi_tlv_op_pull_rdy_ev,
4545	.pull_svc_avail = ath10k_wmi_tlv_op_pull_svc_avail,
4546	.pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats,
4547	.pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev,
4548	.pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev,
4549	.pull_echo_ev = ath10k_wmi_tlv_op_pull_echo_ev,
4550	.get_txbf_conf_scheme = ath10k_wmi_tlv_txbf_conf_scheme,
4551
4552	.gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend,
4553	.gen_pdev_resume = ath10k_wmi_tlv_op_gen_pdev_resume,
4554	.gen_pdev_set_rd = ath10k_wmi_tlv_op_gen_pdev_set_rd,
4555	.gen_pdev_set_param = ath10k_wmi_tlv_op_gen_pdev_set_param,
4556	.gen_init = ath10k_wmi_tlv_op_gen_init,
4557	.gen_start_scan = ath10k_wmi_tlv_op_gen_start_scan,
4558	.gen_stop_scan = ath10k_wmi_tlv_op_gen_stop_scan,
4559	.gen_vdev_create = ath10k_wmi_tlv_op_gen_vdev_create,
4560	.gen_vdev_delete = ath10k_wmi_tlv_op_gen_vdev_delete,
4561	.gen_vdev_start = ath10k_wmi_tlv_op_gen_vdev_start,
4562	.gen_vdev_stop = ath10k_wmi_tlv_op_gen_vdev_stop,
4563	.gen_vdev_up = ath10k_wmi_tlv_op_gen_vdev_up,
4564	.gen_vdev_down = ath10k_wmi_tlv_op_gen_vdev_down,
4565	.gen_vdev_set_param = ath10k_wmi_tlv_op_gen_vdev_set_param,
4566	.gen_vdev_install_key = ath10k_wmi_tlv_op_gen_vdev_install_key,
4567	.gen_vdev_wmm_conf = ath10k_wmi_tlv_op_gen_vdev_wmm_conf,
4568	.gen_peer_create = ath10k_wmi_tlv_op_gen_peer_create,
4569	.gen_peer_delete = ath10k_wmi_tlv_op_gen_peer_delete,
4570	.gen_peer_flush = ath10k_wmi_tlv_op_gen_peer_flush,
4571	.gen_peer_set_param = ath10k_wmi_tlv_op_gen_peer_set_param,
4572	.gen_peer_assoc = ath10k_wmi_tlv_op_gen_peer_assoc,
4573	.gen_set_psmode = ath10k_wmi_tlv_op_gen_set_psmode,
4574	.gen_set_sta_ps = ath10k_wmi_tlv_op_gen_set_sta_ps,
4575	.gen_set_ap_ps = ath10k_wmi_tlv_op_gen_set_ap_ps,
4576	.gen_scan_chan_list = ath10k_wmi_tlv_op_gen_scan_chan_list,
4577	.gen_scan_prob_req_oui = ath10k_wmi_tlv_op_gen_scan_prob_req_oui,
4578	.gen_beacon_dma = ath10k_wmi_tlv_op_gen_beacon_dma,
4579	.gen_pdev_set_wmm = ath10k_wmi_tlv_op_gen_pdev_set_wmm,
4580	.gen_request_stats = ath10k_wmi_tlv_op_gen_request_stats,
4581	.gen_request_peer_stats_info = ath10k_wmi_tlv_op_gen_request_peer_stats_info,
4582	.gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang,
4583	/* .gen_mgmt_tx = not implemented; HTT is used */
4584	.gen_mgmt_tx_send = ath10k_wmi_tlv_op_gen_mgmt_tx_send,
4585	.cleanup_mgmt_tx_send = ath10k_wmi_tlv_op_cleanup_mgmt_tx_send,
4586	.gen_dbglog_cfg = ath10k_wmi_tlv_op_gen_dbglog_cfg,
4587	.gen_pktlog_enable = ath10k_wmi_tlv_op_gen_pktlog_enable,
4588	.gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable,
4589	.gen_pdev_set_quiet_mode = ath10k_wmi_tlv_op_gen_pdev_set_quiet_mode,
4590	.gen_pdev_get_temperature = ath10k_wmi_tlv_op_gen_pdev_get_temperature,
4591	/* .gen_addba_clear_resp not implemented */
4592	/* .gen_addba_send not implemented */
4593	/* .gen_addba_set_resp not implemented */
4594	/* .gen_delba_send not implemented */
4595	.gen_bcn_tmpl = ath10k_wmi_tlv_op_gen_bcn_tmpl,
4596	.gen_prb_tmpl = ath10k_wmi_tlv_op_gen_prb_tmpl,
4597	.gen_p2p_go_bcn_ie = ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie,
4598	.gen_vdev_sta_uapsd = ath10k_wmi_tlv_op_gen_vdev_sta_uapsd,
4599	.gen_sta_keepalive = ath10k_wmi_tlv_op_gen_sta_keepalive,
4600	.gen_wow_enable = ath10k_wmi_tlv_op_gen_wow_enable,
4601	.gen_wow_add_wakeup_event = ath10k_wmi_tlv_op_gen_wow_add_wakeup_event,
4602	.gen_wow_host_wakeup_ind = ath10k_wmi_tlv_gen_wow_host_wakeup_ind,
4603	.gen_wow_add_pattern = ath10k_wmi_tlv_op_gen_wow_add_pattern,
4604	.gen_wow_del_pattern = ath10k_wmi_tlv_op_gen_wow_del_pattern,
4605	.gen_wow_config_pno = ath10k_wmi_tlv_op_gen_config_pno,
4606	.gen_update_fw_tdls_state = ath10k_wmi_tlv_op_gen_update_fw_tdls_state,
4607	.gen_tdls_peer_update = ath10k_wmi_tlv_op_gen_tdls_peer_update,
4608	.gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs,
4609	.fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
4610	.get_vdev_subtype = ath10k_wmi_tlv_op_get_vdev_subtype,
4611	.gen_echo = ath10k_wmi_tlv_op_gen_echo,
4612	.gen_vdev_spectral_conf = ath10k_wmi_tlv_op_gen_vdev_spectral_conf,
4613	.gen_vdev_spectral_enable = ath10k_wmi_tlv_op_gen_vdev_spectral_enable,
4614	/* .gen_gpio_config not implemented */
4615	/* .gen_gpio_output not implemented */
4616};
4617
4618static const struct wmi_peer_flags_map wmi_tlv_peer_flags_map = {
4619	.auth = WMI_TLV_PEER_AUTH,
4620	.qos = WMI_TLV_PEER_QOS,
4621	.need_ptk_4_way = WMI_TLV_PEER_NEED_PTK_4_WAY,
4622	.need_gtk_2_way = WMI_TLV_PEER_NEED_GTK_2_WAY,
4623	.apsd = WMI_TLV_PEER_APSD,
4624	.ht = WMI_TLV_PEER_HT,
4625	.bw40 = WMI_TLV_PEER_40MHZ,
4626	.stbc = WMI_TLV_PEER_STBC,
4627	.ldbc = WMI_TLV_PEER_LDPC,
4628	.dyn_mimops = WMI_TLV_PEER_DYN_MIMOPS,
4629	.static_mimops = WMI_TLV_PEER_STATIC_MIMOPS,
4630	.spatial_mux = WMI_TLV_PEER_SPATIAL_MUX,
4631	.vht = WMI_TLV_PEER_VHT,
4632	.bw80 = WMI_TLV_PEER_80MHZ,
4633	.pmf = WMI_TLV_PEER_PMF,
4634	.bw160 = WMI_TLV_PEER_160MHZ,
4635};
4636
4637/************/
4638/* TLV init */
4639/************/
4640
4641void ath10k_wmi_tlv_attach(struct ath10k *ar)
4642{
4643	ar->wmi.cmd = &wmi_tlv_cmd_map;
4644	ar->wmi.vdev_param = &wmi_tlv_vdev_param_map;
4645	ar->wmi.pdev_param = &wmi_tlv_pdev_param_map;
4646	ar->wmi.peer_param = &wmi_tlv_peer_param_map;
4647	ar->wmi.ops = &wmi_tlv_ops;
4648	ar->wmi.peer_flags = &wmi_tlv_peer_flags_map;
4649}
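/*
 * Illustrative sketch, not part of the upstream file: ath10k_wmi_tlv_attach()
 * is assumed to be called from the common WMI attach code once the firmware's
 * op version is known; the enum names below are written on that assumption
 * and the helper example_select_wmi_abi() is hypothetical.
 */
static void example_select_wmi_abi(struct ath10k *ar,
				   enum ath10k_fw_wmi_op_version op_ver)
{
	switch (op_ver) {
	case ATH10K_FW_WMI_OP_VERSION_TLV:
		ath10k_wmi_tlv_attach(ar);	/* install the TLV tables above */
		break;
	default:
		/* other op versions install their own maps elsewhere */
		break;
	}
}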