Loading...
1// SPDX-License-Identifier: ISC
2/*
3 * Copyright (c) 2005-2011 Atheros Communications Inc.
4 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
5 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
6 */
7#include "core.h"
8#include "debug.h"
9#include "mac.h"
10#include "hw.h"
11#include "wmi.h"
12#include "wmi-ops.h"
13#include "wmi-tlv.h"
14#include "p2p.h"
15#include "testmode.h"
16#include <linux/bitfield.h>
17
/***************/
/* TLV helpers */
/***************/
21
/* Per-tag validation policy: the minimum number of payload bytes a TLV
 * carrying this tag must contain before its parser may touch it.
 */
struct wmi_tlv_policy {
	size_t min_len;
};
25
/* Minimum payload lengths indexed by TLV tag, enforced by
 * ath10k_wmi_tlv_iter() before a payload reaches any event parser.
 * Tags not listed (or listed with min_len 0) are not length-checked.
 */
static const struct wmi_tlv_policy wmi_tlv_policies[] = {
	[WMI_TLV_TAG_ARRAY_BYTE]
		= { .min_len = 0 },
	[WMI_TLV_TAG_ARRAY_UINT32]
		= { .min_len = 0 },
	[WMI_TLV_TAG_STRUCT_SCAN_EVENT]
		= { .min_len = sizeof(struct wmi_scan_event) },
	[WMI_TLV_TAG_STRUCT_MGMT_RX_HDR]
		= { .min_len = sizeof(struct wmi_tlv_mgmt_rx_ev) },
	[WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT]
		= { .min_len = sizeof(struct wmi_chan_info_event) },
	[WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT]
		= { .min_len = sizeof(struct wmi_vdev_start_response_event) },
	[WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT]
		= { .min_len = sizeof(struct wmi_peer_sta_kickout_event) },
	[WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT]
		= { .min_len = sizeof(struct wmi_host_swba_event) },
	[WMI_TLV_TAG_STRUCT_TIM_INFO]
		= { .min_len = sizeof(struct wmi_tim_info) },
	[WMI_TLV_TAG_STRUCT_P2P_NOA_INFO]
		= { .min_len = sizeof(struct wmi_p2p_noa_info) },
	[WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT]
		= { .min_len = sizeof(struct wmi_tlv_svc_rdy_ev) },
	[WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES]
		= { .min_len = sizeof(struct hal_reg_capabilities) },
	[WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ]
		= { .min_len = sizeof(struct wlan_host_mem_req) },
	[WMI_TLV_TAG_STRUCT_READY_EVENT]
		= { .min_len = sizeof(struct wmi_tlv_rdy_ev) },
	[WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT]
		= { .min_len = sizeof(struct wmi_tlv_bcn_tx_status_ev) },
	[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT]
		= { .min_len = sizeof(struct wmi_tlv_diag_data_ev) },
	[WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT]
		= { .min_len = sizeof(struct wmi_tlv_p2p_noa_ev) },
	[WMI_TLV_TAG_STRUCT_ROAM_EVENT]
		= { .min_len = sizeof(struct wmi_tlv_roam_ev) },
	[WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO]
		= { .min_len = sizeof(struct wmi_tlv_wow_event_info) },
	[WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT]
		= { .min_len = sizeof(struct wmi_tlv_tx_pause_ev) },
};
68
/* Walk a buffer of back-to-back TLVs, invoking @iter once per TLV.
 *
 * Each TLV is a struct wmi_tlv header (16-bit tag, 16-bit payload length)
 * immediately followed by the payload; @iter receives a pointer just past
 * the header.  Returns -EINVAL on a truncated header or payload, or when
 * a payload is shorter than the wmi_tlv_policies[] minimum for its tag.
 * A non-zero return from @iter aborts the walk and is propagated as-is.
 */
static int
ath10k_wmi_tlv_iter(struct ath10k *ar, const void *ptr, size_t len,
		    int (*iter)(struct ath10k *ar, u16 tag, u16 len,
				const void *ptr, void *data),
		    void *data)
{
	const void *begin = ptr;	/* kept only for error-offset reporting */
	const struct wmi_tlv *tlv;
	u16 tlv_tag, tlv_len;
	int ret;

	while (len > 0) {
		/* Must have room for at least a complete TLV header. */
		if (len < sizeof(*tlv)) {
			ath10k_dbg(ar, ATH10K_DBG_WMI,
				   "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
				   ptr - begin, len, sizeof(*tlv));
			return -EINVAL;
		}

		tlv = ptr;
		tlv_tag = __le16_to_cpu(tlv->tag);
		tlv_len = __le16_to_cpu(tlv->len);
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		/* Payload must not run past the end of the buffer. */
		if (tlv_len > len) {
			ath10k_dbg(ar, ATH10K_DBG_WMI,
				   "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
				   tlv_tag, ptr - begin, len, tlv_len);
			return -EINVAL;
		}

		/* Enforce the per-tag minimum length, if one is declared. */
		if (tlv_tag < ARRAY_SIZE(wmi_tlv_policies) &&
		    wmi_tlv_policies[tlv_tag].min_len &&
		    wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
			ath10k_dbg(ar, ATH10K_DBG_WMI,
				   "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n",
				   tlv_tag, ptr - begin, tlv_len,
				   wmi_tlv_policies[tlv_tag].min_len);
			return -EINVAL;
		}

		ret = iter(ar, tlv_tag, tlv_len, ptr, data);
		if (ret)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}

	return 0;
}
121
122static int ath10k_wmi_tlv_iter_parse(struct ath10k *ar, u16 tag, u16 len,
123 const void *ptr, void *data)
124{
125 const void **tb = data;
126
127 if (tag < WMI_TLV_TAG_MAX)
128 tb[tag] = ptr;
129
130 return 0;
131}
132
133static int ath10k_wmi_tlv_parse(struct ath10k *ar, const void **tb,
134 const void *ptr, size_t len)
135{
136 return ath10k_wmi_tlv_iter(ar, ptr, len, ath10k_wmi_tlv_iter_parse,
137 (void *)tb);
138}
139
140static const void **
141ath10k_wmi_tlv_parse_alloc(struct ath10k *ar, const void *ptr,
142 size_t len, gfp_t gfp)
143{
144 const void **tb;
145 int ret;
146
147 tb = kcalloc(WMI_TLV_TAG_MAX, sizeof(*tb), gfp);
148 if (!tb)
149 return ERR_PTR(-ENOMEM);
150
151 ret = ath10k_wmi_tlv_parse(ar, tb, ptr, len);
152 if (ret) {
153 kfree(tb);
154 return ERR_PTR(ret);
155 }
156
157 return tb;
158}
159
160static u16 ath10k_wmi_tlv_len(const void *ptr)
161{
162 return __le16_to_cpu((((const struct wmi_tlv *)ptr) - 1)->len);
163}
164
165/**************/
166/* TLV events */
167/**************/
/* Handle an offloaded-beacon TX status event.
 *
 * Warns on any non-OK status and, when the vdev is up with a channel
 * switch announcement in progress, queues the CSA worker so mac80211 can
 * finalize the switch.
 */
static int ath10k_wmi_tlv_event_bcn_tx_status(struct ath10k *ar,
					      struct sk_buff *skb)
{
	const void **tb;
	const struct wmi_tlv_bcn_tx_status_ev *ev;
	struct ath10k_vif *arvif;
	u32 vdev_id, tx_status;
	int ret;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT];
	if (!ev) {
		kfree(tb);
		return -EPROTO;
	}

	tx_status = __le32_to_cpu(ev->tx_status);
	vdev_id = __le32_to_cpu(ev->vdev_id);

	switch (tx_status) {
	case WMI_TLV_BCN_TX_STATUS_OK:
		break;
	case WMI_TLV_BCN_TX_STATUS_XRETRY:
	case WMI_TLV_BCN_TX_STATUS_DROP:
	case WMI_TLV_BCN_TX_STATUS_FILTERED:
		/* FIXME: It's probably worth telling mac80211 to stop the
		 * interface as it is crippled.
		 */
		ath10k_warn(ar, "received bcn tmpl tx status on vdev %i: %d",
			    vdev_id, tx_status);
		break;
	}

	/* Beacon tx status doubles as "a beacon went out", which is a safe
	 * point to advance a pending channel switch.
	 */
	arvif = ath10k_get_arvif(ar, vdev_id);
	if (arvif && arvif->is_up && arvif->vif->bss_conf.csa_active)
		ieee80211_queue_work(ar->hw, &arvif->ap_csa_work);

	kfree(tb);
	return 0;
}
214
/* WMI_VDEV_DELETE_RESP_EVENTID: wake whoever is waiting in the vdev
 * teardown path for firmware to acknowledge the delete.
 */
static void ath10k_wmi_tlv_event_vdev_delete_resp(struct ath10k *ar,
						  struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_DELETE_RESP_EVENTID\n");
	complete(&ar->vdev_delete_done);
}
221
/* ath10k_wmi_tlv_iter() callback for one per-peer stats record: look up
 * the station by MAC and cache the last rx/tx rate codes and bitrates on
 * its driver-private state.
 *
 * Runs in the WMI event path; the station lookup and the writes to arsta
 * are done under rcu_read_lock().
 */
static int ath10k_wmi_tlv_parse_peer_stats_info(struct ath10k *ar, u16 tag, u16 len,
						const void *ptr, void *data)
{
	const struct wmi_tlv_peer_stats_info *stat = ptr;
	struct ieee80211_sta *sta;
	struct ath10k_sta *arsta;

	if (tag != WMI_TLV_TAG_STRUCT_PEER_STATS_INFO)
		return -EPROTO;

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi tlv stats peer addr %pMF rx rate code 0x%x bit rate %d kbps\n",
		   stat->peer_macaddr.addr,
		   __le32_to_cpu(stat->last_rx_rate_code),
		   __le32_to_cpu(stat->last_rx_bitrate_kbps));

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi tlv stats tx rate code 0x%x bit rate %d kbps\n",
		   __le32_to_cpu(stat->last_tx_rate_code),
		   __le32_to_cpu(stat->last_tx_bitrate_kbps));

	rcu_read_lock();
	sta = ieee80211_find_sta_by_ifaddr(ar->hw, stat->peer_macaddr.addr, NULL);
	if (!sta) {
		rcu_read_unlock();
		ath10k_warn(ar, "not found station for peer stats\n");
		return -EINVAL;
	}

	arsta = (struct ath10k_sta *)sta->drv_priv;
	arsta->rx_rate_code = __le32_to_cpu(stat->last_rx_rate_code);
	arsta->rx_bitrate_kbps = __le32_to_cpu(stat->last_rx_bitrate_kbps);
	arsta->tx_rate_code = __le32_to_cpu(stat->last_tx_rate_code);
	arsta->tx_bitrate_kbps = __le32_to_cpu(stat->last_tx_bitrate_kbps);
	rcu_read_unlock();

	return 0;
}
260
/* Parse a peer-stats-info event: locate the event header and the nested
 * ARRAY_STRUCT of per-peer records, then iterate the records with
 * ath10k_wmi_tlv_parse_peer_stats_info().
 *
 * A failure while iterating the records is logged but deliberately not
 * propagated (the function still returns 0) so the caller's completion
 * handshake is unaffected.
 */
static int ath10k_wmi_tlv_op_pull_peer_stats_info(struct ath10k *ar,
						  struct sk_buff *skb)
{
	const void **tb;
	const struct wmi_tlv_peer_stats_info_ev *ev;
	const void *data;
	u32 num_peer_stats;
	int ret;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_PEER_STATS_INFO_EVENT];
	data = tb[WMI_TLV_TAG_ARRAY_STRUCT];

	if (!ev || !data) {
		kfree(tb);
		return -EPROTO;
	}

	num_peer_stats = __le32_to_cpu(ev->num_peers);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi tlv peer stats info update peer vdev id %d peers %i more data %d\n",
		   __le32_to_cpu(ev->vdev_id),
		   num_peer_stats,
		   __le32_to_cpu(ev->more_data));

	/* The record array length comes from its own TLV header. */
	ret = ath10k_wmi_tlv_iter(ar, data, ath10k_wmi_tlv_len(data),
				  ath10k_wmi_tlv_parse_peer_stats_info, NULL);
	if (ret)
		ath10k_warn(ar, "failed to parse stats info tlv: %d\n", ret);

	kfree(tb);
	return 0;
}
301
/* WMI_PEER_STATS_INFO_EVENTID: parse the stats (best effort) and wake any
 * waiter blocked on peer_stats_info_complete.
 */
static void ath10k_wmi_tlv_event_peer_stats_info(struct ath10k *ar,
						 struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PEER_STATS_INFO_EVENTID\n");
	ath10k_wmi_tlv_op_pull_peer_stats_info(ar, skb);
	complete(&ar->peer_stats_info_complete);
}
309
310static int ath10k_wmi_tlv_event_diag_data(struct ath10k *ar,
311 struct sk_buff *skb)
312{
313 const void **tb;
314 const struct wmi_tlv_diag_data_ev *ev;
315 const struct wmi_tlv_diag_item *item;
316 const void *data;
317 int ret, num_items, len;
318
319 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
320 if (IS_ERR(tb)) {
321 ret = PTR_ERR(tb);
322 ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
323 return ret;
324 }
325
326 ev = tb[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT];
327 data = tb[WMI_TLV_TAG_ARRAY_BYTE];
328 if (!ev || !data) {
329 kfree(tb);
330 return -EPROTO;
331 }
332
333 num_items = __le32_to_cpu(ev->num_items);
334 len = ath10k_wmi_tlv_len(data);
335
336 while (num_items--) {
337 if (len == 0)
338 break;
339 if (len < sizeof(*item)) {
340 ath10k_warn(ar, "failed to parse diag data: can't fit item header\n");
341 break;
342 }
343
344 item = data;
345
346 if (len < sizeof(*item) + __le16_to_cpu(item->len)) {
347 ath10k_warn(ar, "failed to parse diag data: item is too long\n");
348 break;
349 }
350
351 trace_ath10k_wmi_diag_container(ar,
352 item->type,
353 __le32_to_cpu(item->timestamp),
354 __le32_to_cpu(item->code),
355 __le16_to_cpu(item->len),
356 item->payload);
357
358 len -= sizeof(*item);
359 len -= roundup(__le16_to_cpu(item->len), 4);
360
361 data += sizeof(*item);
362 data += roundup(__le16_to_cpu(item->len), 4);
363 }
364
365 if (num_items != -1 || len != 0)
366 ath10k_warn(ar, "failed to parse diag data event: num_items %d len %d\n",
367 num_items, len);
368
369 kfree(tb);
370 return 0;
371}
372
373static int ath10k_wmi_tlv_event_diag(struct ath10k *ar,
374 struct sk_buff *skb)
375{
376 const void **tb;
377 const void *data;
378 int ret, len;
379
380 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
381 if (IS_ERR(tb)) {
382 ret = PTR_ERR(tb);
383 ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
384 return ret;
385 }
386
387 data = tb[WMI_TLV_TAG_ARRAY_BYTE];
388 if (!data) {
389 kfree(tb);
390 return -EPROTO;
391 }
392 len = ath10k_wmi_tlv_len(data);
393
394 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv diag event len %d\n", len);
395 trace_ath10k_wmi_diag(ar, data, len);
396
397 kfree(tb);
398 return 0;
399}
400
/* Handle a P2P notice-of-absence event: forward the NoA descriptor set to
 * the P2P layer for the matching vdev.
 */
static int ath10k_wmi_tlv_event_p2p_noa(struct ath10k *ar,
					struct sk_buff *skb)
{
	const void **tb;
	const struct wmi_tlv_p2p_noa_ev *ev;
	const struct wmi_p2p_noa_info *noa;
	int ret, vdev_id;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	/* Both the event header and the NoA info TLV must be present. */
	ev = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT];
	noa = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_INFO];

	if (!ev || !noa) {
		kfree(tb);
		return -EPROTO;
	}

	vdev_id = __le32_to_cpu(ev->vdev_id);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi tlv p2p noa vdev_id %i descriptors %u\n",
		   vdev_id, noa->num_descriptors);

	ath10k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa);
	kfree(tb);
	return 0;
}
434
/* Handle a firmware TX pause/unpause event.
 *
 * For vdev-scoped pause ids the vdev_map bitmap is walked bit by bit and
 * each affected vdev is paused/unpaused via the mac layer.  Peer/tid
 * scoped pause ids are acknowledged but not acted upon.
 */
static int ath10k_wmi_tlv_event_tx_pause(struct ath10k *ar,
					 struct sk_buff *skb)
{
	const void **tb;
	const struct wmi_tlv_tx_pause_ev *ev;
	int ret, vdev_id;
	u32 pause_id, action, vdev_map, peer_id, tid_map;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT];
	if (!ev) {
		kfree(tb);
		return -EPROTO;
	}

	pause_id = __le32_to_cpu(ev->pause_id);
	action = __le32_to_cpu(ev->action);
	vdev_map = __le32_to_cpu(ev->vdev_map);
	peer_id = __le32_to_cpu(ev->peer_id);
	tid_map = __le32_to_cpu(ev->tid_map);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi tlv tx pause pause_id %u action %u vdev_map 0x%08x peer_id %u tid_map 0x%08x\n",
		   pause_id, action, vdev_map, peer_id, tid_map);

	switch (pause_id) {
	case WMI_TLV_TX_PAUSE_ID_MCC:
	case WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA:
	case WMI_TLV_TX_PAUSE_ID_P2P_GO_PS:
	case WMI_TLV_TX_PAUSE_ID_AP_PS:
	case WMI_TLV_TX_PAUSE_ID_IBSS_PS:
		/* Clear each set bit as it is handled; loop ends when the
		 * map is empty.
		 */
		for (vdev_id = 0; vdev_map; vdev_id++) {
			if (!(vdev_map & BIT(vdev_id)))
				continue;

			vdev_map &= ~BIT(vdev_id);
			ath10k_mac_handle_tx_pause_vdev(ar, vdev_id, pause_id,
							action);
		}
		break;
	case WMI_TLV_TX_PAUSE_ID_AP_PEER_PS:
	case WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD:
	case WMI_TLV_TX_PAUSE_ID_STA_ADD_BA:
	case WMI_TLV_TX_PAUSE_ID_HOST:
		ath10k_dbg(ar, ATH10K_DBG_MAC,
			   "mac ignoring unsupported tx pause id %d\n",
			   pause_id);
		break;
	default:
		ath10k_dbg(ar, ATH10K_DBG_MAC,
			   "mac ignoring unknown tx pause vdev %d\n",
			   pause_id);
		break;
	}

	kfree(tb);
	return 0;
}
499
/* Handle a hardware rfkill state change reported by firmware: record the
 * radio-off state, toggle the radio, and propagate the new state to
 * cfg80211.  Parse failures are logged and the event is dropped.
 */
static void ath10k_wmi_tlv_event_rfkill_state_change(struct ath10k *ar,
						     struct sk_buff *skb)
{
	const struct wmi_tlv_rfkill_state_change_ev *ev;
	const void **tb;
	bool radio;
	int ret;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar,
			    "failed to parse rfkill state change event: %d\n",
			    ret);
		return;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_RFKILL_EVENT];
	if (!ev) {
		kfree(tb);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_MAC,
		   "wmi tlv rfkill state change gpio %d type %d radio_state %d\n",
		   __le32_to_cpu(ev->gpio_pin_num),
		   __le32_to_cpu(ev->int_type),
		   __le32_to_cpu(ev->radio_state));

	radio = (__le32_to_cpu(ev->radio_state) == WMI_TLV_RFKILL_RADIO_STATE_ON);

	spin_lock_bh(&ar->data_lock);

	/* Only the "radio forced off" case is latched here; clearing the
	 * flag is handled elsewhere.
	 */
	if (!radio)
		ar->hw_rfkill_on = true;

	spin_unlock_bh(&ar->data_lock);

	/* notify cfg80211 radio state change */
	ath10k_mac_rfkill_enable_radio(ar, radio);
	wiphy_rfkill_set_hw_state(ar->hw->wiphy, !radio);
}
542
543static int ath10k_wmi_tlv_event_temperature(struct ath10k *ar,
544 struct sk_buff *skb)
545{
546 const struct wmi_tlv_pdev_temperature_event *ev;
547
548 ev = (struct wmi_tlv_pdev_temperature_event *)skb->data;
549 if (WARN_ON(skb->len < sizeof(*ev)))
550 return -EPROTO;
551
552 ath10k_thermal_event_temperature(ar, __le32_to_cpu(ev->temperature));
553 return 0;
554}
555
/* Handle a TDLS peer event.  For teardown-worthy reasons (tx failure, low
 * RSSI, PTR timeout) look up the peer station and ask mac80211 to tear
 * the TDLS link down; all other reasons are ignored.
 *
 * Locking: rcu_read_lock() is taken only inside the teardown branch; the
 * default branch returns before the exit label so it never unlocks an
 * RCU section it did not enter.
 */
static void ath10k_wmi_event_tdls_peer(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_sta *station;
	const struct wmi_tlv_tdls_peer_event *ev;
	const void **tb;
	struct ath10k_vif *arvif;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ath10k_warn(ar, "tdls peer failed to parse tlv");
		return;
	}
	ev = tb[WMI_TLV_TAG_STRUCT_TDLS_PEER_EVENT];
	if (!ev) {
		kfree(tb);
		ath10k_warn(ar, "tdls peer NULL event");
		return;
	}

	switch (__le32_to_cpu(ev->peer_reason)) {
	case WMI_TDLS_TEARDOWN_REASON_TX:
	case WMI_TDLS_TEARDOWN_REASON_RSSI:
	case WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT:
		rcu_read_lock();
		station = ieee80211_find_sta_by_ifaddr(ar->hw,
						       ev->peer_macaddr.addr,
						       NULL);
		if (!station) {
			ath10k_warn(ar, "did not find station from tdls peer event");
			goto exit;
		}

		arvif = ath10k_get_arvif(ar, __le32_to_cpu(ev->vdev_id));
		if (!arvif) {
			ath10k_warn(ar, "no vif for vdev_id %d found",
				    __le32_to_cpu(ev->vdev_id));
			goto exit;
		}

		ieee80211_tdls_oper_request(
					arvif->vif, station->addr,
					NL80211_TDLS_TEARDOWN,
					WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE,
					GFP_ATOMIC
					);
		break;
	default:
		/* Unhandled reason: free and bail without touching RCU. */
		kfree(tb);
		return;
	}

exit:
	rcu_read_unlock();
	kfree(tb);
}
611
/* Handle a peer delete response: log the reported vdev id and peer MAC,
 * then wake the waiter on peer_delete_done.
 *
 * NOTE(review): the TLV payload is dereferenced without checking skb->len
 * or the TLV tag/length -- this assumes firmware always sends a
 * well-formed event; confirm before relying on the logged fields.
 */
static int ath10k_wmi_tlv_event_peer_delete_resp(struct ath10k *ar,
						 struct sk_buff *skb)
{
	struct wmi_peer_delete_resp_ev_arg *arg;
	struct wmi_tlv *tlv_hdr;

	tlv_hdr = (struct wmi_tlv *)skb->data;
	arg = (struct wmi_peer_delete_resp_ev_arg *)tlv_hdr->value;

	ath10k_dbg(ar, ATH10K_DBG_WMI, "vdev id %d", arg->vdev_id);
	ath10k_dbg(ar, ATH10K_DBG_WMI, "peer mac addr %pM", &arg->peer_addr);
	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer delete response\n");

	complete(&ar->peer_delete_done);

	return 0;
}
629
630/***********/
631/* TLV ops */
632/***********/
633
/* Top-level WMI TLV event dispatcher.
 *
 * Strips the wmi_cmd_hdr from @skb, gives testmode a chance to consume
 * the event, then routes the payload to the per-event handler.
 *
 * skb ownership: freed here via the out label, except for the handlers
 * that return early (mgmt_rx and service_ready) which take over the skb.
 */
static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_cmd_hdr *cmd_hdr;
	enum wmi_tlv_event_id id;
	bool consumed;

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);

	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
		goto out;

	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);

	consumed = ath10k_tm_event_wmi(ar, id, skb);

	/* Ready event must be handled normally also in UTF mode so that we
	 * know the UTF firmware has booted, others we are just bypass WMI
	 * events to testmode.
	 */
	if (consumed && id != WMI_TLV_READY_EVENTID) {
		ath10k_dbg(ar, ATH10K_DBG_WMI,
			   "wmi tlv testmode consumed 0x%x\n", id);
		goto out;
	}

	switch (id) {
	case WMI_TLV_MGMT_RX_EVENTID:
		ath10k_wmi_event_mgmt_rx(ar, skb);
		/* mgmt_rx() owns the skb now! */
		return;
	case WMI_TLV_SCAN_EVENTID:
		ath10k_wmi_event_scan(ar, skb);
		break;
	case WMI_TLV_CHAN_INFO_EVENTID:
		ath10k_wmi_event_chan_info(ar, skb);
		break;
	case WMI_TLV_ECHO_EVENTID:
		ath10k_wmi_event_echo(ar, skb);
		break;
	case WMI_TLV_DEBUG_MESG_EVENTID:
		ath10k_wmi_event_debug_mesg(ar, skb);
		break;
	case WMI_TLV_UPDATE_STATS_EVENTID:
		ath10k_wmi_event_update_stats(ar, skb);
		break;
	case WMI_TLV_PEER_STATS_INFO_EVENTID:
		ath10k_wmi_tlv_event_peer_stats_info(ar, skb);
		break;
	case WMI_TLV_VDEV_START_RESP_EVENTID:
		ath10k_wmi_event_vdev_start_resp(ar, skb);
		break;
	case WMI_TLV_VDEV_STOPPED_EVENTID:
		ath10k_wmi_event_vdev_stopped(ar, skb);
		break;
	case WMI_TLV_VDEV_DELETE_RESP_EVENTID:
		ath10k_wmi_tlv_event_vdev_delete_resp(ar, skb);
		break;
	case WMI_TLV_PEER_STA_KICKOUT_EVENTID:
		ath10k_wmi_event_peer_sta_kickout(ar, skb);
		break;
	case WMI_TLV_HOST_SWBA_EVENTID:
		ath10k_wmi_event_host_swba(ar, skb);
		break;
	case WMI_TLV_TBTTOFFSET_UPDATE_EVENTID:
		ath10k_wmi_event_tbttoffset_update(ar, skb);
		break;
	case WMI_TLV_PHYERR_EVENTID:
		ath10k_wmi_event_phyerr(ar, skb);
		break;
	case WMI_TLV_ROAM_EVENTID:
		ath10k_wmi_event_roam(ar, skb);
		break;
	case WMI_TLV_PROFILE_MATCH:
		ath10k_wmi_event_profile_match(ar, skb);
		break;
	case WMI_TLV_DEBUG_PRINT_EVENTID:
		ath10k_wmi_event_debug_print(ar, skb);
		break;
	case WMI_TLV_PDEV_QVIT_EVENTID:
		ath10k_wmi_event_pdev_qvit(ar, skb);
		break;
	case WMI_TLV_WLAN_PROFILE_DATA_EVENTID:
		ath10k_wmi_event_wlan_profile_data(ar, skb);
		break;
	case WMI_TLV_RTT_MEASUREMENT_REPORT_EVENTID:
		ath10k_wmi_event_rtt_measurement_report(ar, skb);
		break;
	case WMI_TLV_TSF_MEASUREMENT_REPORT_EVENTID:
		ath10k_wmi_event_tsf_measurement_report(ar, skb);
		break;
	case WMI_TLV_RTT_ERROR_REPORT_EVENTID:
		ath10k_wmi_event_rtt_error_report(ar, skb);
		break;
	case WMI_TLV_WOW_WAKEUP_HOST_EVENTID:
		ath10k_wmi_event_wow_wakeup_host(ar, skb);
		break;
	case WMI_TLV_DCS_INTERFERENCE_EVENTID:
		ath10k_wmi_event_dcs_interference(ar, skb);
		break;
	case WMI_TLV_PDEV_TPC_CONFIG_EVENTID:
		ath10k_wmi_event_pdev_tpc_config(ar, skb);
		break;
	case WMI_TLV_PDEV_FTM_INTG_EVENTID:
		ath10k_wmi_event_pdev_ftm_intg(ar, skb);
		break;
	case WMI_TLV_GTK_OFFLOAD_STATUS_EVENTID:
		ath10k_wmi_event_gtk_offload_status(ar, skb);
		break;
	case WMI_TLV_GTK_REKEY_FAIL_EVENTID:
		ath10k_wmi_event_gtk_rekey_fail(ar, skb);
		break;
	case WMI_TLV_TX_DELBA_COMPLETE_EVENTID:
		ath10k_wmi_event_delba_complete(ar, skb);
		break;
	case WMI_TLV_TX_ADDBA_COMPLETE_EVENTID:
		ath10k_wmi_event_addba_complete(ar, skb);
		break;
	case WMI_TLV_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
		ath10k_wmi_event_vdev_install_key_complete(ar, skb);
		break;
	case WMI_TLV_SERVICE_READY_EVENTID:
		ath10k_wmi_event_service_ready(ar, skb);
		/* handler returns without freeing here -- presumably it
		 * keeps the skb; confirm against wmi.c before changing.
		 */
		return;
	case WMI_TLV_READY_EVENTID:
		ath10k_wmi_event_ready(ar, skb);
		break;
	case WMI_TLV_SERVICE_AVAILABLE_EVENTID:
		ath10k_wmi_event_service_available(ar, skb);
		break;
	case WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID:
		ath10k_wmi_tlv_event_bcn_tx_status(ar, skb);
		break;
	case WMI_TLV_DIAG_DATA_CONTAINER_EVENTID:
		ath10k_wmi_tlv_event_diag_data(ar, skb);
		break;
	case WMI_TLV_DIAG_EVENTID:
		ath10k_wmi_tlv_event_diag(ar, skb);
		break;
	case WMI_TLV_P2P_NOA_EVENTID:
		ath10k_wmi_tlv_event_p2p_noa(ar, skb);
		break;
	case WMI_TLV_TX_PAUSE_EVENTID:
		ath10k_wmi_tlv_event_tx_pause(ar, skb);
		break;
	case WMI_TLV_RFKILL_STATE_CHANGE_EVENTID:
		ath10k_wmi_tlv_event_rfkill_state_change(ar, skb);
		break;
	case WMI_TLV_PDEV_TEMPERATURE_EVENTID:
		ath10k_wmi_tlv_event_temperature(ar, skb);
		break;
	case WMI_TLV_TDLS_PEER_EVENTID:
		ath10k_wmi_event_tdls_peer(ar, skb);
		break;
	case WMI_TLV_PEER_DELETE_RESP_EVENTID:
		ath10k_wmi_tlv_event_peer_delete_resp(ar, skb);
		break;
	case WMI_TLV_MGMT_TX_COMPLETION_EVENTID:
		ath10k_wmi_event_mgmt_tx_compl(ar, skb);
		break;
	case WMI_TLV_MGMT_TX_BUNDLE_COMPLETION_EVENTID:
		ath10k_wmi_event_mgmt_tx_bundle_compl(ar, skb);
		break;
	default:
		ath10k_dbg(ar, ATH10K_DBG_WMI, "Unknown eventid: %d\n", id);
		break;
	}

out:
	dev_kfree_skb(skb);
}
805
806static int ath10k_wmi_tlv_op_pull_scan_ev(struct ath10k *ar,
807 struct sk_buff *skb,
808 struct wmi_scan_ev_arg *arg)
809{
810 const void **tb;
811 const struct wmi_scan_event *ev;
812 int ret;
813
814 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
815 if (IS_ERR(tb)) {
816 ret = PTR_ERR(tb);
817 ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
818 return ret;
819 }
820
821 ev = tb[WMI_TLV_TAG_STRUCT_SCAN_EVENT];
822 if (!ev) {
823 kfree(tb);
824 return -EPROTO;
825 }
826
827 arg->event_type = ev->event_type;
828 arg->reason = ev->reason;
829 arg->channel_freq = ev->channel_freq;
830 arg->scan_req_id = ev->scan_req_id;
831 arg->scan_id = ev->scan_id;
832 arg->vdev_id = ev->vdev_id;
833
834 kfree(tb);
835 return 0;
836}
837
838static int
839ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev(struct ath10k *ar, struct sk_buff *skb,
840 struct wmi_tlv_mgmt_tx_compl_ev_arg *arg)
841{
842 const void **tb;
843 const struct wmi_tlv_mgmt_tx_compl_ev *ev;
844 int ret;
845
846 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
847 if (IS_ERR(tb)) {
848 ret = PTR_ERR(tb);
849 ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
850 return ret;
851 }
852
853 ev = tb[WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_EVENT];
854
855 arg->desc_id = ev->desc_id;
856 arg->status = ev->status;
857 arg->pdev_id = ev->pdev_id;
858 arg->ppdu_id = ev->ppdu_id;
859
860 if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
861 arg->ack_rssi = ev->ack_rssi;
862
863 kfree(tb);
864 return 0;
865}
866
/* Scratch state for parsing a bundled TX-completion event.  The event
 * carries several WMI_TLV_TAG_ARRAY_UINT32 TLVs whose meaning is defined
 * purely by their order; the *_done flags track which arrays have already
 * been claimed during iteration.
 */
struct wmi_tlv_tx_bundle_compl_parse {
	const __le32 *num_reports;	/* from the bundle event header TLV */
	const __le32 *desc_ids;		/* 1st UINT32 array */
	const __le32 *status;		/* 2nd UINT32 array */
	const __le32 *ppdu_ids;		/* 3rd UINT32 array */
	const __le32 *ack_rssi;		/* 4th UINT32 array (optional) */
	bool desc_ids_done;
	bool status_done;
	bool ppdu_ids_done;
	bool ack_rssi_done;
};
878
879static int
880ath10k_wmi_tlv_mgmt_tx_bundle_compl_parse(struct ath10k *ar, u16 tag, u16 len,
881 const void *ptr, void *data)
882{
883 struct wmi_tlv_tx_bundle_compl_parse *bundle_tx_compl = data;
884
885 switch (tag) {
886 case WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_BUNDLE_EVENT:
887 bundle_tx_compl->num_reports = ptr;
888 break;
889 case WMI_TLV_TAG_ARRAY_UINT32:
890 if (!bundle_tx_compl->desc_ids_done) {
891 bundle_tx_compl->desc_ids_done = true;
892 bundle_tx_compl->desc_ids = ptr;
893 } else if (!bundle_tx_compl->status_done) {
894 bundle_tx_compl->status_done = true;
895 bundle_tx_compl->status = ptr;
896 } else if (!bundle_tx_compl->ppdu_ids_done) {
897 bundle_tx_compl->ppdu_ids_done = true;
898 bundle_tx_compl->ppdu_ids = ptr;
899 } else if (!bundle_tx_compl->ack_rssi_done) {
900 bundle_tx_compl->ack_rssi_done = true;
901 bundle_tx_compl->ack_rssi = ptr;
902 }
903 break;
904 default:
905 break;
906 }
907 return 0;
908}
909
/* Pull a bundled management TX completion event into @arg.
 *
 * Requires the bundle header plus at least the desc_ids and status
 * arrays; ppdu_ids and ack_rssi are optional.  Pointers in @arg alias the
 * skb payload, so they are only valid while @skb is alive.
 */
static int ath10k_wmi_tlv_op_pull_mgmt_tx_bundle_compl_ev(
				struct ath10k *ar, struct sk_buff *skb,
				struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg *arg)
{
	struct wmi_tlv_tx_bundle_compl_parse bundle_tx_compl = { };
	int ret;

	ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
				  ath10k_wmi_tlv_mgmt_tx_bundle_compl_parse,
				  &bundle_tx_compl);
	if (ret) {
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	if (!bundle_tx_compl.num_reports || !bundle_tx_compl.desc_ids ||
	    !bundle_tx_compl.status)
		return -EPROTO;

	/* num_reports stays in wire byte order, like the array pointers. */
	arg->num_reports = *bundle_tx_compl.num_reports;
	arg->desc_ids = bundle_tx_compl.desc_ids;
	arg->status = bundle_tx_compl.status;
	arg->ppdu_ids = bundle_tx_compl.ppdu_ids;

	if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
		arg->ack_rssi = bundle_tx_compl.ack_rssi;

	return 0;
}
939
/* Pull a management RX event: copy the header fields into @arg and then
 * reshape @skb so that its data pointer lands on the received 802.11
 * frame itself (length msdu_len), ready for hand-off to mac80211.
 */
static int ath10k_wmi_tlv_op_pull_mgmt_rx_ev(struct ath10k *ar,
					     struct sk_buff *skb,
					     struct wmi_mgmt_rx_ev_arg *arg)
{
	const void **tb;
	const struct wmi_tlv_mgmt_rx_ev *ev;
	const u8 *frame;
	u32 msdu_len;
	int ret, i;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_MGMT_RX_HDR];
	frame = tb[WMI_TLV_TAG_ARRAY_BYTE];

	if (!ev || !frame) {
		kfree(tb);
		return -EPROTO;
	}

	/* Header fields are kept in wire byte order for the caller. */
	arg->channel = ev->channel;
	arg->buf_len = ev->buf_len;
	arg->status = ev->status;
	arg->snr = ev->snr;
	arg->phy_mode = ev->phy_mode;
	arg->rate = ev->rate;

	for (i = 0; i < ARRAY_SIZE(ev->rssi); i++)
		arg->rssi[i] = ev->rssi[i];

	msdu_len = __le32_to_cpu(arg->buf_len);

	/* The claimed frame must fit entirely inside the skb. */
	if (skb->len < (frame - skb->data) + msdu_len) {
		kfree(tb);
		return -EPROTO;
	}

	/* shift the sk_buff to point to `frame`: empty it, extend to the
	 * frame start, drop that prefix, then re-extend by the frame size.
	 */
	skb_trim(skb, 0);
	skb_put(skb, frame - skb->data);
	skb_pull(skb, frame - skb->data);
	skb_put(skb, msdu_len);

	kfree(tb);
	return 0;
}
991
992static int ath10k_wmi_tlv_op_pull_ch_info_ev(struct ath10k *ar,
993 struct sk_buff *skb,
994 struct wmi_ch_info_ev_arg *arg)
995{
996 const void **tb;
997 const struct wmi_tlv_chan_info_event *ev;
998 int ret;
999
1000 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1001 if (IS_ERR(tb)) {
1002 ret = PTR_ERR(tb);
1003 ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1004 return ret;
1005 }
1006
1007 ev = tb[WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT];
1008 if (!ev) {
1009 kfree(tb);
1010 return -EPROTO;
1011 }
1012
1013 arg->err_code = ev->err_code;
1014 arg->freq = ev->freq;
1015 arg->cmd_flags = ev->cmd_flags;
1016 arg->noise_floor = ev->noise_floor;
1017 arg->rx_clear_count = ev->rx_clear_count;
1018 arg->cycle_count = ev->cycle_count;
1019 if (test_bit(ATH10K_FW_FEATURE_SINGLE_CHAN_INFO_PER_CHANNEL,
1020 ar->running_fw->fw_file.fw_features))
1021 arg->mac_clk_mhz = ev->mac_clk_mhz;
1022
1023 kfree(tb);
1024 return 0;
1025}
1026
/* Pull a vdev start response into @arg (fields left in wire byte order).
 * Returns -EPROTO when the response TLV is missing.
 */
static int
ath10k_wmi_tlv_op_pull_vdev_start_ev(struct ath10k *ar, struct sk_buff *skb,
				     struct wmi_vdev_start_ev_arg *arg)
{
	const void **tb;
	const struct wmi_vdev_start_response_event *ev;
	int ret;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT];
	if (!ev) {
		kfree(tb);
		return -EPROTO;
	}

	/* NOTE(review): this pull advances the skb even though the fields
	 * below are read through tb, not the skb -- presumably for later
	 * consumers of the remaining payload; confirm against callers.
	 */
	skb_pull(skb, sizeof(*ev));
	arg->vdev_id = ev->vdev_id;
	arg->req_id = ev->req_id;
	arg->resp_type = ev->resp_type;
	arg->status = ev->status;

	kfree(tb);
	return 0;
}
1057
1058static int ath10k_wmi_tlv_op_pull_peer_kick_ev(struct ath10k *ar,
1059 struct sk_buff *skb,
1060 struct wmi_peer_kick_ev_arg *arg)
1061{
1062 const void **tb;
1063 const struct wmi_peer_sta_kickout_event *ev;
1064 int ret;
1065
1066 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1067 if (IS_ERR(tb)) {
1068 ret = PTR_ERR(tb);
1069 ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1070 return ret;
1071 }
1072
1073 ev = tb[WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT];
1074 if (!ev) {
1075 kfree(tb);
1076 return -EPROTO;
1077 }
1078
1079 arg->mac_addr = ev->peer_macaddr.addr;
1080
1081 kfree(tb);
1082 return 0;
1083}
1084
/* Scratch state for parsing a host SWBA (software beacon alert) event.
 * The event carries two ARRAY_STRUCT TLVs, in order: TIM infos then NoA
 * infos; tim_done/noa_done track which array is currently expected.
 */
struct wmi_tlv_swba_parse {
	const struct wmi_host_swba_event *ev;
	bool tim_done;
	bool noa_done;
	size_t n_tim;	/* TIM entries collected so far */
	size_t n_noa;	/* NoA entries collected so far */
	struct wmi_swba_ev_arg *arg;
};
1093
/* Inner iterator callback for the SWBA TIM array: validate one TIM info
 * record and copy its fields into the next free slot of the arg's
 * tim_info array.
 */
static int ath10k_wmi_tlv_swba_tim_parse(struct ath10k *ar, u16 tag, u16 len,
					 const void *ptr, void *data)
{
	struct wmi_tlv_swba_parse *swba = data;
	struct wmi_tim_info_arg *tim_info_arg;
	const struct wmi_tim_info *tim_info_ev = ptr;

	if (tag != WMI_TLV_TAG_STRUCT_TIM_INFO)
		return -EPROTO;

	if (swba->n_tim >= ARRAY_SIZE(swba->arg->tim_info))
		return -ENOBUFS;

	/* The firmware-supplied bitmap length must fit the fixed-size
	 * bitmap field or the record cannot be trusted.
	 */
	if (__le32_to_cpu(tim_info_ev->tim_len) >
			sizeof(tim_info_ev->tim_bitmap)) {
		ath10k_warn(ar, "refusing to parse invalid swba structure\n");
		return -EPROTO;
	}

	tim_info_arg = &swba->arg->tim_info[swba->n_tim];
	tim_info_arg->tim_len = tim_info_ev->tim_len;
	tim_info_arg->tim_mcast = tim_info_ev->tim_mcast;
	tim_info_arg->tim_bitmap = tim_info_ev->tim_bitmap;
	tim_info_arg->tim_changed = tim_info_ev->tim_changed;
	tim_info_arg->tim_num_ps_pending = tim_info_ev->tim_num_ps_pending;

	swba->n_tim++;

	return 0;
}
1124
1125static int ath10k_wmi_tlv_swba_noa_parse(struct ath10k *ar, u16 tag, u16 len,
1126 const void *ptr, void *data)
1127{
1128 struct wmi_tlv_swba_parse *swba = data;
1129
1130 if (tag != WMI_TLV_TAG_STRUCT_P2P_NOA_INFO)
1131 return -EPROTO;
1132
1133 if (swba->n_noa >= ARRAY_SIZE(swba->arg->noa_info))
1134 return -ENOBUFS;
1135
1136 swba->arg->noa_info[swba->n_noa++] = ptr;
1137 return 0;
1138}
1139
/* Outer iterator callback for a SWBA event: capture the event header and
 * dispatch the two ARRAY_STRUCT TLVs -- by arrival order -- to the TIM
 * and NoA sub-parsers.  Unknown tags are ignored.
 */
static int ath10k_wmi_tlv_swba_parse(struct ath10k *ar, u16 tag, u16 len,
				     const void *ptr, void *data)
{
	struct wmi_tlv_swba_parse *swba = data;
	int ret;

	switch (tag) {
	case WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT:
		swba->ev = ptr;
		break;
	case WMI_TLV_TAG_ARRAY_STRUCT:
		/* First array is TIM infos, second is NoA infos. */
		if (!swba->tim_done) {
			swba->tim_done = true;
			ret = ath10k_wmi_tlv_iter(ar, ptr, len,
						  ath10k_wmi_tlv_swba_tim_parse,
						  swba);
			if (ret)
				return ret;
		} else if (!swba->noa_done) {
			swba->noa_done = true;
			ret = ath10k_wmi_tlv_iter(ar, ptr, len,
						  ath10k_wmi_tlv_swba_noa_parse,
						  swba);
			if (ret)
				return ret;
		}
		break;
	default:
		break;
	}
	return 0;
}
1172
1173static int ath10k_wmi_tlv_op_pull_swba_ev(struct ath10k *ar,
1174 struct sk_buff *skb,
1175 struct wmi_swba_ev_arg *arg)
1176{
1177 struct wmi_tlv_swba_parse swba = { .arg = arg };
1178 u32 map;
1179 size_t n_vdevs;
1180 int ret;
1181
1182 ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
1183 ath10k_wmi_tlv_swba_parse, &swba);
1184 if (ret) {
1185 ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1186 return ret;
1187 }
1188
1189 if (!swba.ev)
1190 return -EPROTO;
1191
1192 arg->vdev_map = swba.ev->vdev_map;
1193
1194 for (map = __le32_to_cpu(arg->vdev_map), n_vdevs = 0; map; map >>= 1)
1195 if (map & BIT(0))
1196 n_vdevs++;
1197
1198 if (n_vdevs != swba.n_tim ||
1199 n_vdevs != swba.n_noa)
1200 return -EPROTO;
1201
1202 return 0;
1203}
1204
1205static int ath10k_wmi_tlv_op_pull_phyerr_ev_hdr(struct ath10k *ar,
1206 struct sk_buff *skb,
1207 struct wmi_phyerr_hdr_arg *arg)
1208{
1209 const void **tb;
1210 const struct wmi_tlv_phyerr_ev *ev;
1211 const void *phyerrs;
1212 int ret;
1213
1214 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1215 if (IS_ERR(tb)) {
1216 ret = PTR_ERR(tb);
1217 ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1218 return ret;
1219 }
1220
1221 ev = tb[WMI_TLV_TAG_STRUCT_COMB_PHYERR_RX_HDR];
1222 phyerrs = tb[WMI_TLV_TAG_ARRAY_BYTE];
1223
1224 if (!ev || !phyerrs) {
1225 kfree(tb);
1226 return -EPROTO;
1227 }
1228
1229 arg->num_phyerrs = __le32_to_cpu(ev->num_phyerrs);
1230 arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32);
1231 arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32);
1232 arg->buf_len = __le32_to_cpu(ev->buf_len);
1233 arg->phyerrs = phyerrs;
1234
1235 kfree(tb);
1236 return 0;
1237}
1238
/* ABI version namespace identifier exchanged with the firmware; the
 * little-endian bytes of NS0/NS1 spell out "QCA_ML".
 */
#define WMI_TLV_ABI_VER_NS0 0x5F414351
#define WMI_TLV_ABI_VER_NS1 0x00004C4D
#define WMI_TLV_ABI_VER_NS2 0x00000000
#define WMI_TLV_ABI_VER_NS3 0x00000000

/* ABI version 0 packs the major version in the top byte and the minor
 * version in the low 24 bits.
 */
#define WMI_TLV_ABI_VER0_MAJOR 1
#define WMI_TLV_ABI_VER0_MINOR 0
#define WMI_TLV_ABI_VER0 ((((WMI_TLV_ABI_VER0_MAJOR) << 24) & 0xFF000000) | \
			  (((WMI_TLV_ABI_VER0_MINOR) <<  0) & 0x00FFFFFF))
#define WMI_TLV_ABI_VER1 53
1249
1250static int
1251ath10k_wmi_tlv_parse_mem_reqs(struct ath10k *ar, u16 tag, u16 len,
1252 const void *ptr, void *data)
1253{
1254 struct wmi_svc_rdy_ev_arg *arg = data;
1255 int i;
1256
1257 if (tag != WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ)
1258 return -EPROTO;
1259
1260 for (i = 0; i < ARRAY_SIZE(arg->mem_reqs); i++) {
1261 if (!arg->mem_reqs[i]) {
1262 arg->mem_reqs[i] = ptr;
1263 return 0;
1264 }
1265 }
1266
1267 return -ENOMEM;
1268}
1269
/* Parse state for ath10k_wmi_tlv_svc_rdy_parse(); collects pointers to the
 * individual TLVs of a service ready event as they are visited.
 */
struct wmi_tlv_svc_rdy_parse {
	const struct hal_reg_capabilities *reg;
	const struct wmi_tlv_svc_rdy_ev *ev;
	const __le32 *svc_bmap;
	const struct wlan_host_mem_req *mem_reqs;
	bool svc_bmap_done;	/* first ARRAY_UINT32 TLV (service bitmap) seen */
	bool dbs_hw_mode_done;	/* second ARRAY_UINT32 TLV seen (unused) */
};
1278
1279static int ath10k_wmi_tlv_svc_rdy_parse(struct ath10k *ar, u16 tag, u16 len,
1280 const void *ptr, void *data)
1281{
1282 struct wmi_tlv_svc_rdy_parse *svc_rdy = data;
1283
1284 switch (tag) {
1285 case WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT:
1286 svc_rdy->ev = ptr;
1287 break;
1288 case WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES:
1289 svc_rdy->reg = ptr;
1290 break;
1291 case WMI_TLV_TAG_ARRAY_STRUCT:
1292 svc_rdy->mem_reqs = ptr;
1293 break;
1294 case WMI_TLV_TAG_ARRAY_UINT32:
1295 if (!svc_rdy->svc_bmap_done) {
1296 svc_rdy->svc_bmap_done = true;
1297 svc_rdy->svc_bmap = ptr;
1298 } else if (!svc_rdy->dbs_hw_mode_done) {
1299 svc_rdy->dbs_hw_mode_done = true;
1300 }
1301 break;
1302 default:
1303 break;
1304 }
1305 return 0;
1306}
1307
/* Pull a service ready event out of @skb into @arg.
 *
 * All four mandatory TLVs (fixed event, HAL reg capabilities, service
 * bitmap, host mem request array) must be present, the firmware's ABI
 * version must match ours exactly, and every mem request must fit into
 * arg->mem_reqs.  Numeric fields are copied still in firmware (LE)
 * endianness; callers convert as needed.
 */
static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
					     struct sk_buff *skb,
					     struct wmi_svc_rdy_ev_arg *arg)
{
	const struct hal_reg_capabilities *reg;
	const struct wmi_tlv_svc_rdy_ev *ev;
	const __le32 *svc_bmap;
	const struct wlan_host_mem_req *mem_reqs;
	struct wmi_tlv_svc_rdy_parse svc_rdy = { };
	int ret;

	ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
				  ath10k_wmi_tlv_svc_rdy_parse, &svc_rdy);
	if (ret) {
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = svc_rdy.ev;
	reg = svc_rdy.reg;
	svc_bmap = svc_rdy.svc_bmap;
	mem_reqs = svc_rdy.mem_reqs;

	if (!ev || !reg || !svc_bmap || !mem_reqs)
		return -EPROTO;

	/* This is an internal ABI compatibility check for WMI TLV so check it
	 * here instead of the generic WMI code.
	 */
	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi tlv abi 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x\n",
		   __le32_to_cpu(ev->abi.abi_ver0), WMI_TLV_ABI_VER0,
		   __le32_to_cpu(ev->abi.abi_ver_ns0), WMI_TLV_ABI_VER_NS0,
		   __le32_to_cpu(ev->abi.abi_ver_ns1), WMI_TLV_ABI_VER_NS1,
		   __le32_to_cpu(ev->abi.abi_ver_ns2), WMI_TLV_ABI_VER_NS2,
		   __le32_to_cpu(ev->abi.abi_ver_ns3), WMI_TLV_ABI_VER_NS3);

	/* Any mismatch means the firmware speaks an incompatible dialect. */
	if (__le32_to_cpu(ev->abi.abi_ver0) != WMI_TLV_ABI_VER0 ||
	    __le32_to_cpu(ev->abi.abi_ver_ns0) != WMI_TLV_ABI_VER_NS0 ||
	    __le32_to_cpu(ev->abi.abi_ver_ns1) != WMI_TLV_ABI_VER_NS1 ||
	    __le32_to_cpu(ev->abi.abi_ver_ns2) != WMI_TLV_ABI_VER_NS2 ||
	    __le32_to_cpu(ev->abi.abi_ver_ns3) != WMI_TLV_ABI_VER_NS3) {
		return -ENOTSUPP;
	}

	arg->min_tx_power = ev->hw_min_tx_power;
	arg->max_tx_power = ev->hw_max_tx_power;
	arg->ht_cap = ev->ht_cap_info;
	arg->vht_cap = ev->vht_cap_info;
	arg->vht_supp_mcs = ev->vht_supp_mcs;
	arg->sw_ver0 = ev->abi.abi_ver0;
	arg->sw_ver1 = ev->abi.abi_ver1;
	arg->fw_build = ev->fw_build_vers;
	arg->phy_capab = ev->phy_capability;
	arg->num_rf_chains = ev->num_rf_chains;
	arg->eeprom_rd = reg->eeprom_rd;
	arg->low_2ghz_chan = reg->low_2ghz_chan;
	arg->high_2ghz_chan = reg->high_2ghz_chan;
	arg->low_5ghz_chan = reg->low_5ghz_chan;
	arg->high_5ghz_chan = reg->high_5ghz_chan;
	arg->num_mem_reqs = ev->num_mem_reqs;
	arg->service_map = svc_bmap;
	arg->service_map_len = ath10k_wmi_tlv_len(svc_bmap);
	arg->sys_cap_info = ev->sys_cap_info;

	/* Distribute the host mem requests into arg->mem_reqs slots. */
	ret = ath10k_wmi_tlv_iter(ar, mem_reqs, ath10k_wmi_tlv_len(mem_reqs),
				  ath10k_wmi_tlv_parse_mem_reqs, arg);
	if (ret) {
		ath10k_warn(ar, "failed to parse mem_reqs tlv: %d\n", ret);
		return ret;
	}

	return 0;
}
1382
1383static int ath10k_wmi_tlv_op_pull_rdy_ev(struct ath10k *ar,
1384 struct sk_buff *skb,
1385 struct wmi_rdy_ev_arg *arg)
1386{
1387 const void **tb;
1388 const struct wmi_tlv_rdy_ev *ev;
1389 int ret;
1390
1391 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1392 if (IS_ERR(tb)) {
1393 ret = PTR_ERR(tb);
1394 ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1395 return ret;
1396 }
1397
1398 ev = tb[WMI_TLV_TAG_STRUCT_READY_EVENT];
1399 if (!ev) {
1400 kfree(tb);
1401 return -EPROTO;
1402 }
1403
1404 arg->sw_version = ev->abi.abi_ver0;
1405 arg->abi_version = ev->abi.abi_ver1;
1406 arg->status = ev->status;
1407 arg->mac_addr = ev->mac_addr.addr;
1408
1409 kfree(tb);
1410 return 0;
1411}
1412
/* TLV callback for a service available event: captures the extended
 * service bitmap.  The TLV payload starts with a 32-bit length word
 * followed by the bitmap itself.
 */
static int ath10k_wmi_tlv_svc_avail_parse(struct ath10k *ar, u16 tag, u16 len,
					  const void *ptr, void *data)
{
	struct wmi_svc_avail_ev_arg *arg = data;

	switch (tag) {
	case WMI_TLV_TAG_STRUCT_SERVICE_AVAILABLE_EVENT:
		arg->service_map_ext_valid = true;
		/* NOTE(review): the embedded length word is not validated
		 * against @len here - presumably consumers bound their reads
		 * by the TLV length; confirm before trusting
		 * service_map_ext_len.
		 */
		arg->service_map_ext_len = *(__le32 *)ptr;
		arg->service_map_ext = ptr + sizeof(__le32);
		return 0;
	default:
		break;
	}

	return 0;
}
1430
1431static int ath10k_wmi_tlv_op_pull_svc_avail(struct ath10k *ar,
1432 struct sk_buff *skb,
1433 struct wmi_svc_avail_ev_arg *arg)
1434{
1435 int ret;
1436
1437 ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
1438 ath10k_wmi_tlv_svc_avail_parse, arg);
1439
1440 if (ret) {
1441 ath10k_warn(ar, "failed to parse svc_avail tlv: %d\n", ret);
1442 return ret;
1443 }
1444
1445 return 0;
1446}
1447
/* Convert one firmware-endian (LE) vdev stats record into the host-endian
 * ath10k_fw_stats_vdev representation.  Scalar counters are copied first,
 * then each per-rate/per-history array element by element.
 */
static void ath10k_wmi_tlv_pull_vdev_stats(const struct wmi_tlv_vdev_stats *src,
					   struct ath10k_fw_stats_vdev *dst)
{
	int i;

	dst->vdev_id = __le32_to_cpu(src->vdev_id);
	dst->beacon_snr = __le32_to_cpu(src->beacon_snr);
	dst->data_snr = __le32_to_cpu(src->data_snr);
	dst->num_rx_frames = __le32_to_cpu(src->num_rx_frames);
	dst->num_rts_fail = __le32_to_cpu(src->num_rts_fail);
	dst->num_rts_success = __le32_to_cpu(src->num_rts_success);
	dst->num_rx_err = __le32_to_cpu(src->num_rx_err);
	dst->num_rx_discard = __le32_to_cpu(src->num_rx_discard);
	dst->num_tx_not_acked = __le32_to_cpu(src->num_tx_not_acked);

	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames); i++)
		dst->num_tx_frames[i] =
			__le32_to_cpu(src->num_tx_frames[i]);

	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_retries); i++)
		dst->num_tx_frames_retries[i] =
			__le32_to_cpu(src->num_tx_frames_retries[i]);

	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_failures); i++)
		dst->num_tx_frames_failures[i] =
			__le32_to_cpu(src->num_tx_frames_failures[i]);

	for (i = 0; i < ARRAY_SIZE(src->tx_rate_history); i++)
		dst->tx_rate_history[i] =
			__le32_to_cpu(src->tx_rate_history[i]);

	for (i = 0; i < ARRAY_SIZE(src->beacon_rssi_history); i++)
		dst->beacon_rssi_history[i] =
			__le32_to_cpu(src->beacon_rssi_history[i]);
}
1483
/* Parse a WMI-TLV stats event into @stats.
 *
 * The event consists of a wmi_tlv_stats_ev header TLV plus one byte-array
 * TLV that packs, in order: pdev stats, vdev stats, then peer stats
 * records.  Each record is consumed from the front of @data with an
 * explicit length check so that a short buffer yields -EPROTO instead of
 * reading out of bounds.  Per-entry allocations that fail are silently
 * skipped (best effort), matching the other stats pull implementations.
 */
static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
					   struct sk_buff *skb,
					   struct ath10k_fw_stats *stats)
{
	const void **tb;
	const struct wmi_tlv_stats_ev *ev;
	u32 num_peer_stats_extd;
	const void *data;
	u32 num_pdev_stats;
	u32 num_vdev_stats;
	u32 num_peer_stats;
	u32 num_bcnflt_stats;
	u32 num_chan_stats;
	size_t data_len;
	u32 stats_id;
	int ret;
	int i;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_STATS_EVENT];
	data = tb[WMI_TLV_TAG_ARRAY_BYTE];

	if (!ev || !data) {
		kfree(tb);
		return -EPROTO;
	}

	data_len = ath10k_wmi_tlv_len(data);
	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
	num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
	num_bcnflt_stats = __le32_to_cpu(ev->num_bcnflt_stats);
	num_chan_stats = __le32_to_cpu(ev->num_chan_stats);
	stats_id = __le32_to_cpu(ev->stats_id);
	num_peer_stats_extd = __le32_to_cpu(ev->num_peer_stats_extd);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi tlv stats update pdev %i vdev %i peer %i bcnflt %i chan %i peer_extd %i\n",
		   num_pdev_stats, num_vdev_stats, num_peer_stats,
		   num_bcnflt_stats, num_chan_stats, num_peer_stats_extd);

	for (i = 0; i < num_pdev_stats; i++) {
		const struct wmi_pdev_stats *src;
		struct ath10k_fw_stats_pdev *dst;

		src = data;
		if (data_len < sizeof(*src)) {
			kfree(tb);
			return -EPROTO;
		}

		/* Pop this record off the front of the byte array. */
		data += sizeof(*src);
		data_len -= sizeof(*src);

		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
		if (!dst)
			continue;

		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
		ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
		list_add_tail(&dst->list, &stats->pdevs);
	}

	for (i = 0; i < num_vdev_stats; i++) {
		const struct wmi_tlv_vdev_stats *src;
		struct ath10k_fw_stats_vdev *dst;

		src = data;
		if (data_len < sizeof(*src)) {
			kfree(tb);
			return -EPROTO;
		}

		data += sizeof(*src);
		data_len -= sizeof(*src);

		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
		if (!dst)
			continue;

		ath10k_wmi_tlv_pull_vdev_stats(src, dst);
		list_add_tail(&dst->list, &stats->vdevs);
	}

	for (i = 0; i < num_peer_stats; i++) {
		const struct wmi_10x_peer_stats *src;
		struct ath10k_fw_stats_peer *dst;

		src = data;
		if (data_len < sizeof(*src)) {
			kfree(tb);
			return -EPROTO;
		}

		data += sizeof(*src);
		data_len -= sizeof(*src);

		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
		if (!dst)
			continue;

		ath10k_wmi_pull_peer_stats(&src->old, dst);
		dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);

		if (stats_id & WMI_TLV_STAT_PEER_EXTD) {
			const struct wmi_tlv_peer_stats_extd *extd;
			unsigned long rx_duration_high;

			/* Extended records are laid out after the whole
			 * array of regular peer records: skip the remaining
			 * regular records (data already points past this
			 * peer's), then index the i-th extended record.
			 */
			extd = data + sizeof(*src) * (num_peer_stats - i - 1)
			       + sizeof(*extd) * i;

			dst->rx_duration = __le32_to_cpu(extd->rx_duration);
			rx_duration_high = __le32_to_cpu
						(extd->rx_duration_high);

			/* Fold in the upper rx_duration bits only when the
			 * firmware flags them as valid.
			 */
			if (test_bit(WMI_TLV_PEER_RX_DURATION_HIGH_VALID_BIT,
				     &rx_duration_high)) {
				rx_duration_high =
					FIELD_GET(WMI_TLV_PEER_RX_DURATION_HIGH_MASK,
						  rx_duration_high);
				dst->rx_duration |= (u64)rx_duration_high <<
						    WMI_TLV_PEER_RX_DURATION_SHIFT;
			}
		}

		list_add_tail(&dst->list, &stats->peers);
	}

	kfree(tb);
	return 0;
}
1622
1623static int ath10k_wmi_tlv_op_pull_roam_ev(struct ath10k *ar,
1624 struct sk_buff *skb,
1625 struct wmi_roam_ev_arg *arg)
1626{
1627 const void **tb;
1628 const struct wmi_tlv_roam_ev *ev;
1629 int ret;
1630
1631 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1632 if (IS_ERR(tb)) {
1633 ret = PTR_ERR(tb);
1634 ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1635 return ret;
1636 }
1637
1638 ev = tb[WMI_TLV_TAG_STRUCT_ROAM_EVENT];
1639 if (!ev) {
1640 kfree(tb);
1641 return -EPROTO;
1642 }
1643
1644 arg->vdev_id = ev->vdev_id;
1645 arg->reason = ev->reason;
1646 arg->rssi = ev->rssi;
1647
1648 kfree(tb);
1649 return 0;
1650}
1651
1652static int
1653ath10k_wmi_tlv_op_pull_wow_ev(struct ath10k *ar, struct sk_buff *skb,
1654 struct wmi_wow_ev_arg *arg)
1655{
1656 const void **tb;
1657 const struct wmi_tlv_wow_event_info *ev;
1658 int ret;
1659
1660 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1661 if (IS_ERR(tb)) {
1662 ret = PTR_ERR(tb);
1663 ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1664 return ret;
1665 }
1666
1667 ev = tb[WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO];
1668 if (!ev) {
1669 kfree(tb);
1670 return -EPROTO;
1671 }
1672
1673 arg->vdev_id = __le32_to_cpu(ev->vdev_id);
1674 arg->flag = __le32_to_cpu(ev->flag);
1675 arg->wake_reason = __le32_to_cpu(ev->wake_reason);
1676 arg->data_len = __le32_to_cpu(ev->data_len);
1677
1678 kfree(tb);
1679 return 0;
1680}
1681
1682static int ath10k_wmi_tlv_op_pull_echo_ev(struct ath10k *ar,
1683 struct sk_buff *skb,
1684 struct wmi_echo_ev_arg *arg)
1685{
1686 const void **tb;
1687 const struct wmi_echo_event *ev;
1688 int ret;
1689
1690 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1691 if (IS_ERR(tb)) {
1692 ret = PTR_ERR(tb);
1693 ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1694 return ret;
1695 }
1696
1697 ev = tb[WMI_TLV_TAG_STRUCT_ECHO_EVENT];
1698 if (!ev) {
1699 kfree(tb);
1700 return -EPROTO;
1701 }
1702
1703 arg->value = ev->value;
1704
1705 kfree(tb);
1706 return 0;
1707}
1708
1709static struct sk_buff *
1710ath10k_wmi_tlv_op_gen_pdev_suspend(struct ath10k *ar, u32 opt)
1711{
1712 struct wmi_tlv_pdev_suspend *cmd;
1713 struct wmi_tlv *tlv;
1714 struct sk_buff *skb;
1715
1716 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1717 if (!skb)
1718 return ERR_PTR(-ENOMEM);
1719
1720 tlv = (void *)skb->data;
1721 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SUSPEND_CMD);
1722 tlv->len = __cpu_to_le16(sizeof(*cmd));
1723 cmd = (void *)tlv->value;
1724 cmd->opt = __cpu_to_le32(opt);
1725
1726 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev suspend\n");
1727 return skb;
1728}
1729
1730static struct sk_buff *
1731ath10k_wmi_tlv_op_gen_pdev_resume(struct ath10k *ar)
1732{
1733 struct wmi_tlv_resume_cmd *cmd;
1734 struct wmi_tlv *tlv;
1735 struct sk_buff *skb;
1736
1737 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1738 if (!skb)
1739 return ERR_PTR(-ENOMEM);
1740
1741 tlv = (void *)skb->data;
1742 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_RESUME_CMD);
1743 tlv->len = __cpu_to_le16(sizeof(*cmd));
1744 cmd = (void *)tlv->value;
1745 cmd->reserved = __cpu_to_le32(0);
1746
1747 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev resume\n");
1748 return skb;
1749}
1750
/* Build a pdev set regulatory domain command.
 *
 * @rd/@rd2g/@rd5g: regulatory domain codes (combined, 2 GHz, 5 GHz)
 * @ctl2g/@ctl5g: conformance test limits for the two bands
 * @dfs_reg: DFS region
 *
 * NOTE(review): @dfs_reg is accepted but never written into the command -
 * confirm whether the TLV firmware derives the DFS region elsewhere or
 * whether a field assignment is missing here.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_pdev_set_rd(struct ath10k *ar,
				  u16 rd, u16 rd2g, u16 rd5g,
				  u16 ctl2g, u16 ctl5g,
				  enum wmi_dfs_region dfs_reg)
{
	struct wmi_tlv_pdev_set_rd_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	tlv = (void *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_REGDOMAIN_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->regd = __cpu_to_le32(rd);
	cmd->regd_2ghz = __cpu_to_le32(rd2g);
	cmd->regd_5ghz = __cpu_to_le32(rd5g);
	cmd->conform_limit_2ghz = __cpu_to_le32(ctl2g);
	cmd->conform_limit_5ghz = __cpu_to_le32(ctl5g);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set rd\n");
	return skb;
}
1778
/* Report which TxBF configuration scheme this WMI flavour uses; TLV
 * firmware gets its TxBF configuration after association.
 */
static enum wmi_txbf_conf ath10k_wmi_tlv_txbf_conf_scheme(struct ath10k *ar)
{
	return WMI_TXBF_CONF_AFTER_ASSOC;
}
1783
1784static struct sk_buff *
1785ath10k_wmi_tlv_op_gen_pdev_set_param(struct ath10k *ar, u32 param_id,
1786 u32 param_value)
1787{
1788 struct wmi_tlv_pdev_set_param_cmd *cmd;
1789 struct wmi_tlv *tlv;
1790 struct sk_buff *skb;
1791
1792 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1793 if (!skb)
1794 return ERR_PTR(-ENOMEM);
1795
1796 tlv = (void *)skb->data;
1797 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_PARAM_CMD);
1798 tlv->len = __cpu_to_le16(sizeof(*cmd));
1799 cmd = (void *)tlv->value;
1800 cmd->param_id = __cpu_to_le32(param_id);
1801 cmd->param_value = __cpu_to_le32(param_value);
1802
1803 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set param %d value 0x%x\n",
1804 param_id, param_value);
1805 return skb;
1806}
1807
/* Append one WLAN_HOST_MEMORY_CHUNK TLV per allocated host mem chunk to
 * @host_mem_chunks.  The caller must have reserved
 * num_mem_chunks * (sizeof(*tlv) + sizeof(*chunk)) bytes.
 *
 * chunk->ptr carries only the low 32 bits of the DMA address; the upper
 * bits go into ptr_high and only when the firmware advertises the
 * extended-address service, so addresses above 4 GB require that service.
 */
static void
ath10k_wmi_tlv_put_host_mem_chunks(struct ath10k *ar, void *host_mem_chunks)
{
	struct host_memory_chunk_tlv *chunk;
	struct wmi_tlv *tlv;
	dma_addr_t paddr;
	int i;
	__le16 tlv_len, tlv_tag;

	/* Tag and length are identical for every chunk; compute them once. */
	tlv_tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WLAN_HOST_MEMORY_CHUNK);
	tlv_len = __cpu_to_le16(sizeof(*chunk));
	for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
		tlv = host_mem_chunks;
		tlv->tag = tlv_tag;
		tlv->len = tlv_len;
		chunk = (void *)tlv->value;

		chunk->ptr = __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
		chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len);
		chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);

		if (test_bit(WMI_SERVICE_SUPPORT_EXTEND_ADDRESS,
			     ar->wmi.svc_map)) {
			paddr = ar->wmi.mem_chunks[i].paddr;
			chunk->ptr_high = __cpu_to_le32(upper_32_bits(paddr));
		}

		ath10k_dbg(ar, ATH10K_DBG_WMI,
			   "wmi-tlv chunk %d len %d, addr 0x%llx, id 0x%x\n",
			   i,
			   ar->wmi.mem_chunks[i].len,
			   (unsigned long long)ar->wmi.mem_chunks[i].paddr,
			   ar->wmi.mem_chunks[i].req_id);

		/* Advance past this TLV header plus its payload. */
		host_mem_chunks += sizeof(*tlv);
		host_mem_chunks += sizeof(*chunk);
	}
}
1846
/* Build the WMI TLV init command.
 *
 * Buffer layout: [tlv][init_cmd] [tlv][resource_config] [tlv][mem chunks],
 * where the last TLV is an ARRAY_STRUCT holding one nested TLV per host
 * memory chunk (filled in by ath10k_wmi_tlv_put_host_mem_chunks()).
 *
 * Returns the command skb or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
{
	struct sk_buff *skb;
	struct wmi_tlv *tlv;
	struct wmi_tlv_init_cmd *cmd;
	struct wmi_tlv_resource_config *cfg;
	void *chunks;
	size_t len, chunks_len;
	void *ptr;

	chunks_len = ar->wmi.num_mem_chunks *
		     (sizeof(struct host_memory_chunk_tlv) + sizeof(*tlv));
	len = (sizeof(*tlv) + sizeof(*cmd)) +
	      (sizeof(*tlv) + sizeof(*cfg)) +
	      (sizeof(*tlv) + chunks_len);

	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = skb->data;

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_INIT_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_RESOURCE_CONFIG);
	tlv->len = __cpu_to_le16(sizeof(*cfg));
	cfg = (void *)tlv->value;
	ptr += sizeof(*tlv);
	ptr += sizeof(*cfg);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(chunks_len);
	chunks = (void *)tlv->value;

	ptr += sizeof(*tlv);
	ptr += chunks_len;

	/* Advertise our ABI version so the firmware can reject mismatches. */
	cmd->abi.abi_ver0 = __cpu_to_le32(WMI_TLV_ABI_VER0);
	cmd->abi.abi_ver1 = __cpu_to_le32(WMI_TLV_ABI_VER1);
	cmd->abi.abi_ver_ns0 = __cpu_to_le32(WMI_TLV_ABI_VER_NS0);
	cmd->abi.abi_ver_ns1 = __cpu_to_le32(WMI_TLV_ABI_VER_NS1);
	cmd->abi.abi_ver_ns2 = __cpu_to_le32(WMI_TLV_ABI_VER_NS2);
	cmd->abi.abi_ver_ns3 = __cpu_to_le32(WMI_TLV_ABI_VER_NS3);
	cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);

	/* Firmware resource configuration; per-hw values take precedence
	 * over the TLV defaults where available.
	 */
	cfg->num_vdevs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);

	if (ar->hw_params.num_peers)
		cfg->num_peers = __cpu_to_le32(ar->hw_params.num_peers);
	else
		cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS);
	cfg->ast_skid_limit = __cpu_to_le32(ar->hw_params.ast_skid_limit);
	cfg->num_wds_entries = __cpu_to_le32(ar->hw_params.num_wds_entries);

	if (test_bit(WMI_SERVICE_RX_FULL_REORDER, ar->wmi.svc_map)) {
		cfg->num_offload_peers = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
		cfg->num_offload_reorder_bufs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
	} else {
		cfg->num_offload_peers = __cpu_to_le32(0);
		cfg->num_offload_reorder_bufs = __cpu_to_le32(0);
	}

	cfg->num_peer_keys = __cpu_to_le32(2);
	if (ar->hw_params.num_peers)
		cfg->num_tids = __cpu_to_le32(ar->hw_params.num_peers * 2);
	else
		cfg->num_tids = __cpu_to_le32(TARGET_TLV_NUM_TIDS);
	cfg->tx_chain_mask = __cpu_to_le32(0x7);
	cfg->rx_chain_mask = __cpu_to_le32(0x7);
	cfg->rx_timeout_pri[0] = __cpu_to_le32(0x64);
	cfg->rx_timeout_pri[1] = __cpu_to_le32(0x64);
	cfg->rx_timeout_pri[2] = __cpu_to_le32(0x64);
	cfg->rx_timeout_pri[3] = __cpu_to_le32(0x28);
	cfg->rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
	cfg->scan_max_pending_reqs = __cpu_to_le32(4);
	cfg->bmiss_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
	cfg->roam_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
	cfg->roam_offload_max_ap_profiles = __cpu_to_le32(8);
	cfg->num_mcast_groups = __cpu_to_le32(0);
	cfg->num_mcast_table_elems = __cpu_to_le32(0);
	cfg->mcast2ucast_mode = __cpu_to_le32(0);
	cfg->tx_dbg_log_size = __cpu_to_le32(0x400);
	cfg->dma_burst_size = __cpu_to_le32(0);
	cfg->mac_aggr_delim = __cpu_to_le32(0);
	cfg->rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(0);
	cfg->vow_config = __cpu_to_le32(0);
	cfg->gtk_offload_max_vdev = __cpu_to_le32(2);
	cfg->num_msdu_desc = __cpu_to_le32(ar->htt.max_num_pending_tx);
	cfg->max_frag_entries = __cpu_to_le32(2);
	cfg->num_tdls_vdevs = __cpu_to_le32(TARGET_TLV_NUM_TDLS_VDEVS);
	cfg->num_tdls_conn_table_entries = __cpu_to_le32(0x20);
	cfg->beacon_tx_offload_max_vdev = __cpu_to_le32(2);
	cfg->num_multicast_filter_entries = __cpu_to_le32(5);
	cfg->num_wow_filters = __cpu_to_le32(ar->wow.max_num_patterns);
	cfg->num_keep_alive_pattern = __cpu_to_le32(6);
	cfg->keep_alive_pattern_size = __cpu_to_le32(0);
	cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1);
	cfg->max_tdls_concurrent_buffer_sta = __cpu_to_le32(1);
	cfg->wmi_send_separate = __cpu_to_le32(0);
	cfg->num_ocb_vdevs = __cpu_to_le32(0);
	cfg->num_ocb_channels = __cpu_to_le32(0);
	cfg->num_ocb_schedules = __cpu_to_le32(0);
	cfg->host_capab = __cpu_to_le32(WMI_TLV_FLAG_MGMT_BUNDLE_TX_COMPL);

	if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
		cfg->host_capab |= __cpu_to_le32(WMI_RSRC_CFG_FLAG_TX_ACK_RSSI);

	ath10k_wmi_tlv_put_host_mem_chunks(ar, chunks);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv init\n");
	return skb;
}
1966
/* Build a start scan command from @arg.
 *
 * Buffer layout: [tlv][start_scan_cmd] [tlv][u32 channel list]
 * [tlv][ssid list] [tlv][bssid list] [tlv][ie bytes].  The IE TLV is
 * padded to a 4-byte boundary; the ssid/bssid arrays are sized by the
 * verified counts in @arg.
 *
 * Returns the command skb, or ERR_PTR on invalid @arg or allocation
 * failure.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar,
				 const struct wmi_start_scan_arg *arg)
{
	struct wmi_tlv_start_scan_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t len, chan_len, ssid_len, bssid_len, ie_len;
	__le32 *chans;
	struct wmi_ssid *ssids;
	struct wmi_mac_addr *addrs;
	void *ptr;
	int i, ret;

	ret = ath10k_wmi_start_scan_verify(arg);
	if (ret)
		return ERR_PTR(ret);

	chan_len = arg->n_channels * sizeof(__le32);
	ssid_len = arg->n_ssids * sizeof(struct wmi_ssid);
	bssid_len = arg->n_bssids * sizeof(struct wmi_mac_addr);
	ie_len = roundup(arg->ie_len, 4);
	len = (sizeof(*tlv) + sizeof(*cmd)) +
	      sizeof(*tlv) + chan_len +
	      sizeof(*tlv) + ssid_len +
	      sizeof(*tlv) + bssid_len +
	      sizeof(*tlv) + ie_len;

	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_START_SCAN_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;

	ath10k_wmi_put_start_scan_common(&cmd->common, arg);
	cmd->burst_duration_ms = __cpu_to_le32(arg->burst_duration_ms);
	cmd->num_channels = __cpu_to_le32(arg->n_channels);
	cmd->num_ssids = __cpu_to_le32(arg->n_ssids);
	cmd->num_bssids = __cpu_to_le32(arg->n_bssids);
	cmd->ie_len = __cpu_to_le32(arg->ie_len);
	cmd->num_probes = __cpu_to_le32(3);
	ether_addr_copy(cmd->mac_addr.addr, arg->mac_addr.addr);
	ether_addr_copy(cmd->mac_mask.addr, arg->mac_mask.addr);

	/* FIXME: There are some scan flag inconsistencies across firmwares,
	 * e.g. WMI-TLV inverts the logic behind the following flag.
	 */
	cmd->common.scan_ctrl_flags ^= __cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
	tlv->len = __cpu_to_le16(chan_len);
	chans = (void *)tlv->value;
	for (i = 0; i < arg->n_channels; i++)
		chans[i] = __cpu_to_le32(arg->channels[i]);

	ptr += sizeof(*tlv);
	ptr += chan_len;

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_FIXED_STRUCT);
	tlv->len = __cpu_to_le16(ssid_len);
	ssids = (void *)tlv->value;
	for (i = 0; i < arg->n_ssids; i++) {
		ssids[i].ssid_len = __cpu_to_le32(arg->ssids[i].len);
		memcpy(ssids[i].ssid, arg->ssids[i].ssid, arg->ssids[i].len);
	}

	ptr += sizeof(*tlv);
	ptr += ssid_len;

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_FIXED_STRUCT);
	tlv->len = __cpu_to_le16(bssid_len);
	addrs = (void *)tlv->value;
	for (i = 0; i < arg->n_bssids; i++)
		ether_addr_copy(addrs[i].addr, arg->bssids[i].bssid);

	ptr += sizeof(*tlv);
	ptr += bssid_len;

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
	tlv->len = __cpu_to_le16(ie_len);
	/* NOTE(review): presumably arg->ie is non-NULL whenever ie_len > 0
	 * (enforced by ath10k_wmi_start_scan_verify?) - confirm, since
	 * memcpy from NULL would be undefined even for length 0.
	 */
	memcpy(tlv->value, arg->ie, arg->ie_len);

	ptr += sizeof(*tlv);
	ptr += ie_len;

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv start scan\n");
	return skb;
}
2066
2067static struct sk_buff *
2068ath10k_wmi_tlv_op_gen_stop_scan(struct ath10k *ar,
2069 const struct wmi_stop_scan_arg *arg)
2070{
2071 struct wmi_stop_scan_cmd *cmd;
2072 struct wmi_tlv *tlv;
2073 struct sk_buff *skb;
2074 u32 scan_id;
2075 u32 req_id;
2076
2077 if (arg->req_id > 0xFFF)
2078 return ERR_PTR(-EINVAL);
2079 if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
2080 return ERR_PTR(-EINVAL);
2081
2082 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2083 if (!skb)
2084 return ERR_PTR(-ENOMEM);
2085
2086 scan_id = arg->u.scan_id;
2087 scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;
2088
2089 req_id = arg->req_id;
2090 req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
2091
2092 tlv = (void *)skb->data;
2093 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STOP_SCAN_CMD);
2094 tlv->len = __cpu_to_le16(sizeof(*cmd));
2095 cmd = (void *)tlv->value;
2096 cmd->req_type = __cpu_to_le32(arg->req_type);
2097 cmd->vdev_id = __cpu_to_le32(arg->u.vdev_id);
2098 cmd->scan_id = __cpu_to_le32(scan_id);
2099 cmd->scan_req_id = __cpu_to_le32(req_id);
2100
2101 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv stop scan\n");
2102 return skb;
2103}
2104
2105static int ath10k_wmi_tlv_op_get_vdev_subtype(struct ath10k *ar,
2106 enum wmi_vdev_subtype subtype)
2107{
2108 switch (subtype) {
2109 case WMI_VDEV_SUBTYPE_NONE:
2110 return WMI_TLV_VDEV_SUBTYPE_NONE;
2111 case WMI_VDEV_SUBTYPE_P2P_DEVICE:
2112 return WMI_TLV_VDEV_SUBTYPE_P2P_DEV;
2113 case WMI_VDEV_SUBTYPE_P2P_CLIENT:
2114 return WMI_TLV_VDEV_SUBTYPE_P2P_CLI;
2115 case WMI_VDEV_SUBTYPE_P2P_GO:
2116 return WMI_TLV_VDEV_SUBTYPE_P2P_GO;
2117 case WMI_VDEV_SUBTYPE_PROXY_STA:
2118 return WMI_TLV_VDEV_SUBTYPE_PROXY_STA;
2119 case WMI_VDEV_SUBTYPE_MESH_11S:
2120 return WMI_TLV_VDEV_SUBTYPE_MESH_11S;
2121 case WMI_VDEV_SUBTYPE_MESH_NON_11S:
2122 return -ENOTSUPP;
2123 }
2124 return -ENOTSUPP;
2125}
2126
2127static struct sk_buff *
2128ath10k_wmi_tlv_op_gen_vdev_create(struct ath10k *ar,
2129 u32 vdev_id,
2130 enum wmi_vdev_type vdev_type,
2131 enum wmi_vdev_subtype vdev_subtype,
2132 const u8 mac_addr[ETH_ALEN])
2133{
2134 struct wmi_vdev_create_cmd *cmd;
2135 struct wmi_tlv *tlv;
2136 struct sk_buff *skb;
2137
2138 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2139 if (!skb)
2140 return ERR_PTR(-ENOMEM);
2141
2142 tlv = (void *)skb->data;
2143 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_CREATE_CMD);
2144 tlv->len = __cpu_to_le16(sizeof(*cmd));
2145 cmd = (void *)tlv->value;
2146 cmd->vdev_id = __cpu_to_le32(vdev_id);
2147 cmd->vdev_type = __cpu_to_le32(vdev_type);
2148 cmd->vdev_subtype = __cpu_to_le32(vdev_subtype);
2149 ether_addr_copy(cmd->vdev_macaddr.addr, mac_addr);
2150
2151 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev create\n");
2152 return skb;
2153}
2154
2155static struct sk_buff *
2156ath10k_wmi_tlv_op_gen_vdev_delete(struct ath10k *ar, u32 vdev_id)
2157{
2158 struct wmi_vdev_delete_cmd *cmd;
2159 struct wmi_tlv *tlv;
2160 struct sk_buff *skb;
2161
2162 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2163 if (!skb)
2164 return ERR_PTR(-ENOMEM);
2165
2166 tlv = (void *)skb->data;
2167 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_DELETE_CMD);
2168 tlv->len = __cpu_to_le16(sizeof(*cmd));
2169 cmd = (void *)tlv->value;
2170 cmd->vdev_id = __cpu_to_le32(vdev_id);
2171
2172 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev delete\n");
2173 return skb;
2174}
2175
/* Build a vdev start (or restart) command from @arg.
 *
 * Buffer layout: [tlv][vdev_start_cmd] [tlv][wmi_channel]
 * [tlv][empty NoA descriptor array].  The trailing ARRAY_STRUCT TLV is
 * mandatory for the firmware but carries no descriptors here.
 *
 * NOTE(review): @restart is accepted but not referenced in this body -
 * presumably the caller selects the start vs. restart command id when
 * submitting; confirm at the call site.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_vdev_start(struct ath10k *ar,
				 const struct wmi_vdev_start_request_arg *arg,
				 bool restart)
{
	struct wmi_tlv_vdev_start_cmd *cmd;
	struct wmi_channel *ch;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t len;
	void *ptr;
	u32 flags = 0;

	/* A hidden SSID requires an SSID, and the SSID must fit. */
	if (WARN_ON(arg->hidden_ssid && !arg->ssid))
		return ERR_PTR(-EINVAL);
	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
		return ERR_PTR(-EINVAL);

	len = (sizeof(*tlv) + sizeof(*cmd)) +
	      (sizeof(*tlv) + sizeof(*ch)) +
	      (sizeof(*tlv) + 0);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	if (arg->hidden_ssid)
		flags |= WMI_VDEV_START_HIDDEN_SSID;
	if (arg->pmf_enabled)
		flags |= WMI_VDEV_START_PMF_ENABLED;

	ptr = (void *)skb->data;

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_START_REQUEST_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->bcn_intval = __cpu_to_le32(arg->bcn_intval);
	cmd->dtim_period = __cpu_to_le32(arg->dtim_period);
	cmd->flags = __cpu_to_le32(flags);
	cmd->bcn_tx_rate = __cpu_to_le32(arg->bcn_tx_rate);
	cmd->bcn_tx_power = __cpu_to_le32(arg->bcn_tx_power);
	cmd->disable_hw_ack = __cpu_to_le32(arg->disable_hw_ack);

	if (arg->ssid) {
		cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len);
		memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
	}

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
	tlv->len = __cpu_to_le16(sizeof(*ch));
	ch = (void *)tlv->value;
	ath10k_wmi_put_wmi_channel(ar, ch, &arg->channel);

	ptr += sizeof(*tlv);
	ptr += sizeof(*ch);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = 0;

	/* Note: This is a nested TLV containing:
	 * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
	 */

	ptr += sizeof(*tlv);
	ptr += 0;

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev start\n");
	return skb;
}
2251
2252static struct sk_buff *
2253ath10k_wmi_tlv_op_gen_vdev_stop(struct ath10k *ar, u32 vdev_id)
2254{
2255 struct wmi_vdev_stop_cmd *cmd;
2256 struct wmi_tlv *tlv;
2257 struct sk_buff *skb;
2258
2259 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2260 if (!skb)
2261 return ERR_PTR(-ENOMEM);
2262
2263 tlv = (void *)skb->data;
2264 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_STOP_CMD);
2265 tlv->len = __cpu_to_le16(sizeof(*cmd));
2266 cmd = (void *)tlv->value;
2267 cmd->vdev_id = __cpu_to_le32(vdev_id);
2268
2269 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev stop\n");
2270 return skb;
2271}
2272
2273static struct sk_buff *
2274ath10k_wmi_tlv_op_gen_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
2275 const u8 *bssid)
2276
2277{
2278 struct wmi_vdev_up_cmd *cmd;
2279 struct wmi_tlv *tlv;
2280 struct sk_buff *skb;
2281
2282 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2283 if (!skb)
2284 return ERR_PTR(-ENOMEM);
2285
2286 tlv = (void *)skb->data;
2287 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_UP_CMD);
2288 tlv->len = __cpu_to_le16(sizeof(*cmd));
2289 cmd = (void *)tlv->value;
2290 cmd->vdev_id = __cpu_to_le32(vdev_id);
2291 cmd->vdev_assoc_id = __cpu_to_le32(aid);
2292 ether_addr_copy(cmd->vdev_bssid.addr, bssid);
2293
2294 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev up\n");
2295 return skb;
2296}
2297
2298static struct sk_buff *
2299ath10k_wmi_tlv_op_gen_vdev_down(struct ath10k *ar, u32 vdev_id)
2300{
2301 struct wmi_vdev_down_cmd *cmd;
2302 struct wmi_tlv *tlv;
2303 struct sk_buff *skb;
2304
2305 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2306 if (!skb)
2307 return ERR_PTR(-ENOMEM);
2308
2309 tlv = (void *)skb->data;
2310 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_DOWN_CMD);
2311 tlv->len = __cpu_to_le16(sizeof(*cmd));
2312 cmd = (void *)tlv->value;
2313 cmd->vdev_id = __cpu_to_le32(vdev_id);
2314
2315 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev down\n");
2316 return skb;
2317}
2318
2319static struct sk_buff *
2320ath10k_wmi_tlv_op_gen_vdev_set_param(struct ath10k *ar, u32 vdev_id,
2321 u32 param_id, u32 param_value)
2322{
2323 struct wmi_vdev_set_param_cmd *cmd;
2324 struct wmi_tlv *tlv;
2325 struct sk_buff *skb;
2326
2327 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2328 if (!skb)
2329 return ERR_PTR(-ENOMEM);
2330
2331 tlv = (void *)skb->data;
2332 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SET_PARAM_CMD);
2333 tlv->len = __cpu_to_le16(sizeof(*cmd));
2334 cmd = (void *)tlv->value;
2335 cmd->vdev_id = __cpu_to_le32(vdev_id);
2336 cmd->param_id = __cpu_to_le32(param_id);
2337 cmd->param_value = __cpu_to_le32(param_value);
2338
2339 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev %d set param %d value 0x%x\n",
2340 vdev_id, param_id, param_value);
2341 return skb;
2342}
2343
/* Build a WMI TLV install-key command.
 *
 * TLV layout: [fixed install-key cmd][ARRAY_BYTE key material padded to a
 * 4-byte boundary].
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_vdev_install_key(struct ath10k *ar,
				       const struct wmi_vdev_install_key_arg *arg)
{
	struct wmi_vdev_install_key_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t len;
	void *ptr;

	/* Cipher NONE must not carry key material; any real cipher must. */
	if (arg->key_cipher == ar->wmi_key_cipher[WMI_CIPHER_NONE] &&
	    arg->key_data)
		return ERR_PTR(-EINVAL);
	if (arg->key_cipher != ar->wmi_key_cipher[WMI_CIPHER_NONE] &&
	    !arg->key_data)
		return ERR_PTR(-EINVAL);

	/* Key bytes are padded up to a multiple of 4 for the TLV. */
	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) + roundup(arg->key_len, sizeof(__le32));
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_INSTALL_KEY_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->key_idx = __cpu_to_le32(arg->key_idx);
	cmd->key_flags = __cpu_to_le32(arg->key_flags);
	cmd->key_cipher = __cpu_to_le32(arg->key_cipher);
	cmd->key_len = __cpu_to_le32(arg->key_len);
	cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len);
	cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len);

	/* Peer MAC is optional (e.g. group keys leave it unset/zero). */
	if (arg->macaddr)
		ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* Second TLV: raw key bytes; padding bytes remain zeroed. */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
	tlv->len = __cpu_to_le16(roundup(arg->key_len, sizeof(__le32)));
	if (arg->key_data)
		memcpy(tlv->value, arg->key_data, arg->key_len);

	ptr += sizeof(*tlv);
	ptr += roundup(arg->key_len, sizeof(__le32));

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev install key\n");
	return skb;
}
2398
2399static void *ath10k_wmi_tlv_put_uapsd_ac(struct ath10k *ar, void *ptr,
2400 const struct wmi_sta_uapsd_auto_trig_arg *arg)
2401{
2402 struct wmi_sta_uapsd_auto_trig_param *ac;
2403 struct wmi_tlv *tlv;
2404
2405 tlv = ptr;
2406 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_PARAM);
2407 tlv->len = __cpu_to_le16(sizeof(*ac));
2408 ac = (void *)tlv->value;
2409
2410 ac->wmm_ac = __cpu_to_le32(arg->wmm_ac);
2411 ac->user_priority = __cpu_to_le32(arg->user_priority);
2412 ac->service_interval = __cpu_to_le32(arg->service_interval);
2413 ac->suspend_interval = __cpu_to_le32(arg->suspend_interval);
2414 ac->delay_interval = __cpu_to_le32(arg->delay_interval);
2415
2416 ath10k_dbg(ar, ATH10K_DBG_WMI,
2417 "wmi tlv vdev sta uapsd auto trigger ac %d prio %d svc int %d susp int %d delay int %d\n",
2418 ac->wmm_ac, ac->user_priority, ac->service_interval,
2419 ac->suspend_interval, ac->delay_interval);
2420
2421 return ptr + sizeof(*tlv) + sizeof(*ac);
2422}
2423
/* Build a WMI TLV STA UAPSD auto-trigger command.
 *
 * TLV layout: [fixed cmd][ARRAY_STRUCT of num_ac per-AC trigger TLVs],
 * each inner entry emitted by ath10k_wmi_tlv_put_uapsd_ac().
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
				     const u8 peer_addr[ETH_ALEN],
				     const struct wmi_sta_uapsd_auto_trig_arg *args,
				     u32 num_ac)
{
	struct wmi_sta_uapsd_auto_trig_cmd_fixed_param *cmd;
	struct wmi_sta_uapsd_auto_trig_param *ac;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t len;
	size_t ac_tlv_len;
	void *ptr;
	int i;

	/* Each AC entry is its own TLV inside the outer array TLV. */
	ac_tlv_len = num_ac * (sizeof(*tlv) + sizeof(*ac));
	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) + ac_tlv_len;
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->num_ac = __cpu_to_le32(num_ac);
	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(ac_tlv_len);
	/* NOTE(review): dead store — 'ac' is only needed above for sizeof;
	 * the per-entry payloads are written by the helper below.
	 */
	ac = (void *)tlv->value;

	ptr += sizeof(*tlv);
	/* Append one nested TLV per AC; the helper returns the advanced ptr. */
	for (i = 0; i < num_ac; i++)
		ptr = ath10k_wmi_tlv_put_uapsd_ac(ar, ptr, &args[i]);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev sta uapsd auto trigger\n");
	return skb;
}
2470
2471static void *ath10k_wmi_tlv_put_wmm(void *ptr,
2472 const struct wmi_wmm_params_arg *arg)
2473{
2474 struct wmi_wmm_params *wmm;
2475 struct wmi_tlv *tlv;
2476
2477 tlv = ptr;
2478 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WMM_PARAMS);
2479 tlv->len = __cpu_to_le16(sizeof(*wmm));
2480 wmm = (void *)tlv->value;
2481 ath10k_wmi_set_wmm_param(wmm, arg);
2482
2483 return ptr + sizeof(*tlv) + sizeof(*wmm);
2484}
2485
2486static struct sk_buff *
2487ath10k_wmi_tlv_op_gen_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
2488 const struct wmi_wmm_params_all_arg *arg)
2489{
2490 struct wmi_tlv_vdev_set_wmm_cmd *cmd;
2491 struct wmi_tlv *tlv;
2492 struct sk_buff *skb;
2493 size_t len;
2494 void *ptr;
2495
2496 len = sizeof(*tlv) + sizeof(*cmd);
2497 skb = ath10k_wmi_alloc_skb(ar, len);
2498 if (!skb)
2499 return ERR_PTR(-ENOMEM);
2500
2501 ptr = (void *)skb->data;
2502 tlv = ptr;
2503 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SET_WMM_PARAMS_CMD);
2504 tlv->len = __cpu_to_le16(sizeof(*cmd));
2505 cmd = (void *)tlv->value;
2506 cmd->vdev_id = __cpu_to_le32(vdev_id);
2507
2508 ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[0].params, &arg->ac_be);
2509 ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[1].params, &arg->ac_bk);
2510 ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[2].params, &arg->ac_vi);
2511 ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[3].params, &arg->ac_vo);
2512
2513 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev wmm conf\n");
2514 return skb;
2515}
2516
/* Build a WMI TLV STA keepalive command.
 *
 * TLV layout: [fixed keepalive cmd][ARP response template].
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_sta_keepalive(struct ath10k *ar,
				    const struct wmi_sta_keepalive_arg *arg)
{
	struct wmi_tlv_sta_keepalive_cmd *cmd;
	struct wmi_sta_keepalive_arp_resp *arp;
	struct sk_buff *skb;
	struct wmi_tlv *tlv;
	void *ptr;
	size_t len;

	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) + sizeof(*arp);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALIVE_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->enabled = __cpu_to_le32(arg->enabled);
	cmd->method = __cpu_to_le32(arg->method);
	cmd->interval = __cpu_to_le32(arg->interval);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* Second TLV: the ARP response the firmware sends while keeping the
	 * link alive. (Tag spelling "KEEPALVE" matches the firmware ABI.)
	 */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALVE_ARP_RESPONSE);
	tlv->len = __cpu_to_le16(sizeof(*arp));
	arp = (void *)tlv->value;

	/* IPv4 addresses are copied without conversion — presumably the arg
	 * fields already carry the over-the-wire byte order; confirm against
	 * the wmi_sta_keepalive_arg definition.
	 */
	arp->src_ip4_addr = arg->src_ip4_addr;
	arp->dest_ip4_addr = arg->dest_ip4_addr;
	ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv sta keepalive vdev %d enabled %d method %d interval %d\n",
		   arg->vdev_id, arg->enabled, arg->method, arg->interval);
	return skb;
}
2560
2561static struct sk_buff *
2562ath10k_wmi_tlv_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
2563 const u8 peer_addr[ETH_ALEN],
2564 enum wmi_peer_type peer_type)
2565{
2566 struct wmi_tlv_peer_create_cmd *cmd;
2567 struct wmi_tlv *tlv;
2568 struct sk_buff *skb;
2569
2570 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2571 if (!skb)
2572 return ERR_PTR(-ENOMEM);
2573
2574 tlv = (void *)skb->data;
2575 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_CREATE_CMD);
2576 tlv->len = __cpu_to_le16(sizeof(*cmd));
2577 cmd = (void *)tlv->value;
2578 cmd->vdev_id = __cpu_to_le32(vdev_id);
2579 cmd->peer_type = __cpu_to_le32(peer_type);
2580 ether_addr_copy(cmd->peer_addr.addr, peer_addr);
2581
2582 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer create\n");
2583 return skb;
2584}
2585
2586static struct sk_buff *
2587ath10k_wmi_tlv_op_gen_peer_delete(struct ath10k *ar, u32 vdev_id,
2588 const u8 peer_addr[ETH_ALEN])
2589{
2590 struct wmi_peer_delete_cmd *cmd;
2591 struct wmi_tlv *tlv;
2592 struct sk_buff *skb;
2593
2594 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2595 if (!skb)
2596 return ERR_PTR(-ENOMEM);
2597
2598 tlv = (void *)skb->data;
2599 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_DELETE_CMD);
2600 tlv->len = __cpu_to_le16(sizeof(*cmd));
2601 cmd = (void *)tlv->value;
2602 cmd->vdev_id = __cpu_to_le32(vdev_id);
2603 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
2604
2605 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer delete\n");
2606 return skb;
2607}
2608
2609static struct sk_buff *
2610ath10k_wmi_tlv_op_gen_peer_flush(struct ath10k *ar, u32 vdev_id,
2611 const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
2612{
2613 struct wmi_peer_flush_tids_cmd *cmd;
2614 struct wmi_tlv *tlv;
2615 struct sk_buff *skb;
2616
2617 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2618 if (!skb)
2619 return ERR_PTR(-ENOMEM);
2620
2621 tlv = (void *)skb->data;
2622 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_FLUSH_TIDS_CMD);
2623 tlv->len = __cpu_to_le16(sizeof(*cmd));
2624 cmd = (void *)tlv->value;
2625 cmd->vdev_id = __cpu_to_le32(vdev_id);
2626 cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
2627 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
2628
2629 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer flush\n");
2630 return skb;
2631}
2632
2633static struct sk_buff *
2634ath10k_wmi_tlv_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id,
2635 const u8 *peer_addr,
2636 enum wmi_peer_param param_id,
2637 u32 param_value)
2638{
2639 struct wmi_peer_set_param_cmd *cmd;
2640 struct wmi_tlv *tlv;
2641 struct sk_buff *skb;
2642
2643 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2644 if (!skb)
2645 return ERR_PTR(-ENOMEM);
2646
2647 tlv = (void *)skb->data;
2648 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_SET_PARAM_CMD);
2649 tlv->len = __cpu_to_le16(sizeof(*cmd));
2650 cmd = (void *)tlv->value;
2651 cmd->vdev_id = __cpu_to_le32(vdev_id);
2652 cmd->param_id = __cpu_to_le32(param_id);
2653 cmd->param_value = __cpu_to_le32(param_value);
2654 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
2655
2656 ath10k_dbg(ar, ATH10K_DBG_WMI,
2657 "wmi tlv vdev %d peer %pM set param %d value 0x%x\n",
2658 vdev_id, peer_addr, param_id, param_value);
2659 return skb;
2660}
2661
/* Build a WMI TLV peer-assoc-complete command.
 *
 * TLV layout: [fixed cmd][ARRAY_BYTE legacy rates, padded to 4 bytes]
 * [ARRAY_BYTE HT rates, padded to 4 bytes][VHT rate set struct].
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_peer_assoc(struct ath10k *ar,
				 const struct wmi_peer_assoc_complete_arg *arg)
{
	struct wmi_tlv_peer_assoc_cmd *cmd;
	struct wmi_vht_rate_set *vht_rate;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t len, legacy_rate_len, ht_rate_len;
	void *ptr;

	/* Bounds-check args so the fixed-size rate copies below are safe. */
	if (arg->peer_mpdu_density > 16)
		return ERR_PTR(-EINVAL);
	if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
		return ERR_PTR(-EINVAL);
	if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
		return ERR_PTR(-EINVAL);

	/* Rate arrays are one byte per rate, TLV-padded to 4 bytes. */
	legacy_rate_len = roundup(arg->peer_legacy_rates.num_rates,
				  sizeof(__le32));
	ht_rate_len = roundup(arg->peer_ht_rates.num_rates, sizeof(__le32));
	len = (sizeof(*tlv) + sizeof(*cmd)) +
	      (sizeof(*tlv) + legacy_rate_len) +
	      (sizeof(*tlv) + ht_rate_len) +
	      (sizeof(*tlv) + sizeof(*vht_rate));
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_ASSOC_COMPLETE_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;

	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	/* new_assoc is the inverse of reassoc. */
	cmd->new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
	cmd->assoc_id = __cpu_to_le32(arg->peer_aid);
	cmd->flags = __cpu_to_le32(arg->peer_flags);
	cmd->caps = __cpu_to_le32(arg->peer_caps);
	cmd->listen_intval = __cpu_to_le32(arg->peer_listen_intval);
	cmd->ht_caps = __cpu_to_le32(arg->peer_ht_caps);
	cmd->max_mpdu = __cpu_to_le32(arg->peer_max_mpdu);
	cmd->mpdu_density = __cpu_to_le32(arg->peer_mpdu_density);
	cmd->rate_caps = __cpu_to_le32(arg->peer_rate_caps);
	cmd->nss = __cpu_to_le32(arg->peer_num_spatial_streams);
	cmd->vht_caps = __cpu_to_le32(arg->peer_vht_caps);
	cmd->phy_mode = __cpu_to_le32(arg->peer_phymode);
	cmd->num_legacy_rates = __cpu_to_le32(arg->peer_legacy_rates.num_rates);
	cmd->num_ht_rates = __cpu_to_le32(arg->peer_ht_rates.num_rates);
	ether_addr_copy(cmd->mac_addr.addr, arg->addr);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* Second TLV: legacy rate bytes (only num_rates copied; the TLV
	 * length covers the padded size).
	 */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
	tlv->len = __cpu_to_le16(legacy_rate_len);
	memcpy(tlv->value, arg->peer_legacy_rates.rates,
	       arg->peer_legacy_rates.num_rates);

	ptr += sizeof(*tlv);
	ptr += legacy_rate_len;

	/* Third TLV: HT MCS rate bytes, same padding scheme. */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
	tlv->len = __cpu_to_le16(ht_rate_len);
	memcpy(tlv->value, arg->peer_ht_rates.rates,
	       arg->peer_ht_rates.num_rates);

	ptr += sizeof(*tlv);
	ptr += ht_rate_len;

	/* Fourth TLV: VHT rate set. */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VHT_RATE_SET);
	tlv->len = __cpu_to_le16(sizeof(*vht_rate));
	vht_rate = (void *)tlv->value;

	vht_rate->rx_max_rate = __cpu_to_le32(arg->peer_vht_rates.rx_max_rate);
	vht_rate->rx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.rx_mcs_set);
	vht_rate->tx_max_rate = __cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
	vht_rate->tx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);

	ptr += sizeof(*tlv);
	ptr += sizeof(*vht_rate);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer assoc\n");
	return skb;
}
2751
2752static struct sk_buff *
2753ath10k_wmi_tlv_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id,
2754 enum wmi_sta_ps_mode psmode)
2755{
2756 struct wmi_sta_powersave_mode_cmd *cmd;
2757 struct wmi_tlv *tlv;
2758 struct sk_buff *skb;
2759
2760 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2761 if (!skb)
2762 return ERR_PTR(-ENOMEM);
2763
2764 tlv = (void *)skb->data;
2765 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_POWERSAVE_MODE_CMD);
2766 tlv->len = __cpu_to_le16(sizeof(*cmd));
2767 cmd = (void *)tlv->value;
2768 cmd->vdev_id = __cpu_to_le32(vdev_id);
2769 cmd->sta_ps_mode = __cpu_to_le32(psmode);
2770
2771 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set psmode\n");
2772 return skb;
2773}
2774
2775static struct sk_buff *
2776ath10k_wmi_tlv_op_gen_set_sta_ps(struct ath10k *ar, u32 vdev_id,
2777 enum wmi_sta_powersave_param param_id,
2778 u32 param_value)
2779{
2780 struct wmi_sta_powersave_param_cmd *cmd;
2781 struct wmi_tlv *tlv;
2782 struct sk_buff *skb;
2783
2784 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2785 if (!skb)
2786 return ERR_PTR(-ENOMEM);
2787
2788 tlv = (void *)skb->data;
2789 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_POWERSAVE_PARAM_CMD);
2790 tlv->len = __cpu_to_le16(sizeof(*cmd));
2791 cmd = (void *)tlv->value;
2792 cmd->vdev_id = __cpu_to_le32(vdev_id);
2793 cmd->param_id = __cpu_to_le32(param_id);
2794 cmd->param_value = __cpu_to_le32(param_value);
2795
2796 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set sta ps\n");
2797 return skb;
2798}
2799
2800static struct sk_buff *
2801ath10k_wmi_tlv_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac,
2802 enum wmi_ap_ps_peer_param param_id, u32 value)
2803{
2804 struct wmi_ap_ps_peer_cmd *cmd;
2805 struct wmi_tlv *tlv;
2806 struct sk_buff *skb;
2807
2808 if (!mac)
2809 return ERR_PTR(-EINVAL);
2810
2811 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2812 if (!skb)
2813 return ERR_PTR(-ENOMEM);
2814
2815 tlv = (void *)skb->data;
2816 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_AP_PS_PEER_CMD);
2817 tlv->len = __cpu_to_le16(sizeof(*cmd));
2818 cmd = (void *)tlv->value;
2819 cmd->vdev_id = __cpu_to_le32(vdev_id);
2820 cmd->param_id = __cpu_to_le32(param_id);
2821 cmd->param_value = __cpu_to_le32(value);
2822 ether_addr_copy(cmd->peer_macaddr.addr, mac);
2823
2824 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv ap ps param\n");
2825 return skb;
2826}
2827
/* Build a WMI TLV scan-channel-list command.
 *
 * TLV layout: [fixed cmd with channel count][ARRAY_STRUCT holding one
 * nested CHANNEL TLV per channel in arg->channels].
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_scan_chan_list(struct ath10k *ar,
				     const struct wmi_scan_chan_list_arg *arg)
{
	struct wmi_tlv_scan_chan_list_cmd *cmd;
	struct wmi_channel *ci;
	struct wmi_channel_arg *ch;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t chans_len, len;
	int i;
	void *ptr, *chans;

	/* Each channel contributes its own nested TLV header + payload. */
	chans_len = arg->n_channels * (sizeof(*tlv) + sizeof(*ci));
	len = (sizeof(*tlv) + sizeof(*cmd)) +
	      (sizeof(*tlv) + chans_len);

	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_SCAN_CHAN_LIST_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* Outer array TLV wrapping all per-channel TLVs. */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(chans_len);
	chans = (void *)tlv->value;

	for (i = 0; i < arg->n_channels; i++) {
		ch = &arg->channels[i];

		/* Nested TLV per channel; 'chans' walks the inner array. */
		tlv = chans;
		tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
		tlv->len = __cpu_to_le16(sizeof(*ci));
		ci = (void *)tlv->value;

		ath10k_wmi_put_wmi_channel(ar, ci, ch);

		chans += sizeof(*tlv);
		chans += sizeof(*ci);
	}

	ptr += sizeof(*tlv);
	ptr += chans_len;

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv scan chan list\n");
	return skb;
}
2884
2885static struct sk_buff *
2886ath10k_wmi_tlv_op_gen_scan_prob_req_oui(struct ath10k *ar, u32 prob_req_oui)
2887{
2888 struct wmi_scan_prob_req_oui_cmd *cmd;
2889 struct wmi_tlv *tlv;
2890 struct sk_buff *skb;
2891
2892 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2893 if (!skb)
2894 return ERR_PTR(-ENOMEM);
2895
2896 tlv = (void *)skb->data;
2897 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_SCAN_PROB_REQ_OUI_CMD);
2898 tlv->len = __cpu_to_le16(sizeof(*cmd));
2899 cmd = (void *)tlv->value;
2900 cmd->prob_req_oui = __cpu_to_le32(prob_req_oui);
2901
2902 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv scan prob req oui\n");
2903 return skb;
2904}
2905
/* Build a WMI TLV beacon send-from-host command.
 *
 * The beacon itself is not copied into the command; the firmware fetches
 * it by DMA from @bcn_paddr. Only the frame_control field is read from
 * the @bcn buffer here.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id,
				 const void *bcn, size_t bcn_len,
				 u32 bcn_paddr, bool dtim_zero,
				 bool deliver_cab)

{
	struct wmi_bcn_tx_ref_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	u16 fc;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Extract frame control from the beacon's 802.11 header so the
	 * firmware knows the frame type without touching the DMA buffer.
	 */
	hdr = (struct ieee80211_hdr *)bcn;
	fc = le16_to_cpu(hdr->frame_control);

	tlv = (void *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_SEND_FROM_HOST_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->data_len = __cpu_to_le32(bcn_len);
	cmd->data_ptr = __cpu_to_le32(bcn_paddr);
	cmd->msdu_id = 0;
	cmd->frame_control = __cpu_to_le32(fc);
	cmd->flags = 0;

	if (dtim_zero)
		cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);

	if (deliver_cab)
		cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv beacon dma\n");
	return skb;
}
2946
/* Build a WMI TLV pdev set-WMM command.
 *
 * TLV layout: [empty fixed cmd][four WMM_PARAMS TLVs: BE, BK, VI, VO],
 * each appended by ath10k_wmi_tlv_put_wmm().
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_pdev_set_wmm(struct ath10k *ar,
				   const struct wmi_wmm_params_all_arg *arg)
{
	struct wmi_tlv_pdev_set_wmm_cmd *cmd;
	struct wmi_wmm_params *wmm;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t len;
	void *ptr;

	len = (sizeof(*tlv) + sizeof(*cmd)) +
	      (4 * (sizeof(*tlv) + sizeof(*wmm)));
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_WMM_PARAMS_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;

	/* nothing to set here */

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* The helper returns the advanced write position after each TLV. */
	ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_be);
	ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_bk);
	ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vi);
	ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vo);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set wmm\n");
	return skb;
}
2984
2985static struct sk_buff *
2986ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
2987{
2988 struct wmi_request_stats_cmd *cmd;
2989 struct wmi_tlv *tlv;
2990 struct sk_buff *skb;
2991
2992 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2993 if (!skb)
2994 return ERR_PTR(-ENOMEM);
2995
2996 tlv = (void *)skb->data;
2997 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_REQUEST_STATS_CMD);
2998 tlv->len = __cpu_to_le16(sizeof(*cmd));
2999 cmd = (void *)tlv->value;
3000 cmd->stats_id = __cpu_to_le32(stats_mask);
3001
3002 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv request stats\n");
3003 return skb;
3004}
3005
3006static struct sk_buff *
3007ath10k_wmi_tlv_op_gen_request_peer_stats_info(struct ath10k *ar,
3008 u32 vdev_id,
3009 enum wmi_peer_stats_info_request_type type,
3010 u8 *addr,
3011 u32 reset)
3012{
3013 struct wmi_tlv_request_peer_stats_info *cmd;
3014 struct wmi_tlv *tlv;
3015 struct sk_buff *skb;
3016
3017 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
3018 if (!skb)
3019 return ERR_PTR(-ENOMEM);
3020
3021 tlv = (void *)skb->data;
3022 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_REQUEST_PEER_STATS_INFO_CMD);
3023 tlv->len = __cpu_to_le16(sizeof(*cmd));
3024 cmd = (void *)tlv->value;
3025 cmd->vdev_id = __cpu_to_le32(vdev_id);
3026 cmd->request_type = __cpu_to_le32(type);
3027
3028 if (type == WMI_REQUEST_ONE_PEER_STATS_INFO)
3029 ether_addr_copy(cmd->peer_macaddr.addr, addr);
3030
3031 cmd->reset_after_request = __cpu_to_le32(reset);
3032 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv request peer stats info\n");
3033 return skb;
3034}
3035
3036static int
3037ath10k_wmi_tlv_op_cleanup_mgmt_tx_send(struct ath10k *ar,
3038 struct sk_buff *msdu)
3039{
3040 struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
3041 struct ath10k_wmi *wmi = &ar->wmi;
3042
3043 idr_remove(&wmi->mgmt_pending_tx, cb->msdu_id);
3044
3045 return 0;
3046}
3047
3048static int
3049ath10k_wmi_mgmt_tx_alloc_msdu_id(struct ath10k *ar, struct sk_buff *skb,
3050 dma_addr_t paddr)
3051{
3052 struct ath10k_wmi *wmi = &ar->wmi;
3053 struct ath10k_mgmt_tx_pkt_addr *pkt_addr;
3054 int ret;
3055
3056 pkt_addr = kmalloc(sizeof(*pkt_addr), GFP_ATOMIC);
3057 if (!pkt_addr)
3058 return -ENOMEM;
3059
3060 pkt_addr->vaddr = skb;
3061 pkt_addr->paddr = paddr;
3062
3063 spin_lock_bh(&ar->data_lock);
3064 ret = idr_alloc(&wmi->mgmt_pending_tx, pkt_addr, 0,
3065 wmi->mgmt_max_num_pending_tx, GFP_ATOMIC);
3066 spin_unlock_bh(&ar->data_lock);
3067
3068 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx alloc msdu_id ret %d\n", ret);
3069 return ret;
3070}
3071
/* Build a WMI TLV mgmt-tx-send command for @msdu.
 *
 * TLV layout: [fixed mgmt-tx cmd][ARRAY_BYTE with up to
 * WMI_TLV_MGMT_TX_FRAME_MAX_LEN bytes of the frame, 4-byte aligned]. The
 * full frame is fetched by the firmware via @paddr; only a (possibly
 * truncated) copy travels in the command. Allocates a pending-tx msdu id
 * and records it in cb->msdu_id; the caller must later release it (see
 * ath10k_wmi_tlv_op_cleanup_mgmt_tx_send).
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
				   dma_addr_t paddr)
{
	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
	struct wmi_tlv_mgmt_tx_cmd *cmd;
	struct ieee80211_hdr *hdr;
	struct ath10k_vif *arvif;
	u32 buf_len = msdu->len;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	int len, desc_id;
	u32 vdev_id;
	void *ptr;

	/* The vdev id comes from the transmitting vif. */
	if (!cb->vif)
		return ERR_PTR(-EINVAL);

	hdr = (struct ieee80211_hdr *)msdu->data;
	arvif = (void *)cb->vif->drv_priv;
	vdev_id = arvif->vdev_id;

	/* Only management frames and (QoS-)nullfunc frames go this path. */
	if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control) &&
			 (!(ieee80211_is_nullfunc(hdr->frame_control) ||
			    ieee80211_is_qos_nullfunc(hdr->frame_control)))))
		return ERR_PTR(-EINVAL);

	len = sizeof(*cmd) + 2 * sizeof(*tlv);

	/* Protected robust mgmt frames: reserve tailroom in the msdu for
	 * the CCMP MIC the firmware appends.
	 */
	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
		buf_len += IEEE80211_CCMP_MIC_LEN;
	}

	/* Cap the inline copy and keep the TLV 4-byte aligned. */
	buf_len = min_t(u32, buf_len, WMI_TLV_MGMT_TX_FRAME_MAX_LEN);
	buf_len = round_up(buf_len, 4);

	len += buf_len;
	len = round_up(len, 4);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	desc_id = ath10k_wmi_mgmt_tx_alloc_msdu_id(ar, msdu, paddr);
	if (desc_id < 0)
		goto err_free_skb;

	/* Remember the id so tx-completion / cleanup can look it up. */
	cb->msdu_id = desc_id;

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_MGMT_TX_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->desc_id = __cpu_to_le32(desc_id);
	cmd->chanfreq = 0;
	cmd->buf_len = __cpu_to_le32(buf_len);
	cmd->frame_len = __cpu_to_le32(msdu->len);
	cmd->paddr = __cpu_to_le64(paddr);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* Second TLV: the inline frame copy (possibly truncated). */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
	tlv->len = __cpu_to_le16(buf_len);

	ptr += sizeof(*tlv);
	memcpy(ptr, msdu->data, buf_len);

	return skb;

err_free_skb:
	/* desc_id holds the negative errno from the id allocation. */
	dev_kfree_skb(skb);
	return ERR_PTR(desc_id);
}
3152
3153static struct sk_buff *
3154ath10k_wmi_tlv_op_gen_force_fw_hang(struct ath10k *ar,
3155 enum wmi_force_fw_hang_type type,
3156 u32 delay_ms)
3157{
3158 struct wmi_force_fw_hang_cmd *cmd;
3159 struct wmi_tlv *tlv;
3160 struct sk_buff *skb;
3161
3162 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
3163 if (!skb)
3164 return ERR_PTR(-ENOMEM);
3165
3166 tlv = (void *)skb->data;
3167 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_FORCE_FW_HANG_CMD);
3168 tlv->len = __cpu_to_le16(sizeof(*cmd));
3169 cmd = (void *)tlv->value;
3170 cmd->type = __cpu_to_le32(type);
3171 cmd->delay_ms = __cpu_to_le32(delay_ms);
3172
3173 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv force fw hang\n");
3174 return skb;
3175}
3176
3177static struct sk_buff *
3178ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
3179 u32 log_level)
3180{
3181 struct wmi_tlv_dbglog_cmd *cmd;
3182 struct wmi_tlv *tlv;
3183 struct sk_buff *skb;
3184 size_t len, bmap_len;
3185 u32 value;
3186 void *ptr;
3187
3188 if (module_enable) {
3189 value = WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(
3190 module_enable,
3191 WMI_TLV_DBGLOG_LOG_LEVEL_VERBOSE);
3192 } else {
3193 value = WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(
3194 WMI_TLV_DBGLOG_ALL_MODULES,
3195 WMI_TLV_DBGLOG_LOG_LEVEL_WARN);
3196 }
3197
3198 bmap_len = 0;
3199 len = sizeof(*tlv) + sizeof(*cmd) + sizeof(*tlv) + bmap_len;
3200 skb = ath10k_wmi_alloc_skb(ar, len);
3201 if (!skb)
3202 return ERR_PTR(-ENOMEM);
3203
3204 ptr = (void *)skb->data;
3205
3206 tlv = ptr;
3207 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_DEBUG_LOG_CONFIG_CMD);
3208 tlv->len = __cpu_to_le16(sizeof(*cmd));
3209 cmd = (void *)tlv->value;
3210 cmd->param = __cpu_to_le32(WMI_TLV_DBGLOG_PARAM_LOG_LEVEL);
3211 cmd->value = __cpu_to_le32(value);
3212
3213 ptr += sizeof(*tlv);
3214 ptr += sizeof(*cmd);
3215
3216 tlv = ptr;
3217 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
3218 tlv->len = __cpu_to_le16(bmap_len);
3219
3220 /* nothing to do here */
3221
3222 ptr += sizeof(*tlv);
3223 ptr += sizeof(bmap_len);
3224
3225 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv dbglog value 0x%08x\n", value);
3226 return skb;
3227}
3228
3229static struct sk_buff *
3230ath10k_wmi_tlv_op_gen_pktlog_enable(struct ath10k *ar, u32 filter)
3231{
3232 struct wmi_tlv_pktlog_enable *cmd;
3233 struct wmi_tlv *tlv;
3234 struct sk_buff *skb;
3235 void *ptr;
3236 size_t len;
3237
3238 len = sizeof(*tlv) + sizeof(*cmd);
3239 skb = ath10k_wmi_alloc_skb(ar, len);
3240 if (!skb)
3241 return ERR_PTR(-ENOMEM);
3242
3243 ptr = (void *)skb->data;
3244 tlv = ptr;
3245 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_ENABLE_CMD);
3246 tlv->len = __cpu_to_le16(sizeof(*cmd));
3247 cmd = (void *)tlv->value;
3248 cmd->filter = __cpu_to_le32(filter);
3249
3250 ptr += sizeof(*tlv);
3251 ptr += sizeof(*cmd);
3252
3253 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pktlog enable filter 0x%08x\n",
3254 filter);
3255 return skb;
3256}
3257
3258static struct sk_buff *
3259ath10k_wmi_tlv_op_gen_pdev_get_temperature(struct ath10k *ar)
3260{
3261 struct wmi_tlv_pdev_get_temp_cmd *cmd;
3262 struct wmi_tlv *tlv;
3263 struct sk_buff *skb;
3264
3265 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
3266 if (!skb)
3267 return ERR_PTR(-ENOMEM);
3268
3269 tlv = (void *)skb->data;
3270 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_GET_TEMPERATURE_CMD);
3271 tlv->len = __cpu_to_le16(sizeof(*cmd));
3272 cmd = (void *)tlv->value;
3273 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev get temperature tlv\n");
3274 return skb;
3275}
3276
3277static struct sk_buff *
3278ath10k_wmi_tlv_op_gen_pktlog_disable(struct ath10k *ar)
3279{
3280 struct wmi_tlv_pktlog_disable *cmd;
3281 struct wmi_tlv *tlv;
3282 struct sk_buff *skb;
3283 void *ptr;
3284 size_t len;
3285
3286 len = sizeof(*tlv) + sizeof(*cmd);
3287 skb = ath10k_wmi_alloc_skb(ar, len);
3288 if (!skb)
3289 return ERR_PTR(-ENOMEM);
3290
3291 ptr = (void *)skb->data;
3292 tlv = ptr;
3293 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_DISABLE_CMD);
3294 tlv->len = __cpu_to_le16(sizeof(*cmd));
3295 cmd = (void *)tlv->value;
3296
3297 ptr += sizeof(*tlv);
3298 ptr += sizeof(*cmd);
3299
3300 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pktlog disable\n");
3301 return skb;
3302}
3303
/* Upload a beacon template for @vdev_id.
 *
 * The command carries three back-to-back TLVs: the bcn_tmpl command,
 * a probe-response info struct (caps/ERP plus optional trailing IEs),
 * and the raw beacon frame padded out to a 4-byte boundary.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_bcn_tmpl(struct ath10k *ar, u32 vdev_id,
			       u32 tim_ie_offset, struct sk_buff *bcn,
			       u32 prb_caps, u32 prb_erp, void *prb_ies,
			       size_t prb_ies_len)
{
	struct wmi_tlv_bcn_tmpl_cmd *cmd;
	struct wmi_tlv_bcn_prb_info *info;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	size_t len;

	/* a non-zero IE length with no IE buffer is a caller bug */
	if (WARN_ON(prb_ies_len > 0 && !prb_ies))
		return ERR_PTR(-EINVAL);

	/* cmd TLV + prb-info TLV (with IEs) + padded beacon TLV */
	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) + sizeof(*info) + prb_ies_len +
	      sizeof(*tlv) + roundup(bcn->len, 4);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_TMPL_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->tim_ie_offset = __cpu_to_le32(tim_ie_offset);
	/* buf_len is the real beacon length; only the TLV is padded */
	cmd->buf_len = __cpu_to_le32(bcn->len);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* FIXME: prb_ies_len should be probably aligned to 4byte boundary but
	 * then it is then impossible to pass original ie len.
	 * This chunk is not used yet so if setting probe resp template yields
	 * problems with beaconing or crashes firmware look here.
	 */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
	tlv->len = __cpu_to_le16(sizeof(*info) + prb_ies_len);
	info = (void *)tlv->value;
	info->caps = __cpu_to_le32(prb_caps);
	info->erp = __cpu_to_le32(prb_erp);
	memcpy(info->ies, prb_ies, prb_ies_len);

	ptr += sizeof(*tlv);
	ptr += sizeof(*info);
	ptr += prb_ies_len;

	/* raw beacon bytes; TLV length is rounded up to 4 bytes */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
	tlv->len = __cpu_to_le16(roundup(bcn->len, 4));
	memcpy(tlv->value, bcn->data, bcn->len);

	/* FIXME: Adjust TSF? */

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv bcn tmpl vdev_id %i\n",
		   vdev_id);
	return skb;
}
3367
/* Upload a probe response template for @vdev_id.
 *
 * Layout mirrors the beacon template command: prb_tmpl command TLV,
 * a zeroed bcn_prb_info TLV, then the raw frame padded to 4 bytes.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_prb_tmpl(struct ath10k *ar, u32 vdev_id,
			       struct sk_buff *prb)
{
	struct wmi_tlv_prb_tmpl_cmd *cmd;
	struct wmi_tlv_bcn_prb_info *info;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	size_t len;

	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) + sizeof(*info) +
	      sizeof(*tlv) + roundup(prb->len, 4);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PRB_TMPL_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	/* buf_len is the real frame length; only the TLV is padded */
	cmd->buf_len = __cpu_to_le32(prb->len);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* caps/ERP are not used for probe responses, send zeros */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
	tlv->len = __cpu_to_le16(sizeof(*info));
	info = (void *)tlv->value;
	info->caps = 0;
	info->erp = 0;

	ptr += sizeof(*tlv);
	ptr += sizeof(*info);

	/* raw probe response bytes; TLV length rounded up to 4 bytes */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
	tlv->len = __cpu_to_le16(roundup(prb->len, 4));
	memcpy(tlv->value, prb->data, prb->len);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv prb tmpl vdev_id %i\n",
		   vdev_id);
	return skb;
}
3416
/* Install a P2P IE into the beacon of a P2P GO vdev.
 *
 * @p2p_ie is treated as a raw IE (id, len, body): p2p_ie[1] is the
 * body length, so p2p_ie[1] + 2 is the total IE size including the
 * two header bytes. NOTE(review): @p2p_ie is dereferenced without a
 * NULL check - assumes callers always pass a valid IE; confirm.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id,
				    const u8 *p2p_ie)
{
	struct wmi_tlv_p2p_go_bcn_ie *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	size_t len;

	/* cmd TLV + IE bytes padded to a 4-byte boundary */
	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) + roundup(p2p_ie[1] + 2, 4);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_P2P_GO_SET_BEACON_IE);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	/* ie_len is the unpadded IE size; only the TLV below is padded */
	cmd->ie_len = __cpu_to_le32(p2p_ie[1] + 2);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
	tlv->len = __cpu_to_le16(roundup(p2p_ie[1] + 2, 4));
	memcpy(tlv->value, p2p_ie, p2p_ie[1] + 2);

	ptr += sizeof(*tlv);
	ptr += roundup(p2p_ie[1] + 2, 4);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv p2p go bcn ie for vdev %i\n",
		   vdev_id);
	return skb;
}
3456
/* Enable or disable TDLS in the firmware for @vdev_id.
 *
 * The command carries a set of hard-coded tuning defaults (intervals,
 * thresholds, U-APSD mask) for the firmware's TDLS state machine.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
					   enum wmi_tdls_state state)
{
	struct wmi_tdls_set_state_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	size_t len;
	/* Set to options from wmi_tlv_tdls_options,
	 * for now none of them are enabled.
	 */
	u32 options = 0;

	if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, ar->wmi.svc_map))
		options |= WMI_TLV_TDLS_BUFFER_STA_EN;

	/* WMI_TDLS_ENABLE_ACTIVE_EXTERNAL_CONTROL means firm will handle TDLS
	 * link inactivity detecting logic.
	 */
	if (state == WMI_TDLS_ENABLE_ACTIVE)
		state = WMI_TDLS_ENABLE_ACTIVE_EXTERNAL_CONTROL;

	len = sizeof(*tlv) + sizeof(*cmd);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_SET_STATE_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));

	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->state = __cpu_to_le32(state);
	cmd->notification_interval_ms = __cpu_to_le32(5000);
	cmd->tx_discovery_threshold = __cpu_to_le32(100);
	cmd->tx_teardown_threshold = __cpu_to_le32(5);
	/* RSSI thresholds are signed dBm values */
	cmd->rssi_teardown_threshold = __cpu_to_le32(-75);
	cmd->rssi_delta = __cpu_to_le32(-20);
	cmd->tdls_options = __cpu_to_le32(options);
	cmd->tdls_peer_traffic_ind_window = __cpu_to_le32(2);
	cmd->tdls_peer_traffic_response_timeout_ms = __cpu_to_le32(5000);
	/* all four ACs are U-APSD capable */
	cmd->tdls_puapsd_mask = __cpu_to_le32(0xf);
	cmd->tdls_puapsd_inactivity_time_ms = __cpu_to_le32(0);
	cmd->tdls_puapsd_rx_frame_threshold = __cpu_to_le32(10);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv update fw tdls state %d for vdev %i\n",
		   state, vdev_id);
	return skb;
}
3512
3513static u32 ath10k_wmi_tlv_prepare_peer_qos(u8 uapsd_queues, u8 sp)
3514{
3515 u32 peer_qos = 0;
3516
3517 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
3518 peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VO;
3519 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
3520 peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VI;
3521 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
3522 peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BK;
3523 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
3524 peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BE;
3525
3526 peer_qos |= SM(sp, WMI_TLV_TDLS_PEER_SP);
3527
3528 return peer_qos;
3529}
3530
/* Build a TDLS_PEER_UPDATE command for a single peer.
 *
 * Three sections: the update command (vdev, peer MAC, state), the peer
 * capabilities struct, and an ARRAY_STRUCT of wmi_channel entries, one
 * per off-channel the peer supports.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_tdls_peer_update(struct ath10k *ar,
				       const struct wmi_tdls_peer_update_cmd_arg *arg,
				       const struct wmi_tdls_peer_capab_arg *cap,
				       const struct wmi_channel_arg *chan_arg)
{
	struct wmi_tdls_peer_update_cmd *cmd;
	struct wmi_tdls_peer_capab *peer_cap;
	struct wmi_channel *chan;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	u32 peer_qos;
	void *ptr;
	int len;
	int i;

	/* NOTE(review): assumes chan_arg has at least cap->peer_chan_len
	 * entries - enforced by callers, confirm.
	 */
	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) + sizeof(*peer_cap) +
	      sizeof(*tlv) + cap->peer_chan_len * sizeof(*chan);

	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_UPDATE_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));

	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
	cmd->peer_state = __cpu_to_le32(arg->peer_state);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* peer capabilities section */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_CAPABILITIES);
	tlv->len = __cpu_to_le16(sizeof(*peer_cap));
	peer_cap = (void *)tlv->value;
	peer_qos = ath10k_wmi_tlv_prepare_peer_qos(cap->peer_uapsd_queues,
						   cap->peer_max_sp);
	peer_cap->peer_qos = __cpu_to_le32(peer_qos);
	peer_cap->buff_sta_support = __cpu_to_le32(cap->buff_sta_support);
	peer_cap->off_chan_support = __cpu_to_le32(cap->off_chan_support);
	peer_cap->peer_curr_operclass = __cpu_to_le32(cap->peer_curr_operclass);
	peer_cap->self_curr_operclass = __cpu_to_le32(cap->self_curr_operclass);
	peer_cap->peer_chan_len = __cpu_to_le32(cap->peer_chan_len);
	peer_cap->peer_operclass_len = __cpu_to_le32(cap->peer_operclass_len);

	/* operating classes are single bytes, no endian conversion */
	for (i = 0; i < WMI_TDLS_MAX_SUPP_OPER_CLASSES; i++)
		peer_cap->peer_operclass[i] = cap->peer_operclass[i];

	peer_cap->is_peer_responder = __cpu_to_le32(cap->is_peer_responder);
	peer_cap->pref_offchan_num = __cpu_to_le32(cap->pref_offchan_num);
	peer_cap->pref_offchan_bw = __cpu_to_le32(cap->pref_offchan_bw);

	ptr += sizeof(*tlv);
	ptr += sizeof(*peer_cap);

	/* channel list: outer ARRAY_STRUCT, one inner TLV per channel */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(cap->peer_chan_len * sizeof(*chan));

	ptr += sizeof(*tlv);

	for (i = 0; i < cap->peer_chan_len; i++) {
		tlv = ptr;
		tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
		tlv->len = __cpu_to_le16(sizeof(*chan));
		chan = (void *)tlv->value;
		ath10k_wmi_put_wmi_channel(ar, chan, &chan_arg[i]);

		ptr += sizeof(*tlv);
		ptr += sizeof(*chan);
	}

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi tlv tdls peer update vdev %i state %d n_chans %u\n",
		   arg->vdev_id, arg->peer_state, cap->peer_chan_len);
	return skb;
}
3614
3615static struct sk_buff *
3616ath10k_wmi_tlv_op_gen_pdev_set_quiet_mode(struct ath10k *ar, u32 period,
3617 u32 duration, u32 next_offset,
3618 u32 enabled)
3619{
3620 struct wmi_tlv_set_quiet_cmd *cmd;
3621 struct wmi_tlv *tlv;
3622 struct sk_buff *skb;
3623
3624 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
3625 if (!skb)
3626 return ERR_PTR(-ENOMEM);
3627
3628 tlv = (void *)skb->data;
3629 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_QUIET_CMD);
3630 tlv->len = __cpu_to_le16(sizeof(*cmd));
3631 cmd = (void *)tlv->value;
3632
3633 /* vdev_id is not in use, set to 0 */
3634 cmd->vdev_id = __cpu_to_le32(0);
3635 cmd->period = __cpu_to_le32(period);
3636 cmd->duration = __cpu_to_le32(duration);
3637 cmd->next_start = __cpu_to_le32(next_offset);
3638 cmd->enabled = __cpu_to_le32(enabled);
3639
3640 ath10k_dbg(ar, ATH10K_DBG_WMI,
3641 "wmi tlv quiet param: period %u duration %u enabled %d\n",
3642 period, duration, enabled);
3643 return skb;
3644}
3645
3646static struct sk_buff *
3647ath10k_wmi_tlv_op_gen_wow_enable(struct ath10k *ar)
3648{
3649 struct wmi_tlv_wow_enable_cmd *cmd;
3650 struct wmi_tlv *tlv;
3651 struct sk_buff *skb;
3652 size_t len;
3653
3654 len = sizeof(*tlv) + sizeof(*cmd);
3655 skb = ath10k_wmi_alloc_skb(ar, len);
3656 if (!skb)
3657 return ERR_PTR(-ENOMEM);
3658
3659 tlv = (struct wmi_tlv *)skb->data;
3660 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ENABLE_CMD);
3661 tlv->len = __cpu_to_le16(sizeof(*cmd));
3662 cmd = (void *)tlv->value;
3663
3664 cmd->enable = __cpu_to_le32(1);
3665 if (!ar->bus_param.link_can_suspend)
3666 cmd->pause_iface_config = __cpu_to_le32(WOW_IFACE_PAUSE_DISABLED);
3667
3668 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow enable\n");
3669 return skb;
3670}
3671
3672static struct sk_buff *
3673ath10k_wmi_tlv_op_gen_wow_add_wakeup_event(struct ath10k *ar,
3674 u32 vdev_id,
3675 enum wmi_wow_wakeup_event event,
3676 u32 enable)
3677{
3678 struct wmi_tlv_wow_add_del_event_cmd *cmd;
3679 struct wmi_tlv *tlv;
3680 struct sk_buff *skb;
3681 size_t len;
3682
3683 len = sizeof(*tlv) + sizeof(*cmd);
3684 skb = ath10k_wmi_alloc_skb(ar, len);
3685 if (!skb)
3686 return ERR_PTR(-ENOMEM);
3687
3688 tlv = (struct wmi_tlv *)skb->data;
3689 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_DEL_EVT_CMD);
3690 tlv->len = __cpu_to_le16(sizeof(*cmd));
3691 cmd = (void *)tlv->value;
3692
3693 cmd->vdev_id = __cpu_to_le32(vdev_id);
3694 cmd->is_add = __cpu_to_le32(enable);
3695 cmd->event_bitmap = __cpu_to_le32(1 << event);
3696
3697 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
3698 wow_wakeup_event(event), enable, vdev_id);
3699 return skb;
3700}
3701
3702static struct sk_buff *
3703ath10k_wmi_tlv_gen_wow_host_wakeup_ind(struct ath10k *ar)
3704{
3705 struct wmi_tlv_wow_host_wakeup_ind *cmd;
3706 struct wmi_tlv *tlv;
3707 struct sk_buff *skb;
3708 size_t len;
3709
3710 len = sizeof(*tlv) + sizeof(*cmd);
3711 skb = ath10k_wmi_alloc_skb(ar, len);
3712 if (!skb)
3713 return ERR_PTR(-ENOMEM);
3714
3715 tlv = (struct wmi_tlv *)skb->data;
3716 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_HOSTWAKEUP_FROM_SLEEP_CMD);
3717 tlv->len = __cpu_to_le16(sizeof(*cmd));
3718 cmd = (void *)tlv->value;
3719
3720 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow host wakeup ind\n");
3721 return skb;
3722}
3723
/* Build a WOW_ADD_PATTERN command installing one bitmap wake pattern.
 *
 * The firmware interface requires a fixed sequence of TLVs even for
 * the pattern types not used here: the command, an ARRAY_STRUCT
 * wrapping the bitmap pattern, then empty placeholders for ipv4/ipv6
 * sync and magic patterns, an empty timeout array, and a one-entry
 * ratelimit array.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_wow_add_pattern(struct ath10k *ar, u32 vdev_id,
				      u32 pattern_id, const u8 *pattern,
				      const u8 *bitmask, int pattern_len,
				      int pattern_offset)
{
	struct wmi_tlv_wow_add_pattern_cmd *cmd;
	struct wmi_tlv_wow_bitmap_pattern *bitmap;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	size_t len;

	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) +			/* array struct */
	      sizeof(*tlv) + sizeof(*bitmap) +  /* bitmap */
	      sizeof(*tlv) +			/* empty ipv4 sync */
	      sizeof(*tlv) +			/* empty ipv6 sync */
	      sizeof(*tlv) +			/* empty magic */
	      sizeof(*tlv) +			/* empty info timeout */
	      sizeof(*tlv) + sizeof(u32);	/* ratelimit interval */

	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* cmd */
	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_PATTERN_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;

	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->pattern_id = __cpu_to_le32(pattern_id);
	cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* bitmap: outer length covers the inner TLV header too */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(sizeof(*tlv) + sizeof(*bitmap));

	ptr += sizeof(*tlv);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_BITMAP_PATTERN_T);
	tlv->len = __cpu_to_le16(sizeof(*bitmap));
	bitmap = (void *)tlv->value;

	/* pattern and bitmask share the same length */
	memcpy(bitmap->patternbuf, pattern, pattern_len);
	memcpy(bitmap->bitmaskbuf, bitmask, pattern_len);
	bitmap->pattern_offset = __cpu_to_le32(pattern_offset);
	bitmap->pattern_len = __cpu_to_le32(pattern_len);
	bitmap->bitmask_len = __cpu_to_le32(pattern_len);
	bitmap->pattern_id = __cpu_to_le32(pattern_id);

	ptr += sizeof(*tlv);
	ptr += sizeof(*bitmap);

	/* ipv4 sync */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(0);

	ptr += sizeof(*tlv);

	/* ipv6 sync */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(0);

	ptr += sizeof(*tlv);

	/* magic */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(0);

	ptr += sizeof(*tlv);

	/* pattern info timeout */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
	tlv->len = __cpu_to_le16(0);

	ptr += sizeof(*tlv);

	/* ratelimit interval
	 * NOTE(review): the 4-byte value itself is never written -
	 * presumably the skb is pre-zeroed by ath10k_wmi_alloc_skb and 0
	 * is the intended interval; confirm.
	 */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
	tlv->len = __cpu_to_le16(sizeof(u32));

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d, pattern_offset %d\n",
		   vdev_id, pattern_id, pattern_offset);
	return skb;
}
3823
3824static struct sk_buff *
3825ath10k_wmi_tlv_op_gen_wow_del_pattern(struct ath10k *ar, u32 vdev_id,
3826 u32 pattern_id)
3827{
3828 struct wmi_tlv_wow_del_pattern_cmd *cmd;
3829 struct wmi_tlv *tlv;
3830 struct sk_buff *skb;
3831 size_t len;
3832
3833 len = sizeof(*tlv) + sizeof(*cmd);
3834 skb = ath10k_wmi_alloc_skb(ar, len);
3835 if (!skb)
3836 return ERR_PTR(-ENOMEM);
3837
3838 tlv = (struct wmi_tlv *)skb->data;
3839 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_DEL_PATTERN_CMD);
3840 tlv->len = __cpu_to_le16(sizeof(*cmd));
3841 cmd = (void *)tlv->value;
3842
3843 cmd->vdev_id = __cpu_to_le32(vdev_id);
3844 cmd->pattern_id = __cpu_to_le32(pattern_id);
3845 cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN);
3846
3847 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
3848 vdev_id, pattern_id);
3849 return skb;
3850}
3851
/* Request FW to start PNO operation */
/* Build an NLO_CONFIG command starting preferred-network-offload
 * scanning: the command TLV, an ARRAY_STRUCT of per-SSID
 * nlo_configured_parameters, and an ARRAY_UINT32 channel list.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_config_pno_start(struct ath10k *ar,
				       u32 vdev_id,
				       struct wmi_pno_scan_req *pno)
{
	struct nlo_configured_parameters *nlo_list;
	struct wmi_tlv_wow_nlo_config_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	__le32 *channel_list;
	u16 tlv_len;
	size_t len;
	void *ptr;
	u32 i;

	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) +
	      /* TLV place holder for array of structures
	       * nlo_configured_parameters(nlo_list)
	       */
	      sizeof(*tlv);
	      /* TLV place holder for array of uint32 channel_list */

	/* both counts are capped at the firmware's NLO limits */
	len += sizeof(u32) * min_t(u8, pno->a_networks[0].channel_count,
				   WMI_NLO_MAX_CHAN);
	len += sizeof(struct nlo_configured_parameters) *
	       min_t(u8, pno->uc_networks_count, WMI_NLO_MAX_SSIDS);

	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_NLO_CONFIG_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;

	/* wmi_tlv_wow_nlo_config_cmd parameters*/
	cmd->vdev_id = __cpu_to_le32(pno->vdev_id);
	cmd->flags = __cpu_to_le32(WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN);

	/* current FW does not support min-max range for dwell time */
	cmd->active_dwell_time = __cpu_to_le32(pno->active_max_time);
	cmd->passive_dwell_time = __cpu_to_le32(pno->passive_max_time);

	if (pno->do_passive_scan)
		cmd->flags |= __cpu_to_le32(WMI_NLO_CONFIG_SCAN_PASSIVE);

	/* copy scan interval */
	cmd->fast_scan_period = __cpu_to_le32(pno->fast_scan_period);
	cmd->slow_scan_period = __cpu_to_le32(pno->slow_scan_period);
	cmd->fast_scan_max_cycles = __cpu_to_le32(pno->fast_scan_max_cycles);
	cmd->delay_start_time = __cpu_to_le32(pno->delay_start_time);

	/* optional MAC randomization for probe requests */
	if (pno->enable_pno_scan_randomization) {
		cmd->flags |= __cpu_to_le32(WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ |
					    WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ);
		ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr);
		ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask);
	}

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* nlo_configured_parameters(nlo_list) */
	cmd->no_of_ssids = __cpu_to_le32(min_t(u8, pno->uc_networks_count,
					       WMI_NLO_MAX_SSIDS));
	tlv_len = __le32_to_cpu(cmd->no_of_ssids) *
		  sizeof(struct nlo_configured_parameters);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(tlv_len);

	ptr += sizeof(*tlv);
	nlo_list = ptr;
	for (i = 0; i < __le32_to_cpu(cmd->no_of_ssids); i++) {
		/* each entry carries its own embedded TLV header */
		tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header);
		tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
		tlv->len = __cpu_to_le16(sizeof(struct nlo_configured_parameters) -
					 sizeof(*tlv));

		/* copy ssid and it's length
		 * NOTE(review): ssid_len is assigned without an explicit
		 * endian conversion and then read back via __le32_to_cpu -
		 * presumably fine on little-endian hosts; confirm for BE.
		 */
		nlo_list[i].ssid.valid = __cpu_to_le32(true);
		nlo_list[i].ssid.ssid.ssid_len = pno->a_networks[i].ssid.ssid_len;
		memcpy(nlo_list[i].ssid.ssid.ssid,
		       pno->a_networks[i].ssid.ssid,
		       __le32_to_cpu(nlo_list[i].ssid.ssid.ssid_len));

		/* copy rssi threshold */
		if (pno->a_networks[i].rssi_threshold &&
		    pno->a_networks[i].rssi_threshold > -300) {
			nlo_list[i].rssi_cond.valid = __cpu_to_le32(true);
			nlo_list[i].rssi_cond.rssi =
				__cpu_to_le32(pno->a_networks[i].rssi_threshold);
		}

		nlo_list[i].bcast_nw_type.valid = __cpu_to_le32(true);
		nlo_list[i].bcast_nw_type.bcast_nw_type =
			__cpu_to_le32(pno->a_networks[i].bcast_nw_type);
	}

	ptr += __le32_to_cpu(cmd->no_of_ssids) * sizeof(struct nlo_configured_parameters);

	/* copy channel info */
	cmd->num_of_channels = __cpu_to_le32(min_t(u8,
						   pno->a_networks[0].channel_count,
						   WMI_NLO_MAX_CHAN));

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
	tlv->len = __cpu_to_le16(__le32_to_cpu(cmd->num_of_channels) *
				 sizeof(u_int32_t));
	ptr += sizeof(*tlv);

	/* channels are taken from the first network entry only */
	channel_list = (__le32 *)ptr;
	for (i = 0; i < __le32_to_cpu(cmd->num_of_channels); i++)
		channel_list[i] = __cpu_to_le32(pno->a_networks[0].channels[i]);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n",
		   vdev_id);

	return skb;
}
3978
3979/* Request FW to stop ongoing PNO operation */
3980static struct sk_buff *ath10k_wmi_tlv_op_gen_config_pno_stop(struct ath10k *ar,
3981 u32 vdev_id)
3982{
3983 struct wmi_tlv_wow_nlo_config_cmd *cmd;
3984 struct wmi_tlv *tlv;
3985 struct sk_buff *skb;
3986 void *ptr;
3987 size_t len;
3988
3989 len = sizeof(*tlv) + sizeof(*cmd) +
3990 sizeof(*tlv) +
3991 /* TLV place holder for array of structures
3992 * nlo_configured_parameters(nlo_list)
3993 */
3994 sizeof(*tlv);
3995 /* TLV place holder for array of uint32 channel_list */
3996 skb = ath10k_wmi_alloc_skb(ar, len);
3997 if (!skb)
3998 return ERR_PTR(-ENOMEM);
3999
4000 ptr = (void *)skb->data;
4001 tlv = ptr;
4002 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_NLO_CONFIG_CMD);
4003 tlv->len = __cpu_to_le16(sizeof(*cmd));
4004 cmd = (void *)tlv->value;
4005
4006 cmd->vdev_id = __cpu_to_le32(vdev_id);
4007 cmd->flags = __cpu_to_le32(WMI_NLO_CONFIG_STOP);
4008
4009 ptr += sizeof(*tlv);
4010 ptr += sizeof(*cmd);
4011
4012 /* nlo_configured_parameters(nlo_list) */
4013 tlv = ptr;
4014 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
4015 tlv->len = __cpu_to_le16(0);
4016
4017 ptr += sizeof(*tlv);
4018
4019 /* channel list */
4020 tlv = ptr;
4021 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
4022 tlv->len = __cpu_to_le16(0);
4023
4024 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv stop pno config vdev_id %d\n", vdev_id);
4025 return skb;
4026}
4027
4028static struct sk_buff *
4029ath10k_wmi_tlv_op_gen_config_pno(struct ath10k *ar, u32 vdev_id,
4030 struct wmi_pno_scan_req *pno_scan)
4031{
4032 if (pno_scan->enable)
4033 return ath10k_wmi_tlv_op_gen_config_pno_start(ar, vdev_id, pno_scan);
4034 else
4035 return ath10k_wmi_tlv_op_gen_config_pno_stop(ar, vdev_id);
4036}
4037
4038static struct sk_buff *
4039ath10k_wmi_tlv_op_gen_adaptive_qcs(struct ath10k *ar, bool enable)
4040{
4041 struct wmi_tlv_adaptive_qcs *cmd;
4042 struct wmi_tlv *tlv;
4043 struct sk_buff *skb;
4044 void *ptr;
4045 size_t len;
4046
4047 len = sizeof(*tlv) + sizeof(*cmd);
4048 skb = ath10k_wmi_alloc_skb(ar, len);
4049 if (!skb)
4050 return ERR_PTR(-ENOMEM);
4051
4052 ptr = (void *)skb->data;
4053 tlv = ptr;
4054 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_RESMGR_ADAPTIVE_OCS_CMD);
4055 tlv->len = __cpu_to_le16(sizeof(*cmd));
4056 cmd = (void *)tlv->value;
4057 cmd->enable = __cpu_to_le32(enable ? 1 : 0);
4058
4059 ptr += sizeof(*tlv);
4060 ptr += sizeof(*cmd);
4061
4062 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv adaptive qcs %d\n", enable);
4063 return skb;
4064}
4065
4066static struct sk_buff *
4067ath10k_wmi_tlv_op_gen_echo(struct ath10k *ar, u32 value)
4068{
4069 struct wmi_echo_cmd *cmd;
4070 struct wmi_tlv *tlv;
4071 struct sk_buff *skb;
4072 void *ptr;
4073 size_t len;
4074
4075 len = sizeof(*tlv) + sizeof(*cmd);
4076 skb = ath10k_wmi_alloc_skb(ar, len);
4077 if (!skb)
4078 return ERR_PTR(-ENOMEM);
4079
4080 ptr = (void *)skb->data;
4081 tlv = ptr;
4082 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_ECHO_CMD);
4083 tlv->len = __cpu_to_le16(sizeof(*cmd));
4084 cmd = (void *)tlv->value;
4085 cmd->value = cpu_to_le32(value);
4086
4087 ptr += sizeof(*tlv);
4088 ptr += sizeof(*cmd);
4089
4090 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv echo value 0x%08x\n", value);
4091 return skb;
4092}
4093
/* Build a VDEV_SPECTRAL_CONFIGURE command: a straight field-for-field
 * copy of @arg with each value converted to little endian.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_vdev_spectral_conf(struct ath10k *ar,
					 const struct wmi_vdev_spectral_conf_arg *arg)
{
	struct wmi_vdev_spectral_conf_cmd *cmd;
	struct sk_buff *skb;
	struct wmi_tlv *tlv;
	void *ptr;
	size_t len;

	len = sizeof(*tlv) + sizeof(*cmd);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_CONFIGURE_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->scan_count = __cpu_to_le32(arg->scan_count);
	cmd->scan_period = __cpu_to_le32(arg->scan_period);
	cmd->scan_priority = __cpu_to_le32(arg->scan_priority);
	cmd->scan_fft_size = __cpu_to_le32(arg->scan_fft_size);
	cmd->scan_gc_ena = __cpu_to_le32(arg->scan_gc_ena);
	cmd->scan_restart_ena = __cpu_to_le32(arg->scan_restart_ena);
	cmd->scan_noise_floor_ref = __cpu_to_le32(arg->scan_noise_floor_ref);
	cmd->scan_init_delay = __cpu_to_le32(arg->scan_init_delay);
	cmd->scan_nb_tone_thr = __cpu_to_le32(arg->scan_nb_tone_thr);
	cmd->scan_str_bin_thr = __cpu_to_le32(arg->scan_str_bin_thr);
	cmd->scan_wb_rpt_mode = __cpu_to_le32(arg->scan_wb_rpt_mode);
	cmd->scan_rssi_rpt_mode = __cpu_to_le32(arg->scan_rssi_rpt_mode);
	cmd->scan_rssi_thr = __cpu_to_le32(arg->scan_rssi_thr);
	cmd->scan_pwr_format = __cpu_to_le32(arg->scan_pwr_format);
	cmd->scan_rpt_mode = __cpu_to_le32(arg->scan_rpt_mode);
	cmd->scan_bin_scale = __cpu_to_le32(arg->scan_bin_scale);
	cmd->scan_dbm_adj = __cpu_to_le32(arg->scan_dbm_adj);
	cmd->scan_chn_mask = __cpu_to_le32(arg->scan_chn_mask);

	return skb;
}
4136
4137static struct sk_buff *
4138ath10k_wmi_tlv_op_gen_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id,
4139 u32 trigger, u32 enable)
4140{
4141 struct wmi_vdev_spectral_enable_cmd *cmd;
4142 struct sk_buff *skb;
4143 struct wmi_tlv *tlv;
4144 void *ptr;
4145 size_t len;
4146
4147 len = sizeof(*tlv) + sizeof(*cmd);
4148 skb = ath10k_wmi_alloc_skb(ar, len);
4149 if (!skb)
4150 return ERR_PTR(-ENOMEM);
4151
4152 ptr = (void *)skb->data;
4153 tlv = ptr;
4154 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_ENABLE_CMD);
4155 tlv->len = __cpu_to_le16(sizeof(*cmd));
4156 cmd = (void *)tlv->value;
4157 cmd->vdev_id = __cpu_to_le32(vdev_id);
4158 cmd->trigger_cmd = __cpu_to_le32(trigger);
4159 cmd->enable_cmd = __cpu_to_le32(enable);
4160
4161 return skb;
4162}
4163
4164/****************/
4165/* TLV mappings */
4166/****************/
4167
4168static struct wmi_cmd_map wmi_tlv_cmd_map = {
4169 .init_cmdid = WMI_TLV_INIT_CMDID,
4170 .start_scan_cmdid = WMI_TLV_START_SCAN_CMDID,
4171 .stop_scan_cmdid = WMI_TLV_STOP_SCAN_CMDID,
4172 .scan_chan_list_cmdid = WMI_TLV_SCAN_CHAN_LIST_CMDID,
4173 .scan_sch_prio_tbl_cmdid = WMI_TLV_SCAN_SCH_PRIO_TBL_CMDID,
4174 .scan_prob_req_oui_cmdid = WMI_TLV_SCAN_PROB_REQ_OUI_CMDID,
4175 .pdev_set_regdomain_cmdid = WMI_TLV_PDEV_SET_REGDOMAIN_CMDID,
4176 .pdev_set_channel_cmdid = WMI_TLV_PDEV_SET_CHANNEL_CMDID,
4177 .pdev_set_param_cmdid = WMI_TLV_PDEV_SET_PARAM_CMDID,
4178 .pdev_pktlog_enable_cmdid = WMI_TLV_PDEV_PKTLOG_ENABLE_CMDID,
4179 .pdev_pktlog_disable_cmdid = WMI_TLV_PDEV_PKTLOG_DISABLE_CMDID,
4180 .pdev_set_wmm_params_cmdid = WMI_TLV_PDEV_SET_WMM_PARAMS_CMDID,
4181 .pdev_set_ht_cap_ie_cmdid = WMI_TLV_PDEV_SET_HT_CAP_IE_CMDID,
4182 .pdev_set_vht_cap_ie_cmdid = WMI_TLV_PDEV_SET_VHT_CAP_IE_CMDID,
4183 .pdev_set_dscp_tid_map_cmdid = WMI_TLV_PDEV_SET_DSCP_TID_MAP_CMDID,
4184 .pdev_set_quiet_mode_cmdid = WMI_TLV_PDEV_SET_QUIET_MODE_CMDID,
4185 .pdev_green_ap_ps_enable_cmdid = WMI_TLV_PDEV_GREEN_AP_PS_ENABLE_CMDID,
4186 .pdev_get_tpc_config_cmdid = WMI_TLV_PDEV_GET_TPC_CONFIG_CMDID,
4187 .pdev_set_base_macaddr_cmdid = WMI_TLV_PDEV_SET_BASE_MACADDR_CMDID,
4188 .vdev_create_cmdid = WMI_TLV_VDEV_CREATE_CMDID,
4189 .vdev_delete_cmdid = WMI_TLV_VDEV_DELETE_CMDID,
4190 .vdev_start_request_cmdid = WMI_TLV_VDEV_START_REQUEST_CMDID,
4191 .vdev_restart_request_cmdid = WMI_TLV_VDEV_RESTART_REQUEST_CMDID,
4192 .vdev_up_cmdid = WMI_TLV_VDEV_UP_CMDID,
4193 .vdev_stop_cmdid = WMI_TLV_VDEV_STOP_CMDID,
4194 .vdev_down_cmdid = WMI_TLV_VDEV_DOWN_CMDID,
4195 .vdev_set_param_cmdid = WMI_TLV_VDEV_SET_PARAM_CMDID,
4196 .vdev_install_key_cmdid = WMI_TLV_VDEV_INSTALL_KEY_CMDID,
4197 .peer_create_cmdid = WMI_TLV_PEER_CREATE_CMDID,
4198 .peer_delete_cmdid = WMI_TLV_PEER_DELETE_CMDID,
4199 .peer_flush_tids_cmdid = WMI_TLV_PEER_FLUSH_TIDS_CMDID,
4200 .peer_set_param_cmdid = WMI_TLV_PEER_SET_PARAM_CMDID,
4201 .peer_assoc_cmdid = WMI_TLV_PEER_ASSOC_CMDID,
4202 .peer_add_wds_entry_cmdid = WMI_TLV_PEER_ADD_WDS_ENTRY_CMDID,
4203 .peer_remove_wds_entry_cmdid = WMI_TLV_PEER_REMOVE_WDS_ENTRY_CMDID,
4204 .peer_mcast_group_cmdid = WMI_TLV_PEER_MCAST_GROUP_CMDID,
4205 .bcn_tx_cmdid = WMI_TLV_BCN_TX_CMDID,
4206 .pdev_send_bcn_cmdid = WMI_TLV_PDEV_SEND_BCN_CMDID,
4207 .bcn_tmpl_cmdid = WMI_TLV_BCN_TMPL_CMDID,
4208 .bcn_filter_rx_cmdid = WMI_TLV_BCN_FILTER_RX_CMDID,
4209 .prb_req_filter_rx_cmdid = WMI_TLV_PRB_REQ_FILTER_RX_CMDID,
4210 .mgmt_tx_cmdid = WMI_TLV_MGMT_TX_CMDID,
4211 .mgmt_tx_send_cmdid = WMI_TLV_MGMT_TX_SEND_CMD,
4212 .prb_tmpl_cmdid = WMI_TLV_PRB_TMPL_CMDID,
4213 .addba_clear_resp_cmdid = WMI_TLV_ADDBA_CLEAR_RESP_CMDID,
4214 .addba_send_cmdid = WMI_TLV_ADDBA_SEND_CMDID,
4215 .addba_status_cmdid = WMI_TLV_ADDBA_STATUS_CMDID,
4216 .delba_send_cmdid = WMI_TLV_DELBA_SEND_CMDID,
4217 .addba_set_resp_cmdid = WMI_TLV_ADDBA_SET_RESP_CMDID,
4218 .send_singleamsdu_cmdid = WMI_TLV_SEND_SINGLEAMSDU_CMDID,
4219 .sta_powersave_mode_cmdid = WMI_TLV_STA_POWERSAVE_MODE_CMDID,
4220 .sta_powersave_param_cmdid = WMI_TLV_STA_POWERSAVE_PARAM_CMDID,
4221 .sta_mimo_ps_mode_cmdid = WMI_TLV_STA_MIMO_PS_MODE_CMDID,
4222 .pdev_dfs_enable_cmdid = WMI_TLV_PDEV_DFS_ENABLE_CMDID,
4223 .pdev_dfs_disable_cmdid = WMI_TLV_PDEV_DFS_DISABLE_CMDID,
4224 .roam_scan_mode = WMI_TLV_ROAM_SCAN_MODE,
4225 .roam_scan_rssi_threshold = WMI_TLV_ROAM_SCAN_RSSI_THRESHOLD,
4226 .roam_scan_period = WMI_TLV_ROAM_SCAN_PERIOD,
4227 .roam_scan_rssi_change_threshold =
4228 WMI_TLV_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
4229 .roam_ap_profile = WMI_TLV_ROAM_AP_PROFILE,
4230 .ofl_scan_add_ap_profile = WMI_TLV_ROAM_AP_PROFILE,
4231 .ofl_scan_remove_ap_profile = WMI_TLV_OFL_SCAN_REMOVE_AP_PROFILE,
4232 .ofl_scan_period = WMI_TLV_OFL_SCAN_PERIOD,
4233 .p2p_dev_set_device_info = WMI_TLV_P2P_DEV_SET_DEVICE_INFO,
4234 .p2p_dev_set_discoverability = WMI_TLV_P2P_DEV_SET_DISCOVERABILITY,
4235 .p2p_go_set_beacon_ie = WMI_TLV_P2P_GO_SET_BEACON_IE,
4236 .p2p_go_set_probe_resp_ie = WMI_TLV_P2P_GO_SET_PROBE_RESP_IE,
4237 .p2p_set_vendor_ie_data_cmdid = WMI_TLV_P2P_SET_VENDOR_IE_DATA_CMDID,
4238 .ap_ps_peer_param_cmdid = WMI_TLV_AP_PS_PEER_PARAM_CMDID,
4239 .ap_ps_peer_uapsd_coex_cmdid = WMI_TLV_AP_PS_PEER_UAPSD_COEX_CMDID,
4240 .peer_rate_retry_sched_cmdid = WMI_TLV_PEER_RATE_RETRY_SCHED_CMDID,
4241 .wlan_profile_trigger_cmdid = WMI_TLV_WLAN_PROFILE_TRIGGER_CMDID,
4242 .wlan_profile_set_hist_intvl_cmdid =
4243 WMI_TLV_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
4244 .wlan_profile_get_profile_data_cmdid =
4245 WMI_TLV_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
4246 .wlan_profile_enable_profile_id_cmdid =
4247 WMI_TLV_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
4248 .wlan_profile_list_profile_id_cmdid =
4249 WMI_TLV_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
4250 .pdev_suspend_cmdid = WMI_TLV_PDEV_SUSPEND_CMDID,
4251 .pdev_resume_cmdid = WMI_TLV_PDEV_RESUME_CMDID,
4252 .add_bcn_filter_cmdid = WMI_TLV_ADD_BCN_FILTER_CMDID,
4253 .rmv_bcn_filter_cmdid = WMI_TLV_RMV_BCN_FILTER_CMDID,
4254 .wow_add_wake_pattern_cmdid = WMI_TLV_WOW_ADD_WAKE_PATTERN_CMDID,
4255 .wow_del_wake_pattern_cmdid = WMI_TLV_WOW_DEL_WAKE_PATTERN_CMDID,
4256 .wow_enable_disable_wake_event_cmdid =
4257 WMI_TLV_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
4258 .wow_enable_cmdid = WMI_TLV_WOW_ENABLE_CMDID,
4259 .wow_hostwakeup_from_sleep_cmdid =
4260 WMI_TLV_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
4261 .rtt_measreq_cmdid = WMI_TLV_RTT_MEASREQ_CMDID,
4262 .rtt_tsf_cmdid = WMI_TLV_RTT_TSF_CMDID,
4263 .vdev_spectral_scan_configure_cmdid = WMI_TLV_SPECTRAL_SCAN_CONF_CMDID,
4264 .vdev_spectral_scan_enable_cmdid = WMI_TLV_SPECTRAL_SCAN_ENABLE_CMDID,
4265 .request_stats_cmdid = WMI_TLV_REQUEST_STATS_CMDID,
4266 .request_peer_stats_info_cmdid = WMI_TLV_REQUEST_PEER_STATS_INFO_CMDID,
4267 .set_arp_ns_offload_cmdid = WMI_TLV_SET_ARP_NS_OFFLOAD_CMDID,
4268 .network_list_offload_config_cmdid =
4269 WMI_TLV_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
4270 .gtk_offload_cmdid = WMI_TLV_GTK_OFFLOAD_CMDID,
4271 .csa_offload_enable_cmdid = WMI_TLV_CSA_OFFLOAD_ENABLE_CMDID,
4272 .csa_offload_chanswitch_cmdid = WMI_TLV_CSA_OFFLOAD_CHANSWITCH_CMDID,
4273 .chatter_set_mode_cmdid = WMI_TLV_CHATTER_SET_MODE_CMDID,
4274 .peer_tid_addba_cmdid = WMI_TLV_PEER_TID_ADDBA_CMDID,
4275 .peer_tid_delba_cmdid = WMI_TLV_PEER_TID_DELBA_CMDID,
4276 .sta_dtim_ps_method_cmdid = WMI_TLV_STA_DTIM_PS_METHOD_CMDID,
4277 .sta_uapsd_auto_trig_cmdid = WMI_TLV_STA_UAPSD_AUTO_TRIG_CMDID,
4278 .sta_keepalive_cmd = WMI_TLV_STA_KEEPALIVE_CMDID,
4279 .echo_cmdid = WMI_TLV_ECHO_CMDID,
4280 .pdev_utf_cmdid = WMI_TLV_PDEV_UTF_CMDID,
4281 .dbglog_cfg_cmdid = WMI_TLV_DBGLOG_CFG_CMDID,
4282 .pdev_qvit_cmdid = WMI_TLV_PDEV_QVIT_CMDID,
4283 .pdev_ftm_intg_cmdid = WMI_TLV_PDEV_FTM_INTG_CMDID,
4284 .vdev_set_keepalive_cmdid = WMI_TLV_VDEV_SET_KEEPALIVE_CMDID,
4285 .vdev_get_keepalive_cmdid = WMI_TLV_VDEV_GET_KEEPALIVE_CMDID,
4286 .force_fw_hang_cmdid = WMI_TLV_FORCE_FW_HANG_CMDID,
4287 .gpio_config_cmdid = WMI_TLV_GPIO_CONFIG_CMDID,
4288 .gpio_output_cmdid = WMI_TLV_GPIO_OUTPUT_CMDID,
4289 .pdev_get_temperature_cmdid = WMI_TLV_PDEV_GET_TEMPERATURE_CMDID,
4290 .vdev_set_wmm_params_cmdid = WMI_TLV_VDEV_SET_WMM_PARAMS_CMDID,
4291 .tdls_set_state_cmdid = WMI_TLV_TDLS_SET_STATE_CMDID,
4292 .tdls_peer_update_cmdid = WMI_TLV_TDLS_PEER_UPDATE_CMDID,
4293 .adaptive_qcs_cmdid = WMI_TLV_RESMGR_ADAPTIVE_OCS_CMDID,
4294 .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
4295 .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
4296 .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
4297 .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
4298 .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
4299 .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
4300 .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
4301 .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
4302 .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
4303 .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
4304 .oem_req_cmdid = WMI_CMD_UNSUPPORTED,
4305 .nan_cmdid = WMI_CMD_UNSUPPORTED,
4306 .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
4307 .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
4308 .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
4309 .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
4310 .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
4311 .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
4312 .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
4313 .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
4314 .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
4315 .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
4316 .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
4317 .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
4318 .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
4319 .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
4320 .fwtest_cmdid = WMI_CMD_UNSUPPORTED,
4321 .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
4322 .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
4323 .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
4324 .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
4325 .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
4326};
4327
/* pdev (physical device) parameter ID mapping for TLV-based firmware.
 * Entries set to *_PARAM_UNSUPPORTED are rejected before being sent.
 */
static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
	.tx_chain_mask = WMI_TLV_PDEV_PARAM_TX_CHAIN_MASK,
	.rx_chain_mask = WMI_TLV_PDEV_PARAM_RX_CHAIN_MASK,
	.txpower_limit2g = WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT2G,
	.txpower_limit5g = WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT5G,
	.txpower_scale = WMI_TLV_PDEV_PARAM_TXPOWER_SCALE,
	.beacon_gen_mode = WMI_TLV_PDEV_PARAM_BEACON_GEN_MODE,
	.beacon_tx_mode = WMI_TLV_PDEV_PARAM_BEACON_TX_MODE,
	.resmgr_offchan_mode = WMI_TLV_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
	.protection_mode = WMI_TLV_PDEV_PARAM_PROTECTION_MODE,
	.dynamic_bw = WMI_TLV_PDEV_PARAM_DYNAMIC_BW,
	.non_agg_sw_retry_th = WMI_TLV_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
	.agg_sw_retry_th = WMI_TLV_PDEV_PARAM_AGG_SW_RETRY_TH,
	.sta_kickout_th = WMI_TLV_PDEV_PARAM_STA_KICKOUT_TH,
	.ac_aggrsize_scaling = WMI_TLV_PDEV_PARAM_AC_AGGRSIZE_SCALING,
	.ltr_enable = WMI_TLV_PDEV_PARAM_LTR_ENABLE,
	.ltr_ac_latency_be = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BE,
	.ltr_ac_latency_bk = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BK,
	.ltr_ac_latency_vi = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VI,
	.ltr_ac_latency_vo = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VO,
	.ltr_ac_latency_timeout = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
	.ltr_sleep_override = WMI_TLV_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
	.ltr_rx_override = WMI_TLV_PDEV_PARAM_LTR_RX_OVERRIDE,
	.ltr_tx_activity_timeout = WMI_TLV_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
	.l1ss_enable = WMI_TLV_PDEV_PARAM_L1SS_ENABLE,
	.dsleep_enable = WMI_TLV_PDEV_PARAM_DSLEEP_ENABLE,
	.pcielp_txbuf_flush = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
	/* NOTE(review): watermark maps to the TMO_EN ID here — looks odd but
	 * matches the firmware interface as written; confirm before changing.
	 */
	.pcielp_txbuf_watermark = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
	.pcielp_txbuf_tmo_en = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
	.pcielp_txbuf_tmo_value = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
	.pdev_stats_update_period = WMI_TLV_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
	.vdev_stats_update_period = WMI_TLV_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
	.peer_stats_update_period = WMI_TLV_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
	.bcnflt_stats_update_period =
		WMI_TLV_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
	.pmf_qos = WMI_TLV_PDEV_PARAM_PMF_QOS,
	.arp_ac_override = WMI_TLV_PDEV_PARAM_ARP_AC_OVERRIDE,
	.dcs = WMI_TLV_PDEV_PARAM_DCS,
	.ani_enable = WMI_TLV_PDEV_PARAM_ANI_ENABLE,
	.ani_poll_period = WMI_TLV_PDEV_PARAM_ANI_POLL_PERIOD,
	.ani_listen_period = WMI_TLV_PDEV_PARAM_ANI_LISTEN_PERIOD,
	.ani_ofdm_level = WMI_TLV_PDEV_PARAM_ANI_OFDM_LEVEL,
	.ani_cck_level = WMI_TLV_PDEV_PARAM_ANI_CCK_LEVEL,
	.dyntxchain = WMI_TLV_PDEV_PARAM_DYNTXCHAIN,
	.proxy_sta = WMI_TLV_PDEV_PARAM_PROXY_STA,
	.idle_ps_config = WMI_TLV_PDEV_PARAM_IDLE_PS_CONFIG,
	.power_gating_sleep = WMI_TLV_PDEV_PARAM_POWER_GATING_SLEEP,
	.fast_channel_reset = WMI_TLV_PDEV_PARAM_UNSUPPORTED,
	.burst_dur = WMI_TLV_PDEV_PARAM_BURST_DUR,
	.burst_enable = WMI_TLV_PDEV_PARAM_BURST_ENABLE,
	.cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
	.aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
	.rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
	.smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
	.igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
	.igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
	.antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
	.rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
	.proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
	.remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
	.peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
	.igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
	.block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
	.mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
	.noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
	.noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
	.dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
	.atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
	.atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
	.ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
	.mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
	.sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
	.signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
	.signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
	.enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
	.enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
	.cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
	.rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
	.pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
	.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
	.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
	.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
	.rfkill_config = WMI_TLV_PDEV_PARAM_HW_RFKILL_CONFIG,
	.rfkill_enable = WMI_TLV_PDEV_PARAM_RFKILL_ENABLE,
	.peer_stats_info_enable = WMI_TLV_PDEV_PARAM_PEER_STATS_INFO_ENABLE,
};
4425
/* Per-peer parameter ID mapping for TLV-based firmware. */
static struct wmi_peer_param_map wmi_tlv_peer_param_map = {
	.smps_state = WMI_TLV_PEER_SMPS_STATE,
	.ampdu = WMI_TLV_PEER_AMPDU,
	.authorize = WMI_TLV_PEER_AUTHORIZE,
	.chan_width = WMI_TLV_PEER_CHAN_WIDTH,
	.nss = WMI_TLV_PEER_NSS,
	.use_4addr = WMI_TLV_PEER_USE_4ADDR,
	.membership = WMI_TLV_PEER_MEMBERSHIP,
	.user_pos = WMI_TLV_PEER_USERPOS,
	.crit_proto_hint_enabled = WMI_TLV_PEER_CRIT_PROTO_HINT_ENABLED,
	.tx_fail_cnt_thr = WMI_TLV_PEER_TX_FAIL_CNT_THR,
	.set_hw_retry_cts2s = WMI_TLV_PEER_SET_HW_RETRY_CTS2S,
	.ibss_atim_win_len = WMI_TLV_PEER_IBSS_ATIM_WINDOW_LENGTH,
	.phymode = WMI_TLV_PEER_PHYMODE,
	.use_fixed_power = WMI_TLV_PEER_USE_FIXED_PWR,
	.dummy_var = WMI_TLV_PEER_DUMMY_VAR,
};
4443
/* vdev (virtual device) parameter ID mapping for TLV-based firmware.
 * Entries set to *_PARAM_UNSUPPORTED are rejected before being sent.
 */
static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = {
	.rts_threshold = WMI_TLV_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_TLV_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_TLV_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_TLV_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_TLV_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_TLV_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_TLV_VDEV_PARAM_SLOT_TIME,
	.preamble = WMI_TLV_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_TLV_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_TLV_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_TLV_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_TLV_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_TLV_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
		WMI_TLV_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_TLV_VDEV_PARAM_WDS,
	.atim_window = WMI_TLV_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_TLV_VDEV_PARAM_BMISS_COUNT_MAX,
	.bmiss_first_bcnt = WMI_TLV_VDEV_PARAM_BMISS_FIRST_BCNT,
	.bmiss_final_bcnt = WMI_TLV_VDEV_PARAM_BMISS_FINAL_BCNT,
	.feature_wmm = WMI_TLV_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_TLV_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_TLV_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection = WMI_TLV_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_TLV_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_TLV_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_TLV_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_TLV_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_TLV_VDEV_PARAM_SGI,
	.ldpc = WMI_TLV_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_TLV_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_TLV_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_TLV_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_TLV_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_TLV_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_TLV_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_TLV_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_TLV_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_TLV_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_TLV_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
		WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
		WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
		WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_TLV_VDEV_PARAM_AP_ENABLE_NAWDS,
	.mcast2ucast_set = WMI_TLV_VDEV_PARAM_UNSUPPORTED,
	.enable_rtscts = WMI_TLV_VDEV_PARAM_ENABLE_RTSCTS,
	.txbf = WMI_TLV_VDEV_PARAM_TXBF,
	.packet_powersave = WMI_TLV_VDEV_PARAM_PACKET_POWERSAVE,
	.drop_unencry = WMI_TLV_VDEV_PARAM_DROP_UNENCRY,
	.tx_encap_type = WMI_TLV_VDEV_PARAM_TX_ENCAP_TYPE,
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
		WMI_TLV_VDEV_PARAM_UNSUPPORTED,
	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
	.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
	.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
	.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
	.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
};
4517
/* WMI op vtable for TLV-based firmware.  The common ath10k WMI layer
 * dispatches through these for event parsing (pull_*) and command
 * generation (gen_*); NULL/absent ops mean the feature is unavailable
 * or handled elsewhere (e.g. mgmt tx via HTT).
 */
static const struct wmi_ops wmi_tlv_ops = {
	.rx = ath10k_wmi_tlv_op_rx,
	.map_svc = wmi_tlv_svc_map,
	.map_svc_ext = wmi_tlv_svc_map_ext,

	.pull_scan = ath10k_wmi_tlv_op_pull_scan_ev,
	.pull_mgmt_rx = ath10k_wmi_tlv_op_pull_mgmt_rx_ev,
	.pull_mgmt_tx_compl = ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev,
	.pull_mgmt_tx_bundle_compl = ath10k_wmi_tlv_op_pull_mgmt_tx_bundle_compl_ev,
	.pull_ch_info = ath10k_wmi_tlv_op_pull_ch_info_ev,
	.pull_vdev_start = ath10k_wmi_tlv_op_pull_vdev_start_ev,
	.pull_peer_kick = ath10k_wmi_tlv_op_pull_peer_kick_ev,
	.pull_swba = ath10k_wmi_tlv_op_pull_swba_ev,
	.pull_phyerr_hdr = ath10k_wmi_tlv_op_pull_phyerr_ev_hdr,
	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
	.pull_svc_rdy = ath10k_wmi_tlv_op_pull_svc_rdy_ev,
	.pull_rdy = ath10k_wmi_tlv_op_pull_rdy_ev,
	.pull_svc_avail = ath10k_wmi_tlv_op_pull_svc_avail,
	.pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats,
	.pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev,
	.pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev,
	.pull_echo_ev = ath10k_wmi_tlv_op_pull_echo_ev,
	.get_txbf_conf_scheme = ath10k_wmi_tlv_txbf_conf_scheme,

	.gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend,
	.gen_pdev_resume = ath10k_wmi_tlv_op_gen_pdev_resume,
	.gen_pdev_set_rd = ath10k_wmi_tlv_op_gen_pdev_set_rd,
	.gen_pdev_set_param = ath10k_wmi_tlv_op_gen_pdev_set_param,
	.gen_init = ath10k_wmi_tlv_op_gen_init,
	.gen_start_scan = ath10k_wmi_tlv_op_gen_start_scan,
	.gen_stop_scan = ath10k_wmi_tlv_op_gen_stop_scan,
	.gen_vdev_create = ath10k_wmi_tlv_op_gen_vdev_create,
	.gen_vdev_delete = ath10k_wmi_tlv_op_gen_vdev_delete,
	.gen_vdev_start = ath10k_wmi_tlv_op_gen_vdev_start,
	.gen_vdev_stop = ath10k_wmi_tlv_op_gen_vdev_stop,
	.gen_vdev_up = ath10k_wmi_tlv_op_gen_vdev_up,
	.gen_vdev_down = ath10k_wmi_tlv_op_gen_vdev_down,
	.gen_vdev_set_param = ath10k_wmi_tlv_op_gen_vdev_set_param,
	.gen_vdev_install_key = ath10k_wmi_tlv_op_gen_vdev_install_key,
	.gen_vdev_wmm_conf = ath10k_wmi_tlv_op_gen_vdev_wmm_conf,
	.gen_peer_create = ath10k_wmi_tlv_op_gen_peer_create,
	.gen_peer_delete = ath10k_wmi_tlv_op_gen_peer_delete,
	.gen_peer_flush = ath10k_wmi_tlv_op_gen_peer_flush,
	.gen_peer_set_param = ath10k_wmi_tlv_op_gen_peer_set_param,
	.gen_peer_assoc = ath10k_wmi_tlv_op_gen_peer_assoc,
	.gen_set_psmode = ath10k_wmi_tlv_op_gen_set_psmode,
	.gen_set_sta_ps = ath10k_wmi_tlv_op_gen_set_sta_ps,
	.gen_set_ap_ps = ath10k_wmi_tlv_op_gen_set_ap_ps,
	.gen_scan_chan_list = ath10k_wmi_tlv_op_gen_scan_chan_list,
	.gen_scan_prob_req_oui = ath10k_wmi_tlv_op_gen_scan_prob_req_oui,
	.gen_beacon_dma = ath10k_wmi_tlv_op_gen_beacon_dma,
	.gen_pdev_set_wmm = ath10k_wmi_tlv_op_gen_pdev_set_wmm,
	.gen_request_stats = ath10k_wmi_tlv_op_gen_request_stats,
	.gen_request_peer_stats_info = ath10k_wmi_tlv_op_gen_request_peer_stats_info,
	.gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang,
	/* .gen_mgmt_tx = not implemented; HTT is used */
	.gen_mgmt_tx_send = ath10k_wmi_tlv_op_gen_mgmt_tx_send,
	.cleanup_mgmt_tx_send = ath10k_wmi_tlv_op_cleanup_mgmt_tx_send,
	.gen_dbglog_cfg = ath10k_wmi_tlv_op_gen_dbglog_cfg,
	.gen_pktlog_enable = ath10k_wmi_tlv_op_gen_pktlog_enable,
	.gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable,
	.gen_pdev_set_quiet_mode = ath10k_wmi_tlv_op_gen_pdev_set_quiet_mode,
	.gen_pdev_get_temperature = ath10k_wmi_tlv_op_gen_pdev_get_temperature,
	/* .gen_addba_clear_resp not implemented */
	/* .gen_addba_send not implemented */
	/* .gen_addba_set_resp not implemented */
	/* .gen_delba_send not implemented */
	.gen_bcn_tmpl = ath10k_wmi_tlv_op_gen_bcn_tmpl,
	.gen_prb_tmpl = ath10k_wmi_tlv_op_gen_prb_tmpl,
	.gen_p2p_go_bcn_ie = ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie,
	.gen_vdev_sta_uapsd = ath10k_wmi_tlv_op_gen_vdev_sta_uapsd,
	.gen_sta_keepalive = ath10k_wmi_tlv_op_gen_sta_keepalive,
	.gen_wow_enable = ath10k_wmi_tlv_op_gen_wow_enable,
	.gen_wow_add_wakeup_event = ath10k_wmi_tlv_op_gen_wow_add_wakeup_event,
	.gen_wow_host_wakeup_ind = ath10k_wmi_tlv_gen_wow_host_wakeup_ind,
	.gen_wow_add_pattern = ath10k_wmi_tlv_op_gen_wow_add_pattern,
	.gen_wow_del_pattern = ath10k_wmi_tlv_op_gen_wow_del_pattern,
	.gen_wow_config_pno = ath10k_wmi_tlv_op_gen_config_pno,
	.gen_update_fw_tdls_state = ath10k_wmi_tlv_op_gen_update_fw_tdls_state,
	.gen_tdls_peer_update = ath10k_wmi_tlv_op_gen_tdls_peer_update,
	.gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs,
	.fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
	.get_vdev_subtype = ath10k_wmi_tlv_op_get_vdev_subtype,
	.gen_echo = ath10k_wmi_tlv_op_gen_echo,
	.gen_vdev_spectral_conf = ath10k_wmi_tlv_op_gen_vdev_spectral_conf,
	.gen_vdev_spectral_enable = ath10k_wmi_tlv_op_gen_vdev_spectral_enable,
};
4605
/* Peer capability/assoc flag bit mapping for TLV-based firmware. */
static const struct wmi_peer_flags_map wmi_tlv_peer_flags_map = {
	.auth = WMI_TLV_PEER_AUTH,
	.qos = WMI_TLV_PEER_QOS,
	.need_ptk_4_way = WMI_TLV_PEER_NEED_PTK_4_WAY,
	.need_gtk_2_way = WMI_TLV_PEER_NEED_GTK_2_WAY,
	.apsd = WMI_TLV_PEER_APSD,
	.ht = WMI_TLV_PEER_HT,
	.bw40 = WMI_TLV_PEER_40MHZ,
	.stbc = WMI_TLV_PEER_STBC,
	.ldbc = WMI_TLV_PEER_LDPC,
	.dyn_mimops = WMI_TLV_PEER_DYN_MIMOPS,
	.static_mimops = WMI_TLV_PEER_STATIC_MIMOPS,
	.spatial_mux = WMI_TLV_PEER_SPATIAL_MUX,
	.vht = WMI_TLV_PEER_VHT,
	.bw80 = WMI_TLV_PEER_80MHZ,
	.pmf = WMI_TLV_PEER_PMF,
	.bw160 = WMI_TLV_PEER_160MHZ,
};
4624
4625/************/
4626/* TLV init */
4627/************/
4628
4629void ath10k_wmi_tlv_attach(struct ath10k *ar)
4630{
4631 ar->wmi.cmd = &wmi_tlv_cmd_map;
4632 ar->wmi.vdev_param = &wmi_tlv_vdev_param_map;
4633 ar->wmi.pdev_param = &wmi_tlv_pdev_param_map;
4634 ar->wmi.peer_param = &wmi_tlv_peer_param_map;
4635 ar->wmi.ops = &wmi_tlv_ops;
4636 ar->wmi.peer_flags = &wmi_tlv_peer_flags_map;
4637}
1/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17#include "core.h"
18#include "debug.h"
19#include "mac.h"
20#include "hw.h"
21#include "mac.h"
22#include "wmi.h"
23#include "wmi-ops.h"
24#include "wmi-tlv.h"
25#include "p2p.h"
26#include "testmode.h"
27
28/***************/
29/* TLV helpers */
30/**************/
31
/* Per-tag validation policy: minimum payload length accepted for a TLV
 * tag; 0 means no minimum is enforced.
 */
struct wmi_tlv_policy {
	size_t min_len;
};
35
/* Minimum-length table indexed by TLV tag, consulted by the TLV stream
 * iterator to reject truncated events from firmware.
 */
static const struct wmi_tlv_policy wmi_tlv_policies[] = {
	[WMI_TLV_TAG_ARRAY_BYTE]
		= { .min_len = 0 },
	[WMI_TLV_TAG_ARRAY_UINT32]
		= { .min_len = 0 },
	[WMI_TLV_TAG_STRUCT_SCAN_EVENT]
		= { .min_len = sizeof(struct wmi_scan_event) },
	[WMI_TLV_TAG_STRUCT_MGMT_RX_HDR]
		= { .min_len = sizeof(struct wmi_tlv_mgmt_rx_ev) },
	[WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT]
		= { .min_len = sizeof(struct wmi_chan_info_event) },
	[WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT]
		= { .min_len = sizeof(struct wmi_vdev_start_response_event) },
	[WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT]
		= { .min_len = sizeof(struct wmi_peer_sta_kickout_event) },
	[WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT]
		= { .min_len = sizeof(struct wmi_host_swba_event) },
	[WMI_TLV_TAG_STRUCT_TIM_INFO]
		= { .min_len = sizeof(struct wmi_tim_info) },
	[WMI_TLV_TAG_STRUCT_P2P_NOA_INFO]
		= { .min_len = sizeof(struct wmi_p2p_noa_info) },
	[WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT]
		= { .min_len = sizeof(struct wmi_tlv_svc_rdy_ev) },
	[WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES]
		= { .min_len = sizeof(struct hal_reg_capabilities) },
	[WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ]
		= { .min_len = sizeof(struct wlan_host_mem_req) },
	[WMI_TLV_TAG_STRUCT_READY_EVENT]
		= { .min_len = sizeof(struct wmi_tlv_rdy_ev) },
	[WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT]
		= { .min_len = sizeof(struct wmi_tlv_bcn_tx_status_ev) },
	[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT]
		= { .min_len = sizeof(struct wmi_tlv_diag_data_ev) },
	[WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT]
		= { .min_len = sizeof(struct wmi_tlv_p2p_noa_ev) },
	[WMI_TLV_TAG_STRUCT_ROAM_EVENT]
		= { .min_len = sizeof(struct wmi_tlv_roam_ev) },
	[WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO]
		= { .min_len = sizeof(struct wmi_tlv_wow_event_info) },
	[WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT]
		= { .min_len = sizeof(struct wmi_tlv_tx_pause_ev) },
};
78
79static int
80ath10k_wmi_tlv_iter(struct ath10k *ar, const void *ptr, size_t len,
81 int (*iter)(struct ath10k *ar, u16 tag, u16 len,
82 const void *ptr, void *data),
83 void *data)
84{
85 const void *begin = ptr;
86 const struct wmi_tlv *tlv;
87 u16 tlv_tag, tlv_len;
88 int ret;
89
90 while (len > 0) {
91 if (len < sizeof(*tlv)) {
92 ath10k_dbg(ar, ATH10K_DBG_WMI,
93 "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
94 ptr - begin, len, sizeof(*tlv));
95 return -EINVAL;
96 }
97
98 tlv = ptr;
99 tlv_tag = __le16_to_cpu(tlv->tag);
100 tlv_len = __le16_to_cpu(tlv->len);
101 ptr += sizeof(*tlv);
102 len -= sizeof(*tlv);
103
104 if (tlv_len > len) {
105 ath10k_dbg(ar, ATH10K_DBG_WMI,
106 "wmi tlv parse failure of tag %hhu at byte %zd (%zu bytes left, %hhu expected)\n",
107 tlv_tag, ptr - begin, len, tlv_len);
108 return -EINVAL;
109 }
110
111 if (tlv_tag < ARRAY_SIZE(wmi_tlv_policies) &&
112 wmi_tlv_policies[tlv_tag].min_len &&
113 wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
114 ath10k_dbg(ar, ATH10K_DBG_WMI,
115 "wmi tlv parse failure of tag %hhu at byte %zd (%hhu bytes is less than min length %zu)\n",
116 tlv_tag, ptr - begin, tlv_len,
117 wmi_tlv_policies[tlv_tag].min_len);
118 return -EINVAL;
119 }
120
121 ret = iter(ar, tlv_tag, tlv_len, ptr, data);
122 if (ret)
123 return ret;
124
125 ptr += tlv_len;
126 len -= tlv_len;
127 }
128
129 return 0;
130}
131
132static int ath10k_wmi_tlv_iter_parse(struct ath10k *ar, u16 tag, u16 len,
133 const void *ptr, void *data)
134{
135 const void **tb = data;
136
137 if (tag < WMI_TLV_TAG_MAX)
138 tb[tag] = ptr;
139
140 return 0;
141}
142
143static int ath10k_wmi_tlv_parse(struct ath10k *ar, const void **tb,
144 const void *ptr, size_t len)
145{
146 return ath10k_wmi_tlv_iter(ar, ptr, len, ath10k_wmi_tlv_iter_parse,
147 (void *)tb);
148}
149
150static const void **
151ath10k_wmi_tlv_parse_alloc(struct ath10k *ar, const void *ptr,
152 size_t len, gfp_t gfp)
153{
154 const void **tb;
155 int ret;
156
157 tb = kzalloc(sizeof(*tb) * WMI_TLV_TAG_MAX, gfp);
158 if (!tb)
159 return ERR_PTR(-ENOMEM);
160
161 ret = ath10k_wmi_tlv_parse(ar, tb, ptr, len);
162 if (ret) {
163 kfree(tb);
164 return ERR_PTR(ret);
165 }
166
167 return tb;
168}
169
170static u16 ath10k_wmi_tlv_len(const void *ptr)
171{
172 return __le16_to_cpu((((const struct wmi_tlv *)ptr) - 1)->len);
173}
174
175/**************/
176/* TLV events */
177/**************/
178static int ath10k_wmi_tlv_event_bcn_tx_status(struct ath10k *ar,
179 struct sk_buff *skb)
180{
181 const void **tb;
182 const struct wmi_tlv_bcn_tx_status_ev *ev;
183 struct ath10k_vif *arvif;
184 u32 vdev_id, tx_status;
185 int ret;
186
187 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
188 if (IS_ERR(tb)) {
189 ret = PTR_ERR(tb);
190 ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
191 return ret;
192 }
193
194 ev = tb[WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT];
195 if (!ev) {
196 kfree(tb);
197 return -EPROTO;
198 }
199
200 tx_status = __le32_to_cpu(ev->tx_status);
201 vdev_id = __le32_to_cpu(ev->vdev_id);
202
203 switch (tx_status) {
204 case WMI_TLV_BCN_TX_STATUS_OK:
205 break;
206 case WMI_TLV_BCN_TX_STATUS_XRETRY:
207 case WMI_TLV_BCN_TX_STATUS_DROP:
208 case WMI_TLV_BCN_TX_STATUS_FILTERED:
209 /* FIXME: It's probably worth telling mac80211 to stop the
210 * interface as it is crippled.
211 */
212 ath10k_warn(ar, "received bcn tmpl tx status on vdev %i: %d",
213 vdev_id, tx_status);
214 break;
215 }
216
217 arvif = ath10k_get_arvif(ar, vdev_id);
218 if (arvif && arvif->is_up && arvif->vif->csa_active)
219 ieee80211_queue_work(ar->hw, &arvif->ap_csa_work);
220
221 kfree(tb);
222 return 0;
223}
224
/* Handle a firmware diag data container event: walk the item list in
 * the byte-array TLV and emit one tracepoint per diag item.
 */
static int ath10k_wmi_tlv_event_diag_data(struct ath10k *ar,
					  struct sk_buff *skb)
{
	const void **tb;
	const struct wmi_tlv_diag_data_ev *ev;
	const struct wmi_tlv_diag_item *item;
	const void *data;
	int ret, num_items, len;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT];
	data = tb[WMI_TLV_TAG_ARRAY_BYTE];
	if (!ev || !data) {
		kfree(tb);
		return -EPROTO;
	}

	num_items = __le32_to_cpu(ev->num_items);
	/* len comes from the TLV header preceding the byte array. */
	len = ath10k_wmi_tlv_len(data);

	while (num_items--) {
		if (len == 0)
			break;
		if (len < sizeof(*item)) {
			ath10k_warn(ar, "failed to parse diag data: can't fit item header\n");
			break;
		}

		item = data;

		/* Full item (header + payload) must fit in what's left. */
		if (len < sizeof(*item) + __le16_to_cpu(item->len)) {
			ath10k_warn(ar, "failed to parse diag data: item is too long\n");
			break;
		}

		trace_ath10k_wmi_diag_container(ar,
						item->type,
						__le32_to_cpu(item->timestamp),
						__le32_to_cpu(item->code),
						__le16_to_cpu(item->len),
						item->payload);

		/* Items are padded to 4-byte boundaries in the stream. */
		len -= sizeof(*item);
		len -= roundup(__le16_to_cpu(item->len), 4);

		data += sizeof(*item);
		data += roundup(__le16_to_cpu(item->len), 4);
	}

	/* num_items reaches -1 only if the loop consumed every item; any
	 * other value (or leftover bytes) means the event was malformed.
	 */
	if (num_items != -1 || len != 0)
		ath10k_warn(ar, "failed to parse diag data event: num_items %d len %d\n",
			    num_items, len);

	kfree(tb);
	return 0;
}
287
288static int ath10k_wmi_tlv_event_diag(struct ath10k *ar,
289 struct sk_buff *skb)
290{
291 const void **tb;
292 const void *data;
293 int ret, len;
294
295 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
296 if (IS_ERR(tb)) {
297 ret = PTR_ERR(tb);
298 ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
299 return ret;
300 }
301
302 data = tb[WMI_TLV_TAG_ARRAY_BYTE];
303 if (!data) {
304 kfree(tb);
305 return -EPROTO;
306 }
307 len = ath10k_wmi_tlv_len(data);
308
309 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv diag event len %d\n", len);
310 trace_ath10k_wmi_diag(ar, data, len);
311
312 kfree(tb);
313 return 0;
314}
315
316static int ath10k_wmi_tlv_event_p2p_noa(struct ath10k *ar,
317 struct sk_buff *skb)
318{
319 const void **tb;
320 const struct wmi_tlv_p2p_noa_ev *ev;
321 const struct wmi_p2p_noa_info *noa;
322 int ret, vdev_id;
323
324 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
325 if (IS_ERR(tb)) {
326 ret = PTR_ERR(tb);
327 ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
328 return ret;
329 }
330
331 ev = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT];
332 noa = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_INFO];
333
334 if (!ev || !noa) {
335 kfree(tb);
336 return -EPROTO;
337 }
338
339 vdev_id = __le32_to_cpu(ev->vdev_id);
340
341 ath10k_dbg(ar, ATH10K_DBG_WMI,
342 "wmi tlv p2p noa vdev_id %i descriptors %hhu\n",
343 vdev_id, noa->num_descriptors);
344
345 ath10k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa);
346 kfree(tb);
347 return 0;
348}
349
350static int ath10k_wmi_tlv_event_tx_pause(struct ath10k *ar,
351 struct sk_buff *skb)
352{
353 const void **tb;
354 const struct wmi_tlv_tx_pause_ev *ev;
355 int ret, vdev_id;
356 u32 pause_id, action, vdev_map, peer_id, tid_map;
357
358 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
359 if (IS_ERR(tb)) {
360 ret = PTR_ERR(tb);
361 ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
362 return ret;
363 }
364
365 ev = tb[WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT];
366 if (!ev) {
367 kfree(tb);
368 return -EPROTO;
369 }
370
371 pause_id = __le32_to_cpu(ev->pause_id);
372 action = __le32_to_cpu(ev->action);
373 vdev_map = __le32_to_cpu(ev->vdev_map);
374 peer_id = __le32_to_cpu(ev->peer_id);
375 tid_map = __le32_to_cpu(ev->tid_map);
376
377 ath10k_dbg(ar, ATH10K_DBG_WMI,
378 "wmi tlv tx pause pause_id %u action %u vdev_map 0x%08x peer_id %u tid_map 0x%08x\n",
379 pause_id, action, vdev_map, peer_id, tid_map);
380
381 switch (pause_id) {
382 case WMI_TLV_TX_PAUSE_ID_MCC:
383 case WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA:
384 case WMI_TLV_TX_PAUSE_ID_P2P_GO_PS:
385 case WMI_TLV_TX_PAUSE_ID_AP_PS:
386 case WMI_TLV_TX_PAUSE_ID_IBSS_PS:
387 for (vdev_id = 0; vdev_map; vdev_id++) {
388 if (!(vdev_map & BIT(vdev_id)))
389 continue;
390
391 vdev_map &= ~BIT(vdev_id);
392 ath10k_mac_handle_tx_pause_vdev(ar, vdev_id, pause_id,
393 action);
394 }
395 break;
396 case WMI_TLV_TX_PAUSE_ID_AP_PEER_PS:
397 case WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD:
398 case WMI_TLV_TX_PAUSE_ID_STA_ADD_BA:
399 case WMI_TLV_TX_PAUSE_ID_HOST:
400 ath10k_dbg(ar, ATH10K_DBG_MAC,
401 "mac ignoring unsupported tx pause id %d\n",
402 pause_id);
403 break;
404 default:
405 ath10k_dbg(ar, ATH10K_DBG_MAC,
406 "mac ignoring unknown tx pause vdev %d\n",
407 pause_id);
408 break;
409 }
410
411 kfree(tb);
412 return 0;
413}
414
415/***********/
416/* TLV ops */
417/***********/
418
/* Top-level WMI TLV event dispatcher.  Decodes the command id from the
 * WMI header and routes the event to its handler.  The skb is freed at
 * `out` unless a handler takes ownership (the mgmt_rx and service_ready
 * cases return early without freeing).
 */
static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_cmd_hdr *cmd_hdr;
	enum wmi_tlv_event_id id;
	bool consumed;

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);

	/* strip the WMI header; NULL means the skb was too short */
	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
		goto out;

	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);

	consumed = ath10k_tm_event_wmi(ar, id, skb);

	/* Ready event must be handled normally also in UTF mode so that we
	 * know the UTF firmware has booted, others we are just bypass WMI
	 * events to testmode.
	 */
	if (consumed && id != WMI_TLV_READY_EVENTID) {
		ath10k_dbg(ar, ATH10K_DBG_WMI,
			   "wmi tlv testmode consumed 0x%x\n", id);
		goto out;
	}

	switch (id) {
	case WMI_TLV_MGMT_RX_EVENTID:
		ath10k_wmi_event_mgmt_rx(ar, skb);
		/* mgmt_rx() owns the skb now! */
		return;
	case WMI_TLV_SCAN_EVENTID:
		ath10k_wmi_event_scan(ar, skb);
		break;
	case WMI_TLV_CHAN_INFO_EVENTID:
		ath10k_wmi_event_chan_info(ar, skb);
		break;
	case WMI_TLV_ECHO_EVENTID:
		ath10k_wmi_event_echo(ar, skb);
		break;
	case WMI_TLV_DEBUG_MESG_EVENTID:
		ath10k_wmi_event_debug_mesg(ar, skb);
		break;
	case WMI_TLV_UPDATE_STATS_EVENTID:
		ath10k_wmi_event_update_stats(ar, skb);
		break;
	case WMI_TLV_VDEV_START_RESP_EVENTID:
		ath10k_wmi_event_vdev_start_resp(ar, skb);
		break;
	case WMI_TLV_VDEV_STOPPED_EVENTID:
		ath10k_wmi_event_vdev_stopped(ar, skb);
		break;
	case WMI_TLV_PEER_STA_KICKOUT_EVENTID:
		ath10k_wmi_event_peer_sta_kickout(ar, skb);
		break;
	case WMI_TLV_HOST_SWBA_EVENTID:
		ath10k_wmi_event_host_swba(ar, skb);
		break;
	case WMI_TLV_TBTTOFFSET_UPDATE_EVENTID:
		ath10k_wmi_event_tbttoffset_update(ar, skb);
		break;
	case WMI_TLV_PHYERR_EVENTID:
		ath10k_wmi_event_phyerr(ar, skb);
		break;
	case WMI_TLV_ROAM_EVENTID:
		ath10k_wmi_event_roam(ar, skb);
		break;
	case WMI_TLV_PROFILE_MATCH:
		ath10k_wmi_event_profile_match(ar, skb);
		break;
	case WMI_TLV_DEBUG_PRINT_EVENTID:
		ath10k_wmi_event_debug_print(ar, skb);
		break;
	case WMI_TLV_PDEV_QVIT_EVENTID:
		ath10k_wmi_event_pdev_qvit(ar, skb);
		break;
	case WMI_TLV_WLAN_PROFILE_DATA_EVENTID:
		ath10k_wmi_event_wlan_profile_data(ar, skb);
		break;
	case WMI_TLV_RTT_MEASUREMENT_REPORT_EVENTID:
		ath10k_wmi_event_rtt_measurement_report(ar, skb);
		break;
	case WMI_TLV_TSF_MEASUREMENT_REPORT_EVENTID:
		ath10k_wmi_event_tsf_measurement_report(ar, skb);
		break;
	case WMI_TLV_RTT_ERROR_REPORT_EVENTID:
		ath10k_wmi_event_rtt_error_report(ar, skb);
		break;
	case WMI_TLV_WOW_WAKEUP_HOST_EVENTID:
		ath10k_wmi_event_wow_wakeup_host(ar, skb);
		break;
	case WMI_TLV_DCS_INTERFERENCE_EVENTID:
		ath10k_wmi_event_dcs_interference(ar, skb);
		break;
	case WMI_TLV_PDEV_TPC_CONFIG_EVENTID:
		ath10k_wmi_event_pdev_tpc_config(ar, skb);
		break;
	case WMI_TLV_PDEV_FTM_INTG_EVENTID:
		ath10k_wmi_event_pdev_ftm_intg(ar, skb);
		break;
	case WMI_TLV_GTK_OFFLOAD_STATUS_EVENTID:
		ath10k_wmi_event_gtk_offload_status(ar, skb);
		break;
	case WMI_TLV_GTK_REKEY_FAIL_EVENTID:
		ath10k_wmi_event_gtk_rekey_fail(ar, skb);
		break;
	case WMI_TLV_TX_DELBA_COMPLETE_EVENTID:
		ath10k_wmi_event_delba_complete(ar, skb);
		break;
	case WMI_TLV_TX_ADDBA_COMPLETE_EVENTID:
		ath10k_wmi_event_addba_complete(ar, skb);
		break;
	case WMI_TLV_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
		ath10k_wmi_event_vdev_install_key_complete(ar, skb);
		break;
	case WMI_TLV_SERVICE_READY_EVENTID:
		ath10k_wmi_event_service_ready(ar, skb);
		/* NOTE(review): returns without freeing here, so the
		 * service_ready handler appears to take ownership of the
		 * skb - confirm it is released on that path.
		 */
		return;
	case WMI_TLV_READY_EVENTID:
		ath10k_wmi_event_ready(ar, skb);
		break;
	case WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID:
		ath10k_wmi_tlv_event_bcn_tx_status(ar, skb);
		break;
	case WMI_TLV_DIAG_DATA_CONTAINER_EVENTID:
		ath10k_wmi_tlv_event_diag_data(ar, skb);
		break;
	case WMI_TLV_DIAG_EVENTID:
		ath10k_wmi_tlv_event_diag(ar, skb);
		break;
	case WMI_TLV_P2P_NOA_EVENTID:
		ath10k_wmi_tlv_event_p2p_noa(ar, skb);
		break;
	case WMI_TLV_TX_PAUSE_EVENTID:
		ath10k_wmi_tlv_event_tx_pause(ar, skb);
		break;
	default:
		ath10k_warn(ar, "Unknown eventid: %d\n", id);
		break;
	}

out:
	dev_kfree_skb(skb);
}
563
564static int ath10k_wmi_tlv_op_pull_scan_ev(struct ath10k *ar,
565 struct sk_buff *skb,
566 struct wmi_scan_ev_arg *arg)
567{
568 const void **tb;
569 const struct wmi_scan_event *ev;
570 int ret;
571
572 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
573 if (IS_ERR(tb)) {
574 ret = PTR_ERR(tb);
575 ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
576 return ret;
577 }
578
579 ev = tb[WMI_TLV_TAG_STRUCT_SCAN_EVENT];
580 if (!ev) {
581 kfree(tb);
582 return -EPROTO;
583 }
584
585 arg->event_type = ev->event_type;
586 arg->reason = ev->reason;
587 arg->channel_freq = ev->channel_freq;
588 arg->scan_req_id = ev->scan_req_id;
589 arg->scan_id = ev->scan_id;
590 arg->vdev_id = ev->vdev_id;
591
592 kfree(tb);
593 return 0;
594}
595
/* Extract a mgmt-rx event into @arg and rewrite the skb so that its
 * data area covers exactly the received 802.11 frame bytes.
 */
static int ath10k_wmi_tlv_op_pull_mgmt_rx_ev(struct ath10k *ar,
					     struct sk_buff *skb,
					     struct wmi_mgmt_rx_ev_arg *arg)
{
	const void **tb;
	const struct wmi_tlv_mgmt_rx_ev *ev;
	const u8 *frame;
	u32 msdu_len;
	int ret;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_MGMT_RX_HDR];
	frame = tb[WMI_TLV_TAG_ARRAY_BYTE];

	if (!ev || !frame) {
		kfree(tb);
		return -EPROTO;
	}

	/* metadata fields stay in wire (LE) order for the caller */
	arg->channel = ev->channel;
	arg->buf_len = ev->buf_len;
	arg->status = ev->status;
	arg->snr = ev->snr;
	arg->phy_mode = ev->phy_mode;
	arg->rate = ev->rate;

	msdu_len = __le32_to_cpu(arg->buf_len);

	/* reject a firmware-reported frame length that would run past
	 * the end of the skb
	 */
	if (skb->len < (frame - skb->data) + msdu_len) {
		kfree(tb);
		return -EPROTO;
	}

	/* shift the sk_buff to point to `frame` */
	/* `frame` points inside the skb's own buffer, so this sequence
	 * only moves the data/tail pointers: empty the skb, extend tail
	 * to the frame offset, pull so data lands on the frame start,
	 * then expose msdu_len bytes of payload.  Order matters.
	 */
	skb_trim(skb, 0);
	skb_put(skb, frame - skb->data);
	skb_pull(skb, frame - skb->data);
	skb_put(skb, msdu_len);

	kfree(tb);
	return 0;
}
644
645static int ath10k_wmi_tlv_op_pull_ch_info_ev(struct ath10k *ar,
646 struct sk_buff *skb,
647 struct wmi_ch_info_ev_arg *arg)
648{
649 const void **tb;
650 const struct wmi_chan_info_event *ev;
651 int ret;
652
653 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
654 if (IS_ERR(tb)) {
655 ret = PTR_ERR(tb);
656 ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
657 return ret;
658 }
659
660 ev = tb[WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT];
661 if (!ev) {
662 kfree(tb);
663 return -EPROTO;
664 }
665
666 arg->err_code = ev->err_code;
667 arg->freq = ev->freq;
668 arg->cmd_flags = ev->cmd_flags;
669 arg->noise_floor = ev->noise_floor;
670 arg->rx_clear_count = ev->rx_clear_count;
671 arg->cycle_count = ev->cycle_count;
672
673 kfree(tb);
674 return 0;
675}
676
/* Extract a vdev start response event into @arg (fields left in LE
 * order).
 */
static int
ath10k_wmi_tlv_op_pull_vdev_start_ev(struct ath10k *ar, struct sk_buff *skb,
				     struct wmi_vdev_start_ev_arg *arg)
{
	const void **tb;
	const struct wmi_vdev_start_response_event *ev;
	int ret;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT];
	if (!ev) {
		kfree(tb);
		return -EPROTO;
	}

	/* NOTE(review): skb_pull() only advances skb->data without
	 * touching the buffer contents, so reading ev's fields below is
	 * still safe even though ev points into the skb.  The purpose of
	 * the pull itself is unclear from here - confirm against callers.
	 */
	skb_pull(skb, sizeof(*ev));
	arg->vdev_id = ev->vdev_id;
	arg->req_id = ev->req_id;
	arg->resp_type = ev->resp_type;
	arg->status = ev->status;

	kfree(tb);
	return 0;
}
707
708static int ath10k_wmi_tlv_op_pull_peer_kick_ev(struct ath10k *ar,
709 struct sk_buff *skb,
710 struct wmi_peer_kick_ev_arg *arg)
711{
712 const void **tb;
713 const struct wmi_peer_sta_kickout_event *ev;
714 int ret;
715
716 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
717 if (IS_ERR(tb)) {
718 ret = PTR_ERR(tb);
719 ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
720 return ret;
721 }
722
723 ev = tb[WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT];
724 if (!ev) {
725 kfree(tb);
726 return -EPROTO;
727 }
728
729 arg->mac_addr = ev->peer_macaddr.addr;
730
731 kfree(tb);
732 return 0;
733}
734
/* Parser state shared by the SWBA (software beacon alert) TLV iteration
 * callbacks below.
 */
struct wmi_tlv_swba_parse {
	const struct wmi_host_swba_event *ev;	/* fixed event header */
	bool tim_done;		/* first ARRAY_STRUCT (TIM list) consumed */
	bool noa_done;		/* second ARRAY_STRUCT (NoA list) consumed */
	size_t n_tim;		/* TIM entries copied into arg->tim_info */
	size_t n_noa;		/* NoA pointers stored in arg->noa_info */
	struct wmi_swba_ev_arg *arg;	/* output argument being filled */
};
743
744static int ath10k_wmi_tlv_swba_tim_parse(struct ath10k *ar, u16 tag, u16 len,
745 const void *ptr, void *data)
746{
747 struct wmi_tlv_swba_parse *swba = data;
748 struct wmi_tim_info_arg *tim_info_arg;
749 const struct wmi_tim_info *tim_info_ev = ptr;
750
751 if (tag != WMI_TLV_TAG_STRUCT_TIM_INFO)
752 return -EPROTO;
753
754 if (swba->n_tim >= ARRAY_SIZE(swba->arg->tim_info))
755 return -ENOBUFS;
756
757 if (__le32_to_cpu(tim_info_ev->tim_len) >
758 sizeof(tim_info_ev->tim_bitmap)) {
759 ath10k_warn(ar, "refusing to parse invalid swba structure\n");
760 return -EPROTO;
761 }
762
763 tim_info_arg = &swba->arg->tim_info[swba->n_tim];
764 tim_info_arg->tim_len = tim_info_ev->tim_len;
765 tim_info_arg->tim_mcast = tim_info_ev->tim_mcast;
766 tim_info_arg->tim_bitmap = tim_info_ev->tim_bitmap;
767 tim_info_arg->tim_changed = tim_info_ev->tim_changed;
768 tim_info_arg->tim_num_ps_pending = tim_info_ev->tim_num_ps_pending;
769
770 swba->n_tim++;
771
772 return 0;
773}
774
775static int ath10k_wmi_tlv_swba_noa_parse(struct ath10k *ar, u16 tag, u16 len,
776 const void *ptr, void *data)
777{
778 struct wmi_tlv_swba_parse *swba = data;
779
780 if (tag != WMI_TLV_TAG_STRUCT_P2P_NOA_INFO)
781 return -EPROTO;
782
783 if (swba->n_noa >= ARRAY_SIZE(swba->arg->noa_info))
784 return -ENOBUFS;
785
786 swba->arg->noa_info[swba->n_noa++] = ptr;
787 return 0;
788}
789
/* Outer TLV iterator callback for SWBA events.
 *
 * The ARRAY_STRUCT blobs are positional: the first one encountered holds
 * the TIM info structures, the second the P2P NoA info.  tim_done and
 * noa_done record which arrays have already been seen so each one is
 * dispatched to the matching sub-parser exactly once.
 */
static int ath10k_wmi_tlv_swba_parse(struct ath10k *ar, u16 tag, u16 len,
				     const void *ptr, void *data)
{
	struct wmi_tlv_swba_parse *swba = data;
	int ret;

	switch (tag) {
	case WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT:
		swba->ev = ptr;
		break;
	case WMI_TLV_TAG_ARRAY_STRUCT:
		if (!swba->tim_done) {
			swba->tim_done = true;
			ret = ath10k_wmi_tlv_iter(ar, ptr, len,
						  ath10k_wmi_tlv_swba_tim_parse,
						  swba);
			if (ret)
				return ret;
		} else if (!swba->noa_done) {
			swba->noa_done = true;
			ret = ath10k_wmi_tlv_iter(ar, ptr, len,
						  ath10k_wmi_tlv_swba_noa_parse,
						  swba);
			if (ret)
				return ret;
		}
		break;
	default:
		/* unknown tags are skipped rather than treated as errors */
		break;
	}
	return 0;
}
822
823static int ath10k_wmi_tlv_op_pull_swba_ev(struct ath10k *ar,
824 struct sk_buff *skb,
825 struct wmi_swba_ev_arg *arg)
826{
827 struct wmi_tlv_swba_parse swba = { .arg = arg };
828 u32 map;
829 size_t n_vdevs;
830 int ret;
831
832 ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
833 ath10k_wmi_tlv_swba_parse, &swba);
834 if (ret) {
835 ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
836 return ret;
837 }
838
839 if (!swba.ev)
840 return -EPROTO;
841
842 arg->vdev_map = swba.ev->vdev_map;
843
844 for (map = __le32_to_cpu(arg->vdev_map), n_vdevs = 0; map; map >>= 1)
845 if (map & BIT(0))
846 n_vdevs++;
847
848 if (n_vdevs != swba.n_tim ||
849 n_vdevs != swba.n_noa)
850 return -EPROTO;
851
852 return 0;
853}
854
855static int ath10k_wmi_tlv_op_pull_phyerr_ev_hdr(struct ath10k *ar,
856 struct sk_buff *skb,
857 struct wmi_phyerr_hdr_arg *arg)
858{
859 const void **tb;
860 const struct wmi_tlv_phyerr_ev *ev;
861 const void *phyerrs;
862 int ret;
863
864 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
865 if (IS_ERR(tb)) {
866 ret = PTR_ERR(tb);
867 ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
868 return ret;
869 }
870
871 ev = tb[WMI_TLV_TAG_STRUCT_COMB_PHYERR_RX_HDR];
872 phyerrs = tb[WMI_TLV_TAG_ARRAY_BYTE];
873
874 if (!ev || !phyerrs) {
875 kfree(tb);
876 return -EPROTO;
877 }
878
879 arg->num_phyerrs = __le32_to_cpu(ev->num_phyerrs);
880 arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32);
881 arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32);
882 arg->buf_len = __le32_to_cpu(ev->buf_len);
883 arg->phyerrs = phyerrs;
884
885 kfree(tb);
886 return 0;
887}
888
889#define WMI_TLV_ABI_VER_NS0 0x5F414351
890#define WMI_TLV_ABI_VER_NS1 0x00004C4D
891#define WMI_TLV_ABI_VER_NS2 0x00000000
892#define WMI_TLV_ABI_VER_NS3 0x00000000
893
894#define WMI_TLV_ABI_VER0_MAJOR 1
895#define WMI_TLV_ABI_VER0_MINOR 0
896#define WMI_TLV_ABI_VER0 ((((WMI_TLV_ABI_VER0_MAJOR) << 24) & 0xFF000000) | \
897 (((WMI_TLV_ABI_VER0_MINOR) << 0) & 0x00FFFFFF))
898#define WMI_TLV_ABI_VER1 53
899
900static int
901ath10k_wmi_tlv_parse_mem_reqs(struct ath10k *ar, u16 tag, u16 len,
902 const void *ptr, void *data)
903{
904 struct wmi_svc_rdy_ev_arg *arg = data;
905 int i;
906
907 if (tag != WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ)
908 return -EPROTO;
909
910 for (i = 0; i < ARRAY_SIZE(arg->mem_reqs); i++) {
911 if (!arg->mem_reqs[i]) {
912 arg->mem_reqs[i] = ptr;
913 return 0;
914 }
915 }
916
917 return -ENOMEM;
918}
919
/* Extract a service-ready event into @arg after validating the WMI TLV
 * ABI version the firmware reports against the versions this driver was
 * built for.  Returns -ENOTSUPP on an ABI mismatch.
 */
static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
					     struct sk_buff *skb,
					     struct wmi_svc_rdy_ev_arg *arg)
{
	const void **tb;
	const struct hal_reg_capabilities *reg;
	const struct wmi_tlv_svc_rdy_ev *ev;
	const __le32 *svc_bmap;
	const struct wlan_host_mem_req *mem_reqs;
	int ret;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT];
	reg = tb[WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES];
	svc_bmap = tb[WMI_TLV_TAG_ARRAY_UINT32];
	mem_reqs = tb[WMI_TLV_TAG_ARRAY_STRUCT];

	if (!ev || !reg || !svc_bmap || !mem_reqs) {
		kfree(tb);
		return -EPROTO;
	}

	/* This is an internal ABI compatibility check for WMI TLV so check it
	 * here instead of the generic WMI code.
	 */
	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi tlv abi 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x\n",
		   __le32_to_cpu(ev->abi.abi_ver0), WMI_TLV_ABI_VER0,
		   __le32_to_cpu(ev->abi.abi_ver_ns0), WMI_TLV_ABI_VER_NS0,
		   __le32_to_cpu(ev->abi.abi_ver_ns1), WMI_TLV_ABI_VER_NS1,
		   __le32_to_cpu(ev->abi.abi_ver_ns2), WMI_TLV_ABI_VER_NS2,
		   __le32_to_cpu(ev->abi.abi_ver_ns3), WMI_TLV_ABI_VER_NS3);

	if (__le32_to_cpu(ev->abi.abi_ver0) != WMI_TLV_ABI_VER0 ||
	    __le32_to_cpu(ev->abi.abi_ver_ns0) != WMI_TLV_ABI_VER_NS0 ||
	    __le32_to_cpu(ev->abi.abi_ver_ns1) != WMI_TLV_ABI_VER_NS1 ||
	    __le32_to_cpu(ev->abi.abi_ver_ns2) != WMI_TLV_ABI_VER_NS2 ||
	    __le32_to_cpu(ev->abi.abi_ver_ns3) != WMI_TLV_ABI_VER_NS3) {
		kfree(tb);
		return -ENOTSUPP;
	}

	/* capability fields are left in LE order for the generic code */
	arg->min_tx_power = ev->hw_min_tx_power;
	arg->max_tx_power = ev->hw_max_tx_power;
	arg->ht_cap = ev->ht_cap_info;
	arg->vht_cap = ev->vht_cap_info;
	arg->sw_ver0 = ev->abi.abi_ver0;
	arg->sw_ver1 = ev->abi.abi_ver1;
	arg->fw_build = ev->fw_build_vers;
	arg->phy_capab = ev->phy_capability;
	arg->num_rf_chains = ev->num_rf_chains;
	arg->eeprom_rd = reg->eeprom_rd;
	arg->num_mem_reqs = ev->num_mem_reqs;
	arg->service_map = svc_bmap;
	arg->service_map_len = ath10k_wmi_tlv_len(svc_bmap);

	/* collect the firmware's host memory requests into arg->mem_reqs */
	ret = ath10k_wmi_tlv_iter(ar, mem_reqs, ath10k_wmi_tlv_len(mem_reqs),
				  ath10k_wmi_tlv_parse_mem_reqs, arg);
	if (ret) {
		kfree(tb);
		ath10k_warn(ar, "failed to parse mem_reqs tlv: %d\n", ret);
		return ret;
	}

	kfree(tb);
	return 0;
}
993
994static int ath10k_wmi_tlv_op_pull_rdy_ev(struct ath10k *ar,
995 struct sk_buff *skb,
996 struct wmi_rdy_ev_arg *arg)
997{
998 const void **tb;
999 const struct wmi_tlv_rdy_ev *ev;
1000 int ret;
1001
1002 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1003 if (IS_ERR(tb)) {
1004 ret = PTR_ERR(tb);
1005 ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1006 return ret;
1007 }
1008
1009 ev = tb[WMI_TLV_TAG_STRUCT_READY_EVENT];
1010 if (!ev) {
1011 kfree(tb);
1012 return -EPROTO;
1013 }
1014
1015 arg->sw_version = ev->abi.abi_ver0;
1016 arg->abi_version = ev->abi.abi_ver1;
1017 arg->status = ev->status;
1018 arg->mac_addr = ev->mac_addr.addr;
1019
1020 kfree(tb);
1021 return 0;
1022}
1023
/* Convert one firmware vdev statistics record (LE wire format) into the
 * host-order ath10k_fw_stats_vdev representation.
 */
static void ath10k_wmi_tlv_pull_vdev_stats(const struct wmi_tlv_vdev_stats *src,
					   struct ath10k_fw_stats_vdev *dst)
{
	int i;

	dst->vdev_id = __le32_to_cpu(src->vdev_id);
	dst->beacon_snr = __le32_to_cpu(src->beacon_snr);
	dst->data_snr = __le32_to_cpu(src->data_snr);
	dst->num_rx_frames = __le32_to_cpu(src->num_rx_frames);
	dst->num_rts_fail = __le32_to_cpu(src->num_rts_fail);
	dst->num_rts_success = __le32_to_cpu(src->num_rts_success);
	dst->num_rx_err = __le32_to_cpu(src->num_rx_err);
	dst->num_rx_discard = __le32_to_cpu(src->num_rx_discard);
	dst->num_tx_not_acked = __le32_to_cpu(src->num_tx_not_acked);

	/* per-rate / per-history arrays: bounds come from the source
	 * struct so a layout change cannot overrun it
	 */
	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames); i++)
		dst->num_tx_frames[i] =
			__le32_to_cpu(src->num_tx_frames[i]);

	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_retries); i++)
		dst->num_tx_frames_retries[i] =
			__le32_to_cpu(src->num_tx_frames_retries[i]);

	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_failures); i++)
		dst->num_tx_frames_failures[i] =
			__le32_to_cpu(src->num_tx_frames_failures[i]);

	for (i = 0; i < ARRAY_SIZE(src->tx_rate_history); i++)
		dst->tx_rate_history[i] =
			__le32_to_cpu(src->tx_rate_history[i]);

	for (i = 0; i < ARRAY_SIZE(src->beacon_rssi_history); i++)
		dst->beacon_rssi_history[i] =
			__le32_to_cpu(src->beacon_rssi_history[i]);
}
1059
1060static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
1061 struct sk_buff *skb,
1062 struct ath10k_fw_stats *stats)
1063{
1064 const void **tb;
1065 const struct wmi_tlv_stats_ev *ev;
1066 const void *data;
1067 u32 num_pdev_stats;
1068 u32 num_vdev_stats;
1069 u32 num_peer_stats;
1070 u32 num_bcnflt_stats;
1071 u32 num_chan_stats;
1072 size_t data_len;
1073 int ret;
1074 int i;
1075
1076 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1077 if (IS_ERR(tb)) {
1078 ret = PTR_ERR(tb);
1079 ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1080 return ret;
1081 }
1082
1083 ev = tb[WMI_TLV_TAG_STRUCT_STATS_EVENT];
1084 data = tb[WMI_TLV_TAG_ARRAY_BYTE];
1085
1086 if (!ev || !data) {
1087 kfree(tb);
1088 return -EPROTO;
1089 }
1090
1091 data_len = ath10k_wmi_tlv_len(data);
1092 num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
1093 num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
1094 num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
1095 num_bcnflt_stats = __le32_to_cpu(ev->num_bcnflt_stats);
1096 num_chan_stats = __le32_to_cpu(ev->num_chan_stats);
1097
1098 ath10k_dbg(ar, ATH10K_DBG_WMI,
1099 "wmi tlv stats update pdev %i vdev %i peer %i bcnflt %i chan %i\n",
1100 num_pdev_stats, num_vdev_stats, num_peer_stats,
1101 num_bcnflt_stats, num_chan_stats);
1102
1103 for (i = 0; i < num_pdev_stats; i++) {
1104 const struct wmi_pdev_stats *src;
1105 struct ath10k_fw_stats_pdev *dst;
1106
1107 src = data;
1108 if (data_len < sizeof(*src))
1109 return -EPROTO;
1110
1111 data += sizeof(*src);
1112 data_len -= sizeof(*src);
1113
1114 dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
1115 if (!dst)
1116 continue;
1117
1118 ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
1119 ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
1120 ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
1121 list_add_tail(&dst->list, &stats->pdevs);
1122 }
1123
1124 for (i = 0; i < num_vdev_stats; i++) {
1125 const struct wmi_tlv_vdev_stats *src;
1126 struct ath10k_fw_stats_vdev *dst;
1127
1128 src = data;
1129 if (data_len < sizeof(*src))
1130 return -EPROTO;
1131
1132 data += sizeof(*src);
1133 data_len -= sizeof(*src);
1134
1135 dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
1136 if (!dst)
1137 continue;
1138
1139 ath10k_wmi_tlv_pull_vdev_stats(src, dst);
1140 list_add_tail(&dst->list, &stats->vdevs);
1141 }
1142
1143 for (i = 0; i < num_peer_stats; i++) {
1144 const struct wmi_10x_peer_stats *src;
1145 struct ath10k_fw_stats_peer *dst;
1146
1147 src = data;
1148 if (data_len < sizeof(*src))
1149 return -EPROTO;
1150
1151 data += sizeof(*src);
1152 data_len -= sizeof(*src);
1153
1154 dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
1155 if (!dst)
1156 continue;
1157
1158 ath10k_wmi_pull_peer_stats(&src->old, dst);
1159 dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
1160 list_add_tail(&dst->list, &stats->peers);
1161 }
1162
1163 kfree(tb);
1164 return 0;
1165}
1166
1167static int ath10k_wmi_tlv_op_pull_roam_ev(struct ath10k *ar,
1168 struct sk_buff *skb,
1169 struct wmi_roam_ev_arg *arg)
1170{
1171 const void **tb;
1172 const struct wmi_tlv_roam_ev *ev;
1173 int ret;
1174
1175 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1176 if (IS_ERR(tb)) {
1177 ret = PTR_ERR(tb);
1178 ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1179 return ret;
1180 }
1181
1182 ev = tb[WMI_TLV_TAG_STRUCT_ROAM_EVENT];
1183 if (!ev) {
1184 kfree(tb);
1185 return -EPROTO;
1186 }
1187
1188 arg->vdev_id = ev->vdev_id;
1189 arg->reason = ev->reason;
1190 arg->rssi = ev->rssi;
1191
1192 kfree(tb);
1193 return 0;
1194}
1195
1196static int
1197ath10k_wmi_tlv_op_pull_wow_ev(struct ath10k *ar, struct sk_buff *skb,
1198 struct wmi_wow_ev_arg *arg)
1199{
1200 const void **tb;
1201 const struct wmi_tlv_wow_event_info *ev;
1202 int ret;
1203
1204 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1205 if (IS_ERR(tb)) {
1206 ret = PTR_ERR(tb);
1207 ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1208 return ret;
1209 }
1210
1211 ev = tb[WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO];
1212 if (!ev) {
1213 kfree(tb);
1214 return -EPROTO;
1215 }
1216
1217 arg->vdev_id = __le32_to_cpu(ev->vdev_id);
1218 arg->flag = __le32_to_cpu(ev->flag);
1219 arg->wake_reason = __le32_to_cpu(ev->wake_reason);
1220 arg->data_len = __le32_to_cpu(ev->data_len);
1221
1222 kfree(tb);
1223 return 0;
1224}
1225
1226static struct sk_buff *
1227ath10k_wmi_tlv_op_gen_pdev_suspend(struct ath10k *ar, u32 opt)
1228{
1229 struct wmi_tlv_pdev_suspend *cmd;
1230 struct wmi_tlv *tlv;
1231 struct sk_buff *skb;
1232
1233 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1234 if (!skb)
1235 return ERR_PTR(-ENOMEM);
1236
1237 tlv = (void *)skb->data;
1238 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SUSPEND_CMD);
1239 tlv->len = __cpu_to_le16(sizeof(*cmd));
1240 cmd = (void *)tlv->value;
1241 cmd->opt = __cpu_to_le32(opt);
1242
1243 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev suspend\n");
1244 return skb;
1245}
1246
1247static struct sk_buff *
1248ath10k_wmi_tlv_op_gen_pdev_resume(struct ath10k *ar)
1249{
1250 struct wmi_tlv_resume_cmd *cmd;
1251 struct wmi_tlv *tlv;
1252 struct sk_buff *skb;
1253
1254 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1255 if (!skb)
1256 return ERR_PTR(-ENOMEM);
1257
1258 tlv = (void *)skb->data;
1259 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_RESUME_CMD);
1260 tlv->len = __cpu_to_le16(sizeof(*cmd));
1261 cmd = (void *)tlv->value;
1262 cmd->reserved = __cpu_to_le32(0);
1263
1264 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev resume\n");
1265 return skb;
1266}
1267
1268static struct sk_buff *
1269ath10k_wmi_tlv_op_gen_pdev_set_rd(struct ath10k *ar,
1270 u16 rd, u16 rd2g, u16 rd5g,
1271 u16 ctl2g, u16 ctl5g,
1272 enum wmi_dfs_region dfs_reg)
1273{
1274 struct wmi_tlv_pdev_set_rd_cmd *cmd;
1275 struct wmi_tlv *tlv;
1276 struct sk_buff *skb;
1277
1278 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1279 if (!skb)
1280 return ERR_PTR(-ENOMEM);
1281
1282 tlv = (void *)skb->data;
1283 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_REGDOMAIN_CMD);
1284 tlv->len = __cpu_to_le16(sizeof(*cmd));
1285 cmd = (void *)tlv->value;
1286 cmd->regd = __cpu_to_le32(rd);
1287 cmd->regd_2ghz = __cpu_to_le32(rd2g);
1288 cmd->regd_5ghz = __cpu_to_le32(rd5g);
1289 cmd->conform_limit_2ghz = __cpu_to_le32(rd2g);
1290 cmd->conform_limit_5ghz = __cpu_to_le32(rd5g);
1291
1292 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set rd\n");
1293 return skb;
1294}
1295
/* Report the TXBF configuration scheme used by TLV firmware: beamforming
 * is configured after association (per the returned constant).
 */
static enum wmi_txbf_conf ath10k_wmi_tlv_txbf_conf_scheme(struct ath10k *ar)
{
	return WMI_TXBF_CONF_AFTER_ASSOC;
}
1300
1301static struct sk_buff *
1302ath10k_wmi_tlv_op_gen_pdev_set_param(struct ath10k *ar, u32 param_id,
1303 u32 param_value)
1304{
1305 struct wmi_tlv_pdev_set_param_cmd *cmd;
1306 struct wmi_tlv *tlv;
1307 struct sk_buff *skb;
1308
1309 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1310 if (!skb)
1311 return ERR_PTR(-ENOMEM);
1312
1313 tlv = (void *)skb->data;
1314 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_PARAM_CMD);
1315 tlv->len = __cpu_to_le16(sizeof(*cmd));
1316 cmd = (void *)tlv->value;
1317 cmd->param_id = __cpu_to_le32(param_id);
1318 cmd->param_value = __cpu_to_le32(param_value);
1319
1320 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set param\n");
1321 return skb;
1322}
1323
/* Build the WMI init command: an INIT_CMD TLV, a RESOURCE_CONFIG TLV
 * describing the target provisioning the host requests, and an
 * ARRAY_STRUCT TLV listing the host memory chunks already allocated for
 * the firmware.
 */
static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
{
	struct sk_buff *skb;
	struct wmi_tlv *tlv;
	struct wmi_tlv_init_cmd *cmd;
	struct wmi_tlv_resource_config *cfg;
	struct wmi_host_mem_chunks *chunks;
	size_t len, chunks_len;
	void *ptr;

	chunks_len = ar->wmi.num_mem_chunks * sizeof(struct host_memory_chunk);
	len = (sizeof(*tlv) + sizeof(*cmd)) +
	      (sizeof(*tlv) + sizeof(*cfg)) +
	      (sizeof(*tlv) + chunks_len);

	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = skb->data;

	/* TLV 1: init command header */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_INIT_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* TLV 2: resource configuration */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_RESOURCE_CONFIG);
	tlv->len = __cpu_to_le16(sizeof(*cfg));
	cfg = (void *)tlv->value;
	ptr += sizeof(*tlv);
	ptr += sizeof(*cfg);

	/* TLV 3: host memory chunk descriptors */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(chunks_len);
	chunks = (void *)tlv->value;

	ptr += sizeof(*tlv);
	ptr += chunks_len;

	/* advertise the ABI the driver was built against; the firmware
	 * echoes these in service_ready where they are verified
	 */
	cmd->abi.abi_ver0 = __cpu_to_le32(WMI_TLV_ABI_VER0);
	cmd->abi.abi_ver1 = __cpu_to_le32(WMI_TLV_ABI_VER1);
	cmd->abi.abi_ver_ns0 = __cpu_to_le32(WMI_TLV_ABI_VER_NS0);
	cmd->abi.abi_ver_ns1 = __cpu_to_le32(WMI_TLV_ABI_VER_NS1);
	cmd->abi.abi_ver_ns2 = __cpu_to_le32(WMI_TLV_ABI_VER_NS2);
	cmd->abi.abi_ver_ns3 = __cpu_to_le32(WMI_TLV_ABI_VER_NS3);
	cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);

	cfg->num_vdevs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
	cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS);

	/* rx reorder offload needs per-vdev offload peers and buffers */
	if (test_bit(WMI_SERVICE_RX_FULL_REORDER, ar->wmi.svc_map)) {
		cfg->num_offload_peers = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
		cfg->num_offload_reorder_bufs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
	} else {
		cfg->num_offload_peers = __cpu_to_le32(0);
		cfg->num_offload_reorder_bufs = __cpu_to_le32(0);
	}

	cfg->num_peer_keys = __cpu_to_le32(2);
	cfg->num_tids = __cpu_to_le32(TARGET_TLV_NUM_TIDS);
	cfg->ast_skid_limit = __cpu_to_le32(0x10);
	cfg->tx_chain_mask = __cpu_to_le32(0x7);
	cfg->rx_chain_mask = __cpu_to_le32(0x7);
	cfg->rx_timeout_pri[0] = __cpu_to_le32(0x64);
	cfg->rx_timeout_pri[1] = __cpu_to_le32(0x64);
	cfg->rx_timeout_pri[2] = __cpu_to_le32(0x64);
	cfg->rx_timeout_pri[3] = __cpu_to_le32(0x28);
	cfg->rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
	cfg->scan_max_pending_reqs = __cpu_to_le32(4);
	cfg->bmiss_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
	cfg->roam_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
	cfg->roam_offload_max_ap_profiles = __cpu_to_le32(8);
	cfg->num_mcast_groups = __cpu_to_le32(0);
	cfg->num_mcast_table_elems = __cpu_to_le32(0);
	cfg->mcast2ucast_mode = __cpu_to_le32(0);
	cfg->tx_dbg_log_size = __cpu_to_le32(0x400);
	cfg->num_wds_entries = __cpu_to_le32(0x20);
	cfg->dma_burst_size = __cpu_to_le32(0);
	cfg->mac_aggr_delim = __cpu_to_le32(0);
	cfg->rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(0);
	cfg->vow_config = __cpu_to_le32(0);
	cfg->gtk_offload_max_vdev = __cpu_to_le32(2);
	cfg->num_msdu_desc = __cpu_to_le32(TARGET_TLV_NUM_MSDU_DESC);
	cfg->max_frag_entries = __cpu_to_le32(2);
	cfg->num_tdls_vdevs = __cpu_to_le32(TARGET_TLV_NUM_TDLS_VDEVS);
	cfg->num_tdls_conn_table_entries = __cpu_to_le32(0x20);
	cfg->beacon_tx_offload_max_vdev = __cpu_to_le32(2);
	cfg->num_multicast_filter_entries = __cpu_to_le32(5);
	cfg->num_wow_filters = __cpu_to_le32(ar->wow.max_num_patterns);
	cfg->num_keep_alive_pattern = __cpu_to_le32(6);
	cfg->keep_alive_pattern_size = __cpu_to_le32(0);
	cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1);
	cfg->max_tdls_concurrent_buffer_sta = __cpu_to_le32(1);

	ath10k_wmi_put_host_mem_chunks(ar, chunks);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv init\n");
	return skb;
}
1427
1428static struct sk_buff *
1429ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar,
1430 const struct wmi_start_scan_arg *arg)
1431{
1432 struct wmi_tlv_start_scan_cmd *cmd;
1433 struct wmi_tlv *tlv;
1434 struct sk_buff *skb;
1435 size_t len, chan_len, ssid_len, bssid_len, ie_len;
1436 __le32 *chans;
1437 struct wmi_ssid *ssids;
1438 struct wmi_mac_addr *addrs;
1439 void *ptr;
1440 int i, ret;
1441
1442 ret = ath10k_wmi_start_scan_verify(arg);
1443 if (ret)
1444 return ERR_PTR(ret);
1445
1446 chan_len = arg->n_channels * sizeof(__le32);
1447 ssid_len = arg->n_ssids * sizeof(struct wmi_ssid);
1448 bssid_len = arg->n_bssids * sizeof(struct wmi_mac_addr);
1449 ie_len = roundup(arg->ie_len, 4);
1450 len = (sizeof(*tlv) + sizeof(*cmd)) +
1451 (arg->n_channels ? sizeof(*tlv) + chan_len : 0) +
1452 (arg->n_ssids ? sizeof(*tlv) + ssid_len : 0) +
1453 (arg->n_bssids ? sizeof(*tlv) + bssid_len : 0) +
1454 (arg->ie_len ? sizeof(*tlv) + ie_len : 0);
1455
1456 skb = ath10k_wmi_alloc_skb(ar, len);
1457 if (!skb)
1458 return ERR_PTR(-ENOMEM);
1459
1460 ptr = (void *)skb->data;
1461 tlv = ptr;
1462 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_START_SCAN_CMD);
1463 tlv->len = __cpu_to_le16(sizeof(*cmd));
1464 cmd = (void *)tlv->value;
1465
1466 ath10k_wmi_put_start_scan_common(&cmd->common, arg);
1467 cmd->burst_duration_ms = __cpu_to_le32(arg->burst_duration_ms);
1468 cmd->num_channels = __cpu_to_le32(arg->n_channels);
1469 cmd->num_ssids = __cpu_to_le32(arg->n_ssids);
1470 cmd->num_bssids = __cpu_to_le32(arg->n_bssids);
1471 cmd->ie_len = __cpu_to_le32(arg->ie_len);
1472 cmd->num_probes = __cpu_to_le32(3);
1473
1474 /* FIXME: There are some scan flag inconsistencies across firmwares,
1475 * e.g. WMI-TLV inverts the logic behind the following flag.
1476 */
1477 cmd->common.scan_ctrl_flags ^= __cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ);
1478
1479 ptr += sizeof(*tlv);
1480 ptr += sizeof(*cmd);
1481
1482 tlv = ptr;
1483 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
1484 tlv->len = __cpu_to_le16(chan_len);
1485 chans = (void *)tlv->value;
1486 for (i = 0; i < arg->n_channels; i++)
1487 chans[i] = __cpu_to_le32(arg->channels[i]);
1488
1489 ptr += sizeof(*tlv);
1490 ptr += chan_len;
1491
1492 tlv = ptr;
1493 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_FIXED_STRUCT);
1494 tlv->len = __cpu_to_le16(ssid_len);
1495 ssids = (void *)tlv->value;
1496 for (i = 0; i < arg->n_ssids; i++) {
1497 ssids[i].ssid_len = __cpu_to_le32(arg->ssids[i].len);
1498 memcpy(ssids[i].ssid, arg->ssids[i].ssid, arg->ssids[i].len);
1499 }
1500
1501 ptr += sizeof(*tlv);
1502 ptr += ssid_len;
1503
1504 tlv = ptr;
1505 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_FIXED_STRUCT);
1506 tlv->len = __cpu_to_le16(bssid_len);
1507 addrs = (void *)tlv->value;
1508 for (i = 0; i < arg->n_bssids; i++)
1509 ether_addr_copy(addrs[i].addr, arg->bssids[i].bssid);
1510
1511 ptr += sizeof(*tlv);
1512 ptr += bssid_len;
1513
1514 tlv = ptr;
1515 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
1516 tlv->len = __cpu_to_le16(ie_len);
1517 memcpy(tlv->value, arg->ie, arg->ie_len);
1518
1519 ptr += sizeof(*tlv);
1520 ptr += ie_len;
1521
1522 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv start scan\n");
1523 return skb;
1524}
1525
1526static struct sk_buff *
1527ath10k_wmi_tlv_op_gen_stop_scan(struct ath10k *ar,
1528 const struct wmi_stop_scan_arg *arg)
1529{
1530 struct wmi_stop_scan_cmd *cmd;
1531 struct wmi_tlv *tlv;
1532 struct sk_buff *skb;
1533 u32 scan_id;
1534 u32 req_id;
1535
1536 if (arg->req_id > 0xFFF)
1537 return ERR_PTR(-EINVAL);
1538 if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
1539 return ERR_PTR(-EINVAL);
1540
1541 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1542 if (!skb)
1543 return ERR_PTR(-ENOMEM);
1544
1545 scan_id = arg->u.scan_id;
1546 scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;
1547
1548 req_id = arg->req_id;
1549 req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
1550
1551 tlv = (void *)skb->data;
1552 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STOP_SCAN_CMD);
1553 tlv->len = __cpu_to_le16(sizeof(*cmd));
1554 cmd = (void *)tlv->value;
1555 cmd->req_type = __cpu_to_le32(arg->req_type);
1556 cmd->vdev_id = __cpu_to_le32(arg->u.vdev_id);
1557 cmd->scan_id = __cpu_to_le32(scan_id);
1558 cmd->scan_req_id = __cpu_to_le32(req_id);
1559
1560 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv stop scan\n");
1561 return skb;
1562}
1563
1564static struct sk_buff *
1565ath10k_wmi_tlv_op_gen_vdev_create(struct ath10k *ar,
1566 u32 vdev_id,
1567 enum wmi_vdev_type vdev_type,
1568 enum wmi_vdev_subtype vdev_subtype,
1569 const u8 mac_addr[ETH_ALEN])
1570{
1571 struct wmi_vdev_create_cmd *cmd;
1572 struct wmi_tlv *tlv;
1573 struct sk_buff *skb;
1574
1575 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1576 if (!skb)
1577 return ERR_PTR(-ENOMEM);
1578
1579 tlv = (void *)skb->data;
1580 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_CREATE_CMD);
1581 tlv->len = __cpu_to_le16(sizeof(*cmd));
1582 cmd = (void *)tlv->value;
1583 cmd->vdev_id = __cpu_to_le32(vdev_id);
1584 cmd->vdev_type = __cpu_to_le32(vdev_type);
1585 cmd->vdev_subtype = __cpu_to_le32(vdev_subtype);
1586 ether_addr_copy(cmd->vdev_macaddr.addr, mac_addr);
1587
1588 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev create\n");
1589 return skb;
1590}
1591
1592static struct sk_buff *
1593ath10k_wmi_tlv_op_gen_vdev_delete(struct ath10k *ar, u32 vdev_id)
1594{
1595 struct wmi_vdev_delete_cmd *cmd;
1596 struct wmi_tlv *tlv;
1597 struct sk_buff *skb;
1598
1599 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1600 if (!skb)
1601 return ERR_PTR(-ENOMEM);
1602
1603 tlv = (void *)skb->data;
1604 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_DELETE_CMD);
1605 tlv->len = __cpu_to_le16(sizeof(*cmd));
1606 cmd = (void *)tlv->value;
1607 cmd->vdev_id = __cpu_to_le32(vdev_id);
1608
1609 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev delete\n");
1610 return skb;
1611}
1612
/* Build a WMI-TLV vdev (re)start command.
 *
 * Layout: fixed vdev_start TLV, followed by a wmi_channel TLV, followed
 * by an empty ARRAY_STRUCT TLV reserved for P2P NoA descriptors.
 *
 * NOTE(review): @restart is accepted but not encoded anywhere in the
 * command below — presumably the TLV firmware does not distinguish
 * start from restart at this level; confirm against the op table.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_vdev_start(struct ath10k *ar,
				 const struct wmi_vdev_start_request_arg *arg,
				 bool restart)
{
	struct wmi_tlv_vdev_start_cmd *cmd;
	struct wmi_channel *ch;
	struct wmi_p2p_noa_descriptor *noa;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t len;
	void *ptr;
	u32 flags = 0;

	/* A hidden SSID makes no sense without an SSID, and the SSID must
	 * fit the fixed-size field in the command.
	 */
	if (WARN_ON(arg->hidden_ssid && !arg->ssid))
		return ERR_PTR(-EINVAL);
	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
		return ERR_PTR(-EINVAL);

	/* Three TLVs: command, channel, and a zero-length NoA array. */
	len = (sizeof(*tlv) + sizeof(*cmd)) +
	      (sizeof(*tlv) + sizeof(*ch)) +
	      (sizeof(*tlv) + 0);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	if (arg->hidden_ssid)
		flags |= WMI_VDEV_START_HIDDEN_SSID;
	if (arg->pmf_enabled)
		flags |= WMI_VDEV_START_PMF_ENABLED;

	ptr = (void *)skb->data;

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_START_REQUEST_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->bcn_intval = __cpu_to_le32(arg->bcn_intval);
	cmd->dtim_period = __cpu_to_le32(arg->dtim_period);
	cmd->flags = __cpu_to_le32(flags);
	cmd->bcn_tx_rate = __cpu_to_le32(arg->bcn_tx_rate);
	cmd->bcn_tx_power = __cpu_to_le32(arg->bcn_tx_power);
	cmd->disable_hw_ack = __cpu_to_le32(arg->disable_hw_ack);

	if (arg->ssid) {
		cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len);
		memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
	}

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* Channel description TLV. */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
	tlv->len = __cpu_to_le16(sizeof(*ch));
	ch = (void *)tlv->value;
	ath10k_wmi_put_wmi_channel(ch, &arg->channel);

	ptr += sizeof(*tlv);
	ptr += sizeof(*ch);

	/* Empty NoA descriptor array: header only, zero payload. */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = 0;
	noa = (void *)tlv->value;

	/* Note: This is a nested TLV containing:
	 * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
	 */

	ptr += sizeof(*tlv);
	ptr += 0;

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev start\n");
	return skb;
}
1690
1691static struct sk_buff *
1692ath10k_wmi_tlv_op_gen_vdev_stop(struct ath10k *ar, u32 vdev_id)
1693{
1694 struct wmi_vdev_stop_cmd *cmd;
1695 struct wmi_tlv *tlv;
1696 struct sk_buff *skb;
1697
1698 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1699 if (!skb)
1700 return ERR_PTR(-ENOMEM);
1701
1702 tlv = (void *)skb->data;
1703 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_STOP_CMD);
1704 tlv->len = __cpu_to_le16(sizeof(*cmd));
1705 cmd = (void *)tlv->value;
1706 cmd->vdev_id = __cpu_to_le32(vdev_id);
1707
1708 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev stop\n");
1709 return skb;
1710}
1711
1712static struct sk_buff *
1713ath10k_wmi_tlv_op_gen_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
1714 const u8 *bssid)
1715
1716{
1717 struct wmi_vdev_up_cmd *cmd;
1718 struct wmi_tlv *tlv;
1719 struct sk_buff *skb;
1720
1721 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1722 if (!skb)
1723 return ERR_PTR(-ENOMEM);
1724
1725 tlv = (void *)skb->data;
1726 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_UP_CMD);
1727 tlv->len = __cpu_to_le16(sizeof(*cmd));
1728 cmd = (void *)tlv->value;
1729 cmd->vdev_id = __cpu_to_le32(vdev_id);
1730 cmd->vdev_assoc_id = __cpu_to_le32(aid);
1731 ether_addr_copy(cmd->vdev_bssid.addr, bssid);
1732
1733 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev up\n");
1734 return skb;
1735}
1736
1737static struct sk_buff *
1738ath10k_wmi_tlv_op_gen_vdev_down(struct ath10k *ar, u32 vdev_id)
1739{
1740 struct wmi_vdev_down_cmd *cmd;
1741 struct wmi_tlv *tlv;
1742 struct sk_buff *skb;
1743
1744 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1745 if (!skb)
1746 return ERR_PTR(-ENOMEM);
1747
1748 tlv = (void *)skb->data;
1749 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_DOWN_CMD);
1750 tlv->len = __cpu_to_le16(sizeof(*cmd));
1751 cmd = (void *)tlv->value;
1752 cmd->vdev_id = __cpu_to_le32(vdev_id);
1753
1754 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev down\n");
1755 return skb;
1756}
1757
1758static struct sk_buff *
1759ath10k_wmi_tlv_op_gen_vdev_set_param(struct ath10k *ar, u32 vdev_id,
1760 u32 param_id, u32 param_value)
1761{
1762 struct wmi_vdev_set_param_cmd *cmd;
1763 struct wmi_tlv *tlv;
1764 struct sk_buff *skb;
1765
1766 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1767 if (!skb)
1768 return ERR_PTR(-ENOMEM);
1769
1770 tlv = (void *)skb->data;
1771 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SET_PARAM_CMD);
1772 tlv->len = __cpu_to_le16(sizeof(*cmd));
1773 cmd = (void *)tlv->value;
1774 cmd->vdev_id = __cpu_to_le32(vdev_id);
1775 cmd->param_id = __cpu_to_le32(param_id);
1776 cmd->param_value = __cpu_to_le32(param_value);
1777
1778 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev set param\n");
1779 return skb;
1780}
1781
/* Build a WMI-TLV install-key command.
 *
 * Layout: fixed install_key TLV followed by an ARRAY_BYTE TLV holding
 * the key material, padded up to a __le32 multiple.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_vdev_install_key(struct ath10k *ar,
				       const struct wmi_vdev_install_key_arg *arg)
{
	struct wmi_vdev_install_key_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t len;
	void *ptr;

	/* Key data must be present exactly when a real cipher is used. */
	if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL)
		return ERR_PTR(-EINVAL);
	if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL)
		return ERR_PTR(-EINVAL);

	/* Reserve the padded key length so the TLV stays 4-byte aligned. */
	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) + roundup(arg->key_len, sizeof(__le32));
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_INSTALL_KEY_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->key_idx = __cpu_to_le32(arg->key_idx);
	cmd->key_flags = __cpu_to_le32(arg->key_flags);
	cmd->key_cipher = __cpu_to_le32(arg->key_cipher);
	cmd->key_len = __cpu_to_le32(arg->key_len);
	cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len);
	cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len);

	/* A NULL macaddr leaves the peer address zeroed (group key case —
	 * presumably; confirm against callers).
	 */
	if (arg->macaddr)
		ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* Key material; TLV length covers the padding, only key_len bytes
	 * are copied.
	 */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
	tlv->len = __cpu_to_le16(roundup(arg->key_len, sizeof(__le32)));
	if (arg->key_data)
		memcpy(tlv->value, arg->key_data, arg->key_len);

	ptr += sizeof(*tlv);
	ptr += roundup(arg->key_len, sizeof(__le32));

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev install key\n");
	return skb;
}
1834
1835static void *ath10k_wmi_tlv_put_uapsd_ac(struct ath10k *ar, void *ptr,
1836 const struct wmi_sta_uapsd_auto_trig_arg *arg)
1837{
1838 struct wmi_sta_uapsd_auto_trig_param *ac;
1839 struct wmi_tlv *tlv;
1840
1841 tlv = ptr;
1842 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_PARAM);
1843 tlv->len = __cpu_to_le16(sizeof(*ac));
1844 ac = (void *)tlv->value;
1845
1846 ac->wmm_ac = __cpu_to_le32(arg->wmm_ac);
1847 ac->user_priority = __cpu_to_le32(arg->user_priority);
1848 ac->service_interval = __cpu_to_le32(arg->service_interval);
1849 ac->suspend_interval = __cpu_to_le32(arg->suspend_interval);
1850 ac->delay_interval = __cpu_to_le32(arg->delay_interval);
1851
1852 ath10k_dbg(ar, ATH10K_DBG_WMI,
1853 "wmi tlv vdev sta uapsd auto trigger ac %d prio %d svc int %d susp int %d delay int %d\n",
1854 ac->wmm_ac, ac->user_priority, ac->service_interval,
1855 ac->suspend_interval, ac->delay_interval);
1856
1857 return ptr + sizeof(*tlv) + sizeof(*ac);
1858}
1859
/* Build a WMI-TLV STA UAPSD auto-trigger command for @num_ac access
 * categories of peer @peer_addr on @vdev_id.
 *
 * Layout: fixed command TLV followed by an ARRAY_STRUCT TLV whose
 * payload is one nested trigger-param TLV per AC, written by
 * ath10k_wmi_tlv_put_uapsd_ac().
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
				     const u8 peer_addr[ETH_ALEN],
				     const struct wmi_sta_uapsd_auto_trig_arg *args,
				     u32 num_ac)
{
	struct wmi_sta_uapsd_auto_trig_cmd_fixed_param *cmd;
	struct wmi_sta_uapsd_auto_trig_param *ac;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t len;
	size_t ac_tlv_len;
	void *ptr;
	int i;

	/* Each AC entry carries its own nested TLV header. */
	ac_tlv_len = num_ac * (sizeof(*tlv) + sizeof(*ac));
	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) + ac_tlv_len;
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->num_ac = __cpu_to_le32(num_ac);
	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(ac_tlv_len);
	/* NOTE(review): this assignment is never read — the array payload
	 * is filled via the helper below; ac is otherwise only used for
	 * sizeof above.
	 */
	ac = (void *)tlv->value;

	ptr += sizeof(*tlv);
	/* The helper returns the position past each nested TLV it writes. */
	for (i = 0; i < num_ac; i++)
		ptr = ath10k_wmi_tlv_put_uapsd_ac(ar, ptr, &args[i]);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev sta uapsd auto trigger\n");
	return skb;
}
1906
1907static void *ath10k_wmi_tlv_put_wmm(void *ptr,
1908 const struct wmi_wmm_params_arg *arg)
1909{
1910 struct wmi_wmm_params *wmm;
1911 struct wmi_tlv *tlv;
1912
1913 tlv = ptr;
1914 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WMM_PARAMS);
1915 tlv->len = __cpu_to_le16(sizeof(*wmm));
1916 wmm = (void *)tlv->value;
1917 ath10k_wmi_set_wmm_param(wmm, arg);
1918
1919 return ptr + sizeof(*tlv) + sizeof(*wmm);
1920}
1921
1922static struct sk_buff *
1923ath10k_wmi_tlv_op_gen_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
1924 const struct wmi_wmm_params_all_arg *arg)
1925{
1926 struct wmi_tlv_vdev_set_wmm_cmd *cmd;
1927 struct wmi_tlv *tlv;
1928 struct sk_buff *skb;
1929 size_t len;
1930 void *ptr;
1931
1932 len = sizeof(*tlv) + sizeof(*cmd);
1933 skb = ath10k_wmi_alloc_skb(ar, len);
1934 if (!skb)
1935 return ERR_PTR(-ENOMEM);
1936
1937 ptr = (void *)skb->data;
1938 tlv = ptr;
1939 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SET_WMM_PARAMS_CMD);
1940 tlv->len = __cpu_to_le16(sizeof(*cmd));
1941 cmd = (void *)tlv->value;
1942 cmd->vdev_id = __cpu_to_le32(vdev_id);
1943
1944 ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[0].params, &arg->ac_be);
1945 ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[1].params, &arg->ac_bk);
1946 ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[2].params, &arg->ac_vi);
1947 ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[3].params, &arg->ac_vo);
1948
1949 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev wmm conf\n");
1950 return skb;
1951}
1952
1953static struct sk_buff *
1954ath10k_wmi_tlv_op_gen_sta_keepalive(struct ath10k *ar,
1955 const struct wmi_sta_keepalive_arg *arg)
1956{
1957 struct wmi_tlv_sta_keepalive_cmd *cmd;
1958 struct wmi_sta_keepalive_arp_resp *arp;
1959 struct sk_buff *skb;
1960 struct wmi_tlv *tlv;
1961 void *ptr;
1962 size_t len;
1963
1964 len = sizeof(*tlv) + sizeof(*cmd) +
1965 sizeof(*tlv) + sizeof(*arp);
1966 skb = ath10k_wmi_alloc_skb(ar, len);
1967 if (!skb)
1968 return ERR_PTR(-ENOMEM);
1969
1970 ptr = (void *)skb->data;
1971 tlv = ptr;
1972 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALIVE_CMD);
1973 tlv->len = __cpu_to_le16(sizeof(*cmd));
1974 cmd = (void *)tlv->value;
1975 cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
1976 cmd->enabled = __cpu_to_le32(arg->enabled);
1977 cmd->method = __cpu_to_le32(arg->method);
1978 cmd->interval = __cpu_to_le32(arg->interval);
1979
1980 ptr += sizeof(*tlv);
1981 ptr += sizeof(*cmd);
1982
1983 tlv = ptr;
1984 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALVE_ARP_RESPONSE);
1985 tlv->len = __cpu_to_le16(sizeof(*arp));
1986 arp = (void *)tlv->value;
1987
1988 arp->src_ip4_addr = arg->src_ip4_addr;
1989 arp->dest_ip4_addr = arg->dest_ip4_addr;
1990 ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
1991
1992 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv sta keepalive vdev %d enabled %d method %d inverval %d\n",
1993 arg->vdev_id, arg->enabled, arg->method, arg->interval);
1994 return skb;
1995}
1996
1997static struct sk_buff *
1998ath10k_wmi_tlv_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
1999 const u8 peer_addr[ETH_ALEN],
2000 enum wmi_peer_type peer_type)
2001{
2002 struct wmi_tlv_peer_create_cmd *cmd;
2003 struct wmi_tlv *tlv;
2004 struct sk_buff *skb;
2005
2006 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2007 if (!skb)
2008 return ERR_PTR(-ENOMEM);
2009
2010 tlv = (void *)skb->data;
2011 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_CREATE_CMD);
2012 tlv->len = __cpu_to_le16(sizeof(*cmd));
2013 cmd = (void *)tlv->value;
2014 cmd->vdev_id = __cpu_to_le32(vdev_id);
2015 cmd->peer_type = __cpu_to_le32(peer_type);
2016 ether_addr_copy(cmd->peer_addr.addr, peer_addr);
2017
2018 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer create\n");
2019 return skb;
2020}
2021
2022static struct sk_buff *
2023ath10k_wmi_tlv_op_gen_peer_delete(struct ath10k *ar, u32 vdev_id,
2024 const u8 peer_addr[ETH_ALEN])
2025{
2026 struct wmi_peer_delete_cmd *cmd;
2027 struct wmi_tlv *tlv;
2028 struct sk_buff *skb;
2029
2030 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2031 if (!skb)
2032 return ERR_PTR(-ENOMEM);
2033
2034 tlv = (void *)skb->data;
2035 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_DELETE_CMD);
2036 tlv->len = __cpu_to_le16(sizeof(*cmd));
2037 cmd = (void *)tlv->value;
2038 cmd->vdev_id = __cpu_to_le32(vdev_id);
2039 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
2040
2041 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer delete\n");
2042 return skb;
2043}
2044
2045static struct sk_buff *
2046ath10k_wmi_tlv_op_gen_peer_flush(struct ath10k *ar, u32 vdev_id,
2047 const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
2048{
2049 struct wmi_peer_flush_tids_cmd *cmd;
2050 struct wmi_tlv *tlv;
2051 struct sk_buff *skb;
2052
2053 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2054 if (!skb)
2055 return ERR_PTR(-ENOMEM);
2056
2057 tlv = (void *)skb->data;
2058 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_FLUSH_TIDS_CMD);
2059 tlv->len = __cpu_to_le16(sizeof(*cmd));
2060 cmd = (void *)tlv->value;
2061 cmd->vdev_id = __cpu_to_le32(vdev_id);
2062 cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
2063 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
2064
2065 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer flush\n");
2066 return skb;
2067}
2068
2069static struct sk_buff *
2070ath10k_wmi_tlv_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id,
2071 const u8 *peer_addr,
2072 enum wmi_peer_param param_id,
2073 u32 param_value)
2074{
2075 struct wmi_peer_set_param_cmd *cmd;
2076 struct wmi_tlv *tlv;
2077 struct sk_buff *skb;
2078
2079 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2080 if (!skb)
2081 return ERR_PTR(-ENOMEM);
2082
2083 tlv = (void *)skb->data;
2084 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_SET_PARAM_CMD);
2085 tlv->len = __cpu_to_le16(sizeof(*cmd));
2086 cmd = (void *)tlv->value;
2087 cmd->vdev_id = __cpu_to_le32(vdev_id);
2088 cmd->param_id = __cpu_to_le32(param_id);
2089 cmd->param_value = __cpu_to_le32(param_value);
2090 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
2091
2092 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer set param\n");
2093 return skb;
2094}
2095
/* Build a WMI-TLV peer-assoc-complete command.
 *
 * Layout: fixed peer_assoc TLV, legacy rates (ARRAY_BYTE), HT rates
 * (ARRAY_BYTE), then a VHT rate-set TLV.  The rate arrays are padded up
 * to __le32 multiples; only num_rates bytes are copied into each
 * (padding content comes from the allocator — presumably zeroed by
 * ath10k_wmi_alloc_skb; confirm).
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_peer_assoc(struct ath10k *ar,
				 const struct wmi_peer_assoc_complete_arg *arg)
{
	struct wmi_tlv_peer_assoc_cmd *cmd;
	struct wmi_vht_rate_set *vht_rate;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t len, legacy_rate_len, ht_rate_len;
	void *ptr;

	/* Sanity-check argument ranges before sizing the buffer. */
	if (arg->peer_mpdu_density > 16)
		return ERR_PTR(-EINVAL);
	if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
		return ERR_PTR(-EINVAL);
	if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
		return ERR_PTR(-EINVAL);

	legacy_rate_len = roundup(arg->peer_legacy_rates.num_rates,
				  sizeof(__le32));
	ht_rate_len = roundup(arg->peer_ht_rates.num_rates, sizeof(__le32));
	len = (sizeof(*tlv) + sizeof(*cmd)) +
	      (sizeof(*tlv) + legacy_rate_len) +
	      (sizeof(*tlv) + ht_rate_len) +
	      (sizeof(*tlv) + sizeof(*vht_rate));
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_ASSOC_COMPLETE_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;

	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	/* Firmware wants "new assoc" semantics, the inverse of reassoc. */
	cmd->new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
	cmd->assoc_id = __cpu_to_le32(arg->peer_aid);
	cmd->flags = __cpu_to_le32(arg->peer_flags);
	cmd->caps = __cpu_to_le32(arg->peer_caps);
	cmd->listen_intval = __cpu_to_le32(arg->peer_listen_intval);
	cmd->ht_caps = __cpu_to_le32(arg->peer_ht_caps);
	cmd->max_mpdu = __cpu_to_le32(arg->peer_max_mpdu);
	cmd->mpdu_density = __cpu_to_le32(arg->peer_mpdu_density);
	cmd->rate_caps = __cpu_to_le32(arg->peer_rate_caps);
	cmd->nss = __cpu_to_le32(arg->peer_num_spatial_streams);
	cmd->vht_caps = __cpu_to_le32(arg->peer_vht_caps);
	cmd->phy_mode = __cpu_to_le32(arg->peer_phymode);
	cmd->num_legacy_rates = __cpu_to_le32(arg->peer_legacy_rates.num_rates);
	cmd->num_ht_rates = __cpu_to_le32(arg->peer_ht_rates.num_rates);
	ether_addr_copy(cmd->mac_addr.addr, arg->addr);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* Legacy rate set. */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
	tlv->len = __cpu_to_le16(legacy_rate_len);
	memcpy(tlv->value, arg->peer_legacy_rates.rates,
	       arg->peer_legacy_rates.num_rates);

	ptr += sizeof(*tlv);
	ptr += legacy_rate_len;

	/* HT rate set. */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
	tlv->len = __cpu_to_le16(ht_rate_len);
	memcpy(tlv->value, arg->peer_ht_rates.rates,
	       arg->peer_ht_rates.num_rates);

	ptr += sizeof(*tlv);
	ptr += ht_rate_len;

	/* VHT rate set. */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VHT_RATE_SET);
	tlv->len = __cpu_to_le16(sizeof(*vht_rate));
	vht_rate = (void *)tlv->value;

	vht_rate->rx_max_rate = __cpu_to_le32(arg->peer_vht_rates.rx_max_rate);
	vht_rate->rx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.rx_mcs_set);
	vht_rate->tx_max_rate = __cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
	vht_rate->tx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);

	ptr += sizeof(*tlv);
	ptr += sizeof(*vht_rate);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer assoc\n");
	return skb;
}
2185
2186static struct sk_buff *
2187ath10k_wmi_tlv_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id,
2188 enum wmi_sta_ps_mode psmode)
2189{
2190 struct wmi_sta_powersave_mode_cmd *cmd;
2191 struct wmi_tlv *tlv;
2192 struct sk_buff *skb;
2193
2194 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2195 if (!skb)
2196 return ERR_PTR(-ENOMEM);
2197
2198 tlv = (void *)skb->data;
2199 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_POWERSAVE_MODE_CMD);
2200 tlv->len = __cpu_to_le16(sizeof(*cmd));
2201 cmd = (void *)tlv->value;
2202 cmd->vdev_id = __cpu_to_le32(vdev_id);
2203 cmd->sta_ps_mode = __cpu_to_le32(psmode);
2204
2205 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set psmode\n");
2206 return skb;
2207}
2208
2209static struct sk_buff *
2210ath10k_wmi_tlv_op_gen_set_sta_ps(struct ath10k *ar, u32 vdev_id,
2211 enum wmi_sta_powersave_param param_id,
2212 u32 param_value)
2213{
2214 struct wmi_sta_powersave_param_cmd *cmd;
2215 struct wmi_tlv *tlv;
2216 struct sk_buff *skb;
2217
2218 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2219 if (!skb)
2220 return ERR_PTR(-ENOMEM);
2221
2222 tlv = (void *)skb->data;
2223 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_POWERSAVE_PARAM_CMD);
2224 tlv->len = __cpu_to_le16(sizeof(*cmd));
2225 cmd = (void *)tlv->value;
2226 cmd->vdev_id = __cpu_to_le32(vdev_id);
2227 cmd->param_id = __cpu_to_le32(param_id);
2228 cmd->param_value = __cpu_to_le32(param_value);
2229
2230 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set sta ps\n");
2231 return skb;
2232}
2233
2234static struct sk_buff *
2235ath10k_wmi_tlv_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac,
2236 enum wmi_ap_ps_peer_param param_id, u32 value)
2237{
2238 struct wmi_ap_ps_peer_cmd *cmd;
2239 struct wmi_tlv *tlv;
2240 struct sk_buff *skb;
2241
2242 if (!mac)
2243 return ERR_PTR(-EINVAL);
2244
2245 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2246 if (!skb)
2247 return ERR_PTR(-ENOMEM);
2248
2249 tlv = (void *)skb->data;
2250 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_AP_PS_PEER_CMD);
2251 tlv->len = __cpu_to_le16(sizeof(*cmd));
2252 cmd = (void *)tlv->value;
2253 cmd->vdev_id = __cpu_to_le32(vdev_id);
2254 cmd->param_id = __cpu_to_le32(param_id);
2255 cmd->param_value = __cpu_to_le32(value);
2256 ether_addr_copy(cmd->peer_macaddr.addr, mac);
2257
2258 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv ap ps param\n");
2259 return skb;
2260}
2261
/* Build a WMI-TLV scan channel-list command.
 *
 * Layout: fixed command TLV followed by an ARRAY_STRUCT TLV whose
 * payload is one nested wmi_channel TLV per channel.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_scan_chan_list(struct ath10k *ar,
				     const struct wmi_scan_chan_list_arg *arg)
{
	struct wmi_tlv_scan_chan_list_cmd *cmd;
	struct wmi_channel *ci;
	struct wmi_channel_arg *ch;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t chans_len, len;
	int i;
	void *ptr, *chans;

	/* Each channel entry carries its own nested TLV header. */
	chans_len = arg->n_channels * (sizeof(*tlv) + sizeof(*ci));
	len = (sizeof(*tlv) + sizeof(*cmd)) +
	      (sizeof(*tlv) + chans_len);

	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_SCAN_CHAN_LIST_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* Outer array TLV wrapping the per-channel TLVs. */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(chans_len);
	chans = (void *)tlv->value;

	for (i = 0; i < arg->n_channels; i++) {
		ch = &arg->channels[i];

		/* Nested channel TLV; tlv is reused as a cursor here. */
		tlv = chans;
		tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
		tlv->len = __cpu_to_le16(sizeof(*ci));
		ci = (void *)tlv->value;

		ath10k_wmi_put_wmi_channel(ci, ch);

		chans += sizeof(*tlv);
		chans += sizeof(*ci);
	}

	ptr += sizeof(*tlv);
	ptr += chans_len;

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv scan chan list\n");
	return skb;
}
2318
2319static struct sk_buff *
2320ath10k_wmi_tlv_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id,
2321 const void *bcn, size_t bcn_len,
2322 u32 bcn_paddr, bool dtim_zero,
2323 bool deliver_cab)
2324
2325{
2326 struct wmi_bcn_tx_ref_cmd *cmd;
2327 struct wmi_tlv *tlv;
2328 struct sk_buff *skb;
2329 struct ieee80211_hdr *hdr;
2330 u16 fc;
2331
2332 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2333 if (!skb)
2334 return ERR_PTR(-ENOMEM);
2335
2336 hdr = (struct ieee80211_hdr *)bcn;
2337 fc = le16_to_cpu(hdr->frame_control);
2338
2339 tlv = (void *)skb->data;
2340 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_SEND_FROM_HOST_CMD);
2341 tlv->len = __cpu_to_le16(sizeof(*cmd));
2342 cmd = (void *)tlv->value;
2343 cmd->vdev_id = __cpu_to_le32(vdev_id);
2344 cmd->data_len = __cpu_to_le32(bcn_len);
2345 cmd->data_ptr = __cpu_to_le32(bcn_paddr);
2346 cmd->msdu_id = 0;
2347 cmd->frame_control = __cpu_to_le32(fc);
2348 cmd->flags = 0;
2349
2350 if (dtim_zero)
2351 cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);
2352
2353 if (deliver_cab)
2354 cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);
2355
2356 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv beacon dma\n");
2357 return skb;
2358}
2359
2360static struct sk_buff *
2361ath10k_wmi_tlv_op_gen_pdev_set_wmm(struct ath10k *ar,
2362 const struct wmi_wmm_params_all_arg *arg)
2363{
2364 struct wmi_tlv_pdev_set_wmm_cmd *cmd;
2365 struct wmi_wmm_params *wmm;
2366 struct wmi_tlv *tlv;
2367 struct sk_buff *skb;
2368 size_t len;
2369 void *ptr;
2370
2371 len = (sizeof(*tlv) + sizeof(*cmd)) +
2372 (4 * (sizeof(*tlv) + sizeof(*wmm)));
2373 skb = ath10k_wmi_alloc_skb(ar, len);
2374 if (!skb)
2375 return ERR_PTR(-ENOMEM);
2376
2377 ptr = (void *)skb->data;
2378
2379 tlv = ptr;
2380 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_WMM_PARAMS_CMD);
2381 tlv->len = __cpu_to_le16(sizeof(*cmd));
2382 cmd = (void *)tlv->value;
2383
2384 /* nothing to set here */
2385
2386 ptr += sizeof(*tlv);
2387 ptr += sizeof(*cmd);
2388
2389 ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_be);
2390 ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_bk);
2391 ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vi);
2392 ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vo);
2393
2394 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set wmm\n");
2395 return skb;
2396}
2397
2398static struct sk_buff *
2399ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
2400{
2401 struct wmi_request_stats_cmd *cmd;
2402 struct wmi_tlv *tlv;
2403 struct sk_buff *skb;
2404
2405 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2406 if (!skb)
2407 return ERR_PTR(-ENOMEM);
2408
2409 tlv = (void *)skb->data;
2410 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_REQUEST_STATS_CMD);
2411 tlv->len = __cpu_to_le16(sizeof(*cmd));
2412 cmd = (void *)tlv->value;
2413 cmd->stats_id = __cpu_to_le32(stats_mask);
2414
2415 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv request stats\n");
2416 return skb;
2417}
2418
2419static struct sk_buff *
2420ath10k_wmi_tlv_op_gen_force_fw_hang(struct ath10k *ar,
2421 enum wmi_force_fw_hang_type type,
2422 u32 delay_ms)
2423{
2424 struct wmi_force_fw_hang_cmd *cmd;
2425 struct wmi_tlv *tlv;
2426 struct sk_buff *skb;
2427
2428 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2429 if (!skb)
2430 return ERR_PTR(-ENOMEM);
2431
2432 tlv = (void *)skb->data;
2433 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_FORCE_FW_HANG_CMD);
2434 tlv->len = __cpu_to_le16(sizeof(*cmd));
2435 cmd = (void *)tlv->value;
2436 cmd->type = __cpu_to_le32(type);
2437 cmd->delay_ms = __cpu_to_le32(delay_ms);
2438
2439 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv force fw hang\n");
2440 return skb;
2441}
2442
2443static struct sk_buff *
2444ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable,
2445 u32 log_level) {
2446 struct wmi_tlv_dbglog_cmd *cmd;
2447 struct wmi_tlv *tlv;
2448 struct sk_buff *skb;
2449 size_t len, bmap_len;
2450 u32 value;
2451 void *ptr;
2452
2453 if (module_enable) {
2454 value = WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(
2455 module_enable,
2456 WMI_TLV_DBGLOG_LOG_LEVEL_VERBOSE);
2457 } else {
2458 value = WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(
2459 WMI_TLV_DBGLOG_ALL_MODULES,
2460 WMI_TLV_DBGLOG_LOG_LEVEL_WARN);
2461 }
2462
2463 bmap_len = 0;
2464 len = sizeof(*tlv) + sizeof(*cmd) + sizeof(*tlv) + bmap_len;
2465 skb = ath10k_wmi_alloc_skb(ar, len);
2466 if (!skb)
2467 return ERR_PTR(-ENOMEM);
2468
2469 ptr = (void *)skb->data;
2470
2471 tlv = ptr;
2472 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_DEBUG_LOG_CONFIG_CMD);
2473 tlv->len = __cpu_to_le16(sizeof(*cmd));
2474 cmd = (void *)tlv->value;
2475 cmd->param = __cpu_to_le32(WMI_TLV_DBGLOG_PARAM_LOG_LEVEL);
2476 cmd->value = __cpu_to_le32(value);
2477
2478 ptr += sizeof(*tlv);
2479 ptr += sizeof(*cmd);
2480
2481 tlv = ptr;
2482 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
2483 tlv->len = __cpu_to_le16(bmap_len);
2484
2485 /* nothing to do here */
2486
2487 ptr += sizeof(*tlv);
2488 ptr += sizeof(bmap_len);
2489
2490 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv dbglog value 0x%08x\n", value);
2491 return skb;
2492}
2493
2494static struct sk_buff *
2495ath10k_wmi_tlv_op_gen_pktlog_enable(struct ath10k *ar, u32 filter)
2496{
2497 struct wmi_tlv_pktlog_enable *cmd;
2498 struct wmi_tlv *tlv;
2499 struct sk_buff *skb;
2500 void *ptr;
2501 size_t len;
2502
2503 len = sizeof(*tlv) + sizeof(*cmd);
2504 skb = ath10k_wmi_alloc_skb(ar, len);
2505 if (!skb)
2506 return ERR_PTR(-ENOMEM);
2507
2508 ptr = (void *)skb->data;
2509 tlv = ptr;
2510 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_ENABLE_CMD);
2511 tlv->len = __cpu_to_le16(sizeof(*cmd));
2512 cmd = (void *)tlv->value;
2513 cmd->filter = __cpu_to_le32(filter);
2514
2515 ptr += sizeof(*tlv);
2516 ptr += sizeof(*cmd);
2517
2518 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pktlog enable filter 0x%08x\n",
2519 filter);
2520 return skb;
2521}
2522
2523static struct sk_buff *
2524ath10k_wmi_tlv_op_gen_pktlog_disable(struct ath10k *ar)
2525{
2526 struct wmi_tlv_pktlog_disable *cmd;
2527 struct wmi_tlv *tlv;
2528 struct sk_buff *skb;
2529 void *ptr;
2530 size_t len;
2531
2532 len = sizeof(*tlv) + sizeof(*cmd);
2533 skb = ath10k_wmi_alloc_skb(ar, len);
2534 if (!skb)
2535 return ERR_PTR(-ENOMEM);
2536
2537 ptr = (void *)skb->data;
2538 tlv = ptr;
2539 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_DISABLE_CMD);
2540 tlv->len = __cpu_to_le16(sizeof(*cmd));
2541 cmd = (void *)tlv->value;
2542
2543 ptr += sizeof(*tlv);
2544 ptr += sizeof(*cmd);
2545
2546 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pktlog disable\n");
2547 return skb;
2548}
2549
/* Build a WMI_TLV_BCN_TMPL_CMDID buffer uploading a beacon template to
 * firmware.  Layout: command TLV (vdev, TIM IE offset, beacon length),
 * a probe-response info TLV (caps/ERP plus optional IEs), then the
 * beacon frame bytes padded to a 4 byte boundary.
 *
 * @tim_ie_offset: byte offset of the TIM IE within @bcn so firmware
 *	can update it per-DTIM.
 * @prb_ies/@prb_ies_len: optional probe response IEs; may be NULL/0.
 *
 * Returns the ready-to-send skb, ERR_PTR(-EINVAL) on inconsistent
 * probe IE arguments or ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_bcn_tmpl(struct ath10k *ar, u32 vdev_id,
			       u32 tim_ie_offset, struct sk_buff *bcn,
			       u32 prb_caps, u32 prb_erp, void *prb_ies,
			       size_t prb_ies_len)
{
	struct wmi_tlv_bcn_tmpl_cmd *cmd;
	struct wmi_tlv_bcn_prb_info *info;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	size_t len;

	/* non-empty IE buffer requires a valid pointer */
	if (WARN_ON(prb_ies_len > 0 && !prb_ies))
		return ERR_PTR(-EINVAL);

	/* cmd TLV + prb info TLV (with IEs inline) + padded beacon TLV */
	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) + sizeof(*info) + prb_ies_len +
	      sizeof(*tlv) + roundup(bcn->len, 4);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_TMPL_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->tim_ie_offset = __cpu_to_le32(tim_ie_offset);
	/* unpadded beacon length; the TLV below is padded separately */
	cmd->buf_len = __cpu_to_le32(bcn->len);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* FIXME: prb_ies_len should be probably aligned to 4byte boundary but
	 * then it is then impossible to pass original ie len.
	 * This chunk is not used yet so if setting probe resp template yields
	 * problems with beaconing or crashes firmware look here.
	 */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
	tlv->len = __cpu_to_le16(sizeof(*info) + prb_ies_len);
	info = (void *)tlv->value;
	info->caps = __cpu_to_le32(prb_caps);
	info->erp = __cpu_to_le32(prb_erp);
	memcpy(info->ies, prb_ies, prb_ies_len);

	ptr += sizeof(*tlv);
	ptr += sizeof(*info);
	ptr += prb_ies_len;

	/* beacon frame bytes; TLV length is 4-byte padded, tail bytes of
	 * the padding are left as allocated
	 */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
	tlv->len = __cpu_to_le16(roundup(bcn->len, 4));
	memcpy(tlv->value, bcn->data, bcn->len);

	/* FIXME: Adjust TSF? */

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv bcn tmpl vdev_id %i\n",
		   vdev_id);
	return skb;
}
2613
2614static struct sk_buff *
2615ath10k_wmi_tlv_op_gen_prb_tmpl(struct ath10k *ar, u32 vdev_id,
2616 struct sk_buff *prb)
2617{
2618 struct wmi_tlv_prb_tmpl_cmd *cmd;
2619 struct wmi_tlv_bcn_prb_info *info;
2620 struct wmi_tlv *tlv;
2621 struct sk_buff *skb;
2622 void *ptr;
2623 size_t len;
2624
2625 len = sizeof(*tlv) + sizeof(*cmd) +
2626 sizeof(*tlv) + sizeof(*info) +
2627 sizeof(*tlv) + roundup(prb->len, 4);
2628 skb = ath10k_wmi_alloc_skb(ar, len);
2629 if (!skb)
2630 return ERR_PTR(-ENOMEM);
2631
2632 ptr = (void *)skb->data;
2633 tlv = ptr;
2634 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PRB_TMPL_CMD);
2635 tlv->len = __cpu_to_le16(sizeof(*cmd));
2636 cmd = (void *)tlv->value;
2637 cmd->vdev_id = __cpu_to_le32(vdev_id);
2638 cmd->buf_len = __cpu_to_le32(prb->len);
2639
2640 ptr += sizeof(*tlv);
2641 ptr += sizeof(*cmd);
2642
2643 tlv = ptr;
2644 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
2645 tlv->len = __cpu_to_le16(sizeof(*info));
2646 info = (void *)tlv->value;
2647 info->caps = 0;
2648 info->erp = 0;
2649
2650 ptr += sizeof(*tlv);
2651 ptr += sizeof(*info);
2652
2653 tlv = ptr;
2654 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
2655 tlv->len = __cpu_to_le16(roundup(prb->len, 4));
2656 memcpy(tlv->value, prb->data, prb->len);
2657
2658 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv prb tmpl vdev_id %i\n",
2659 vdev_id);
2660 return skb;
2661}
2662
2663static struct sk_buff *
2664ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id,
2665 const u8 *p2p_ie)
2666{
2667 struct wmi_tlv_p2p_go_bcn_ie *cmd;
2668 struct wmi_tlv *tlv;
2669 struct sk_buff *skb;
2670 void *ptr;
2671 size_t len;
2672
2673 len = sizeof(*tlv) + sizeof(*cmd) +
2674 sizeof(*tlv) + roundup(p2p_ie[1] + 2, 4);
2675 skb = ath10k_wmi_alloc_skb(ar, len);
2676 if (!skb)
2677 return ERR_PTR(-ENOMEM);
2678
2679 ptr = (void *)skb->data;
2680 tlv = ptr;
2681 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_P2P_GO_SET_BEACON_IE);
2682 tlv->len = __cpu_to_le16(sizeof(*cmd));
2683 cmd = (void *)tlv->value;
2684 cmd->vdev_id = __cpu_to_le32(vdev_id);
2685 cmd->ie_len = __cpu_to_le32(p2p_ie[1] + 2);
2686
2687 ptr += sizeof(*tlv);
2688 ptr += sizeof(*cmd);
2689
2690 tlv = ptr;
2691 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
2692 tlv->len = __cpu_to_le16(roundup(p2p_ie[1] + 2, 4));
2693 memcpy(tlv->value, p2p_ie, p2p_ie[1] + 2);
2694
2695 ptr += sizeof(*tlv);
2696 ptr += roundup(p2p_ie[1] + 2, 4);
2697
2698 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv p2p go bcn ie for vdev %i\n",
2699 vdev_id);
2700 return skb;
2701}
2702
2703static struct sk_buff *
2704ath10k_wmi_tlv_op_gen_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
2705 enum wmi_tdls_state state)
2706{
2707 struct wmi_tdls_set_state_cmd *cmd;
2708 struct wmi_tlv *tlv;
2709 struct sk_buff *skb;
2710 void *ptr;
2711 size_t len;
2712 /* Set to options from wmi_tlv_tdls_options,
2713 * for now none of them are enabled.
2714 */
2715 u32 options = 0;
2716
2717 len = sizeof(*tlv) + sizeof(*cmd);
2718 skb = ath10k_wmi_alloc_skb(ar, len);
2719 if (!skb)
2720 return ERR_PTR(-ENOMEM);
2721
2722 ptr = (void *)skb->data;
2723 tlv = ptr;
2724 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_SET_STATE_CMD);
2725 tlv->len = __cpu_to_le16(sizeof(*cmd));
2726
2727 cmd = (void *)tlv->value;
2728 cmd->vdev_id = __cpu_to_le32(vdev_id);
2729 cmd->state = __cpu_to_le32(state);
2730 cmd->notification_interval_ms = __cpu_to_le32(5000);
2731 cmd->tx_discovery_threshold = __cpu_to_le32(100);
2732 cmd->tx_teardown_threshold = __cpu_to_le32(5);
2733 cmd->rssi_teardown_threshold = __cpu_to_le32(-75);
2734 cmd->rssi_delta = __cpu_to_le32(-20);
2735 cmd->tdls_options = __cpu_to_le32(options);
2736 cmd->tdls_peer_traffic_ind_window = __cpu_to_le32(2);
2737 cmd->tdls_peer_traffic_response_timeout_ms = __cpu_to_le32(5000);
2738 cmd->tdls_puapsd_mask = __cpu_to_le32(0xf);
2739 cmd->tdls_puapsd_inactivity_time_ms = __cpu_to_le32(0);
2740 cmd->tdls_puapsd_rx_frame_threshold = __cpu_to_le32(10);
2741
2742 ptr += sizeof(*tlv);
2743 ptr += sizeof(*cmd);
2744
2745 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv update fw tdls state %d for vdev %i\n",
2746 state, vdev_id);
2747 return skb;
2748}
2749
2750static u32 ath10k_wmi_tlv_prepare_peer_qos(u8 uapsd_queues, u8 sp)
2751{
2752 u32 peer_qos = 0;
2753
2754 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
2755 peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VO;
2756 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
2757 peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VI;
2758 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
2759 peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BK;
2760 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
2761 peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BE;
2762
2763 peer_qos |= SM(sp, WMI_TLV_TDLS_PEER_SP);
2764
2765 return peer_qos;
2766}
2767
/* Build a WMI_TLV_TDLS_PEER_UPDATE_CMDID buffer describing a TDLS
 * peer's new state and capabilities.  Layout: command TLV, a peer
 * capabilities TLV, then an ARRAY_STRUCT TLV containing one channel
 * TLV per off-channel the peer supports (cap->peer_chan_len entries,
 * taken from @chan_arg in order).
 *
 * Returns the ready-to-send skb or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_tdls_peer_update(struct ath10k *ar,
				       const struct wmi_tdls_peer_update_cmd_arg *arg,
				       const struct wmi_tdls_peer_capab_arg *cap,
				       const struct wmi_channel_arg *chan_arg)
{
	struct wmi_tdls_peer_update_cmd *cmd;
	struct wmi_tdls_peer_capab *peer_cap;
	struct wmi_channel *chan;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	u32 peer_qos;
	void *ptr;
	int len;
	int i;

	/* cmd + peer caps + channel array (each entry has its own TLV
	 * header, accounted for because the inner loop below writes
	 * sizeof(*tlv) + sizeof(*chan) per channel)
	 */
	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) + sizeof(*peer_cap) +
	      sizeof(*tlv) + cap->peer_chan_len * sizeof(*chan);

	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_UPDATE_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));

	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
	cmd->peer_state = __cpu_to_le32(arg->peer_state);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_CAPABILITIES);
	tlv->len = __cpu_to_le16(sizeof(*peer_cap));
	peer_cap = (void *)tlv->value;
	/* fold U-APSD queues and max SP into the firmware QoS word */
	peer_qos = ath10k_wmi_tlv_prepare_peer_qos(cap->peer_uapsd_queues,
						   cap->peer_max_sp);
	peer_cap->peer_qos = __cpu_to_le32(peer_qos);
	peer_cap->buff_sta_support = __cpu_to_le32(cap->buff_sta_support);
	peer_cap->off_chan_support = __cpu_to_le32(cap->off_chan_support);
	peer_cap->peer_curr_operclass = __cpu_to_le32(cap->peer_curr_operclass);
	peer_cap->self_curr_operclass = __cpu_to_le32(cap->self_curr_operclass);
	peer_cap->peer_chan_len = __cpu_to_le32(cap->peer_chan_len);
	peer_cap->peer_operclass_len = __cpu_to_le32(cap->peer_operclass_len);

	for (i = 0; i < WMI_TDLS_MAX_SUPP_OPER_CLASSES; i++)
		peer_cap->peer_operclass[i] = cap->peer_operclass[i];

	peer_cap->is_peer_responder = __cpu_to_le32(cap->is_peer_responder);
	peer_cap->pref_offchan_num = __cpu_to_le32(cap->pref_offchan_num);
	peer_cap->pref_offchan_bw = __cpu_to_le32(cap->pref_offchan_bw);

	ptr += sizeof(*tlv);
	ptr += sizeof(*peer_cap);

	/* outer array TLV; its length covers only the channel payloads,
	 * not the per-entry TLV headers written in the loop below
	 */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(cap->peer_chan_len * sizeof(*chan));

	ptr += sizeof(*tlv);

	for (i = 0; i < cap->peer_chan_len; i++) {
		tlv = ptr;
		tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
		tlv->len = __cpu_to_le16(sizeof(*chan));
		chan = (void *)tlv->value;
		ath10k_wmi_put_wmi_channel(chan, &chan_arg[i]);

		ptr += sizeof(*tlv);
		ptr += sizeof(*chan);
	}

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi tlv tdls peer update vdev %i state %d n_chans %u\n",
		   arg->vdev_id, arg->peer_state, cap->peer_chan_len);
	return skb;
}
2851
2852static struct sk_buff *
2853ath10k_wmi_tlv_op_gen_wow_enable(struct ath10k *ar)
2854{
2855 struct wmi_tlv_wow_enable_cmd *cmd;
2856 struct wmi_tlv *tlv;
2857 struct sk_buff *skb;
2858 size_t len;
2859
2860 len = sizeof(*tlv) + sizeof(*cmd);
2861 skb = ath10k_wmi_alloc_skb(ar, len);
2862 if (!skb)
2863 return ERR_PTR(-ENOMEM);
2864
2865 tlv = (struct wmi_tlv *)skb->data;
2866 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ENABLE_CMD);
2867 tlv->len = __cpu_to_le16(sizeof(*cmd));
2868 cmd = (void *)tlv->value;
2869
2870 cmd->enable = __cpu_to_le32(1);
2871
2872 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow enable\n");
2873 return skb;
2874}
2875
2876static struct sk_buff *
2877ath10k_wmi_tlv_op_gen_wow_add_wakeup_event(struct ath10k *ar,
2878 u32 vdev_id,
2879 enum wmi_wow_wakeup_event event,
2880 u32 enable)
2881{
2882 struct wmi_tlv_wow_add_del_event_cmd *cmd;
2883 struct wmi_tlv *tlv;
2884 struct sk_buff *skb;
2885 size_t len;
2886
2887 len = sizeof(*tlv) + sizeof(*cmd);
2888 skb = ath10k_wmi_alloc_skb(ar, len);
2889 if (!skb)
2890 return ERR_PTR(-ENOMEM);
2891
2892 tlv = (struct wmi_tlv *)skb->data;
2893 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_DEL_EVT_CMD);
2894 tlv->len = __cpu_to_le16(sizeof(*cmd));
2895 cmd = (void *)tlv->value;
2896
2897 cmd->vdev_id = __cpu_to_le32(vdev_id);
2898 cmd->is_add = __cpu_to_le32(enable);
2899 cmd->event_bitmap = __cpu_to_le32(1 << event);
2900
2901 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
2902 wow_wakeup_event(event), enable, vdev_id);
2903 return skb;
2904}
2905
2906static struct sk_buff *
2907ath10k_wmi_tlv_gen_wow_host_wakeup_ind(struct ath10k *ar)
2908{
2909 struct wmi_tlv_wow_host_wakeup_ind *cmd;
2910 struct wmi_tlv *tlv;
2911 struct sk_buff *skb;
2912 size_t len;
2913
2914 len = sizeof(*tlv) + sizeof(*cmd);
2915 skb = ath10k_wmi_alloc_skb(ar, len);
2916 if (!skb)
2917 return ERR_PTR(-ENOMEM);
2918
2919 tlv = (struct wmi_tlv *)skb->data;
2920 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_HOSTWAKEUP_FROM_SLEEP_CMD);
2921 tlv->len = __cpu_to_le16(sizeof(*cmd));
2922 cmd = (void *)tlv->value;
2923
2924 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow host wakeup ind\n");
2925 return skb;
2926}
2927
/* Build a WMI_TLV_WOW_ADD_WAKE_PATTERN_CMDID buffer installing a
 * bitmap wake pattern.  The firmware expects a fixed sequence of seven
 * TLVs after the command: a wrapped bitmap pattern, then empty ipv4
 * sync, ipv6 sync, magic and pattern-info-timeout arrays, and finally
 * a one-word ratelimit interval array (value left uninitialized, as
 * allocated).
 *
 * @pattern/@bitmask: @pattern_len bytes each; the bitmask selects
 *	which pattern bits must match.
 * @pattern_offset: byte offset into the frame where matching starts.
 *
 * NOTE(review): pattern_len is copied into the fixed-size
 * patternbuf/bitmaskbuf arrays without a bounds check here - callers
 * are presumably limited to WOW_MAX_PATTERN_SIZE; confirm.
 *
 * Returns the ready-to-send skb or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_wow_add_pattern(struct ath10k *ar, u32 vdev_id,
				      u32 pattern_id, const u8 *pattern,
				      const u8 *bitmask, int pattern_len,
				      int pattern_offset)
{
	struct wmi_tlv_wow_add_pattern_cmd *cmd;
	struct wmi_tlv_wow_bitmap_pattern *bitmap;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	size_t len;

	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) +			/* array struct */
	      sizeof(*tlv) + sizeof(*bitmap) +  /* bitmap */
	      sizeof(*tlv) +			/* empty ipv4 sync */
	      sizeof(*tlv) +			/* empty ipv6 sync */
	      sizeof(*tlv) +			/* empty magic */
	      sizeof(*tlv) +			/* empty info timeout */
	      sizeof(*tlv) + sizeof(u32);	/* ratelimit interval */

	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* cmd */
	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_PATTERN_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;

	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->pattern_id = __cpu_to_le32(pattern_id);
	cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* bitmap: outer array TLV length covers the inner TLV header too */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(sizeof(*tlv) + sizeof(*bitmap));

	ptr += sizeof(*tlv);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_BITMAP_PATTERN_T);
	tlv->len = __cpu_to_le16(sizeof(*bitmap));
	bitmap = (void *)tlv->value;

	memcpy(bitmap->patternbuf, pattern, pattern_len);
	memcpy(bitmap->bitmaskbuf, bitmask, pattern_len);
	bitmap->pattern_offset = __cpu_to_le32(pattern_offset);
	bitmap->pattern_len = __cpu_to_le32(pattern_len);
	bitmap->bitmask_len = __cpu_to_le32(pattern_len);
	bitmap->pattern_id = __cpu_to_le32(pattern_id);

	ptr += sizeof(*tlv);
	ptr += sizeof(*bitmap);

	/* ipv4 sync */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(0);

	ptr += sizeof(*tlv);

	/* ipv6 sync */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(0);

	ptr += sizeof(*tlv);

	/* magic */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(0);

	ptr += sizeof(*tlv);

	/* pattern info timeout */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
	tlv->len = __cpu_to_le16(0);

	ptr += sizeof(*tlv);

	/* ratelimit interval */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
	tlv->len = __cpu_to_le16(sizeof(u32));

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d, pattern_offset %d\n",
		   vdev_id, pattern_id, pattern_offset);
	return skb;
}
3027
3028static struct sk_buff *
3029ath10k_wmi_tlv_op_gen_wow_del_pattern(struct ath10k *ar, u32 vdev_id,
3030 u32 pattern_id)
3031{
3032 struct wmi_tlv_wow_del_pattern_cmd *cmd;
3033 struct wmi_tlv *tlv;
3034 struct sk_buff *skb;
3035 size_t len;
3036
3037 len = sizeof(*tlv) + sizeof(*cmd);
3038 skb = ath10k_wmi_alloc_skb(ar, len);
3039 if (!skb)
3040 return ERR_PTR(-ENOMEM);
3041
3042 tlv = (struct wmi_tlv *)skb->data;
3043 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_DEL_PATTERN_CMD);
3044 tlv->len = __cpu_to_le16(sizeof(*cmd));
3045 cmd = (void *)tlv->value;
3046
3047 cmd->vdev_id = __cpu_to_le32(vdev_id);
3048 cmd->pattern_id = __cpu_to_le32(pattern_id);
3049 cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN);
3050
3051 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
3052 vdev_id, pattern_id);
3053 return skb;
3054}
3055
3056static struct sk_buff *
3057ath10k_wmi_tlv_op_gen_adaptive_qcs(struct ath10k *ar, bool enable)
3058{
3059 struct wmi_tlv_adaptive_qcs *cmd;
3060 struct wmi_tlv *tlv;
3061 struct sk_buff *skb;
3062 void *ptr;
3063 size_t len;
3064
3065 len = sizeof(*tlv) + sizeof(*cmd);
3066 skb = ath10k_wmi_alloc_skb(ar, len);
3067 if (!skb)
3068 return ERR_PTR(-ENOMEM);
3069
3070 ptr = (void *)skb->data;
3071 tlv = ptr;
3072 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_RESMGR_ADAPTIVE_OCS_CMD);
3073 tlv->len = __cpu_to_le16(sizeof(*cmd));
3074 cmd = (void *)tlv->value;
3075 cmd->enable = __cpu_to_le32(enable ? 1 : 0);
3076
3077 ptr += sizeof(*tlv);
3078 ptr += sizeof(*cmd);
3079
3080 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv adaptive qcs %d\n", enable);
3081 return skb;
3082}
3083
3084/****************/
3085/* TLV mappings */
3086/****************/
3087
/* Abstract-to-TLV command ID translation table used by the common WMI
 * layer when talking to TLV-based firmware.  Entries mapped to
 * WMI_TLV_CMD_UNSUPPORTED / WMI_CMD_UNSUPPORTED are commands this
 * firmware interface does not implement.
 */
static struct wmi_cmd_map wmi_tlv_cmd_map = {
	.init_cmdid = WMI_TLV_INIT_CMDID,
	.start_scan_cmdid = WMI_TLV_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_TLV_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_TLV_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_TLV_SCAN_SCH_PRIO_TBL_CMDID,
	.pdev_set_regdomain_cmdid = WMI_TLV_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_TLV_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_TLV_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_TLV_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_TLV_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_TLV_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_TLV_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_TLV_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_dscp_tid_map_cmdid = WMI_TLV_PDEV_SET_DSCP_TID_MAP_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_TLV_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_TLV_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_TLV_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_TLV_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_TLV_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_TLV_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_TLV_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_TLV_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_TLV_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_TLV_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_TLV_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_TLV_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_TLV_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_TLV_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_TLV_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_TLV_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_TLV_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_TLV_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_TLV_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_TLV_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_TLV_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_TLV_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_TLV_PDEV_SEND_BCN_CMDID,
	.bcn_tmpl_cmdid = WMI_TLV_BCN_TMPL_CMDID,
	.bcn_filter_rx_cmdid = WMI_TLV_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_TLV_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_TLV_MGMT_TX_CMDID,
	.prb_tmpl_cmdid = WMI_TLV_PRB_TMPL_CMDID,
	.addba_clear_resp_cmdid = WMI_TLV_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_TLV_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_TLV_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_TLV_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_TLV_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_TLV_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_TLV_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_TLV_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_TLV_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_TLV_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_TLV_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_TLV_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_TLV_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_TLV_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold =
				WMI_TLV_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_TLV_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_TLV_ROAM_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_TLV_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_TLV_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_TLV_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_TLV_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_TLV_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_TLV_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_TLV_P2P_SET_VENDOR_IE_DATA_CMDID,
	.ap_ps_peer_param_cmdid = WMI_TLV_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_TLV_AP_PS_PEER_UAPSD_COEX_CMDID,
	.peer_rate_retry_sched_cmdid = WMI_TLV_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_TLV_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
				WMI_TLV_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
				WMI_TLV_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
				WMI_TLV_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
				WMI_TLV_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_TLV_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_TLV_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_TLV_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_TLV_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_TLV_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_TLV_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
				WMI_TLV_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_TLV_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid =
				WMI_TLV_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_TLV_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_TLV_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid = WMI_TLV_SPECTRAL_SCAN_CONF_CMDID,
	.vdev_spectral_scan_enable_cmdid = WMI_TLV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_TLV_REQUEST_STATS_CMDID,
	.set_arp_ns_offload_cmdid = WMI_TLV_SET_ARP_NS_OFFLOAD_CMDID,
	.network_list_offload_config_cmdid =
				WMI_TLV_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
	.gtk_offload_cmdid = WMI_TLV_GTK_OFFLOAD_CMDID,
	.csa_offload_enable_cmdid = WMI_TLV_CSA_OFFLOAD_ENABLE_CMDID,
	.csa_offload_chanswitch_cmdid = WMI_TLV_CSA_OFFLOAD_CHANSWITCH_CMDID,
	.chatter_set_mode_cmdid = WMI_TLV_CHATTER_SET_MODE_CMDID,
	.peer_tid_addba_cmdid = WMI_TLV_PEER_TID_ADDBA_CMDID,
	.peer_tid_delba_cmdid = WMI_TLV_PEER_TID_DELBA_CMDID,
	.sta_dtim_ps_method_cmdid = WMI_TLV_STA_DTIM_PS_METHOD_CMDID,
	.sta_uapsd_auto_trig_cmdid = WMI_TLV_STA_UAPSD_AUTO_TRIG_CMDID,
	.sta_keepalive_cmd = WMI_TLV_STA_KEEPALIVE_CMDID,
	.echo_cmdid = WMI_TLV_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_TLV_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_TLV_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_TLV_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_TLV_PDEV_FTM_INTG_CMDID,
	.vdev_set_keepalive_cmdid = WMI_TLV_VDEV_SET_KEEPALIVE_CMDID,
	.vdev_get_keepalive_cmdid = WMI_TLV_VDEV_GET_KEEPALIVE_CMDID,
	.force_fw_hang_cmdid = WMI_TLV_FORCE_FW_HANG_CMDID,
	.gpio_config_cmdid = WMI_TLV_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_TLV_GPIO_OUTPUT_CMDID,
	.pdev_get_temperature_cmdid = WMI_TLV_CMD_UNSUPPORTED,
	.vdev_set_wmm_params_cmdid = WMI_TLV_VDEV_SET_WMM_PARAMS_CMDID,
	.tdls_set_state_cmdid = WMI_TLV_TDLS_SET_STATE_CMDID,
	.tdls_peer_update_cmdid = WMI_TLV_TDLS_PEER_UPDATE_CMDID,
	.adaptive_qcs_cmdid = WMI_TLV_RESMGR_ADAPTIVE_OCS_CMDID,
	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
	.nan_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
};
3244
/* Translation table from the driver's abstract pdev parameter ids to the
 * concrete WMI_TLV_PDEV_PARAM_* ids understood by TLV firmware.  Entries
 * set to an *_UNSUPPORTED sentinel are parameters this firmware branch
 * does not expose.
 */
static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
	.tx_chain_mask = WMI_TLV_PDEV_PARAM_TX_CHAIN_MASK,
	.rx_chain_mask = WMI_TLV_PDEV_PARAM_RX_CHAIN_MASK,
	.txpower_limit2g = WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT2G,
	.txpower_limit5g = WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT5G,
	.txpower_scale = WMI_TLV_PDEV_PARAM_TXPOWER_SCALE,
	.beacon_gen_mode = WMI_TLV_PDEV_PARAM_BEACON_GEN_MODE,
	.beacon_tx_mode = WMI_TLV_PDEV_PARAM_BEACON_TX_MODE,
	.resmgr_offchan_mode = WMI_TLV_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
	.protection_mode = WMI_TLV_PDEV_PARAM_PROTECTION_MODE,
	.dynamic_bw = WMI_TLV_PDEV_PARAM_DYNAMIC_BW,
	.non_agg_sw_retry_th = WMI_TLV_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
	.agg_sw_retry_th = WMI_TLV_PDEV_PARAM_AGG_SW_RETRY_TH,
	.sta_kickout_th = WMI_TLV_PDEV_PARAM_STA_KICKOUT_TH,
	.ac_aggrsize_scaling = WMI_TLV_PDEV_PARAM_AC_AGGRSIZE_SCALING,
	.ltr_enable = WMI_TLV_PDEV_PARAM_LTR_ENABLE,
	.ltr_ac_latency_be = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BE,
	.ltr_ac_latency_bk = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BK,
	.ltr_ac_latency_vi = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VI,
	.ltr_ac_latency_vo = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VO,
	.ltr_ac_latency_timeout = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
	.ltr_sleep_override = WMI_TLV_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
	.ltr_rx_override = WMI_TLV_PDEV_PARAM_LTR_RX_OVERRIDE,
	.ltr_tx_activity_timeout = WMI_TLV_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
	.l1ss_enable = WMI_TLV_PDEV_PARAM_L1SS_ENABLE,
	.dsleep_enable = WMI_TLV_PDEV_PARAM_DSLEEP_ENABLE,
	.pcielp_txbuf_flush = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
	/* NOTE(review): .pcielp_txbuf_watermark maps to the same _TMO_EN id
	 * as .pcielp_txbuf_tmo_en below — looks like a copy/paste slip.
	 * Confirm whether a WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_WATERMARK id
	 * exists in the TLV enum before changing it.
	 */
	.pcielp_txbuf_watermark = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
	.pcielp_txbuf_tmo_en = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
	.pcielp_txbuf_tmo_value = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
	.pdev_stats_update_period = WMI_TLV_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
	.vdev_stats_update_period = WMI_TLV_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
	.peer_stats_update_period = WMI_TLV_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
	.bcnflt_stats_update_period =
				WMI_TLV_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
	.pmf_qos = WMI_TLV_PDEV_PARAM_PMF_QOS,
	.arp_ac_override = WMI_TLV_PDEV_PARAM_ARP_AC_OVERRIDE,
	.dcs = WMI_TLV_PDEV_PARAM_DCS,
	.ani_enable = WMI_TLV_PDEV_PARAM_ANI_ENABLE,
	.ani_poll_period = WMI_TLV_PDEV_PARAM_ANI_POLL_PERIOD,
	.ani_listen_period = WMI_TLV_PDEV_PARAM_ANI_LISTEN_PERIOD,
	.ani_ofdm_level = WMI_TLV_PDEV_PARAM_ANI_OFDM_LEVEL,
	.ani_cck_level = WMI_TLV_PDEV_PARAM_ANI_CCK_LEVEL,
	.dyntxchain = WMI_TLV_PDEV_PARAM_DYNTXCHAIN,
	.proxy_sta = WMI_TLV_PDEV_PARAM_PROXY_STA,
	.idle_ps_config = WMI_TLV_PDEV_PARAM_IDLE_PS_CONFIG,
	.power_gating_sleep = WMI_TLV_PDEV_PARAM_POWER_GATING_SLEEP,
	.fast_channel_reset = WMI_TLV_PDEV_PARAM_UNSUPPORTED,
	.burst_dur = WMI_TLV_PDEV_PARAM_BURST_DUR,
	.burst_enable = WMI_TLV_PDEV_PARAM_BURST_ENABLE,
	/* NOTE(review): the entries below use the generic
	 * WMI_PDEV_PARAM_UNSUPPORTED sentinel while .fast_channel_reset
	 * above uses the TLV-specific one.  Presumably both sentinels have
	 * the same value — verify in wmi.h/wmi-tlv.h.
	 */
	.cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
	.aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
	.rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
	.smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
	.igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
	.igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
	.antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
	.rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
	.proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
	.remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
	.peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
	.igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
	.block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
	.mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
	.noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
	.noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
	.dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
	.atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
	.atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
	.ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
	.mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
	.sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
	.signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
	.signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
	.enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
	.enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
	.cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
	.rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
	.pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
	.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
	.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
	.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
};
3339
/* Translation table from the driver's abstract vdev parameter ids to the
 * concrete WMI_TLV_VDEV_PARAM_* ids understood by TLV firmware.  Entries
 * set to an *_UNSUPPORTED sentinel are parameters this firmware branch
 * does not expose.
 */
static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = {
	.rts_threshold = WMI_TLV_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_TLV_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_TLV_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_TLV_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_TLV_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_TLV_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_TLV_VDEV_PARAM_SLOT_TIME,
	.preamble = WMI_TLV_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_TLV_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_TLV_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_TLV_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_TLV_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_TLV_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
				WMI_TLV_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_TLV_VDEV_PARAM_WDS,
	.atim_window = WMI_TLV_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_TLV_VDEV_PARAM_BMISS_COUNT_MAX,
	.bmiss_first_bcnt = WMI_TLV_VDEV_PARAM_BMISS_FIRST_BCNT,
	.bmiss_final_bcnt = WMI_TLV_VDEV_PARAM_BMISS_FINAL_BCNT,
	.feature_wmm = WMI_TLV_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_TLV_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_TLV_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection = WMI_TLV_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_TLV_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_TLV_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_TLV_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_TLV_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_TLV_VDEV_PARAM_SGI,
	.ldpc = WMI_TLV_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_TLV_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_TLV_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_TLV_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_TLV_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_TLV_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_TLV_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_TLV_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_TLV_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_TLV_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_TLV_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
		WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
		WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
		WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_TLV_VDEV_PARAM_AP_ENABLE_NAWDS,
	.mcast2ucast_set = WMI_TLV_VDEV_PARAM_UNSUPPORTED,
	.enable_rtscts = WMI_TLV_VDEV_PARAM_ENABLE_RTSCTS,
	.txbf = WMI_TLV_VDEV_PARAM_TXBF,
	.packet_powersave = WMI_TLV_VDEV_PARAM_PACKET_POWERSAVE,
	.drop_unencry = WMI_TLV_VDEV_PARAM_DROP_UNENCRY,
	.tx_encap_type = WMI_TLV_VDEV_PARAM_TX_ENCAP_TYPE,
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
					WMI_TLV_VDEV_PARAM_UNSUPPORTED,
	/* NOTE(review): the entries below use the generic
	 * WMI_VDEV_PARAM_UNSUPPORTED sentinel while the two entries above
	 * use the TLV-specific one.  Presumably both sentinels have the
	 * same value — verify in wmi.h/wmi-tlv.h.
	 */
	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
	.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
	.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
	.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
	.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
};
3413
/* Dispatch table binding the generic wmi-ops interface to the TLV
 * implementations in this file.  Entries left as comments are operations
 * the TLV backend does not provide; wmi-ops falls back to -EOPNOTSUPP
 * (or an alternative path, e.g. HTT for mgmt tx) for those.
 */
static const struct wmi_ops wmi_tlv_ops = {
	/* Event dispatch and service bitmap mapping. */
	.rx = ath10k_wmi_tlv_op_rx,
	.map_svc = wmi_tlv_svc_map,

	/* Parsers for firmware-to-host events. */
	.pull_scan = ath10k_wmi_tlv_op_pull_scan_ev,
	.pull_mgmt_rx = ath10k_wmi_tlv_op_pull_mgmt_rx_ev,
	.pull_ch_info = ath10k_wmi_tlv_op_pull_ch_info_ev,
	.pull_vdev_start = ath10k_wmi_tlv_op_pull_vdev_start_ev,
	.pull_peer_kick = ath10k_wmi_tlv_op_pull_peer_kick_ev,
	.pull_swba = ath10k_wmi_tlv_op_pull_swba_ev,
	.pull_phyerr_hdr = ath10k_wmi_tlv_op_pull_phyerr_ev_hdr,
	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
	.pull_svc_rdy = ath10k_wmi_tlv_op_pull_svc_rdy_ev,
	.pull_rdy = ath10k_wmi_tlv_op_pull_rdy_ev,
	.pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats,
	.pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev,
	.pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev,
	.get_txbf_conf_scheme = ath10k_wmi_tlv_txbf_conf_scheme,

	/* Generators for host-to-firmware commands. */
	.gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend,
	.gen_pdev_resume = ath10k_wmi_tlv_op_gen_pdev_resume,
	.gen_pdev_set_rd = ath10k_wmi_tlv_op_gen_pdev_set_rd,
	.gen_pdev_set_param = ath10k_wmi_tlv_op_gen_pdev_set_param,
	.gen_init = ath10k_wmi_tlv_op_gen_init,
	.gen_start_scan = ath10k_wmi_tlv_op_gen_start_scan,
	.gen_stop_scan = ath10k_wmi_tlv_op_gen_stop_scan,
	.gen_vdev_create = ath10k_wmi_tlv_op_gen_vdev_create,
	.gen_vdev_delete = ath10k_wmi_tlv_op_gen_vdev_delete,
	.gen_vdev_start = ath10k_wmi_tlv_op_gen_vdev_start,
	.gen_vdev_stop = ath10k_wmi_tlv_op_gen_vdev_stop,
	.gen_vdev_up = ath10k_wmi_tlv_op_gen_vdev_up,
	.gen_vdev_down = ath10k_wmi_tlv_op_gen_vdev_down,
	.gen_vdev_set_param = ath10k_wmi_tlv_op_gen_vdev_set_param,
	.gen_vdev_install_key = ath10k_wmi_tlv_op_gen_vdev_install_key,
	.gen_vdev_wmm_conf = ath10k_wmi_tlv_op_gen_vdev_wmm_conf,
	.gen_peer_create = ath10k_wmi_tlv_op_gen_peer_create,
	.gen_peer_delete = ath10k_wmi_tlv_op_gen_peer_delete,
	.gen_peer_flush = ath10k_wmi_tlv_op_gen_peer_flush,
	.gen_peer_set_param = ath10k_wmi_tlv_op_gen_peer_set_param,
	.gen_peer_assoc = ath10k_wmi_tlv_op_gen_peer_assoc,
	.gen_set_psmode = ath10k_wmi_tlv_op_gen_set_psmode,
	.gen_set_sta_ps = ath10k_wmi_tlv_op_gen_set_sta_ps,
	.gen_set_ap_ps = ath10k_wmi_tlv_op_gen_set_ap_ps,
	.gen_scan_chan_list = ath10k_wmi_tlv_op_gen_scan_chan_list,
	.gen_beacon_dma = ath10k_wmi_tlv_op_gen_beacon_dma,
	.gen_pdev_set_wmm = ath10k_wmi_tlv_op_gen_pdev_set_wmm,
	.gen_request_stats = ath10k_wmi_tlv_op_gen_request_stats,
	.gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang,
	/* .gen_mgmt_tx = not implemented; HTT is used */
	.gen_dbglog_cfg = ath10k_wmi_tlv_op_gen_dbglog_cfg,
	.gen_pktlog_enable = ath10k_wmi_tlv_op_gen_pktlog_enable,
	.gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable,
	/* .gen_pdev_set_quiet_mode not implemented */
	/* .gen_pdev_get_temperature not implemented */
	/* .gen_addba_clear_resp not implemented */
	/* .gen_addba_send not implemented */
	/* .gen_addba_set_resp not implemented */
	/* .gen_delba_send not implemented */
	.gen_bcn_tmpl = ath10k_wmi_tlv_op_gen_bcn_tmpl,
	.gen_prb_tmpl = ath10k_wmi_tlv_op_gen_prb_tmpl,
	.gen_p2p_go_bcn_ie = ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie,
	.gen_vdev_sta_uapsd = ath10k_wmi_tlv_op_gen_vdev_sta_uapsd,
	.gen_sta_keepalive = ath10k_wmi_tlv_op_gen_sta_keepalive,
	.gen_wow_enable = ath10k_wmi_tlv_op_gen_wow_enable,
	.gen_wow_add_wakeup_event = ath10k_wmi_tlv_op_gen_wow_add_wakeup_event,
	.gen_wow_host_wakeup_ind = ath10k_wmi_tlv_gen_wow_host_wakeup_ind,
	.gen_wow_add_pattern = ath10k_wmi_tlv_op_gen_wow_add_pattern,
	.gen_wow_del_pattern = ath10k_wmi_tlv_op_gen_wow_del_pattern,
	.gen_update_fw_tdls_state = ath10k_wmi_tlv_op_gen_update_fw_tdls_state,
	.gen_tdls_peer_update = ath10k_wmi_tlv_op_gen_tdls_peer_update,
	.gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs,
	/* Debugfs stats formatting reuses the main (non-TLV) filler. */
	.fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
	.get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
};
3488
/* Translation of abstract peer capability flags to the bit values the TLV
 * firmware expects in peer assoc commands.
 */
static const struct wmi_peer_flags_map wmi_tlv_peer_flags_map = {
	.auth = WMI_TLV_PEER_AUTH,
	.qos = WMI_TLV_PEER_QOS,
	.need_ptk_4_way = WMI_TLV_PEER_NEED_PTK_4_WAY,
	.need_gtk_2_way = WMI_TLV_PEER_NEED_GTK_2_WAY,
	.apsd = WMI_TLV_PEER_APSD,
	.ht = WMI_TLV_PEER_HT,
	.bw40 = WMI_TLV_PEER_40MHZ,
	.stbc = WMI_TLV_PEER_STBC,
	.ldbc = WMI_TLV_PEER_LDPC, /* field name is "ldbc" in the struct, but it carries the LDPC flag */
	.dyn_mimops = WMI_TLV_PEER_DYN_MIMOPS,
	.static_mimops = WMI_TLV_PEER_STATIC_MIMOPS,
	.spatial_mux = WMI_TLV_PEER_SPATIAL_MUX,
	.vht = WMI_TLV_PEER_VHT,
	.bw80 = WMI_TLV_PEER_80MHZ,
	.pmf = WMI_TLV_PEER_PMF,
};
3506
3507/************/
3508/* TLV init */
3509/************/
3510
3511void ath10k_wmi_tlv_attach(struct ath10k *ar)
3512{
3513 ar->wmi.cmd = &wmi_tlv_cmd_map;
3514 ar->wmi.vdev_param = &wmi_tlv_vdev_param_map;
3515 ar->wmi.pdev_param = &wmi_tlv_pdev_param_map;
3516 ar->wmi.ops = &wmi_tlv_ops;
3517 ar->wmi.peer_flags = &wmi_tlv_peer_flags_map;
3518}