// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */
#include <linux/skbuff.h>
#include <linux/ctype.h>
#include <net/mac80211.h>
#include <net/cfg80211.h>
#include <linux/completion.h>
#include <linux/if_ether.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/uuid.h>
#include <linux/time.h>
#include <linux/of.h>
#include "core.h"
#include "debug.h"
#include "mac.h"
#include "hw.h"
#include "peer.h"

struct ath12k_wmi_svc_ready_parse {
	bool wmi_svc_bitmap_done;
};

struct ath12k_wmi_dma_ring_caps_parse {
	struct ath12k_wmi_dma_ring_caps_params *dma_ring_caps;
	u32 n_dma_ring_caps;
};

struct ath12k_wmi_service_ext_arg {
	u32 default_conc_scan_config_bits;
	u32 default_fw_config_bits;
	struct ath12k_wmi_ppe_threshold_arg ppet;
	u32 he_cap_info;
	u32 mpdu_density;
	u32 max_bssid_rx_filters;
	u32 num_hw_modes;
	u32 num_phy;
};

struct ath12k_wmi_svc_rdy_ext_parse {
	struct ath12k_wmi_service_ext_arg arg;
	const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps;
	const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps;
	u32 n_hw_mode_caps;
	u32 tot_phy_id;
	struct ath12k_wmi_hw_mode_cap_params pref_hw_mode_caps;
	struct ath12k_wmi_mac_phy_caps_params *mac_phy_caps;
	u32 n_mac_phy_caps;
	const struct ath12k_wmi_soc_hal_reg_caps_params *soc_hal_reg_caps;
	const struct ath12k_wmi_hal_reg_caps_ext_params *ext_hal_reg_caps;
	u32 n_ext_hal_reg_caps;
	struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
	bool hw_mode_done;
	bool mac_phy_done;
	bool ext_hal_reg_done;
	bool mac_phy_chainmask_combo_done;
	bool mac_phy_chainmask_cap_done;
	bool oem_dma_ring_cap_done;
	bool dma_ring_cap_done;
};

struct ath12k_wmi_svc_rdy_ext2_arg {
	u32 reg_db_version;
	u32 hw_min_max_tx_power_2ghz;
	u32 hw_min_max_tx_power_5ghz;
	u32 chwidth_num_peer_caps;
	u32 preamble_puncture_bw;
	u32 max_user_per_ppdu_ofdma;
	u32 max_user_per_ppdu_mumimo;
	u32 target_cap_flags;
	u32 eht_cap_mac_info[WMI_MAX_EHTCAP_MAC_SIZE];
	u32 max_num_linkview_peers;
	u32 max_num_msduq_supported_per_tid;
	u32 default_num_msduq_supported_per_tid;
};

struct ath12k_wmi_svc_rdy_ext2_parse {
	struct ath12k_wmi_svc_rdy_ext2_arg arg;
	struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
	bool dma_ring_cap_done;
	bool spectral_bin_scaling_done;
	bool mac_phy_caps_ext_done;
};

struct ath12k_wmi_rdy_parse {
	u32 num_extra_mac_addr;
};

struct ath12k_wmi_dma_buf_release_arg {
	struct ath12k_wmi_dma_buf_release_fixed_params fixed;
	const struct ath12k_wmi_dma_buf_release_entry_params *buf_entry;
	const struct ath12k_wmi_dma_buf_release_meta_data_params *meta_data;
	u32 num_buf_entry;
	u32 num_meta;
	bool buf_entry_done;
	bool meta_data_done;
};

struct ath12k_wmi_tlv_policy {
	size_t min_len;
};

struct wmi_tlv_mgmt_rx_parse {
	const struct ath12k_wmi_mgmt_rx_params *fixed;
	const u8 *frame_buf;
	bool frame_buf_done;
};

static const struct ath12k_wmi_tlv_policy ath12k_wmi_tlv_policies[] = {
	[WMI_TAG_ARRAY_BYTE] = { .min_len = 0 },
	[WMI_TAG_ARRAY_UINT32] = { .min_len = 0 },
	[WMI_TAG_SERVICE_READY_EVENT] = {
		.min_len = sizeof(struct wmi_service_ready_event) },
	[WMI_TAG_SERVICE_READY_EXT_EVENT] = {
		.min_len = sizeof(struct wmi_service_ready_ext_event) },
	[WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS] = {
		.min_len = sizeof(struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params) },
	[WMI_TAG_SOC_HAL_REG_CAPABILITIES] = {
		.min_len = sizeof(struct ath12k_wmi_soc_hal_reg_caps_params) },
	[WMI_TAG_VDEV_START_RESPONSE_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_start_resp_event) },
	[WMI_TAG_PEER_DELETE_RESP_EVENT] = {
		.min_len = sizeof(struct wmi_peer_delete_resp_event) },
	[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT] = {
		.min_len = sizeof(struct wmi_bcn_tx_status_event) },
	[WMI_TAG_VDEV_STOPPED_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_stopped_event) },
	[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT] = {
		.min_len = sizeof(struct wmi_reg_chan_list_cc_ext_event) },
	[WMI_TAG_MGMT_RX_HDR] = {
		.min_len = sizeof(struct ath12k_wmi_mgmt_rx_params) },
	[WMI_TAG_MGMT_TX_COMPL_EVENT] = {
		.min_len = sizeof(struct wmi_mgmt_tx_compl_event) },
	[WMI_TAG_SCAN_EVENT] = {
		.min_len = sizeof(struct wmi_scan_event) },
	[WMI_TAG_PEER_STA_KICKOUT_EVENT] = {
		.min_len = sizeof(struct wmi_peer_sta_kickout_event) },
	[WMI_TAG_ROAM_EVENT] = {
		.min_len = sizeof(struct wmi_roam_event) },
	[WMI_TAG_CHAN_INFO_EVENT] = {
		.min_len = sizeof(struct wmi_chan_info_event) },
	[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT] = {
		.min_len = sizeof(struct wmi_pdev_bss_chan_info_event) },
	[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_install_key_compl_event) },
	[WMI_TAG_READY_EVENT] = {
		.min_len = sizeof(struct ath12k_wmi_ready_event_min_params) },
	[WMI_TAG_SERVICE_AVAILABLE_EVENT] = {
		.min_len = sizeof(struct wmi_service_available_event) },
	[WMI_TAG_PEER_ASSOC_CONF_EVENT] = {
		.min_len = sizeof(struct wmi_peer_assoc_conf_event) },
	[WMI_TAG_RFKILL_EVENT] = {
		.min_len = sizeof(struct wmi_rfkill_state_change_event) },
	[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT] = {
		.min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) },
	[WMI_TAG_HOST_SWFDA_EVENT] = {
		.min_len = sizeof(struct wmi_fils_discovery_event) },
	[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT] = {
		.min_len = sizeof(struct wmi_probe_resp_tx_status_event) },
	[WMI_TAG_VDEV_DELETE_RESP_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_delete_resp_event) },
};

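/* Helpers to encode a TLV header word: the tag goes into WMI_TLV_TAG and the
 * value length into WMI_TLV_LEN; the _cmd_ variant takes the total length
 * including the TLV header and subtracts TLV_HDR_SIZE itself.
 */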
static __le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len)
{
	return le32_encode_bits(cmd, WMI_TLV_TAG) |
	       le32_encode_bits(len, WMI_TLV_LEN);
}

static __le32 ath12k_wmi_tlv_cmd_hdr(u32 cmd, u32 len)
{
	return ath12k_wmi_tlv_hdr(cmd, len - TLV_HDR_SIZE);
}

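/* Default WMI resource configuration for QCN9274 based targets. Peer and TID
 * counts scale with the number of radios (single, DBS or DBS_SBS).
 */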
void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
			     struct ath12k_wmi_resource_config_arg *config)
{
	config->num_vdevs = ab->num_radios * TARGET_NUM_VDEVS;

	if (ab->num_radios == 2) {
		config->num_peers = TARGET_NUM_PEERS(DBS);
		config->num_tids = TARGET_NUM_TIDS(DBS);
	} else if (ab->num_radios == 3) {
		config->num_peers = TARGET_NUM_PEERS(DBS_SBS);
		config->num_tids = TARGET_NUM_TIDS(DBS_SBS);
	} else {
		/* Control should not reach here */
		config->num_peers = TARGET_NUM_PEERS(SINGLE);
		config->num_tids = TARGET_NUM_TIDS(SINGLE);
	}
	config->num_offload_peers = TARGET_NUM_OFFLD_PEERS;
	config->num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
	config->num_peer_keys = TARGET_NUM_PEER_KEYS;
	config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
	config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
	config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
	config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;

	if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags))
		config->rx_decap_mode = TARGET_DECAP_MODE_RAW;
	else
		config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;

	config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
	config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
	config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
	config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
	config->num_mcast_groups = TARGET_NUM_MCAST_GROUPS;
	config->num_mcast_table_elems = TARGET_NUM_MCAST_TABLE_ELEMS;
	config->mcast2ucast_mode = TARGET_MCAST2UCAST_MODE;
	config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
	config->num_wds_entries = TARGET_NUM_WDS_ENTRIES;
	config->dma_burst_size = TARGET_DMA_BURST_SIZE;
	config->rx_skip_defrag_timeout_dup_detection_check =
		TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
	config->vow_config = TARGET_VOW_CONFIG;
	config->gtk_offload_max_vdev = TARGET_GTK_OFFLOAD_MAX_VDEV;
	config->num_msdu_desc = TARGET_NUM_MSDU_DESC;
	config->beacon_tx_offload_max_vdev = ab->num_radios * TARGET_MAX_BCN_OFFLD;
	config->rx_batchmode = TARGET_RX_BATCHMODE;
	/* Indicates host supports peer map v3 and unmap v2 support */
	config->peer_map_unmap_version = 0x32;
	config->twt_ap_pdev_count = ab->num_radios;
	config->twt_ap_sta_count = 1000;
}

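/* Default WMI resource configuration for WCN7850 based targets, which use
 * much smaller, fixed vdev/peer/TID counts than the AP platforms above.
 */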
void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
			     struct ath12k_wmi_resource_config_arg *config)
{
	config->num_vdevs = 4;
	config->num_peers = 16;
	config->num_tids = 32;

	config->num_offload_peers = 3;
	config->num_offload_reorder_buffs = 3;
	config->num_peer_keys = TARGET_NUM_PEER_KEYS;
	config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
	config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
	config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
	config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
	config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
	config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
	config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
	config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
	config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
	config->num_mcast_groups = 0;
	config->num_mcast_table_elems = 0;
	config->mcast2ucast_mode = 0;
	config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
	config->num_wds_entries = 0;
	config->dma_burst_size = 0;
	config->rx_skip_defrag_timeout_dup_detection_check = 0;
	config->vow_config = TARGET_VOW_CONFIG;
	config->gtk_offload_max_vdev = 2;
	config->num_msdu_desc = 0x400;
	config->beacon_tx_offload_max_vdev = 2;
	config->rx_batchmode = TARGET_RX_BATCHMODE;

	config->peer_map_unmap_version = 0x1;
	config->use_pdev_id = 1;
	config->max_frag_entries = 0xa;
	config->num_tdls_vdevs = 0x1;
	config->num_tdls_conn_table_entries = 8;
	config->beacon_tx_offload_max_vdev = 0x2;
	config->num_multicast_filter_entries = 0x20;
	config->num_wow_filters = 0x16;
	config->num_keep_alive_pattern = 0;
}

#define PRIMAP(_hw_mode_) \
	[_hw_mode_] = _hw_mode_##_PRI

static const int ath12k_hw_mode_pri_map[] = {
	PRIMAP(WMI_HOST_HW_MODE_SINGLE),
	PRIMAP(WMI_HOST_HW_MODE_DBS),
	PRIMAP(WMI_HOST_HW_MODE_SBS_PASSIVE),
	PRIMAP(WMI_HOST_HW_MODE_SBS),
	PRIMAP(WMI_HOST_HW_MODE_DBS_SBS),
	PRIMAP(WMI_HOST_HW_MODE_DBS_OR_SBS),
	/* keep last */
	PRIMAP(WMI_HOST_HW_MODE_MAX),
};

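/* Walk a buffer of WMI TLVs, validating each header against the remaining
 * buffer length and the per-tag minimum length from ath12k_wmi_tlv_policies
 * before handing the value to @iter.
 */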
static int
ath12k_wmi_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
		    int (*iter)(struct ath12k_base *ab, u16 tag, u16 len,
				const void *ptr, void *data),
		    void *data)
{
	const void *begin = ptr;
	const struct wmi_tlv *tlv;
	u16 tlv_tag, tlv_len;
	int ret;

	while (len > 0) {
		if (len < sizeof(*tlv)) {
			ath12k_err(ab, "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
				   ptr - begin, len, sizeof(*tlv));
			return -EINVAL;
		}

		tlv = ptr;
		tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG);
		tlv_len = le32_get_bits(tlv->header, WMI_TLV_LEN);
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		if (tlv_len > len) {
			ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
				   tlv_tag, ptr - begin, len, tlv_len);
			return -EINVAL;
		}

		if (tlv_tag < ARRAY_SIZE(ath12k_wmi_tlv_policies) &&
		    ath12k_wmi_tlv_policies[tlv_tag].min_len &&
		    ath12k_wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
			ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n",
				   tlv_tag, ptr - begin, tlv_len,
				   ath12k_wmi_tlv_policies[tlv_tag].min_len);
			return -EINVAL;
		}

		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
		if (ret)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}

	return 0;
}

static int ath12k_wmi_tlv_iter_parse(struct ath12k_base *ab, u16 tag, u16 len,
				     const void *ptr, void *data)
{
	const void **tb = data;

	if (tag < WMI_TAG_MAX)
		tb[tag] = ptr;

	return 0;
}

static int ath12k_wmi_tlv_parse(struct ath12k_base *ar, const void **tb,
				const void *ptr, size_t len)
{
	return ath12k_wmi_tlv_iter(ar, ptr, len, ath12k_wmi_tlv_iter_parse,
				   (void *)tb);
}

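/* Parse a TLV buffer into a freshly allocated table indexed by tag. The
 * caller owns the returned array and must kfree() it.
 */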
static const void **
ath12k_wmi_tlv_parse_alloc(struct ath12k_base *ab, const void *ptr,
			   size_t len, gfp_t gfp)
{
	const void **tb;
	int ret;

	tb = kcalloc(WMI_TAG_MAX, sizeof(*tb), gfp);
	if (!tb)
		return ERR_PTR(-ENOMEM);

	ret = ath12k_wmi_tlv_parse(ab, tb, ptr, len);
	if (ret) {
		kfree(tb);
		return ERR_PTR(ret);
	}

	return tb;
}

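/* Prepend the WMI command header and hand the skb to HTC without waiting for
 * tx credits; the header is pulled back off if the send fails.
 */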
static int ath12k_wmi_cmd_send_nowait(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
				      u32 cmd_id)
{
	struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_cmd_hdr *cmd_hdr;
	int ret;

	if (!skb_push(skb, sizeof(struct wmi_cmd_hdr)))
		return -ENOMEM;

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	cmd_hdr->cmd_id = le32_encode_bits(cmd_id, WMI_CMD_HDR_CMD_ID);

	memset(skb_cb, 0, sizeof(*skb_cb));
	ret = ath12k_htc_send(&ab->htc, wmi->eid, skb);

	if (ret)
		goto err_pull;

	return 0;

err_pull:
	skb_pull(skb, sizeof(struct wmi_cmd_hdr));
	return ret;
}

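/* Credit-aware wrapper around ath12k_wmi_cmd_send_nowait(): the send is
 * retried whenever tx credits are replenished until it stops returning
 * -EAGAIN or WMI_SEND_TIMEOUT_HZ expires.
 */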
int ath12k_wmi_cmd_send(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
			u32 cmd_id)
{
	struct ath12k_wmi_base *wmi_ab = wmi->wmi_ab;
	int ret = -EOPNOTSUPP;

	might_sleep();

	wait_event_timeout(wmi_ab->tx_credits_wq, ({
		ret = ath12k_wmi_cmd_send_nowait(wmi, skb, cmd_id);

		if (ret && test_bit(ATH12K_FLAG_CRASH_FLUSH, &wmi_ab->ab->dev_flags))
			ret = -ESHUTDOWN;

		(ret != -EAGAIN);
	}), WMI_SEND_TIMEOUT_HZ);

	if (ret == -EAGAIN)
		ath12k_warn(wmi_ab->ab, "wmi command %d timeout\n", cmd_id);

	return ret;
}

static int ath12k_pull_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
				     const void *ptr,
				     struct ath12k_wmi_service_ext_arg *arg)
{
	const struct wmi_service_ready_ext_event *ev = ptr;
	int i;

	if (!ev)
		return -EINVAL;

	/* Move this to host based bitmap */
	arg->default_conc_scan_config_bits =
		le32_to_cpu(ev->default_conc_scan_config_bits);
	arg->default_fw_config_bits = le32_to_cpu(ev->default_fw_config_bits);
	arg->he_cap_info = le32_to_cpu(ev->he_cap_info);
	arg->mpdu_density = le32_to_cpu(ev->mpdu_density);
	arg->max_bssid_rx_filters = le32_to_cpu(ev->max_bssid_rx_filters);
	arg->ppet.numss_m1 = le32_to_cpu(ev->ppet.numss_m1);
	arg->ppet.ru_bit_mask = le32_to_cpu(ev->ppet.ru_info);

	for (i = 0; i < WMI_MAX_NUM_SS; i++)
		arg->ppet.ppet16_ppet8_ru3_ru0[i] =
			le32_to_cpu(ev->ppet.ppet16_ppet8_ru3_ru0[i]);

	return 0;
}

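/* Look up the MAC/PHY capabilities matching @hw_mode_id and @phy_id in the
 * service ready ext TLVs and populate the per-band fields of @pdev.
 */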
static int
ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
				      struct ath12k_wmi_svc_rdy_ext_parse *svc,
				      u8 hw_mode_id, u8 phy_id,
				      struct ath12k_pdev *pdev)
{
	const struct ath12k_wmi_mac_phy_caps_params *mac_caps;
	const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps = svc->hw_caps;
	const struct ath12k_wmi_hw_mode_cap_params *wmi_hw_mode_caps = svc->hw_mode_caps;
	const struct ath12k_wmi_mac_phy_caps_params *wmi_mac_phy_caps = svc->mac_phy_caps;
	struct ath12k_base *ab = wmi_handle->wmi_ab->ab;
	struct ath12k_band_cap *cap_band;
	struct ath12k_pdev_cap *pdev_cap = &pdev->cap;
	struct ath12k_fw_pdev *fw_pdev;
	u32 phy_map;
	u32 hw_idx, phy_idx = 0;
	int i;

	if (!hw_caps || !wmi_hw_mode_caps || !svc->soc_hal_reg_caps)
		return -EINVAL;

	for (hw_idx = 0; hw_idx < le32_to_cpu(hw_caps->num_hw_modes); hw_idx++) {
		if (hw_mode_id == le32_to_cpu(wmi_hw_mode_caps[hw_idx].hw_mode_id))
			break;

		phy_map = le32_to_cpu(wmi_hw_mode_caps[hw_idx].phy_id_map);
		phy_idx = fls(phy_map);
	}

	if (hw_idx == le32_to_cpu(hw_caps->num_hw_modes))
		return -EINVAL;

	phy_idx += phy_id;
	if (phy_id >= le32_to_cpu(svc->soc_hal_reg_caps->num_phy))
		return -EINVAL;

	mac_caps = wmi_mac_phy_caps + phy_idx;

	pdev->pdev_id = le32_to_cpu(mac_caps->pdev_id);
	pdev_cap->supported_bands |= le32_to_cpu(mac_caps->supported_bands);
	pdev_cap->ampdu_density = le32_to_cpu(mac_caps->ampdu_density);

	fw_pdev = &ab->fw_pdev[ab->fw_pdev_count];
	fw_pdev->supported_bands = le32_to_cpu(mac_caps->supported_bands);
	fw_pdev->pdev_id = le32_to_cpu(mac_caps->pdev_id);
	fw_pdev->phy_id = le32_to_cpu(mac_caps->phy_id);
	ab->fw_pdev_count++;

	/* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from
	 * band to band for a single radio, need to see how this should be
	 * handled.
	 */
	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2G_CAP) {
		pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_2g);
		pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_2g);
	} else if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5G_CAP) {
		pdev_cap->vht_cap = le32_to_cpu(mac_caps->vht_cap_info_5g);
		pdev_cap->vht_mcs = le32_to_cpu(mac_caps->vht_supp_mcs_5g);
		pdev_cap->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
		pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_5g);
		pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_5g);
	} else {
		return -EINVAL;
	}

	/* tx/rx chainmask reported from fw depends on the actual hw chains used,
	 * For example, for 4x4 capable macphys, first 4 chains can be used for first
	 * mac and the remaining 4 chains can be used for the second mac or vice-versa.
	 * In this case, tx/rx chainmask 0xf will be advertised for first mac and 0xf0
	 * will be advertised for second mac or vice-versa. Compute the shift value
	 * for tx/rx chainmask which will be used to advertise supported ht/vht rates to
	 * mac80211.
	 */
	pdev_cap->tx_chain_mask_shift =
		find_first_bit((unsigned long *)&pdev_cap->tx_chain_mask, 32);
	pdev_cap->rx_chain_mask_shift =
		find_first_bit((unsigned long *)&pdev_cap->rx_chain_mask, 32);

	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2G_CAP) {
		cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
		cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
		cap_band->max_bw_supported = le32_to_cpu(mac_caps->max_bw_supported_2g);
		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_2g);
		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_2g);
		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_2g_ext);
		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_2g);
		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
			cap_band->he_cap_phy_info[i] =
				le32_to_cpu(mac_caps->he_cap_phy_info_2g[i]);

		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet2g.numss_m1);
		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet2g.ru_info);

		for (i = 0; i < WMI_MAX_NUM_SS; i++)
			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
				le32_to_cpu(mac_caps->he_ppet2g.ppet16_ppet8_ru3_ru0[i]);
	}

	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5G_CAP) {
		cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
		cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
		cap_band->max_bw_supported =
			le32_to_cpu(mac_caps->max_bw_supported_5g);
		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g);
		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g);
		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext);
		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
			cap_band->he_cap_phy_info[i] =
				le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]);

		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1);
		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info);

		for (i = 0; i < WMI_MAX_NUM_SS; i++)
			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
				le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]);

		cap_band = &pdev_cap->band[NL80211_BAND_6GHZ];
		cap_band->max_bw_supported =
			le32_to_cpu(mac_caps->max_bw_supported_5g);
		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g);
		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g);
		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext);
		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
			cap_band->he_cap_phy_info[i] =
				le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]);

		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1);
		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info);

		for (i = 0; i < WMI_MAX_NUM_SS; i++)
			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
				le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]);
	}

	return 0;
}

static int
ath12k_pull_reg_cap_svc_rdy_ext(struct ath12k_wmi_pdev *wmi_handle,
				const struct ath12k_wmi_soc_hal_reg_caps_params *reg_caps,
				const struct ath12k_wmi_hal_reg_caps_ext_params *ext_caps,
				u8 phy_idx,
				struct ath12k_wmi_hal_reg_capabilities_ext_arg *param)
{
	const struct ath12k_wmi_hal_reg_caps_ext_params *ext_reg_cap;

	if (!reg_caps || !ext_caps)
		return -EINVAL;

	if (phy_idx >= le32_to_cpu(reg_caps->num_phy))
		return -EINVAL;

	ext_reg_cap = &ext_caps[phy_idx];

	param->phy_id = le32_to_cpu(ext_reg_cap->phy_id);
	param->eeprom_reg_domain = le32_to_cpu(ext_reg_cap->eeprom_reg_domain);
	param->eeprom_reg_domain_ext =
		le32_to_cpu(ext_reg_cap->eeprom_reg_domain_ext);
	param->regcap1 = le32_to_cpu(ext_reg_cap->regcap1);
	param->regcap2 = le32_to_cpu(ext_reg_cap->regcap2);
	/* check if param->wireless_mode is needed */
	param->low_2ghz_chan = le32_to_cpu(ext_reg_cap->low_2ghz_chan);
	param->high_2ghz_chan = le32_to_cpu(ext_reg_cap->high_2ghz_chan);
	param->low_5ghz_chan = le32_to_cpu(ext_reg_cap->low_5ghz_chan);
	param->high_5ghz_chan = le32_to_cpu(ext_reg_cap->high_5ghz_chan);

	return 0;
}

static int ath12k_pull_service_ready_tlv(struct ath12k_base *ab,
					 const void *evt_buf,
					 struct ath12k_wmi_target_cap_arg *cap)
{
	const struct wmi_service_ready_event *ev = evt_buf;

	if (!ev) {
		ath12k_err(ab, "%s: failed by NULL param\n",
			   __func__);
		return -EINVAL;
	}

	cap->phy_capability = le32_to_cpu(ev->phy_capability);
	cap->max_frag_entry = le32_to_cpu(ev->max_frag_entry);
	cap->num_rf_chains = le32_to_cpu(ev->num_rf_chains);
	cap->ht_cap_info = le32_to_cpu(ev->ht_cap_info);
	cap->vht_cap_info = le32_to_cpu(ev->vht_cap_info);
	cap->vht_supp_mcs = le32_to_cpu(ev->vht_supp_mcs);
	cap->hw_min_tx_power = le32_to_cpu(ev->hw_min_tx_power);
	cap->hw_max_tx_power = le32_to_cpu(ev->hw_max_tx_power);
	cap->sys_cap_info = le32_to_cpu(ev->sys_cap_info);
	cap->min_pkt_size_enable = le32_to_cpu(ev->min_pkt_size_enable);
	cap->max_bcn_ie_size = le32_to_cpu(ev->max_bcn_ie_size);
	cap->max_num_scan_channels = le32_to_cpu(ev->max_num_scan_channels);
	cap->max_supported_macs = le32_to_cpu(ev->max_supported_macs);
	cap->wmi_fw_sub_feat_caps = le32_to_cpu(ev->wmi_fw_sub_feat_caps);
	cap->txrx_chainmask = le32_to_cpu(ev->txrx_chainmask);
	cap->default_dbs_hw_mode_index = le32_to_cpu(ev->default_dbs_hw_mode_index);
	cap->num_msdu_desc = le32_to_cpu(ev->num_msdu_desc);

	return 0;
}

/* Save the wmi_service_bitmap into a linear bitmap. The wmi_services in
 * wmi_service ready event are advertised in b0-b3 (LSB 4-bits) of each
 * 4-byte word.
 */
static void ath12k_wmi_service_bitmap_copy(struct ath12k_wmi_pdev *wmi,
					   const u32 *wmi_svc_bm)
{
	int i, j;

	for (i = 0, j = 0; i < WMI_SERVICE_BM_SIZE && j < WMI_MAX_SERVICE; i++) {
		do {
			if (wmi_svc_bm[i] & BIT(j % WMI_SERVICE_BITS_IN_SIZE32))
				set_bit(j, wmi->wmi_ab->svc_map);
		} while (++j % WMI_SERVICE_BITS_IN_SIZE32);
	}
}

static int ath12k_wmi_svc_rdy_parse(struct ath12k_base *ab, u16 tag, u16 len,
				    const void *ptr, void *data)
{
	struct ath12k_wmi_svc_ready_parse *svc_ready = data;
	struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
	u16 expect_len;

	switch (tag) {
	case WMI_TAG_SERVICE_READY_EVENT:
		if (ath12k_pull_service_ready_tlv(ab, ptr, &ab->target_caps))
			return -EINVAL;
		break;

	case WMI_TAG_ARRAY_UINT32:
		if (!svc_ready->wmi_svc_bitmap_done) {
			expect_len = WMI_SERVICE_BM_SIZE * sizeof(u32);
			if (len < expect_len) {
				ath12k_warn(ab, "invalid len %d for the tag 0x%x\n",
					    len, tag);
				return -EINVAL;
			}

			ath12k_wmi_service_bitmap_copy(wmi_handle, ptr);

			svc_ready->wmi_svc_bitmap_done = true;
		}
		break;
	default:
		break;
	}

	return 0;
}

static int ath12k_service_ready_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct ath12k_wmi_svc_ready_parse svc_ready = { };
	int ret;

	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
				  ath12k_wmi_svc_rdy_parse,
				  &svc_ready);
	if (ret) {
		ath12k_warn(ab, "failed to parse tlv %d\n", ret);
		return ret;
	}

	return 0;
}

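/* Allocate an skb for a WMI command with room for the HTC header; the length
 * is rounded up to a 4-byte multiple and the payload is zeroed.
 */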
struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_ab, u32 len)
{
	struct sk_buff *skb;
	struct ath12k_base *ab = wmi_ab->ab;
	u32 round_len = roundup(len, 4);

	skb = ath12k_htc_alloc_skb(ab, WMI_SKB_HEADROOM + round_len);
	if (!skb)
		return NULL;

	skb_reserve(skb, WMI_SKB_HEADROOM);
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath12k_warn(ab, "unaligned WMI skb data\n");

	skb_put(skb, round_len);
	memset(skb->data, 0, round_len);

	return skb;
}

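/* Send a management frame via WMI. At most WMI_MGMT_SEND_DOWNLD_LEN bytes are
 * copied inline into the command; the full frame is passed by DMA address.
 */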
int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
			 struct sk_buff *frame)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_mgmt_send_cmd *cmd;
	struct wmi_tlv *frame_tlv;
	struct sk_buff *skb;
	u32 buf_len;
	int ret, len;

	buf_len = min_t(int, frame->len, WMI_MGMT_SEND_DOWNLD_LEN);

	len = sizeof(*cmd) + sizeof(*frame_tlv) + roundup(buf_len, 4);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_mgmt_send_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MGMT_TX_SEND_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->desc_id = cpu_to_le32(buf_id);
	cmd->chanfreq = 0;
	cmd->paddr_lo = cpu_to_le32(lower_32_bits(ATH12K_SKB_CB(frame)->paddr));
	cmd->paddr_hi = cpu_to_le32(upper_32_bits(ATH12K_SKB_CB(frame)->paddr));
	cmd->frame_len = cpu_to_le32(frame->len);
	cmd->buf_len = cpu_to_le32(buf_len);
	cmd->tx_params_valid = 0;

	frame_tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
	frame_tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, buf_len);

	memcpy(frame_tlv->value, frame->data, buf_len);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MGMT_TX_SEND_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to submit WMI_MGMT_TX_SEND_CMDID cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
			   struct ath12k_wmi_vdev_create_arg *args)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_create_cmd *cmd;
	struct sk_buff *skb;
	struct ath12k_wmi_vdev_txrx_streams_params *txrx_streams;
	struct wmi_tlv *tlv;
	int ret, len;
	void *ptr;

	/* It can be optimized by sending tx/rx chain configuration
	 * only for supported bands instead of always sending it for
	 * both the bands.
	 */
	len = sizeof(*cmd) + TLV_HDR_SIZE +
	      (WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams));

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_create_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_CREATE_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(args->if_id);
	cmd->vdev_type = cpu_to_le32(args->type);
	cmd->vdev_subtype = cpu_to_le32(args->subtype);
	cmd->num_cfg_txrx_streams = cpu_to_le32(WMI_NUM_SUPPORTED_BAND_MAX);
	cmd->pdev_id = cpu_to_le32(args->pdev_id);
	cmd->vdev_stats_id = cpu_to_le32(args->if_stats_id);
	ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);

	ptr = skb->data + sizeof(*cmd);
	len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);

	ptr += TLV_HDR_SIZE;
	txrx_streams = ptr;
	len = sizeof(*txrx_streams);
	txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
							  len);
	txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_2G;
	txrx_streams->supported_tx_streams =
		args->chains[NL80211_BAND_2GHZ].tx;
	txrx_streams->supported_rx_streams =
		args->chains[NL80211_BAND_2GHZ].rx;

	txrx_streams++;
	txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
							  len);
	txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_5G;
	txrx_streams->supported_tx_streams =
		args->chains[NL80211_BAND_5GHZ].tx;
	txrx_streams->supported_rx_streams =
		args->chains[NL80211_BAND_5GHZ].rx;

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n",
		   args->if_id, args->type, args->subtype,
		   macaddr, args->pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_CREATE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to submit WMI_VDEV_CREATE_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_vdev_delete(struct ath12k *ar, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_delete_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_delete_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DELETE_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev delete id %d\n", vdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_DELETE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DELETE_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_vdev_stop(struct ath12k *ar, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_stop_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_stop_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_STOP_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev stop id 0x%x\n", vdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_STOP_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_STOP cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_vdev_down(struct ath12k *ar, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_down_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_down_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DOWN_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev down id 0x%x\n", vdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_DOWN_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DOWN cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

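/* Translate a vdev start request argument into the WMI channel TLV, including
 * the band center frequencies, phy mode, flags and regulatory power limits.
 */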
static void ath12k_wmi_put_wmi_channel(struct ath12k_wmi_channel_params *chan,
				       struct wmi_vdev_start_req_arg *arg)
{
	memset(chan, 0, sizeof(*chan));

	chan->mhz = cpu_to_le32(arg->freq);
	chan->band_center_freq1 = cpu_to_le32(arg->band_center_freq1);
	if (arg->mode == MODE_11AC_VHT80_80)
		chan->band_center_freq2 = cpu_to_le32(arg->band_center_freq2);
	else
		chan->band_center_freq2 = 0;

	chan->info |= le32_encode_bits(arg->mode, WMI_CHAN_INFO_MODE);
	if (arg->passive)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE);
	if (arg->allow_ibss)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ADHOC_ALLOWED);
	if (arg->allow_ht)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT);
	if (arg->allow_vht)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT);
	if (arg->allow_he)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE);
	if (arg->ht40plus)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_HT40_PLUS);
	if (arg->chan_radar)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS);
	if (arg->freq2_radar)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS_FREQ2);

	chan->reg_info_1 = le32_encode_bits(arg->max_power,
					    WMI_CHAN_REG_INFO1_MAX_PWR) |
		le32_encode_bits(arg->max_reg_power,
				 WMI_CHAN_REG_INFO1_MAX_REG_PWR);

	chan->reg_info_2 = le32_encode_bits(arg->max_antenna_gain,
					    WMI_CHAN_REG_INFO2_ANT_MAX) |
		le32_encode_bits(arg->max_power, WMI_CHAN_REG_INFO2_MAX_TX_PWR);
}

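/* Build and send WMI_VDEV_START_REQUEST_CMDID (or the restart variant): the
 * fixed command, a channel TLV and an empty P2P NoA descriptor array TLV.
 */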
int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
			  bool restart)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_start_request_cmd *cmd;
	struct sk_buff *skb;
	struct ath12k_wmi_channel_params *chan;
	struct wmi_tlv *tlv;
	void *ptr;
	int ret, len;

	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
		return -EINVAL;

	len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_START_REQUEST_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	cmd->beacon_interval = cpu_to_le32(arg->bcn_intval);
	cmd->bcn_tx_rate = cpu_to_le32(arg->bcn_tx_rate);
	cmd->dtim_period = cpu_to_le32(arg->dtim_period);
	cmd->num_noa_descriptors = cpu_to_le32(arg->num_noa_descriptors);
	cmd->preferred_rx_streams = cpu_to_le32(arg->pref_rx_streams);
	cmd->preferred_tx_streams = cpu_to_le32(arg->pref_tx_streams);
	cmd->cac_duration_ms = cpu_to_le32(arg->cac_duration_ms);
	cmd->regdomain = cpu_to_le32(arg->regdomain);
	cmd->he_ops = cpu_to_le32(arg->he_ops);
	cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap);

	if (!restart) {
		if (arg->ssid) {
			cmd->ssid.ssid_len = cpu_to_le32(arg->ssid_len);
			memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
		}
		if (arg->hidden_ssid)
			cmd->flags |= cpu_to_le32(WMI_VDEV_START_HIDDEN_SSID);
		if (arg->pmf_enabled)
			cmd->flags |= cpu_to_le32(WMI_VDEV_START_PMF_ENABLED);
	}

	cmd->flags |= cpu_to_le32(WMI_VDEV_START_LDPC_RX_ENABLED);

	ptr = skb->data + sizeof(*cmd);
	chan = ptr;

	ath12k_wmi_put_wmi_channel(chan, arg);

	chan->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL,
						  sizeof(*chan));
	ptr += sizeof(*chan);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);

	/* Note: This is a nested TLV containing:
	 * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
	 */

	ptr += sizeof(*tlv);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %s id 0x%x freq 0x%x mode 0x%x\n",
		   restart ? "restart" : "start", arg->vdev_id,
		   arg->freq, arg->mode);

	if (restart)
		ret = ath12k_wmi_cmd_send(wmi, skb,
					  WMI_VDEV_RESTART_REQUEST_CMDID);
	else
		ret = ath12k_wmi_cmd_send(wmi, skb,
					  WMI_VDEV_START_REQUEST_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit vdev_%s cmd\n",
			    restart ? "restart" : "start");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_vdev_up(struct ath12k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_up_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_up_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_UP_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->vdev_assoc_id = cpu_to_le32(aid);

	ether_addr_copy(cmd->vdev_bssid.addr, bssid);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
		   vdev_id, aid, bssid);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_UP cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_peer_create_cmd(struct ath12k *ar,
				    struct ath12k_wmi_peer_create_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_create_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_create_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_CREATE_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_addr);
	cmd->peer_type = cpu_to_le32(arg->peer_type);
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI peer create vdev_id %d peer_addr %pM\n",
		   arg->vdev_id, arg->peer_addr);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_CREATE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_PEER_CREATE cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_peer_delete_cmd(struct ath12k *ar,
				    const u8 *peer_addr, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_delete_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_delete_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_DELETE_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI peer delete vdev_id %d peer_addr %pM\n",
		   vdev_id, peer_addr);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_DELETE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_PEER_DELETE cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_pdev_set_regdomain(struct ath12k *ar,
				       struct ath12k_wmi_pdev_set_regdomain_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_set_regdomain_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_REGDOMAIN_CMD,
						 sizeof(*cmd));

	cmd->reg_domain = cpu_to_le32(arg->current_rd_in_use);
	cmd->reg_domain_2g = cpu_to_le32(arg->current_rd_2g);
	cmd->reg_domain_5g = cpu_to_le32(arg->current_rd_5g);
	cmd->conformance_test_limit_2g = cpu_to_le32(arg->ctl_2g);
	cmd->conformance_test_limit_5g = cpu_to_le32(arg->ctl_5g);
	cmd->dfs_domain = cpu_to_le32(arg->dfs_domain);
	cmd->pdev_id = cpu_to_le32(arg->pdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI pdev regd rd %d rd2g %d rd5g %d domain %d pdev id %d\n",
		   arg->current_rd_in_use, arg->current_rd_2g,
		   arg->current_rd_5g, arg->dfs_domain, arg->pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_REGDOMAIN_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PDEV_SET_REGDOMAIN cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_set_peer_param(struct ath12k *ar, const u8 *peer_addr,
			      u32 vdev_id, u32 param_id, u32 param_val)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_set_param_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_set_param_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_SET_PARAM_CMD,
						 sizeof(*cmd));
	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->param_id = cpu_to_le32(param_id);
	cmd->param_value = cpu_to_le32(param_val);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev %d peer 0x%pM set param %d value %d\n",
		   vdev_id, peer_addr, param_id, param_val);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_SET_PARAM_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_PEER_SET_PARAM cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_peer_flush_tids_cmd(struct ath12k *ar,
					u8 peer_addr[ETH_ALEN],
					u32 peer_tid_bitmap,
					u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_flush_tids_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_FLUSH_TIDS_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
	cmd->peer_tid_bitmap = cpu_to_le32(peer_tid_bitmap);
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI peer flush vdev_id %d peer_addr %pM tids %08x\n",
		   vdev_id, peer_addr, peer_tid_bitmap);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_FLUSH_TIDS_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PEER_FLUSH_TIDS cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_peer_rx_reorder_queue_setup(struct ath12k *ar,
					   int vdev_id, const u8 *addr,
					   dma_addr_t paddr, u8 tid,
					   u8 ba_window_size_valid,
					   u32 ba_window_size)
{
	struct wmi_peer_reorder_queue_setup_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_reorder_queue_setup_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_SETUP_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, addr);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->tid = cpu_to_le32(tid);
	cmd->queue_ptr_lo = cpu_to_le32(lower_32_bits(paddr));
	cmd->queue_ptr_hi = cpu_to_le32(upper_32_bits(paddr));
	cmd->queue_no = cpu_to_le32(tid);
	cmd->ba_window_size_valid = cpu_to_le32(ba_window_size_valid);
	cmd->ba_window_size = cpu_to_le32(ba_window_size);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi rx reorder queue setup addr %pM vdev_id %d tid %d\n",
		   addr, vdev_id, tid);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
				  WMI_PEER_REORDER_QUEUE_SETUP_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PEER_REORDER_QUEUE_SETUP\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int
ath12k_wmi_rx_reord_queue_remove(struct ath12k *ar,
				 struct ath12k_wmi_rx_reorder_queue_remove_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_reorder_queue_remove_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_reorder_queue_remove_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_REMOVE_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_macaddr);
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	cmd->tid_mask = cpu_to_le32(arg->peer_tid_bitmap);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "%s: peer_macaddr %pM vdev_id %d, tid_map %d", __func__,
		   arg->peer_macaddr, arg->vdev_id, arg->peer_tid_bitmap);

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_PEER_REORDER_QUEUE_REMOVE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PEER_REORDER_QUEUE_REMOVE_CMDID");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_pdev_set_param(struct ath12k *ar, u32 param_id,
			      u32 param_value, u8 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_set_param_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_PARAM_CMD,
						 sizeof(*cmd));
	cmd->pdev_id = cpu_to_le32(pdev_id);
	cmd->param_id = cpu_to_le32(param_id);
	cmd->param_value = cpu_to_le32(param_value);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI pdev set param %d pdev id %d value %d\n",
		   param_id, pdev_id, param_value);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_PARAM_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_pdev_set_ps_mode(struct ath12k *ar, int vdev_id, u32 enable)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_set_ps_mode_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_ps_mode_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_MODE_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->sta_ps_mode = cpu_to_le32(enable);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev set psmode %d vdev id %d\n",
		   enable, vdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_MODE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_MODE cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_pdev_suspend(struct ath12k *ar, u32 suspend_opt,
			    u32 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_suspend_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_suspend_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SUSPEND_CMD,
						 sizeof(*cmd));

	cmd->suspend_opt = cpu_to_le32(suspend_opt);
	cmd->pdev_id = cpu_to_le32(pdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI pdev suspend pdev_id %d\n", pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SUSPEND_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_PDEV_SUSPEND cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_pdev_resume(struct ath12k *ar, u32 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_resume_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_resume_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_RESUME_CMD,
						 sizeof(*cmd));
	cmd->pdev_id = cpu_to_le32(pdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI pdev resume pdev id %d\n", pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_RESUME_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_PDEV_RESUME cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

/* TODO FW Support for the cmd is not available yet.
 * Can be tested once the command and corresponding
 * event is implemented in FW
 */
int ath12k_wmi_pdev_bss_chan_info_request(struct ath12k *ar,
					  enum wmi_bss_chan_info_req_type type)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_bss_chan_info_req_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_bss_chan_info_req_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST,
						 sizeof(*cmd));
	cmd->req_type = cpu_to_le32(type);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI bss chan info req type %d\n", type);

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PDEV_BSS_CHAN_INFO_REQUEST cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_set_ap_ps_param_cmd(struct ath12k *ar, u8 *peer_addr,
					struct ath12k_wmi_ap_ps_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_ap_ps_peer_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_AP_PS_PEER_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
	cmd->param = cpu_to_le32(arg->param);
	cmd->value = cpu_to_le32(arg->value);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI set ap ps vdev id %d peer %pM param %d value %d\n",
		   arg->vdev_id, peer_addr, arg->param, arg->value);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_AP_PS_PEER_PARAM_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_AP_PS_PEER_PARAM_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_set_sta_ps_param(struct ath12k *ar, u32 vdev_id,
				u32 param, u32 param_value)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_sta_powersave_param_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_PARAM_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->param = cpu_to_le32(param);
	cmd->value = cpu_to_le32(param_value);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI set sta ps vdev_id %d param %d value %d\n",
		   vdev_id, param, param_value);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_PARAM_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_PARAM_CMDID");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_force_fw_hang_cmd(struct ath12k *ar, u32 type, u32 delay_time_ms)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_force_fw_hang_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FORCE_FW_HANG_CMD,
						 len);

	cmd->type = cpu_to_le32(type);
	cmd->delay_time_ms = cpu_to_le32(delay_time_ms);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_FORCE_FW_HANG_CMDID);

	if (ret) {
		ath12k_warn(ar->ab, "Failed to send WMI_FORCE_FW_HANG_CMDID");
		dev_kfree_skb(skb);
	}
	return ret;
}

int ath12k_wmi_vdev_set_param_cmd(struct ath12k *ar, u32 vdev_id,
				  u32 param_id, u32 param_value)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_set_param_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_PARAM_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->param_id = cpu_to_le32(param_id);
	cmd->param_value = cpu_to_le32(param_value);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev id 0x%x set param %d value %d\n",
		   vdev_id, param_id, param_value);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_PARAM_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_VDEV_SET_PARAM_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_pdev_temperature_cmd(struct ath12k *ar)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_get_pdev_temperature_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_get_pdev_temperature_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_GET_TEMPERATURE_CMD,
						 sizeof(*cmd));
	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI pdev get temperature for pdev_id %d\n", ar->pdev->pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_GET_TEMPERATURE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_PDEV_GET_TEMPERATURE cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_bcn_offload_control_cmd(struct ath12k *ar,
					    u32 vdev_id, u32 bcn_ctrl_op)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_bcn_offload_ctrl_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_bcn_offload_ctrl_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_OFFLOAD_CTRL_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->bcn_ctrl_op = cpu_to_le32(bcn_ctrl_op);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI bcn ctrl offload vdev id %d ctrl_op %d\n",
		   vdev_id, bcn_ctrl_op);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_OFFLOAD_CTRL_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_BCN_OFFLOAD_CTRL_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

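/* Upload a beacon template: the fixed command, a (currently zeroed) probe
 * response info TLV and the beacon buffer as a 4-byte aligned byte array.
 */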
int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
			struct ieee80211_mutable_offsets *offs,
			struct sk_buff *bcn)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_bcn_tmpl_cmd *cmd;
	struct ath12k_wmi_bcn_prb_info_params *bcn_prb_info;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	int ret, len;
	size_t aligned_len = roundup(bcn->len, 4);

	len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_bcn_tmpl_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_TMPL_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->tim_ie_offset = cpu_to_le32(offs->tim_offset);
	cmd->csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[0]);
	cmd->ext_csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[1]);
	cmd->buf_len = cpu_to_le32(bcn->len);

	ptr = skb->data + sizeof(*cmd);

	bcn_prb_info = ptr;
	len = sizeof(*bcn_prb_info);
	bcn_prb_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_PRB_INFO,
							  len);
	bcn_prb_info->caps = 0;
	bcn_prb_info->erp = 0;

	ptr += sizeof(*bcn_prb_info);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
	memcpy(tlv->value, bcn->data, bcn->len);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_TMPL_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_BCN_TMPL_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

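/* Program a peer key into the firmware. The key material travels in a
 * byte-array TLV padded to a 4-byte boundary; the real key length is carried
 * separately in cmd->key_len.
 */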
int ath12k_wmi_vdev_install_key(struct ath12k *ar,
				struct wmi_vdev_install_key_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_install_key_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	int ret, len, key_len_aligned;

	/* WMI_TAG_ARRAY_BYTE needs to be aligned with 4, the actual key
	 * length is specified in cmd->key_len.
	 */
	key_len_aligned = roundup(arg->key_len, 4);

	len = sizeof(*cmd) + TLV_HDR_SIZE + key_len_aligned;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_INSTALL_KEY_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
	cmd->key_idx = cpu_to_le32(arg->key_idx);
	cmd->key_flags = cpu_to_le32(arg->key_flags);
	cmd->key_cipher = cpu_to_le32(arg->key_cipher);
	cmd->key_len = cpu_to_le32(arg->key_len);
	cmd->key_txmic_len = cpu_to_le32(arg->key_txmic_len);
	cmd->key_rxmic_len = cpu_to_le32(arg->key_rxmic_len);

	if (arg->key_rsc_counter)
		cmd->key_rsc_counter = cpu_to_le64(arg->key_rsc_counter);

	tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, key_len_aligned);
	memcpy(tlv->value, arg->key_data, arg->key_len);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev install key idx %d cipher %d len %d\n",
		   arg->key_idx, arg->key_cipher, arg->key_len);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_INSTALL_KEY_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_VDEV_INSTALL_KEY cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

static void ath12k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
				       struct ath12k_wmi_peer_assoc_arg *arg,
				       bool hw_crypto_disabled)
{
	cmd->peer_flags = 0;
	cmd->peer_flags_ext = 0;

	if (arg->is_wme_set) {
		if (arg->qos_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_QOS);
		if (arg->apsd_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_APSD);
		if (arg->ht_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_HT);
		if (arg->bw_40)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_40MHZ);
		if (arg->bw_80)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_80MHZ);
		if (arg->bw_160)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_160MHZ);
		if (arg->bw_320)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_EXT_320MHZ);

		/* Typically if STBC is enabled for VHT it should be enabled
		 * for HT as well
		 */
		if (arg->stbc_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_STBC);

		/* Typically if LDPC is enabled for VHT it should be enabled
		 * for HT as well
		 */
		if (arg->ldpc_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_LDPC);

		if (arg->static_mimops_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_STATIC_MIMOPS);
		if (arg->dynamic_mimops_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_DYN_MIMOPS);
		if (arg->spatial_mux_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_SPATIAL_MUX);
		if (arg->vht_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_VHT);
		if (arg->he_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_HE);
		if (arg->twt_requester)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_REQ);
		if (arg->twt_responder)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_RESP);
		if (arg->eht_flag)
			cmd->peer_flags_ext |= cpu_to_le32(WMI_PEER_EXT_EHT);
	}

	/* Suppress authorization for all AUTH modes that need 4-way handshake
	 * (during re-association).
	 * Authorization will be done for these modes on key installation.
	 */
	if (arg->auth_flag)
		cmd->peer_flags |= cpu_to_le32(WMI_PEER_AUTH);
	if (arg->need_ptk_4_way) {
		cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_PTK_4_WAY);
		if (!hw_crypto_disabled)
			cmd->peer_flags &= cpu_to_le32(~WMI_PEER_AUTH);
	}
	if (arg->need_gtk_2_way)
		cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_GTK_2_WAY);
	/* safe mode bypasses the 4-way handshake */
	if (arg->safe_mode_enabled)
		cmd->peer_flags &= cpu_to_le32(~(WMI_PEER_NEED_PTK_4_WAY |
						 WMI_PEER_NEED_GTK_2_WAY));

	if (arg->is_pmf_enabled)
		cmd->peer_flags |= cpu_to_le32(WMI_PEER_PMF);

	/* Disable AMSDU for station transmit, if user configures it.
	 * Disable AMSDU for AP transmit to 11n stations, if user configures
	 * it:
	 * if (arg->amsdu_disable) - add after FW support
	 */

	/* Target asserts if node is marked HT and all MCS is set to 0.
	 * Mark the node as non-HT if all the mcs rates are disabled through
	 * iwpriv
	 */
	if (arg->peer_ht_rates.num_rates == 0)
		cmd->peer_flags &= cpu_to_le32(~WMI_PEER_HT);
}

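/* Send the peer association command. The TLV order is fixed: the fixed
 * command struct, legacy rates (byte array), HT rates (byte array), one VHT
 * rate-set struct, the HE rate-set array, a zero-length MLO header array,
 * the EHT rate-set array and a zero-length ML partner-link array. The buffer
 * length computed below must match this layout exactly.
 */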
int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
				   struct ath12k_wmi_peer_assoc_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_assoc_complete_cmd *cmd;
	struct ath12k_wmi_vht_rate_set_params *mcs;
	struct ath12k_wmi_he_rate_set_params *he_mcs;
	struct ath12k_wmi_eht_rate_set_params *eht_mcs;
	struct sk_buff *skb;
	struct wmi_tlv *tlv;
	void *ptr;
	u32 peer_legacy_rates_align;
	u32 peer_ht_rates_align;
	int i, ret, len;

	peer_legacy_rates_align = roundup(arg->peer_legacy_rates.num_rates,
					  sizeof(u32));
	peer_ht_rates_align = roundup(arg->peer_ht_rates.num_rates,
				      sizeof(u32));

	len = sizeof(*cmd) +
	      TLV_HDR_SIZE + (peer_legacy_rates_align * sizeof(u8)) +
	      TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(u8)) +
	      sizeof(*mcs) + TLV_HDR_SIZE +
	      (sizeof(*he_mcs) * arg->peer_he_mcs_count) +
	      TLV_HDR_SIZE + (sizeof(*eht_mcs) * arg->peer_eht_mcs_count) +
	      TLV_HDR_SIZE + TLV_HDR_SIZE;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	ptr = skb->data;

	cmd = ptr;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_ASSOC_COMPLETE_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(arg->vdev_id);

	cmd->peer_new_assoc = cpu_to_le32(arg->peer_new_assoc);
	cmd->peer_associd = cpu_to_le32(arg->peer_associd);
	cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap);

	ath12k_wmi_copy_peer_flags(cmd, arg,
				   test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED,
					    &ar->ab->dev_flags));

	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_mac);

	cmd->peer_rate_caps = cpu_to_le32(arg->peer_rate_caps);
	cmd->peer_caps = cpu_to_le32(arg->peer_caps);
	cmd->peer_listen_intval = cpu_to_le32(arg->peer_listen_intval);
	cmd->peer_ht_caps = cpu_to_le32(arg->peer_ht_caps);
	cmd->peer_max_mpdu = cpu_to_le32(arg->peer_max_mpdu);
	cmd->peer_mpdu_density = cpu_to_le32(arg->peer_mpdu_density);
	cmd->peer_vht_caps = cpu_to_le32(arg->peer_vht_caps);
	cmd->peer_phymode = cpu_to_le32(arg->peer_phymode);

	/* Update 11ax capabilities */
	cmd->peer_he_cap_info = cpu_to_le32(arg->peer_he_cap_macinfo[0]);
	cmd->peer_he_cap_info_ext = cpu_to_le32(arg->peer_he_cap_macinfo[1]);
	cmd->peer_he_cap_info_internal = cpu_to_le32(arg->peer_he_cap_macinfo_internal);
	cmd->peer_he_caps_6ghz = cpu_to_le32(arg->peer_he_caps_6ghz);
	cmd->peer_he_ops = cpu_to_le32(arg->peer_he_ops);
	for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
		cmd->peer_he_cap_phy[i] =
			cpu_to_le32(arg->peer_he_cap_phyinfo[i]);
	cmd->peer_ppet.numss_m1 = cpu_to_le32(arg->peer_ppet.numss_m1);
	cmd->peer_ppet.ru_info = cpu_to_le32(arg->peer_ppet.ru_bit_mask);
	for (i = 0; i < WMI_MAX_NUM_SS; i++)
		cmd->peer_ppet.ppet16_ppet8_ru3_ru0[i] =
			cpu_to_le32(arg->peer_ppet.ppet16_ppet8_ru3_ru0[i]);

	/* Update 11be capabilities */
	memcpy_and_pad(cmd->peer_eht_cap_mac, sizeof(cmd->peer_eht_cap_mac),
		       arg->peer_eht_cap_mac, sizeof(arg->peer_eht_cap_mac),
		       0);
	memcpy_and_pad(cmd->peer_eht_cap_phy, sizeof(cmd->peer_eht_cap_phy),
		       arg->peer_eht_cap_phy, sizeof(arg->peer_eht_cap_phy),
		       0);
	memcpy_and_pad(&cmd->peer_eht_ppet, sizeof(cmd->peer_eht_ppet),
		       &arg->peer_eht_ppet, sizeof(arg->peer_eht_ppet), 0);

	/* Update peer legacy rate information */
	ptr += sizeof(*cmd);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, peer_legacy_rates_align);

	ptr += TLV_HDR_SIZE;

	cmd->num_peer_legacy_rates = cpu_to_le32(arg->peer_legacy_rates.num_rates);
	memcpy(ptr, arg->peer_legacy_rates.rates,
	       arg->peer_legacy_rates.num_rates);

	/* Update peer HT rate information */
	ptr += peer_legacy_rates_align;

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, peer_ht_rates_align);
	ptr += TLV_HDR_SIZE;
	cmd->num_peer_ht_rates = cpu_to_le32(arg->peer_ht_rates.num_rates);
	memcpy(ptr, arg->peer_ht_rates.rates,
	       arg->peer_ht_rates.num_rates);

	/* VHT Rates */
	ptr += peer_ht_rates_align;

	mcs = ptr;

	mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VHT_RATE_SET,
						 sizeof(*mcs));

	cmd->peer_nss = cpu_to_le32(arg->peer_nss);

	/* Update bandwidth-NSS mapping */
	cmd->peer_bw_rxnss_override = 0;
	cmd->peer_bw_rxnss_override |= cpu_to_le32(arg->peer_bw_rxnss_override);

	if (arg->vht_capable) {
		mcs->rx_max_rate = cpu_to_le32(arg->rx_max_rate);
		mcs->rx_mcs_set = cpu_to_le32(arg->rx_mcs_set);
		mcs->tx_max_rate = cpu_to_le32(arg->tx_max_rate);
		mcs->tx_mcs_set = cpu_to_le32(arg->tx_mcs_set);
	}

	/* HE Rates */
	cmd->peer_he_mcs = cpu_to_le32(arg->peer_he_mcs_count);
	cmd->min_data_rate = cpu_to_le32(arg->min_data_rate);

	ptr += sizeof(*mcs);

	len = arg->peer_he_mcs_count * sizeof(*he_mcs);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
	ptr += TLV_HDR_SIZE;

	/* Loop through the HE rate set */
	for (i = 0; i < arg->peer_he_mcs_count; i++) {
		he_mcs = ptr;
		he_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HE_RATE_SET,
							    sizeof(*he_mcs));

		he_mcs->rx_mcs_set = cpu_to_le32(arg->peer_he_rx_mcs_set[i]);
		he_mcs->tx_mcs_set = cpu_to_le32(arg->peer_he_tx_mcs_set[i]);
		ptr += sizeof(*he_mcs);
	}

	/* MLO header tag with 0 length */
	len = 0;
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
	ptr += TLV_HDR_SIZE;

	/* Loop through the EHT rate set */
	len = arg->peer_eht_mcs_count * sizeof(*eht_mcs);
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
	ptr += TLV_HDR_SIZE;

	for (i = 0; i < arg->peer_eht_mcs_count; i++) {
		eht_mcs = ptr;
		eht_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HE_RATE_SET,
							     sizeof(*eht_mcs));

		eht_mcs->rx_mcs_set = cpu_to_le32(arg->peer_eht_rx_mcs_set[i]);
		eht_mcs->tx_mcs_set = cpu_to_le32(arg->peer_eht_tx_mcs_set[i]);
		ptr += sizeof(*eht_mcs);
	}

	/* ML partner links tag with 0 length */
	len = 0;
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
	ptr += TLV_HDR_SIZE;

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x peer_flags_ext %x eht mac_cap %x %x eht phy_cap %x %x %x\n",
		   cmd->vdev_id, cmd->peer_associd, arg->peer_mac,
		   cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps,
		   cmd->peer_listen_intval, cmd->peer_ht_caps,
		   cmd->peer_max_mpdu, cmd->peer_nss, cmd->peer_phymode,
		   cmd->peer_mpdu_density,
		   cmd->peer_vht_caps, cmd->peer_he_cap_info,
		   cmd->peer_he_ops, cmd->peer_he_cap_info_ext,
		   cmd->peer_he_cap_phy[0], cmd->peer_he_cap_phy[1],
		   cmd->peer_he_cap_phy[2],
		   cmd->peer_bw_rxnss_override, cmd->peer_flags_ext,
		   cmd->peer_eht_cap_mac[0], cmd->peer_eht_cap_mac[1],
		   cmd->peer_eht_cap_phy[0], cmd->peer_eht_cap_phy[1],
		   cmd->peer_eht_cap_phy[2]);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_ASSOC_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PEER_ASSOC_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

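/* Fill a scan request with driver defaults (dwell and rest times, assumed to
 * be in milliseconds as elsewhere in this file, plus the default scan event
 * mask). Callers override individual fields afterwards as needed.
 */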
void ath12k_wmi_start_scan_init(struct ath12k *ar,
				struct ath12k_wmi_scan_req_arg *arg)
{
	/* setup commonly used values */
	arg->scan_req_id = 1;
	arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
	arg->dwell_time_active = 50;
	arg->dwell_time_active_2g = 0;
	arg->dwell_time_passive = 150;
	arg->dwell_time_active_6g = 40;
	arg->dwell_time_passive_6g = 30;
	arg->min_rest_time = 50;
	arg->max_rest_time = 500;
	arg->repeat_probe_time = 0;
	arg->probe_spacing_time = 0;
	arg->idle_time = 0;
	arg->max_scan_time = 20000;
	arg->probe_delay = 5;
	arg->notify_scan_events = WMI_SCAN_EVENT_STARTED |
				  WMI_SCAN_EVENT_COMPLETED |
				  WMI_SCAN_EVENT_BSS_CHANNEL |
				  WMI_SCAN_EVENT_FOREIGN_CHAN |
				  WMI_SCAN_EVENT_DEQUEUED;
	arg->scan_flags |= WMI_SCAN_CHAN_STAT_EVENT;
	arg->num_bssid = 1;

	/* fill bssid_list[0] with 0xff, otherwise bssid and RA will be
	 * ZEROs in probe request
	 */
	eth_broadcast_addr(arg->bssid_list[0].addr);
}

static void ath12k_wmi_copy_scan_event_cntrl_flags(struct wmi_start_scan_cmd *cmd,
						   struct ath12k_wmi_scan_req_arg *arg)
{
	/* Scan events subscription */
	if (arg->scan_ev_started)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_STARTED);
	if (arg->scan_ev_completed)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_COMPLETED);
	if (arg->scan_ev_bss_chan)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_BSS_CHANNEL);
	if (arg->scan_ev_foreign_chan)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_FOREIGN_CHAN);
	if (arg->scan_ev_dequeued)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_DEQUEUED);
	if (arg->scan_ev_preempted)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_PREEMPTED);
	if (arg->scan_ev_start_failed)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_START_FAILED);
	if (arg->scan_ev_restarted)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_RESTARTED);
	if (arg->scan_ev_foreign_chn_exit)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT);
	if (arg->scan_ev_suspended)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_SUSPENDED);
	if (arg->scan_ev_resumed)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_RESUMED);

	/* Set scan control flags */
	cmd->scan_ctrl_flags = 0;
	if (arg->scan_f_passive)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_PASSIVE);
	if (arg->scan_f_strict_passive_pch)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN);
	if (arg->scan_f_promisc_mode)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FILTER_PROMISCUOS);
	if (arg->scan_f_capture_phy_err)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_CAPTURE_PHY_ERROR);
	if (arg->scan_f_half_rate)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_HALF_RATE_SUPPORT);
	if (arg->scan_f_quarter_rate)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT);
	if (arg->scan_f_cck_rates)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_CCK_RATES);
	if (arg->scan_f_ofdm_rates)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_OFDM_RATES);
	if (arg->scan_f_chan_stat_evnt)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_CHAN_STAT_EVENT);
	if (arg->scan_f_filter_prb_req)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ);
	if (arg->scan_f_bcast_probe)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_BCAST_PROBE_REQ);
	if (arg->scan_f_offchan_mgmt_tx)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_OFFCHAN_MGMT_TX);
	if (arg->scan_f_offchan_data_tx)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_OFFCHAN_DATA_TX);
	if (arg->scan_f_force_active_dfs_chn)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS);
	if (arg->scan_f_add_tpc_ie_in_probe)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ);
	if (arg->scan_f_add_ds_ie_in_probe)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ);
	if (arg->scan_f_add_spoofed_mac_in_probe)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ);
	if (arg->scan_f_add_rand_seq_in_probe)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ);
	if (arg->scan_f_en_ie_whitelist_in_probe)
		cmd->scan_ctrl_flags |=
			cpu_to_le32(WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ);

	cmd->scan_ctrl_flags |= le32_encode_bits(arg->adaptive_dwell_time_mode,
						 WMI_SCAN_DWELL_MODE_MASK);
}

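/* Build and send WMI_START_SCAN_CMDID. The variable part of the message is a
 * sequence of TLVs: channel list, SSID list, BSSID list, extra IEs and the
 * optional short-SSID/BSSID hint arrays. Empty TLV headers are still emitted
 * for the mandatory arrays so the firmware parser stays in sync.
 */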
int ath12k_wmi_send_scan_start_cmd(struct ath12k *ar,
				   struct ath12k_wmi_scan_req_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_start_scan_cmd *cmd;
	struct ath12k_wmi_ssid_params *ssid = NULL;
	struct ath12k_wmi_mac_addr_params *bssid;
	struct sk_buff *skb;
	struct wmi_tlv *tlv;
	void *ptr;
	int i, ret, len;
	u32 *tmp_ptr, extraie_len_with_pad = 0;
	struct ath12k_wmi_hint_short_ssid_arg *s_ssid = NULL;
	struct ath12k_wmi_hint_bssid_arg *hint_bssid = NULL;

	len = sizeof(*cmd);

	len += TLV_HDR_SIZE;
	if (arg->num_chan)
		len += arg->num_chan * sizeof(u32);

	len += TLV_HDR_SIZE;
	if (arg->num_ssids)
		len += arg->num_ssids * sizeof(*ssid);

	len += TLV_HDR_SIZE;
	if (arg->num_bssid)
		len += sizeof(*bssid) * arg->num_bssid;

	if (arg->num_hint_bssid)
		len += TLV_HDR_SIZE +
		       arg->num_hint_bssid * sizeof(*hint_bssid);

	if (arg->num_hint_s_ssid)
		len += TLV_HDR_SIZE +
		       arg->num_hint_s_ssid * sizeof(*s_ssid);

	len += TLV_HDR_SIZE;
	if (arg->extraie.len)
		extraie_len_with_pad =
			roundup(arg->extraie.len, sizeof(u32));
	if (extraie_len_with_pad <= (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len)) {
		len += extraie_len_with_pad;
	} else {
		ath12k_warn(ar->ab, "discard large size %d bytes extraie for scan start\n",
			    arg->extraie.len);
		extraie_len_with_pad = 0;
	}

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	ptr = skb->data;

	cmd = ptr;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_START_SCAN_CMD,
						 sizeof(*cmd));

	cmd->scan_id = cpu_to_le32(arg->scan_id);
	cmd->scan_req_id = cpu_to_le32(arg->scan_req_id);
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	cmd->scan_priority = cpu_to_le32(arg->scan_priority);
	cmd->notify_scan_events = cpu_to_le32(arg->notify_scan_events);

	ath12k_wmi_copy_scan_event_cntrl_flags(cmd, arg);

	cmd->dwell_time_active = cpu_to_le32(arg->dwell_time_active);
	cmd->dwell_time_active_2g = cpu_to_le32(arg->dwell_time_active_2g);
	cmd->dwell_time_passive = cpu_to_le32(arg->dwell_time_passive);
	cmd->dwell_time_active_6g = cpu_to_le32(arg->dwell_time_active_6g);
	cmd->dwell_time_passive_6g = cpu_to_le32(arg->dwell_time_passive_6g);
	cmd->min_rest_time = cpu_to_le32(arg->min_rest_time);
	cmd->max_rest_time = cpu_to_le32(arg->max_rest_time);
	cmd->repeat_probe_time = cpu_to_le32(arg->repeat_probe_time);
	cmd->probe_spacing_time = cpu_to_le32(arg->probe_spacing_time);
	cmd->idle_time = cpu_to_le32(arg->idle_time);
	cmd->max_scan_time = cpu_to_le32(arg->max_scan_time);
	cmd->probe_delay = cpu_to_le32(arg->probe_delay);
	cmd->burst_duration = cpu_to_le32(arg->burst_duration);
	cmd->num_chan = cpu_to_le32(arg->num_chan);
	cmd->num_bssid = cpu_to_le32(arg->num_bssid);
	cmd->num_ssids = cpu_to_le32(arg->num_ssids);
	cmd->ie_len = cpu_to_le32(arg->extraie.len);
	cmd->n_probes = cpu_to_le32(arg->n_probes);

	ptr += sizeof(*cmd);

	len = arg->num_chan * sizeof(u32);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, len);
	ptr += TLV_HDR_SIZE;
	tmp_ptr = (u32 *)ptr;

	memcpy(tmp_ptr, arg->chan_list, arg->num_chan * 4);

	ptr += len;

	len = arg->num_ssids * sizeof(*ssid);
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);

	ptr += TLV_HDR_SIZE;

	if (arg->num_ssids) {
		ssid = ptr;
		for (i = 0; i < arg->num_ssids; ++i) {
			ssid->ssid_len = cpu_to_le32(arg->ssid[i].ssid_len);
			memcpy(ssid->ssid, arg->ssid[i].ssid,
			       arg->ssid[i].ssid_len);
			ssid++;
		}
	}

	ptr += (arg->num_ssids * sizeof(*ssid));
	len = arg->num_bssid * sizeof(*bssid);
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);

	ptr += TLV_HDR_SIZE;
	bssid = ptr;

	if (arg->num_bssid) {
		for (i = 0; i < arg->num_bssid; ++i) {
			ether_addr_copy(bssid->addr,
					arg->bssid_list[i].addr);
			bssid++;
		}
	}

	ptr += arg->num_bssid * sizeof(*bssid);

	len = extraie_len_with_pad;
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len);
	ptr += TLV_HDR_SIZE;

	if (extraie_len_with_pad)
		memcpy(ptr, arg->extraie.ptr,
		       arg->extraie.len);

	ptr += extraie_len_with_pad;

	if (arg->num_hint_s_ssid) {
		len = arg->num_hint_s_ssid * sizeof(*s_ssid);
		tlv = ptr;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
		ptr += TLV_HDR_SIZE;
		s_ssid = ptr;
		for (i = 0; i < arg->num_hint_s_ssid; ++i) {
			s_ssid->freq_flags = arg->hint_s_ssid[i].freq_flags;
			s_ssid->short_ssid = arg->hint_s_ssid[i].short_ssid;
			s_ssid++;
		}
		ptr += len;
	}

	if (arg->num_hint_bssid) {
		len = arg->num_hint_bssid * sizeof(struct ath12k_wmi_hint_bssid_arg);
		tlv = ptr;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
		ptr += TLV_HDR_SIZE;
		hint_bssid = ptr;
		for (i = 0; i < arg->num_hint_bssid; ++i) {
			hint_bssid->freq_flags =
				arg->hint_bssid[i].freq_flags;
			/* copy from the request argument into the command buffer */
			ether_addr_copy(&hint_bssid->bssid.addr[0],
					&arg->hint_bssid[i].bssid.addr[0]);
			hint_bssid++;
		}
	}

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_START_SCAN_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_START_SCAN_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_scan_stop_cmd(struct ath12k *ar,
				  struct ath12k_wmi_scan_cancel_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_stop_scan_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_stop_scan_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STOP_SCAN_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	cmd->requestor = cpu_to_le32(arg->requester);
	cmd->scan_id = cpu_to_le32(arg->scan_id);
	cmd->pdev_id = cpu_to_le32(arg->pdev_id);
	/* stop the scan with the corresponding scan_id */
	if (arg->req_type == WLAN_SCAN_CANCEL_PDEV_ALL) {
		/* Cancelling all scans */
		cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_ALL);
	} else if (arg->req_type == WLAN_SCAN_CANCEL_VDEV_ALL) {
		/* Cancelling VAP scans */
		cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_VAP_ALL);
	} else if (arg->req_type == WLAN_SCAN_CANCEL_SINGLE) {
		/* Cancelling specific scan */
		cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_ONE);
	} else {
		ath12k_warn(ar->ab, "invalid scan cancel req_type %d",
			    arg->req_type);
		dev_kfree_skb(skb);
		return -EINVAL;
	}

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_STOP_SCAN_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_STOP_SCAN_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

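/* Push the scan channel list to the firmware. Lists that do not fit into a
 * single WMI message (bounded by max_msg_len) are split across several
 * WMI_SCAN_CHAN_LIST_CMDID commands; all but the first set the append flag
 * so the firmware accumulates the chunks.
 */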
int ath12k_wmi_send_scan_chan_list_cmd(struct ath12k *ar,
				       struct ath12k_wmi_scan_chan_list_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_scan_chan_list_cmd *cmd;
	struct sk_buff *skb;
	struct ath12k_wmi_channel_params *chan_info;
	struct ath12k_wmi_channel_arg *channel_arg;
	struct wmi_tlv *tlv;
	void *ptr;
	int i, ret, len;
	u16 num_send_chans, num_sends = 0, max_chan_limit = 0;
	__le32 *reg1, *reg2;

	channel_arg = &arg->channel[0];
	while (arg->nallchans) {
		len = sizeof(*cmd) + TLV_HDR_SIZE;
		max_chan_limit = (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len) /
			sizeof(*chan_info);

		num_send_chans = min(arg->nallchans, max_chan_limit);

		arg->nallchans -= num_send_chans;
		len += sizeof(*chan_info) * num_send_chans;

		skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
		if (!skb)
			return -ENOMEM;

		cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
		cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SCAN_CHAN_LIST_CMD,
							 sizeof(*cmd));
		cmd->pdev_id = cpu_to_le32(arg->pdev_id);
		cmd->num_scan_chans = cpu_to_le32(num_send_chans);
		if (num_sends)
			cmd->flags |= cpu_to_le32(WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG);

		ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
			   "WMI no.of chan = %d len = %d pdev_id = %d num_sends = %d\n",
			   num_send_chans, len, cmd->pdev_id, num_sends);

		ptr = skb->data + sizeof(*cmd);

		len = sizeof(*chan_info) * num_send_chans;
		tlv = ptr;
		tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_STRUCT,
						     len);
		ptr += TLV_HDR_SIZE;

		for (i = 0; i < num_send_chans; ++i) {
			chan_info = ptr;
			memset(chan_info, 0, sizeof(*chan_info));
			len = sizeof(*chan_info);
			chan_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL,
								       len);

			reg1 = &chan_info->reg_info_1;
			reg2 = &chan_info->reg_info_2;
			chan_info->mhz = cpu_to_le32(channel_arg->mhz);
			chan_info->band_center_freq1 = cpu_to_le32(channel_arg->cfreq1);
			chan_info->band_center_freq2 = cpu_to_le32(channel_arg->cfreq2);

			if (channel_arg->is_chan_passive)
				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE);
			if (channel_arg->allow_he)
				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE);
			else if (channel_arg->allow_vht)
				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT);
			else if (channel_arg->allow_ht)
				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT);
			if (channel_arg->half_rate)
				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_HALF_RATE);
			if (channel_arg->quarter_rate)
				chan_info->info |=
					cpu_to_le32(WMI_CHAN_INFO_QUARTER_RATE);

			if (channel_arg->psc_channel)
				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_PSC);

			if (channel_arg->dfs_set)
				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_DFS);

			chan_info->info |= le32_encode_bits(channel_arg->phy_mode,
							    WMI_CHAN_INFO_MODE);
			*reg1 |= le32_encode_bits(channel_arg->minpower,
						  WMI_CHAN_REG_INFO1_MIN_PWR);
			*reg1 |= le32_encode_bits(channel_arg->maxpower,
						  WMI_CHAN_REG_INFO1_MAX_PWR);
			*reg1 |= le32_encode_bits(channel_arg->maxregpower,
						  WMI_CHAN_REG_INFO1_MAX_REG_PWR);
			*reg1 |= le32_encode_bits(channel_arg->reg_class_id,
						  WMI_CHAN_REG_INFO1_REG_CLS);
			*reg2 |= le32_encode_bits(channel_arg->antennamax,
						  WMI_CHAN_REG_INFO2_ANT_MAX);

			ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
				   "WMI chan scan list chan[%d] = %u, chan_info->info %8x\n",
				   i, chan_info->mhz, chan_info->info);

			ptr += sizeof(*chan_info);

			channel_arg++;
		}

		ret = ath12k_wmi_cmd_send(wmi, skb, WMI_SCAN_CHAN_LIST_CMDID);
		if (ret) {
			ath12k_warn(ar->ab, "failed to send WMI_SCAN_CHAN_LIST cmd\n");
			dev_kfree_skb(skb);
			return ret;
		}

		num_sends++;
	}

	return 0;
}

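/* Configure per-AC WMM parameters (AIFS, CWmin/CWmax, TXOP, ACM, no-ack) for
 * a vdev. One wmi_wmm_params struct is embedded per access category.
 */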
int ath12k_wmi_send_wmm_update_cmd(struct ath12k *ar, u32 vdev_id,
				   struct wmi_wmm_params_all_arg *param)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_set_wmm_params_cmd *cmd;
	struct wmi_wmm_params *wmm_param;
	struct wmi_wmm_params_arg *wmi_wmm_arg;
	struct sk_buff *skb;
	int ret, ac;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_set_wmm_params_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->wmm_param_type = 0;

	for (ac = 0; ac < WME_NUM_AC; ac++) {
		switch (ac) {
		case WME_AC_BE:
			wmi_wmm_arg = &param->ac_be;
			break;
		case WME_AC_BK:
			wmi_wmm_arg = &param->ac_bk;
			break;
		case WME_AC_VI:
			wmi_wmm_arg = &param->ac_vi;
			break;
		case WME_AC_VO:
			wmi_wmm_arg = &param->ac_vo;
			break;
		}

		wmm_param = (struct wmi_wmm_params *)&cmd->wmm_params[ac];
		wmm_param->tlv_header =
			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
					       sizeof(*wmm_param));

		wmm_param->aifs = cpu_to_le32(wmi_wmm_arg->aifs);
		wmm_param->cwmin = cpu_to_le32(wmi_wmm_arg->cwmin);
		wmm_param->cwmax = cpu_to_le32(wmi_wmm_arg->cwmax);
		wmm_param->txoplimit = cpu_to_le32(wmi_wmm_arg->txop);
		wmm_param->acm = cpu_to_le32(wmi_wmm_arg->acm);
		wmm_param->no_ack = cpu_to_le32(wmi_wmm_arg->no_ack);

		ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
			   "wmi wmm set ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n",
			   ac, wmm_param->aifs, wmm_param->cwmin,
			   wmm_param->cwmax, wmm_param->txoplimit,
			   wmm_param->acm, wmm_param->no_ack);
	}
	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_VDEV_SET_WMM_PARAMS_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_VDEV_SET_WMM_PARAMS_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath12k *ar,
						  u32 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_dfs_phyerr_offload_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_dfs_phyerr_offload_cmd *)skb->data;
	cmd->tlv_header =
		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD,
				       sizeof(*cmd));

	cmd->pdev_id = cpu_to_le32(pdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI dfs phy err offload enable pdev id %d\n", pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_delba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
			  u32 tid, u32 initiator, u32 reason)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_delba_send_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_delba_send_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DELBA_SEND_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	ether_addr_copy(cmd->peer_macaddr.addr, mac);
	cmd->tid = cpu_to_le32(tid);
	cmd->initiator = cpu_to_le32(initiator);
	cmd->reasoncode = cpu_to_le32(reason);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
		   vdev_id, mac, tid, initiator, reason);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_DELBA_SEND_CMDID);

	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_DELBA_SEND_CMDID cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_addba_set_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac,
			      u32 tid, u32 status)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_addba_setresponse_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
	cmd->tlv_header =
		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_SETRESPONSE_CMD,
				       sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	ether_addr_copy(cmd->peer_macaddr.addr, mac);
	cmd->tid = cpu_to_le32(tid);
	cmd->statuscode = cpu_to_le32(status);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
		   vdev_id, mac, tid, status);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SET_RESP_CMDID);

	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_ADDBA_SET_RESP_CMDID cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_addba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
			  u32 tid, u32 buf_size)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_addba_send_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_addba_send_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_SEND_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	ether_addr_copy(cmd->peer_macaddr.addr, mac);
	cmd->tid = cpu_to_le32(tid);
	cmd->buffersize = cpu_to_le32(buf_size);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
		   vdev_id, mac, tid, buf_size);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SEND_CMDID);

	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_ADDBA_SEND_CMDID cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_addba_clear_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_addba_clear_resp_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
	cmd->tlv_header =
		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_CLEAR_RESP_CMD,
				       sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	ether_addr_copy(cmd->peer_macaddr.addr, mac);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n",
		   vdev_id, mac);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_CLEAR_RESP_CMDID);

	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_ADDBA_CLEAR_RESP_CMDID cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

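/* Set the initial country/regulatory hint. Exactly one of alpha2 string,
 * country code or regdomain id is sent, selected by arg->flags.
 */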
int ath12k_wmi_send_init_country_cmd(struct ath12k *ar,
				     struct ath12k_wmi_init_country_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_init_country_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_init_country_cmd *)skb->data;
	cmd->tlv_header =
		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_INIT_COUNTRY_CMD,
				       sizeof(*cmd));

	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);

	switch (arg->flags) {
	case ALPHA_IS_SET:
		cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_ALPHA);
		memcpy(&cmd->cc_info.alpha2, arg->cc_info.alpha2, 3);
		break;
	case CC_IS_SET:
		cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE);
		cmd->cc_info.country_code =
			cpu_to_le32(arg->cc_info.country_code);
		break;
	case REGDMN_IS_SET:
		cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_REGDOMAIN);
		cmd->cc_info.regdom_id = cpu_to_le32(arg->cc_info.regdom_id);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_SET_INIT_COUNTRY_CMDID);

out:
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_SET_INIT_COUNTRY CMD :%d\n",
			    ret);
		dev_kfree_skb(skb);
	}

	return ret;
}

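/* Enable TWT (Target Wake Time) on a pdev using the driver's default
 * ATH12K_TWT_DEF_* tuning parameters.
 */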
int
ath12k_wmi_send_twt_enable_cmd(struct ath12k *ar, u32 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_twt_enable_params_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_twt_enable_params_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TWT_ENABLE_CMD,
						 len);
	cmd->pdev_id = cpu_to_le32(pdev_id);
	cmd->sta_cong_timer_ms = cpu_to_le32(ATH12K_TWT_DEF_STA_CONG_TIMER_MS);
	cmd->default_slot_size = cpu_to_le32(ATH12K_TWT_DEF_DEFAULT_SLOT_SIZE);
	cmd->congestion_thresh_setup =
		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_SETUP);
	cmd->congestion_thresh_teardown =
		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_TEARDOWN);
	cmd->congestion_thresh_critical =
		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_CRITICAL);
	cmd->interference_thresh_teardown =
		cpu_to_le32(ATH12K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN);
	cmd->interference_thresh_setup =
		cpu_to_le32(ATH12K_TWT_DEF_INTERFERENCE_THRESH_SETUP);
	cmd->min_no_sta_setup = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_STA_SETUP);
	cmd->min_no_sta_teardown = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_STA_TEARDOWN);
	cmd->no_of_bcast_mcast_slots =
		cpu_to_le32(ATH12K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS);
	cmd->min_no_twt_slots = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_TWT_SLOTS);
	cmd->max_no_sta_twt = cpu_to_le32(ATH12K_TWT_DEF_MAX_NO_STA_TWT);
	cmd->mode_check_interval = cpu_to_le32(ATH12K_TWT_DEF_MODE_CHECK_INTERVAL);
	cmd->add_sta_slot_interval = cpu_to_le32(ATH12K_TWT_DEF_ADD_STA_SLOT_INTERVAL);
	cmd->remove_sta_slot_interval =
		cpu_to_le32(ATH12K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL);
	/* TODO add MBSSID support */
	cmd->mbss_support = 0;

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_TWT_ENABLE_CMDID);
	if (ret) {
		ath12k_warn(ab, "failed to send WMI_TWT_ENABLE_CMDID\n");
		dev_kfree_skb(skb);
	}
	return ret;
}

int
ath12k_wmi_send_twt_disable_cmd(struct ath12k *ar, u32 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_twt_disable_params_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_twt_disable_params_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TWT_DISABLE_CMD,
						 len);
	cmd->pdev_id = cpu_to_le32(pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_TWT_DISABLE_CMDID);
	if (ret) {
		ath12k_warn(ab, "failed to send WMI_TWT_DISABLE_CMDID\n");
		dev_kfree_skb(skb);
	}
	return ret;
}

int
ath12k_wmi_send_obss_spr_cmd(struct ath12k *ar, u32 vdev_id,
			     struct ieee80211_he_obss_pd *he_obss_pd)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_obss_spatial_reuse_params_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_obss_spatial_reuse_params_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD,
						 len);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->enable = cpu_to_le32(he_obss_pd->enable);
	cmd->obss_min = a_cpu_to_sle32(he_obss_pd->min_offset);
	cmd->obss_max = a_cpu_to_sle32(he_obss_pd->max_offset);

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID);
	if (ret) {
		ath12k_warn(ab,
			    "failed to send WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID\n");
		dev_kfree_skb(skb);
	}
	return ret;
}

int ath12k_wmi_obss_color_cfg_cmd(struct ath12k *ar, u32 vdev_id,
				  u8 bss_color, u32 period,
				  bool enable)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_obss_color_collision_cfg_params_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_obss_color_collision_cfg_params_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_OBSS_COLOR_COLLISION_DET_CONFIG,
						 len);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->evt_type = enable ? cpu_to_le32(ATH12K_OBSS_COLOR_COLLISION_DETECTION) :
		cpu_to_le32(ATH12K_OBSS_COLOR_COLLISION_DETECTION_DISABLE);
	cmd->current_bss_color = cpu_to_le32(bss_color);
	cmd->detection_period_ms = cpu_to_le32(period);
	cmd->scan_period_ms = cpu_to_le32(ATH12K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS);
	cmd->free_slot_expiry_time_ms = 0;
	cmd->flags = 0;

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi_send_obss_color_collision_cfg id %d type %d bss_color %d detect_period %d scan_period %d\n",
		   cmd->vdev_id, cmd->evt_type, cmd->current_bss_color,
		   cmd->detection_period_ms, cmd->scan_period_ms);

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID);
	if (ret) {
		ath12k_warn(ab, "failed to send WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID\n");
		dev_kfree_skb(skb);
	}
	return ret;
}

int ath12k_wmi_send_bss_color_change_enable_cmd(struct ath12k *ar, u32 vdev_id,
						bool enable)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_bss_color_change_enable_params_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_bss_color_change_enable_params_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BSS_COLOR_CHANGE_ENABLE,
						 len);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->enable = enable ? cpu_to_le32(1) : 0;

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi_send_bss_color_change_enable id %d enable %d\n",
		   cmd->vdev_id, cmd->enable);

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_BSS_COLOR_CHANGE_ENABLE_CMDID);
	if (ret) {
		ath12k_warn(ab, "failed to send WMI_BSS_COLOR_CHANGE_ENABLE_CMDID\n");
		dev_kfree_skb(skb);
	}
	return ret;
}

int ath12k_wmi_fils_discovery_tmpl(struct ath12k *ar, u32 vdev_id,
				   struct sk_buff *tmpl)
{
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	int ret, len;
	size_t aligned_len;
	struct wmi_fils_discovery_tmpl_cmd *cmd;

	aligned_len = roundup(tmpl->len, 4);
	len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev %i set FILS discovery template\n", vdev_id);

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_fils_discovery_tmpl_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FILS_DISCOVERY_TMPL_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->buf_len = cpu_to_le32(tmpl->len);
	ptr = skb->data + sizeof(*cmd);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
	memcpy(tlv->value, tmpl->data, tmpl->len);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_FILS_DISCOVERY_TMPL_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "WMI vdev %i failed to send FILS discovery template command\n",
			    vdev_id);
		dev_kfree_skb(skb);
	}
	return ret;
}

int ath12k_wmi_probe_resp_tmpl(struct ath12k *ar, u32 vdev_id,
			       struct sk_buff *tmpl)
{
	struct wmi_probe_tmpl_cmd *cmd;
	struct ath12k_wmi_bcn_prb_info_params *probe_info;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	int ret, len;
	size_t aligned_len = roundup(tmpl->len, 4);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev %i set probe response template\n", vdev_id);

	len = sizeof(*cmd) + sizeof(*probe_info) + TLV_HDR_SIZE + aligned_len;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_probe_tmpl_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PRB_TMPL_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->buf_len = cpu_to_le32(tmpl->len);

	ptr = skb->data + sizeof(*cmd);

	probe_info = ptr;
	len = sizeof(*probe_info);
	probe_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_PRB_INFO,
							len);
	probe_info->caps = 0;
	probe_info->erp = 0;

	ptr += sizeof(*probe_info);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
	memcpy(tlv->value, tmpl->data, tmpl->len);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_PRB_TMPL_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "WMI vdev %i failed to send probe response template command\n",
			    vdev_id);
		dev_kfree_skb(skb);
	}
	return ret;
}

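/* Enable or disable periodic FILS discovery / unsolicited broadcast probe
 * response transmission. The same command carries both: cmd->config selects
 * which frame type the interval applies to.
 */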
int ath12k_wmi_fils_discovery(struct ath12k *ar, u32 vdev_id, u32 interval,
			      bool unsol_bcast_probe_resp_enabled)
{
	struct sk_buff *skb;
	int ret, len;
	struct wmi_fils_discovery_cmd *cmd;

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev %i set %s interval to %u TU\n",
		   vdev_id, unsol_bcast_probe_resp_enabled ?
		   "unsolicited broadcast probe response" : "FILS discovery",
		   interval);

	len = sizeof(*cmd);
	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_fils_discovery_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ENABLE_FILS_CMD,
						 len);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->interval = cpu_to_le32(interval);
	cmd->config = cpu_to_le32(unsol_bcast_probe_resp_enabled);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_ENABLE_FILS_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "WMI vdev %i failed to send FILS discovery enable/disable command\n",
			    vdev_id);
		dev_kfree_skb(skb);
	}
	return ret;
}

static void
ath12k_fill_band_to_mac_param(struct ath12k_base *soc,
			      struct ath12k_wmi_pdev_band_arg *arg)
{
	u8 i;
	struct ath12k_wmi_hal_reg_capabilities_ext_arg *hal_reg_cap;
	struct ath12k_pdev *pdev;

	for (i = 0; i < soc->num_radios; i++) {
		pdev = &soc->pdevs[i];
		hal_reg_cap = &soc->hal_reg_cap[i];
		arg[i].pdev_id = pdev->pdev_id;

		switch (pdev->cap.supported_bands) {
		case WMI_HOST_WLAN_2G_5G_CAP:
			arg[i].start_freq = hal_reg_cap->low_2ghz_chan;
			arg[i].end_freq = hal_reg_cap->high_5ghz_chan;
			break;
		case WMI_HOST_WLAN_2G_CAP:
			arg[i].start_freq = hal_reg_cap->low_2ghz_chan;
			arg[i].end_freq = hal_reg_cap->high_2ghz_chan;
			break;
		case WMI_HOST_WLAN_5G_CAP:
			arg[i].start_freq = hal_reg_cap->low_5ghz_chan;
			arg[i].end_freq = hal_reg_cap->high_5ghz_chan;
			break;
		default:
			break;
		}
	}
}

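/* Convert the host resource configuration into the little-endian wire format
 * consumed by WMI_INIT_CMDID.
 */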
static void
ath12k_wmi_copy_resource_config(struct ath12k_wmi_resource_config_params *wmi_cfg,
				struct ath12k_wmi_resource_config_arg *tg_cfg)
{
	wmi_cfg->num_vdevs = cpu_to_le32(tg_cfg->num_vdevs);
	wmi_cfg->num_peers = cpu_to_le32(tg_cfg->num_peers);
	wmi_cfg->num_offload_peers = cpu_to_le32(tg_cfg->num_offload_peers);
	wmi_cfg->num_offload_reorder_buffs =
		cpu_to_le32(tg_cfg->num_offload_reorder_buffs);
	wmi_cfg->num_peer_keys = cpu_to_le32(tg_cfg->num_peer_keys);
	wmi_cfg->num_tids = cpu_to_le32(tg_cfg->num_tids);
	wmi_cfg->ast_skid_limit = cpu_to_le32(tg_cfg->ast_skid_limit);
	wmi_cfg->tx_chain_mask = cpu_to_le32(tg_cfg->tx_chain_mask);
	wmi_cfg->rx_chain_mask = cpu_to_le32(tg_cfg->rx_chain_mask);
	wmi_cfg->rx_timeout_pri[0] = cpu_to_le32(tg_cfg->rx_timeout_pri[0]);
	wmi_cfg->rx_timeout_pri[1] = cpu_to_le32(tg_cfg->rx_timeout_pri[1]);
	wmi_cfg->rx_timeout_pri[2] = cpu_to_le32(tg_cfg->rx_timeout_pri[2]);
	wmi_cfg->rx_timeout_pri[3] = cpu_to_le32(tg_cfg->rx_timeout_pri[3]);
	wmi_cfg->rx_decap_mode = cpu_to_le32(tg_cfg->rx_decap_mode);
	wmi_cfg->scan_max_pending_req = cpu_to_le32(tg_cfg->scan_max_pending_req);
	wmi_cfg->bmiss_offload_max_vdev = cpu_to_le32(tg_cfg->bmiss_offload_max_vdev);
	wmi_cfg->roam_offload_max_vdev = cpu_to_le32(tg_cfg->roam_offload_max_vdev);
	wmi_cfg->roam_offload_max_ap_profiles =
		cpu_to_le32(tg_cfg->roam_offload_max_ap_profiles);
	wmi_cfg->num_mcast_groups = cpu_to_le32(tg_cfg->num_mcast_groups);
	wmi_cfg->num_mcast_table_elems = cpu_to_le32(tg_cfg->num_mcast_table_elems);
	wmi_cfg->mcast2ucast_mode = cpu_to_le32(tg_cfg->mcast2ucast_mode);
	wmi_cfg->tx_dbg_log_size = cpu_to_le32(tg_cfg->tx_dbg_log_size);
	wmi_cfg->num_wds_entries = cpu_to_le32(tg_cfg->num_wds_entries);
	wmi_cfg->dma_burst_size = cpu_to_le32(tg_cfg->dma_burst_size);
	wmi_cfg->mac_aggr_delim = cpu_to_le32(tg_cfg->mac_aggr_delim);
	wmi_cfg->rx_skip_defrag_timeout_dup_detection_check =
		cpu_to_le32(tg_cfg->rx_skip_defrag_timeout_dup_detection_check);
	wmi_cfg->vow_config = cpu_to_le32(tg_cfg->vow_config);
	wmi_cfg->gtk_offload_max_vdev = cpu_to_le32(tg_cfg->gtk_offload_max_vdev);
	wmi_cfg->num_msdu_desc = cpu_to_le32(tg_cfg->num_msdu_desc);
	wmi_cfg->max_frag_entries = cpu_to_le32(tg_cfg->max_frag_entries);
	wmi_cfg->num_tdls_vdevs = cpu_to_le32(tg_cfg->num_tdls_vdevs);
	wmi_cfg->num_tdls_conn_table_entries =
		cpu_to_le32(tg_cfg->num_tdls_conn_table_entries);
	wmi_cfg->beacon_tx_offload_max_vdev =
		cpu_to_le32(tg_cfg->beacon_tx_offload_max_vdev);
	wmi_cfg->num_multicast_filter_entries =
		cpu_to_le32(tg_cfg->num_multicast_filter_entries);
	wmi_cfg->num_wow_filters = cpu_to_le32(tg_cfg->num_wow_filters);
	wmi_cfg->num_keep_alive_pattern = cpu_to_le32(tg_cfg->num_keep_alive_pattern);
	wmi_cfg->keep_alive_pattern_size = cpu_to_le32(tg_cfg->keep_alive_pattern_size);
	wmi_cfg->max_tdls_concurrent_sleep_sta =
		cpu_to_le32(tg_cfg->max_tdls_concurrent_sleep_sta);
	wmi_cfg->max_tdls_concurrent_buffer_sta =
		cpu_to_le32(tg_cfg->max_tdls_concurrent_buffer_sta);
	wmi_cfg->wmi_send_separate = cpu_to_le32(tg_cfg->wmi_send_separate);
	wmi_cfg->num_ocb_vdevs = cpu_to_le32(tg_cfg->num_ocb_vdevs);
	wmi_cfg->num_ocb_channels = cpu_to_le32(tg_cfg->num_ocb_channels);
	wmi_cfg->num_ocb_schedules = cpu_to_le32(tg_cfg->num_ocb_schedules);
	wmi_cfg->bpf_instruction_size = cpu_to_le32(tg_cfg->bpf_instruction_size);
	wmi_cfg->max_bssid_rx_filters = cpu_to_le32(tg_cfg->max_bssid_rx_filters);
	wmi_cfg->use_pdev_id = cpu_to_le32(tg_cfg->use_pdev_id);
	wmi_cfg->flag1 = cpu_to_le32(tg_cfg->atf_config);
	wmi_cfg->peer_map_unmap_version = cpu_to_le32(tg_cfg->peer_map_unmap_version);
	wmi_cfg->sched_params = cpu_to_le32(tg_cfg->sched_params);
	wmi_cfg->twt_ap_pdev_count = cpu_to_le32(tg_cfg->twt_ap_pdev_count);
	wmi_cfg->twt_ap_sta_count = cpu_to_le32(tg_cfg->twt_ap_sta_count);
	wmi_cfg->host_service_flags = cpu_to_le32(tg_cfg->is_reg_cc_ext_event_supported <<
						  WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT);
}

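/* Build and send WMI_INIT_CMDID: the resource config TLV, the host memory
 * chunk array (an empty array TLV is still required when no chunks are
 * reserved) and, if a preferred HW mode is set, the hw-mode TLV with its
 * band-to-mac mapping.
 */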
static int ath12k_init_cmd_send(struct ath12k_wmi_pdev *wmi,
				struct ath12k_wmi_init_cmd_arg *arg)
{
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct sk_buff *skb;
	struct wmi_init_cmd *cmd;
	struct ath12k_wmi_resource_config_params *cfg;
	struct ath12k_wmi_pdev_set_hw_mode_cmd *hw_mode;
	struct ath12k_wmi_pdev_band_to_mac_params *band_to_mac;
	struct ath12k_wmi_host_mem_chunk_params *host_mem_chunks;
	struct wmi_tlv *tlv;
	size_t len;
	void *ptr;
	u32 hw_mode_len = 0;
	u16 idx;
	int ret;

	if (arg->hw_mode_id != WMI_HOST_HW_MODE_MAX)
		hw_mode_len = sizeof(*hw_mode) + TLV_HDR_SIZE +
			      (arg->num_band_to_mac * sizeof(*band_to_mac));

	len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*cfg) + hw_mode_len +
	      (arg->num_mem_chunks ? (sizeof(*host_mem_chunks) * WMI_MAX_MEM_REQS) : 0);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_init_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_INIT_CMD,
						 sizeof(*cmd));

	ptr = skb->data + sizeof(*cmd);
	cfg = ptr;

	ath12k_wmi_copy_resource_config(cfg, &arg->res_cfg);

	cfg->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_RESOURCE_CONFIG,
						 sizeof(*cfg));

	ptr += sizeof(*cfg);
	host_mem_chunks = ptr + TLV_HDR_SIZE;
	len = sizeof(struct ath12k_wmi_host_mem_chunk_params);

	for (idx = 0; idx < arg->num_mem_chunks; ++idx) {
		host_mem_chunks[idx].tlv_header =
			ath12k_wmi_tlv_hdr(WMI_TAG_WLAN_HOST_MEMORY_CHUNK,
					   len);

		host_mem_chunks[idx].ptr = cpu_to_le32(arg->mem_chunks[idx].paddr);
		host_mem_chunks[idx].size = cpu_to_le32(arg->mem_chunks[idx].len);
		host_mem_chunks[idx].req_id = cpu_to_le32(arg->mem_chunks[idx].req_id);

		ath12k_dbg(ab, ATH12K_DBG_WMI,
			   "WMI host mem chunk req_id %d paddr 0x%llx len %d\n",
			   arg->mem_chunks[idx].req_id,
			   (u64)arg->mem_chunks[idx].paddr,
			   arg->mem_chunks[idx].len);
	}
	cmd->num_host_mem_chunks = cpu_to_le32(arg->num_mem_chunks);
	len = sizeof(struct ath12k_wmi_host_mem_chunk_params) * arg->num_mem_chunks;

	/* the array TLV header is added even when num_mem_chunks is zero */
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
	ptr += TLV_HDR_SIZE + len;

	if (arg->hw_mode_id != WMI_HOST_HW_MODE_MAX) {
		hw_mode = (struct ath12k_wmi_pdev_set_hw_mode_cmd *)ptr;
		hw_mode->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_HW_MODE_CMD,
							     sizeof(*hw_mode));

		hw_mode->hw_mode_index = cpu_to_le32(arg->hw_mode_id);
		hw_mode->num_band_to_mac = cpu_to_le32(arg->num_band_to_mac);

		ptr += sizeof(*hw_mode);

		len = arg->num_band_to_mac * sizeof(*band_to_mac);
		tlv = ptr;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);

		ptr += TLV_HDR_SIZE;
		len = sizeof(*band_to_mac);

		for (idx = 0; idx < arg->num_band_to_mac; idx++) {
			band_to_mac = (void *)ptr;

			band_to_mac->tlv_header =
				ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BAND_TO_MAC,
						       len);
			band_to_mac->pdev_id = cpu_to_le32(arg->band_to_mac[idx].pdev_id);
			band_to_mac->start_freq =
				cpu_to_le32(arg->band_to_mac[idx].start_freq);
			band_to_mac->end_freq =
				cpu_to_le32(arg->band_to_mac[idx].end_freq);
			ptr += sizeof(*band_to_mac);
		}
	}

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_INIT_CMDID);
	if (ret) {
		ath12k_warn(ab, "failed to send WMI_INIT_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

3380int ath12k_wmi_pdev_lro_cfg(struct ath12k *ar,
3381 int pdev_id)
3382{
3383 struct ath12k_wmi_pdev_lro_config_cmd *cmd;
3384 struct sk_buff *skb;
3385 int ret;
3386
3387 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
3388 if (!skb)
3389 return -ENOMEM;
3390
3391 cmd = (struct ath12k_wmi_pdev_lro_config_cmd *)skb->data;
3392 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_LRO_INFO_CMD,
3393 sizeof(*cmd));
3394
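	/* th_4/th_6 carry the IPv4/IPv6 Toeplitz hash seeds used by the
	 * target for LRO flow lookup; fresh random seeds avoid
	 * predictable flow-to-hash placement.
	 */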
3395 get_random_bytes(cmd->th_4, sizeof(cmd->th_4));
3396 get_random_bytes(cmd->th_6, sizeof(cmd->th_6));
3397
3398 cmd->pdev_id = cpu_to_le32(pdev_id);
3399
3400 ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3401 "WMI lro cfg cmd pdev_id 0x%x\n", pdev_id);
3402
3403 ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_LRO_CONFIG_CMDID);
3404 if (ret) {
3405 ath12k_warn(ar->ab,
3406 "failed to send lro cfg req wmi cmd\n");
3407 goto err;
3408 }
3409
3410 return 0;
3411err:
3412 dev_kfree_skb(skb);
3413 return ret;
3414}
3415
3416int ath12k_wmi_wait_for_service_ready(struct ath12k_base *ab)
3417{
3418 unsigned long time_left;
3419
3420 time_left = wait_for_completion_timeout(&ab->wmi_ab.service_ready,
3421 WMI_SERVICE_READY_TIMEOUT_HZ);
3422 if (!time_left)
3423 return -ETIMEDOUT;
3424
3425 return 0;
3426}
3427
3428int ath12k_wmi_wait_for_unified_ready(struct ath12k_base *ab)
3429{
3430 unsigned long time_left;
3431
3432 time_left = wait_for_completion_timeout(&ab->wmi_ab.unified_ready,
3433 WMI_SERVICE_READY_TIMEOUT_HZ);
3434 if (!time_left)
3435 return -ETIMEDOUT;
3436
3437 return 0;
3438}
3439
3440int ath12k_wmi_set_hw_mode(struct ath12k_base *ab,
3441 enum wmi_host_hw_mode_config_type mode)
3442{
3443 struct ath12k_wmi_pdev_set_hw_mode_cmd *cmd;
3444 struct sk_buff *skb;
3445 struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
3446 int len;
3447 int ret;
3448
3449 len = sizeof(*cmd);
3450
3451 skb = ath12k_wmi_alloc_skb(wmi_ab, len);
3452 if (!skb)
3453 return -ENOMEM;
3454
3455 cmd = (struct ath12k_wmi_pdev_set_hw_mode_cmd *)skb->data;
3456
3457 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_HW_MODE_CMD,
3458 sizeof(*cmd));
3459
	cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
3461 cmd->hw_mode_index = cpu_to_le32(mode);
3462
3463 ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], skb, WMI_PDEV_SET_HW_MODE_CMDID);
3464 if (ret) {
3465 ath12k_warn(ab, "failed to send WMI_PDEV_SET_HW_MODE_CMDID\n");
3466 dev_kfree_skb(skb);
3467 }
3468
3469 return ret;
3470}
3471
3472int ath12k_wmi_cmd_init(struct ath12k_base *ab)
3473{
3474 struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
3475 struct ath12k_wmi_init_cmd_arg arg = {};
3476
3477 if (test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT,
3478 ab->wmi_ab.svc_map))
3479 arg.res_cfg.is_reg_cc_ext_event_supported = true;
3480
3481 ab->hw_params->wmi_init(ab, &arg.res_cfg);
3482
3483 arg.num_mem_chunks = wmi_ab->num_mem_chunks;
3484 arg.hw_mode_id = wmi_ab->preferred_hw_mode;
3485 arg.mem_chunks = wmi_ab->mem_chunks;
3486
3487 if (ab->hw_params->single_pdev_only)
3488 arg.hw_mode_id = WMI_HOST_HW_MODE_MAX;
3489
3490 arg.num_band_to_mac = ab->num_radios;
3491 ath12k_fill_band_to_mac_param(ab, arg.band_to_mac);
3492
3493 return ath12k_init_cmd_send(&wmi_ab->wmi[0], &arg);
3494}
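
/* The helpers above are driven from the core bring-up path. A minimal
 * sketch of the expected ordering (the exact call sites live outside
 * this file and may differ):
 *
 *	if (ath12k_wmi_wait_for_service_ready(ab))	// SERVICE_READY(_EXT/_EXT2)
 *		goto err;
 *	ret = ath12k_wmi_cmd_init(ab);			// WMI_INIT_CMDID
 *	if (!ret)
 *		ret = ath12k_wmi_wait_for_unified_ready(ab); // WMI_READY
 */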
3495
3496int ath12k_wmi_vdev_spectral_conf(struct ath12k *ar,
3497 struct ath12k_wmi_vdev_spectral_conf_arg *arg)
3498{
3499 struct ath12k_wmi_vdev_spectral_conf_cmd *cmd;
3500 struct sk_buff *skb;
3501 int ret;
3502
3503 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
3504 if (!skb)
3505 return -ENOMEM;
3506
3507 cmd = (struct ath12k_wmi_vdev_spectral_conf_cmd *)skb->data;
3508 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SPECTRAL_CONFIGURE_CMD,
3509 sizeof(*cmd));
3510 cmd->vdev_id = cpu_to_le32(arg->vdev_id);
3511 cmd->scan_count = cpu_to_le32(arg->scan_count);
3512 cmd->scan_period = cpu_to_le32(arg->scan_period);
3513 cmd->scan_priority = cpu_to_le32(arg->scan_priority);
3514 cmd->scan_fft_size = cpu_to_le32(arg->scan_fft_size);
3515 cmd->scan_gc_ena = cpu_to_le32(arg->scan_gc_ena);
3516 cmd->scan_restart_ena = cpu_to_le32(arg->scan_restart_ena);
3517 cmd->scan_noise_floor_ref = cpu_to_le32(arg->scan_noise_floor_ref);
3518 cmd->scan_init_delay = cpu_to_le32(arg->scan_init_delay);
3519 cmd->scan_nb_tone_thr = cpu_to_le32(arg->scan_nb_tone_thr);
3520 cmd->scan_str_bin_thr = cpu_to_le32(arg->scan_str_bin_thr);
3521 cmd->scan_wb_rpt_mode = cpu_to_le32(arg->scan_wb_rpt_mode);
3522 cmd->scan_rssi_rpt_mode = cpu_to_le32(arg->scan_rssi_rpt_mode);
3523 cmd->scan_rssi_thr = cpu_to_le32(arg->scan_rssi_thr);
3524 cmd->scan_pwr_format = cpu_to_le32(arg->scan_pwr_format);
3525 cmd->scan_rpt_mode = cpu_to_le32(arg->scan_rpt_mode);
3526 cmd->scan_bin_scale = cpu_to_le32(arg->scan_bin_scale);
3527 cmd->scan_dbm_adj = cpu_to_le32(arg->scan_dbm_adj);
3528 cmd->scan_chn_mask = cpu_to_le32(arg->scan_chn_mask);
3529
3530 ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3531 "WMI spectral scan config cmd vdev_id 0x%x\n",
3532 arg->vdev_id);
3533
3534 ret = ath12k_wmi_cmd_send(ar->wmi, skb,
3535 WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID);
3536 if (ret) {
3537 ath12k_warn(ar->ab,
3538 "failed to send spectral scan config wmi cmd\n");
3539 goto err;
3540 }
3541
3542 return 0;
3543err:
3544 dev_kfree_skb(skb);
3545 return ret;
3546}
3547
3548int ath12k_wmi_vdev_spectral_enable(struct ath12k *ar, u32 vdev_id,
3549 u32 trigger, u32 enable)
3550{
3551 struct ath12k_wmi_vdev_spectral_enable_cmd *cmd;
3552 struct sk_buff *skb;
3553 int ret;
3554
3555 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
3556 if (!skb)
3557 return -ENOMEM;
3558
3559 cmd = (struct ath12k_wmi_vdev_spectral_enable_cmd *)skb->data;
3560 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SPECTRAL_ENABLE_CMD,
3561 sizeof(*cmd));
3562
3563 cmd->vdev_id = cpu_to_le32(vdev_id);
3564 cmd->trigger_cmd = cpu_to_le32(trigger);
3565 cmd->enable_cmd = cpu_to_le32(enable);
3566
3567 ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3568 "WMI spectral enable cmd vdev id 0x%x\n",
3569 vdev_id);
3570
3571 ret = ath12k_wmi_cmd_send(ar->wmi, skb,
3572 WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID);
3573 if (ret) {
3574 ath12k_warn(ar->ab,
3575 "failed to send spectral enable wmi cmd\n");
3576 goto err;
3577 }
3578
3579 return 0;
3580err:
3581 dev_kfree_skb(skb);
3582 return ret;
3583}
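
/* A spectral scan session pairs the two calls above: configure first,
 * then enable with a trigger. A minimal sketch (the field values are
 * illustrative assumptions; the trigger/enable encodings follow the
 * WMI spectral command definitions):
 *
 *	struct ath12k_wmi_vdev_spectral_conf_arg conf = {
 *		.vdev_id = vdev_id,
 *		.scan_count = 0,	// assumed: 0 = run until disabled
 *		.scan_fft_size = 7,	// assumed: 128-point FFT
 *	};
 *
 *	ret = ath12k_wmi_vdev_spectral_conf(ar, &conf);
 *	if (!ret)
 *		ret = ath12k_wmi_vdev_spectral_enable(ar, vdev_id,
 *						      trigger, enable);
 */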
3584
3585int ath12k_wmi_pdev_dma_ring_cfg(struct ath12k *ar,
3586 struct ath12k_wmi_pdev_dma_ring_cfg_arg *arg)
3587{
3588 struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd *cmd;
3589 struct sk_buff *skb;
3590 int ret;
3591
3592 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
3593 if (!skb)
3594 return -ENOMEM;
3595
3596 cmd = (struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd *)skb->data;
3597 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DMA_RING_CFG_REQ,
3598 sizeof(*cmd));
3599
3600 cmd->pdev_id = cpu_to_le32(DP_SW2HW_MACID(arg->pdev_id));
3601 cmd->module_id = cpu_to_le32(arg->module_id);
3602 cmd->base_paddr_lo = cpu_to_le32(arg->base_paddr_lo);
3603 cmd->base_paddr_hi = cpu_to_le32(arg->base_paddr_hi);
3604 cmd->head_idx_paddr_lo = cpu_to_le32(arg->head_idx_paddr_lo);
3605 cmd->head_idx_paddr_hi = cpu_to_le32(arg->head_idx_paddr_hi);
3606 cmd->tail_idx_paddr_lo = cpu_to_le32(arg->tail_idx_paddr_lo);
3607 cmd->tail_idx_paddr_hi = cpu_to_le32(arg->tail_idx_paddr_hi);
3608 cmd->num_elems = cpu_to_le32(arg->num_elems);
3609 cmd->buf_size = cpu_to_le32(arg->buf_size);
3610 cmd->num_resp_per_event = cpu_to_le32(arg->num_resp_per_event);
3611 cmd->event_timeout_ms = cpu_to_le32(arg->event_timeout_ms);
3612
3613 ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3614 "WMI DMA ring cfg req cmd pdev_id 0x%x\n",
3615 arg->pdev_id);
3616
3617 ret = ath12k_wmi_cmd_send(ar->wmi, skb,
3618 WMI_PDEV_DMA_RING_CFG_REQ_CMDID);
3619 if (ret) {
3620 ath12k_warn(ar->ab,
3621 "failed to send dma ring cfg req wmi cmd\n");
3622 goto err;
3623 }
3624
3625 return 0;
3626err:
3627 dev_kfree_skb(skb);
3628 return ret;
3629}
3630
3631static int ath12k_wmi_dma_buf_entry_parse(struct ath12k_base *soc,
3632 u16 tag, u16 len,
3633 const void *ptr, void *data)
3634{
3635 struct ath12k_wmi_dma_buf_release_arg *arg = data;
3636
3637 if (tag != WMI_TAG_DMA_BUF_RELEASE_ENTRY)
3638 return -EPROTO;
3639
3640 if (arg->num_buf_entry >= le32_to_cpu(arg->fixed.num_buf_release_entry))
3641 return -ENOBUFS;
3642
3643 arg->num_buf_entry++;
3644 return 0;
3645}
3646
3647static int ath12k_wmi_dma_buf_meta_parse(struct ath12k_base *soc,
3648 u16 tag, u16 len,
3649 const void *ptr, void *data)
3650{
3651 struct ath12k_wmi_dma_buf_release_arg *arg = data;
3652
3653 if (tag != WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA)
3654 return -EPROTO;
3655
3656 if (arg->num_meta >= le32_to_cpu(arg->fixed.num_meta_data_entry))
3657 return -ENOBUFS;
3658
3659 arg->num_meta++;
3660
3661 return 0;
3662}
3663
3664static int ath12k_wmi_dma_buf_parse(struct ath12k_base *ab,
3665 u16 tag, u16 len,
3666 const void *ptr, void *data)
3667{
3668 struct ath12k_wmi_dma_buf_release_arg *arg = data;
3669 const struct ath12k_wmi_dma_buf_release_fixed_params *fixed;
3670 u32 pdev_id;
3671 int ret;
3672
3673 switch (tag) {
3674 case WMI_TAG_DMA_BUF_RELEASE:
3675 fixed = ptr;
3676 arg->fixed = *fixed;
3677 pdev_id = DP_HW2SW_MACID(le32_to_cpu(fixed->pdev_id));
3678 arg->fixed.pdev_id = cpu_to_le32(pdev_id);
3679 break;
3680 case WMI_TAG_ARRAY_STRUCT:
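		/* ARRAY_STRUCT TLVs carry no sub-type of their own; the
		 * event places the buffer-entry array first and the
		 * spectral meta-data array second, so the *_done flags
		 * track which one the iterator is currently consuming.
		 */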
3681 if (!arg->buf_entry_done) {
3682 arg->num_buf_entry = 0;
3683 arg->buf_entry = ptr;
3684
3685 ret = ath12k_wmi_tlv_iter(ab, ptr, len,
3686 ath12k_wmi_dma_buf_entry_parse,
3687 arg);
3688 if (ret) {
3689 ath12k_warn(ab, "failed to parse dma buf entry tlv %d\n",
3690 ret);
3691 return ret;
3692 }
3693
3694 arg->buf_entry_done = true;
3695 } else if (!arg->meta_data_done) {
3696 arg->num_meta = 0;
3697 arg->meta_data = ptr;
3698
3699 ret = ath12k_wmi_tlv_iter(ab, ptr, len,
3700 ath12k_wmi_dma_buf_meta_parse,
3701 arg);
3702 if (ret) {
3703 ath12k_warn(ab, "failed to parse dma buf meta tlv %d\n",
3704 ret);
3705 return ret;
3706 }
3707
3708 arg->meta_data_done = true;
3709 }
3710 break;
3711 default:
3712 break;
3713 }
3714 return 0;
3715}
3716
3717static void ath12k_wmi_pdev_dma_ring_buf_release_event(struct ath12k_base *ab,
3718 struct sk_buff *skb)
3719{
3720 struct ath12k_wmi_dma_buf_release_arg arg = {};
3721 struct ath12k_dbring_buf_release_event param;
3722 int ret;
3723
3724 ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
3725 ath12k_wmi_dma_buf_parse,
3726 &arg);
3727 if (ret) {
3728 ath12k_warn(ab, "failed to parse dma buf release tlv %d\n", ret);
3729 return;
3730 }
3731
3732 param.fixed = arg.fixed;
3733 param.buf_entry = arg.buf_entry;
3734 param.num_buf_entry = arg.num_buf_entry;
3735 param.meta_data = arg.meta_data;
3736 param.num_meta = arg.num_meta;
3737
	ret = ath12k_dbring_buffer_release_event(ab, &param);
3739 if (ret) {
3740 ath12k_warn(ab, "failed to handle dma buf release event %d\n", ret);
3741 return;
3742 }
3743}
3744
3745static int ath12k_wmi_hw_mode_caps_parse(struct ath12k_base *soc,
3746 u16 tag, u16 len,
3747 const void *ptr, void *data)
3748{
3749 struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
3750 struct ath12k_wmi_hw_mode_cap_params *hw_mode_cap;
3751 u32 phy_map = 0;
3752
3753 if (tag != WMI_TAG_HW_MODE_CAPABILITIES)
3754 return -EPROTO;
3755
3756 if (svc_rdy_ext->n_hw_mode_caps >= svc_rdy_ext->arg.num_hw_modes)
3757 return -ENOBUFS;
3758
3759 hw_mode_cap = container_of(ptr, struct ath12k_wmi_hw_mode_cap_params,
3760 hw_mode_id);
3761 svc_rdy_ext->n_hw_mode_caps++;
3762
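	/* phy_id_map is a bitmap of the PHYs served by this hw mode.
	 * The bits are expected to be contiguous from bit 0, so fls()
	 * of the map (e.g. 0x3 -> 2) is the number of PHYs this mode
	 * contributes.
	 */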
3763 phy_map = le32_to_cpu(hw_mode_cap->phy_id_map);
3764 svc_rdy_ext->tot_phy_id += fls(phy_map);
3765
3766 return 0;
3767}
3768
3769static int ath12k_wmi_hw_mode_caps(struct ath12k_base *soc,
3770 u16 len, const void *ptr, void *data)
3771{
3772 struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
3773 const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps;
3774 enum wmi_host_hw_mode_config_type mode, pref;
3775 u32 i;
3776 int ret;
3777
3778 svc_rdy_ext->n_hw_mode_caps = 0;
3779 svc_rdy_ext->hw_mode_caps = ptr;
3780
3781 ret = ath12k_wmi_tlv_iter(soc, ptr, len,
3782 ath12k_wmi_hw_mode_caps_parse,
3783 svc_rdy_ext);
3784 if (ret) {
3785 ath12k_warn(soc, "failed to parse tlv %d\n", ret);
3786 return ret;
3787 }
3788
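	/* Walk the advertised modes and keep the one ranked highest in
	 * ath12k_hw_mode_pri_map (lower value = higher priority); its
	 * capabilities are saved for the later PHY enumeration.
	 */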
	for (i = 0; i < svc_rdy_ext->n_hw_mode_caps; i++) {
3790 hw_mode_caps = &svc_rdy_ext->hw_mode_caps[i];
3791 mode = le32_to_cpu(hw_mode_caps->hw_mode_id);
3792
3793 if (mode >= WMI_HOST_HW_MODE_MAX)
3794 continue;
3795
3796 pref = soc->wmi_ab.preferred_hw_mode;
3797
3798 if (ath12k_hw_mode_pri_map[mode] < ath12k_hw_mode_pri_map[pref]) {
3799 svc_rdy_ext->pref_hw_mode_caps = *hw_mode_caps;
3800 soc->wmi_ab.preferred_hw_mode = mode;
3801 }
3802 }
3803
3804 ath12k_dbg(soc, ATH12K_DBG_WMI, "preferred_hw_mode:%d\n",
3805 soc->wmi_ab.preferred_hw_mode);
3806 if (soc->wmi_ab.preferred_hw_mode == WMI_HOST_HW_MODE_MAX)
3807 return -EINVAL;
3808
3809 return 0;
3810}
3811
3812static int ath12k_wmi_mac_phy_caps_parse(struct ath12k_base *soc,
3813 u16 tag, u16 len,
3814 const void *ptr, void *data)
3815{
3816 struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
3817
3818 if (tag != WMI_TAG_MAC_PHY_CAPABILITIES)
3819 return -EPROTO;
3820
3821 if (svc_rdy_ext->n_mac_phy_caps >= svc_rdy_ext->tot_phy_id)
3822 return -ENOBUFS;
3823
3824 len = min_t(u16, len, sizeof(struct ath12k_wmi_mac_phy_caps_params));
3825 if (!svc_rdy_ext->n_mac_phy_caps) {
3826 svc_rdy_ext->mac_phy_caps = kzalloc((svc_rdy_ext->tot_phy_id) * len,
3827 GFP_ATOMIC);
3828 if (!svc_rdy_ext->mac_phy_caps)
3829 return -ENOMEM;
3830 }
3831
3832 memcpy(svc_rdy_ext->mac_phy_caps + svc_rdy_ext->n_mac_phy_caps, ptr, len);
3833 svc_rdy_ext->n_mac_phy_caps++;
3834 return 0;
3835}
3836
3837static int ath12k_wmi_ext_hal_reg_caps_parse(struct ath12k_base *soc,
3838 u16 tag, u16 len,
3839 const void *ptr, void *data)
3840{
3841 struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
3842
3843 if (tag != WMI_TAG_HAL_REG_CAPABILITIES_EXT)
3844 return -EPROTO;
3845
3846 if (svc_rdy_ext->n_ext_hal_reg_caps >= svc_rdy_ext->arg.num_phy)
3847 return -ENOBUFS;
3848
3849 svc_rdy_ext->n_ext_hal_reg_caps++;
3850 return 0;
3851}
3852
3853static int ath12k_wmi_ext_hal_reg_caps(struct ath12k_base *soc,
3854 u16 len, const void *ptr, void *data)
3855{
3856 struct ath12k_wmi_pdev *wmi_handle = &soc->wmi_ab.wmi[0];
3857 struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
3858 struct ath12k_wmi_hal_reg_capabilities_ext_arg reg_cap;
3859 int ret;
3860 u32 i;
3861
3862 svc_rdy_ext->n_ext_hal_reg_caps = 0;
3863 svc_rdy_ext->ext_hal_reg_caps = ptr;
3864 ret = ath12k_wmi_tlv_iter(soc, ptr, len,
3865 ath12k_wmi_ext_hal_reg_caps_parse,
3866 svc_rdy_ext);
3867 if (ret) {
3868 ath12k_warn(soc, "failed to parse tlv %d\n", ret);
3869 return ret;
3870 }
3871
3872 for (i = 0; i < svc_rdy_ext->arg.num_phy; i++) {
3873 ret = ath12k_pull_reg_cap_svc_rdy_ext(wmi_handle,
3874 svc_rdy_ext->soc_hal_reg_caps,
3875 svc_rdy_ext->ext_hal_reg_caps, i,
						      &reg_cap);
3877 if (ret) {
3878 ath12k_warn(soc, "failed to extract reg cap %d\n", i);
3879 return ret;
3880 }
3881
3882 if (reg_cap.phy_id >= MAX_RADIOS) {
3883 ath12k_warn(soc, "unexpected phy id %u\n", reg_cap.phy_id);
3884 return -EINVAL;
3885 }
3886
3887 soc->hal_reg_cap[reg_cap.phy_id] = reg_cap;
3888 }
3889 return 0;
3890}
3891
3892static int ath12k_wmi_ext_soc_hal_reg_caps_parse(struct ath12k_base *soc,
3893 u16 len, const void *ptr,
3894 void *data)
3895{
3896 struct ath12k_wmi_pdev *wmi_handle = &soc->wmi_ab.wmi[0];
3897 struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
3898 u8 hw_mode_id = le32_to_cpu(svc_rdy_ext->pref_hw_mode_caps.hw_mode_id);
3899 u32 phy_id_map;
3900 int pdev_index = 0;
3901 int ret;
3902
3903 svc_rdy_ext->soc_hal_reg_caps = ptr;
3904 svc_rdy_ext->arg.num_phy = le32_to_cpu(svc_rdy_ext->soc_hal_reg_caps->num_phy);
3905
3906 soc->num_radios = 0;
3907 phy_id_map = le32_to_cpu(svc_rdy_ext->pref_hw_mode_caps.phy_id_map);
3908 soc->fw_pdev_count = 0;
3909
3910 while (phy_id_map && soc->num_radios < MAX_RADIOS) {
3911 ret = ath12k_pull_mac_phy_cap_svc_ready_ext(wmi_handle,
3912 svc_rdy_ext,
3913 hw_mode_id, soc->num_radios,
3914 &soc->pdevs[pdev_index]);
3915 if (ret) {
			ath12k_warn(soc, "failed to extract mac caps, idx: %d\n",
				    soc->num_radios);
3918 return ret;
3919 }
3920
3921 soc->num_radios++;
3922
		/* For single_pdev_only targets, save the mac_phy
		 * capabilities in the same pdev.
		 */
3926 if (soc->hw_params->single_pdev_only)
3927 pdev_index = 0;
3928 else
3929 pdev_index = soc->num_radios;
3930
3931 /* TODO: mac_phy_cap prints */
3932 phy_id_map >>= 1;
3933 }
3934
3935 if (soc->hw_params->single_pdev_only) {
3936 soc->num_radios = 1;
3937 soc->pdevs[0].pdev_id = 0;
3938 }
3939
3940 return 0;
3941}
3942
3943static int ath12k_wmi_dma_ring_caps_parse(struct ath12k_base *soc,
3944 u16 tag, u16 len,
3945 const void *ptr, void *data)
3946{
3947 struct ath12k_wmi_dma_ring_caps_parse *parse = data;
3948
3949 if (tag != WMI_TAG_DMA_RING_CAPABILITIES)
3950 return -EPROTO;
3951
3952 parse->n_dma_ring_caps++;
3953 return 0;
3954}
3955
3956static int ath12k_wmi_alloc_dbring_caps(struct ath12k_base *ab,
3957 u32 num_cap)
3958{
3959 size_t sz;
3960 void *ptr;
3961
3962 sz = num_cap * sizeof(struct ath12k_dbring_cap);
3963 ptr = kzalloc(sz, GFP_ATOMIC);
3964 if (!ptr)
3965 return -ENOMEM;
3966
3967 ab->db_caps = ptr;
3968 ab->num_db_cap = num_cap;
3969
3970 return 0;
3971}
3972
3973static void ath12k_wmi_free_dbring_caps(struct ath12k_base *ab)
3974{
3975 kfree(ab->db_caps);
3976 ab->db_caps = NULL;
3977}
3978
3979static int ath12k_wmi_dma_ring_caps(struct ath12k_base *ab,
3980 u16 len, const void *ptr, void *data)
3981{
3982 struct ath12k_wmi_dma_ring_caps_parse *dma_caps_parse = data;
3983 struct ath12k_wmi_dma_ring_caps_params *dma_caps;
3984 struct ath12k_dbring_cap *dir_buff_caps;
3985 int ret;
3986 u32 i;
3987
3988 dma_caps_parse->n_dma_ring_caps = 0;
3989 dma_caps = (struct ath12k_wmi_dma_ring_caps_params *)ptr;
3990 ret = ath12k_wmi_tlv_iter(ab, ptr, len,
3991 ath12k_wmi_dma_ring_caps_parse,
3992 dma_caps_parse);
3993 if (ret) {
3994 ath12k_warn(ab, "failed to parse dma ring caps tlv %d\n", ret);
3995 return ret;
3996 }
3997
3998 if (!dma_caps_parse->n_dma_ring_caps)
3999 return 0;
4000
4001 if (ab->num_db_cap) {
4002 ath12k_warn(ab, "Already processed, so ignoring dma ring caps\n");
4003 return 0;
4004 }
4005
4006 ret = ath12k_wmi_alloc_dbring_caps(ab, dma_caps_parse->n_dma_ring_caps);
4007 if (ret)
4008 return ret;
4009
4010 dir_buff_caps = ab->db_caps;
4011 for (i = 0; i < dma_caps_parse->n_dma_ring_caps; i++) {
4012 if (le32_to_cpu(dma_caps[i].module_id) >= WMI_DIRECT_BUF_MAX) {
4013 ath12k_warn(ab, "Invalid module id %d\n",
4014 le32_to_cpu(dma_caps[i].module_id));
4015 ret = -EINVAL;
4016 goto free_dir_buff;
4017 }
4018
4019 dir_buff_caps[i].id = le32_to_cpu(dma_caps[i].module_id);
4020 dir_buff_caps[i].pdev_id =
4021 DP_HW2SW_MACID(le32_to_cpu(dma_caps[i].pdev_id));
4022 dir_buff_caps[i].min_elem = le32_to_cpu(dma_caps[i].min_elem);
4023 dir_buff_caps[i].min_buf_sz = le32_to_cpu(dma_caps[i].min_buf_sz);
4024 dir_buff_caps[i].min_buf_align = le32_to_cpu(dma_caps[i].min_buf_align);
4025 }
4026
4027 return 0;
4028
4029free_dir_buff:
4030 ath12k_wmi_free_dbring_caps(ab);
4031 return ret;
4032}
4033
4034static int ath12k_wmi_svc_rdy_ext_parse(struct ath12k_base *ab,
4035 u16 tag, u16 len,
4036 const void *ptr, void *data)
4037{
4038 struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
4039 struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4040 int ret;
4041
4042 switch (tag) {
4043 case WMI_TAG_SERVICE_READY_EXT_EVENT:
4044 ret = ath12k_pull_svc_ready_ext(wmi_handle, ptr,
4045 &svc_rdy_ext->arg);
4046 if (ret) {
4047 ath12k_warn(ab, "unable to extract ext params\n");
4048 return ret;
4049 }
4050 break;
4051
4052 case WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS:
4053 svc_rdy_ext->hw_caps = ptr;
4054 svc_rdy_ext->arg.num_hw_modes =
4055 le32_to_cpu(svc_rdy_ext->hw_caps->num_hw_modes);
4056 break;
4057
4058 case WMI_TAG_SOC_HAL_REG_CAPABILITIES:
4059 ret = ath12k_wmi_ext_soc_hal_reg_caps_parse(ab, len, ptr,
4060 svc_rdy_ext);
4061 if (ret)
4062 return ret;
4063 break;
4064
4065 case WMI_TAG_ARRAY_STRUCT:
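		/* SERVICE_READY_EXT carries several back-to-back
		 * ARRAY_STRUCT TLVs that are distinguishable only by
		 * position: hw mode caps, mac/phy caps, ext HAL reg
		 * caps, two chainmask arrays, OEM DMA ring caps and
		 * finally the DMA ring caps. The *_done flags step
		 * through that fixed order.
		 */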
4066 if (!svc_rdy_ext->hw_mode_done) {
4067 ret = ath12k_wmi_hw_mode_caps(ab, len, ptr, svc_rdy_ext);
4068 if (ret)
4069 return ret;
4070
4071 svc_rdy_ext->hw_mode_done = true;
4072 } else if (!svc_rdy_ext->mac_phy_done) {
4073 svc_rdy_ext->n_mac_phy_caps = 0;
4074 ret = ath12k_wmi_tlv_iter(ab, ptr, len,
4075 ath12k_wmi_mac_phy_caps_parse,
4076 svc_rdy_ext);
4077 if (ret) {
4078 ath12k_warn(ab, "failed to parse tlv %d\n", ret);
4079 return ret;
4080 }
4081
4082 svc_rdy_ext->mac_phy_done = true;
4083 } else if (!svc_rdy_ext->ext_hal_reg_done) {
4084 ret = ath12k_wmi_ext_hal_reg_caps(ab, len, ptr, svc_rdy_ext);
4085 if (ret)
4086 return ret;
4087
4088 svc_rdy_ext->ext_hal_reg_done = true;
4089 } else if (!svc_rdy_ext->mac_phy_chainmask_combo_done) {
4090 svc_rdy_ext->mac_phy_chainmask_combo_done = true;
4091 } else if (!svc_rdy_ext->mac_phy_chainmask_cap_done) {
4092 svc_rdy_ext->mac_phy_chainmask_cap_done = true;
4093 } else if (!svc_rdy_ext->oem_dma_ring_cap_done) {
4094 svc_rdy_ext->oem_dma_ring_cap_done = true;
4095 } else if (!svc_rdy_ext->dma_ring_cap_done) {
4096 ret = ath12k_wmi_dma_ring_caps(ab, len, ptr,
4097 &svc_rdy_ext->dma_caps_parse);
4098 if (ret)
4099 return ret;
4100
4101 svc_rdy_ext->dma_ring_cap_done = true;
4102 }
4103 break;
4104
4105 default:
4106 break;
4107 }
4108 return 0;
4109}
4110
4111static int ath12k_service_ready_ext_event(struct ath12k_base *ab,
4112 struct sk_buff *skb)
4113{
4114 struct ath12k_wmi_svc_rdy_ext_parse svc_rdy_ext = { };
4115 int ret;
4116
4117 ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
4118 ath12k_wmi_svc_rdy_ext_parse,
4119 &svc_rdy_ext);
4120 if (ret) {
4121 ath12k_warn(ab, "failed to parse tlv %d\n", ret);
4122 goto err;
4123 }
4124
4125 if (!test_bit(WMI_TLV_SERVICE_EXT2_MSG, ab->wmi_ab.svc_map))
4126 complete(&ab->wmi_ab.service_ready);
4127
4128 kfree(svc_rdy_ext.mac_phy_caps);
4129 return 0;
4130
err:
	kfree(svc_rdy_ext.mac_phy_caps);
	ath12k_wmi_free_dbring_caps(ab);
	return ret;
4134}
4135
4136static int ath12k_pull_svc_ready_ext2(struct ath12k_wmi_pdev *wmi_handle,
4137 const void *ptr,
4138 struct ath12k_wmi_svc_rdy_ext2_arg *arg)
4139{
4140 const struct wmi_service_ready_ext2_event *ev = ptr;
4141
4142 if (!ev)
4143 return -EINVAL;
4144
4145 arg->reg_db_version = le32_to_cpu(ev->reg_db_version);
4146 arg->hw_min_max_tx_power_2ghz = le32_to_cpu(ev->hw_min_max_tx_power_2ghz);
4147 arg->hw_min_max_tx_power_5ghz = le32_to_cpu(ev->hw_min_max_tx_power_5ghz);
4148 arg->chwidth_num_peer_caps = le32_to_cpu(ev->chwidth_num_peer_caps);
4149 arg->preamble_puncture_bw = le32_to_cpu(ev->preamble_puncture_bw);
4150 arg->max_user_per_ppdu_ofdma = le32_to_cpu(ev->max_user_per_ppdu_ofdma);
4151 arg->max_user_per_ppdu_mumimo = le32_to_cpu(ev->max_user_per_ppdu_mumimo);
4152 arg->target_cap_flags = le32_to_cpu(ev->target_cap_flags);
4153 return 0;
4154}
4155
4156static void ath12k_wmi_eht_caps_parse(struct ath12k_pdev *pdev, u32 band,
4157 const __le32 cap_mac_info[],
4158 const __le32 cap_phy_info[],
4159 const __le32 supp_mcs[],
4160 const struct ath12k_wmi_ppe_threshold_params *ppet,
4161 __le32 cap_info_internal)
4162{
4163 struct ath12k_band_cap *cap_band = &pdev->cap.band[band];
4164 u32 support_320mhz;
4165 u8 i;
4166
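	/* The 6 GHz 320 MHz capability bit may already have been set
	 * from WMI_TAG_MAC_PHY_CAPABILITIES_EXT; stash it so the full
	 * overwrite of eht_cap_phy_info below does not lose it.
	 */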
4167 if (band == NL80211_BAND_6GHZ)
4168 support_320mhz = cap_band->eht_cap_phy_info[0] &
4169 IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
4170
4171 for (i = 0; i < WMI_MAX_EHTCAP_MAC_SIZE; i++)
4172 cap_band->eht_cap_mac_info[i] = le32_to_cpu(cap_mac_info[i]);
4173
4174 for (i = 0; i < WMI_MAX_EHTCAP_PHY_SIZE; i++)
4175 cap_band->eht_cap_phy_info[i] = le32_to_cpu(cap_phy_info[i]);
4176
4177 if (band == NL80211_BAND_6GHZ)
4178 cap_band->eht_cap_phy_info[0] |= support_320mhz;
4179
4180 cap_band->eht_mcs_20_only = le32_to_cpu(supp_mcs[0]);
4181 cap_band->eht_mcs_80 = le32_to_cpu(supp_mcs[1]);
4182 if (band != NL80211_BAND_2GHZ) {
4183 cap_band->eht_mcs_160 = le32_to_cpu(supp_mcs[2]);
4184 cap_band->eht_mcs_320 = le32_to_cpu(supp_mcs[3]);
4185 }
4186
4187 cap_band->eht_ppet.numss_m1 = le32_to_cpu(ppet->numss_m1);
4188 cap_band->eht_ppet.ru_bit_mask = le32_to_cpu(ppet->ru_info);
4189 for (i = 0; i < WMI_MAX_NUM_SS; i++)
4190 cap_band->eht_ppet.ppet16_ppet8_ru3_ru0[i] =
4191 le32_to_cpu(ppet->ppet16_ppet8_ru3_ru0[i]);
4192
4193 cap_band->eht_cap_info_internal = le32_to_cpu(cap_info_internal);
4194}
4195
4196static int
4197ath12k_wmi_tlv_mac_phy_caps_ext_parse(struct ath12k_base *ab,
4198 const struct ath12k_wmi_caps_ext_params *caps,
4199 struct ath12k_pdev *pdev)
4200{
4201 struct ath12k_band_cap *cap_band;
4202 u32 bands, support_320mhz;
4203 int i;
4204
4205 if (ab->hw_params->single_pdev_only) {
		if (caps->hw_mode_id == cpu_to_le32(WMI_HOST_HW_MODE_SINGLE)) {
4207 support_320mhz = le32_to_cpu(caps->eht_cap_phy_info_5ghz[0]) &
4208 IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
4209 cap_band = &pdev->cap.band[NL80211_BAND_6GHZ];
4210 cap_band->eht_cap_phy_info[0] |= support_320mhz;
4211 return 0;
4212 }
4213
4214 for (i = 0; i < ab->fw_pdev_count; i++) {
4215 struct ath12k_fw_pdev *fw_pdev = &ab->fw_pdev[i];
4216
4217 if (fw_pdev->pdev_id == le32_to_cpu(caps->pdev_id) &&
4218 fw_pdev->phy_id == le32_to_cpu(caps->phy_id)) {
4219 bands = fw_pdev->supported_bands;
4220 break;
4221 }
4222 }
4223
4224 if (i == ab->fw_pdev_count)
4225 return -EINVAL;
4226 } else {
4227 bands = pdev->cap.supported_bands;
4228 }
4229
4230 if (bands & WMI_HOST_WLAN_2G_CAP) {
4231 ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_2GHZ,
4232 caps->eht_cap_mac_info_2ghz,
4233 caps->eht_cap_phy_info_2ghz,
4234 caps->eht_supp_mcs_ext_2ghz,
4235 &caps->eht_ppet_2ghz,
4236 caps->eht_cap_info_internal);
4237 }
4238
4239 if (bands & WMI_HOST_WLAN_5G_CAP) {
4240 ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_5GHZ,
4241 caps->eht_cap_mac_info_5ghz,
4242 caps->eht_cap_phy_info_5ghz,
4243 caps->eht_supp_mcs_ext_5ghz,
4244 &caps->eht_ppet_5ghz,
4245 caps->eht_cap_info_internal);
4246
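		/* The TLV has no separate 6 GHz members; firmware
		 * reports 6 GHz EHT capabilities through the 5 GHz
		 * fields, so feed the same data to the 6 GHz band.
		 */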
4247 ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_6GHZ,
4248 caps->eht_cap_mac_info_5ghz,
4249 caps->eht_cap_phy_info_5ghz,
4250 caps->eht_supp_mcs_ext_5ghz,
4251 &caps->eht_ppet_5ghz,
4252 caps->eht_cap_info_internal);
4253 }
4254
4255 return 0;
4256}
4257
4258static int ath12k_wmi_tlv_mac_phy_caps_ext(struct ath12k_base *ab, u16 tag,
4259 u16 len, const void *ptr,
4260 void *data)
4261{
4262 const struct ath12k_wmi_caps_ext_params *caps = ptr;
4263 int i = 0, ret;
4264
4265 if (tag != WMI_TAG_MAC_PHY_CAPABILITIES_EXT)
4266 return -EPROTO;
4267
4268 if (ab->hw_params->single_pdev_only) {
		if (ab->wmi_ab.preferred_hw_mode != le32_to_cpu(caps->hw_mode_id) &&
		    caps->hw_mode_id != cpu_to_le32(WMI_HOST_HW_MODE_SINGLE))
4271 return 0;
4272 } else {
4273 for (i = 0; i < ab->num_radios; i++) {
4274 if (ab->pdevs[i].pdev_id == le32_to_cpu(caps->pdev_id))
4275 break;
4276 }
4277
4278 if (i == ab->num_radios)
4279 return -EINVAL;
4280 }
4281
4282 ret = ath12k_wmi_tlv_mac_phy_caps_ext_parse(ab, caps, &ab->pdevs[i]);
4283 if (ret) {
		ath12k_warn(ab,
			    "failed to parse extended MAC PHY capabilities for pdev %d: %d\n",
			    ab->pdevs[i].pdev_id, ret);
4287 return ret;
4288 }
4289
4290 return 0;
4291}
4292
4293static int ath12k_wmi_svc_rdy_ext2_parse(struct ath12k_base *ab,
4294 u16 tag, u16 len,
4295 const void *ptr, void *data)
4296{
4297 struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
4298 struct ath12k_wmi_svc_rdy_ext2_parse *parse = data;
4299 int ret;
4300
4301 switch (tag) {
4302 case WMI_TAG_SERVICE_READY_EXT2_EVENT:
4303 ret = ath12k_pull_svc_ready_ext2(wmi_handle, ptr,
4304 &parse->arg);
4305 if (ret) {
4306 ath12k_warn(ab,
4307 "failed to extract wmi service ready ext2 parameters: %d\n",
4308 ret);
4309 return ret;
4310 }
4311 break;
4312
4313 case WMI_TAG_ARRAY_STRUCT:
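		/* As in SERVICE_READY_EXT, ARRAY_STRUCT TLVs are
		 * identified purely by order: DMA ring caps, spectral
		 * bin scaling (currently a placeholder), then the
		 * extended mac/phy caps.
		 */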
4314 if (!parse->dma_ring_cap_done) {
4315 ret = ath12k_wmi_dma_ring_caps(ab, len, ptr,
4316 &parse->dma_caps_parse);
4317 if (ret)
4318 return ret;
4319
4320 parse->dma_ring_cap_done = true;
4321 } else if (!parse->spectral_bin_scaling_done) {
4322 /* TODO: This is a place-holder as WMI tag for
4323 * spectral scaling is before
4324 * WMI_TAG_MAC_PHY_CAPABILITIES_EXT
4325 */
4326 parse->spectral_bin_scaling_done = true;
4327 } else if (!parse->mac_phy_caps_ext_done) {
4328 ret = ath12k_wmi_tlv_iter(ab, ptr, len,
4329 ath12k_wmi_tlv_mac_phy_caps_ext,
4330 parse);
4331 if (ret) {
4332 ath12k_warn(ab, "failed to parse extended MAC PHY capabilities WMI TLV: %d\n",
4333 ret);
4334 return ret;
4335 }
4336
4337 parse->mac_phy_caps_ext_done = true;
4338 }
4339 break;
4340 default:
4341 break;
4342 }
4343
4344 return 0;
4345}
4346
4347static int ath12k_service_ready_ext2_event(struct ath12k_base *ab,
4348 struct sk_buff *skb)
4349{
4350 struct ath12k_wmi_svc_rdy_ext2_parse svc_rdy_ext2 = { };
4351 int ret;
4352
4353 ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
4354 ath12k_wmi_svc_rdy_ext2_parse,
4355 &svc_rdy_ext2);
4356 if (ret) {
4357 ath12k_warn(ab, "failed to parse ext2 event tlv %d\n", ret);
4358 goto err;
4359 }
4360
4361 complete(&ab->wmi_ab.service_ready);
4362
4363 return 0;
4364
4365err:
4366 ath12k_wmi_free_dbring_caps(ab);
4367 return ret;
4368}
4369
4370static int ath12k_pull_vdev_start_resp_tlv(struct ath12k_base *ab, struct sk_buff *skb,
4371 struct wmi_vdev_start_resp_event *vdev_rsp)
4372{
4373 const void **tb;
4374 const struct wmi_vdev_start_resp_event *ev;
4375 int ret;
4376
4377 tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
4378 if (IS_ERR(tb)) {
4379 ret = PTR_ERR(tb);
4380 ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
4381 return ret;
4382 }
4383
4384 ev = tb[WMI_TAG_VDEV_START_RESPONSE_EVENT];
4385 if (!ev) {
4386 ath12k_warn(ab, "failed to fetch vdev start resp ev");
4387 kfree(tb);
4388 return -EPROTO;
4389 }
4390
4391 *vdev_rsp = *ev;
4392
4393 kfree(tb);
4394 return 0;
4395}
4396
4397static struct ath12k_reg_rule
4398*create_ext_reg_rules_from_wmi(u32 num_reg_rules,
4399 struct ath12k_wmi_reg_rule_ext_params *wmi_reg_rule)
4400{
4401 struct ath12k_reg_rule *reg_rule_ptr;
4402 u32 count;
4403
4404 reg_rule_ptr = kzalloc((num_reg_rules * sizeof(*reg_rule_ptr)),
4405 GFP_ATOMIC);
4406
4407 if (!reg_rule_ptr)
4408 return NULL;
4409
4410 for (count = 0; count < num_reg_rules; count++) {
4411 reg_rule_ptr[count].start_freq =
4412 le32_get_bits(wmi_reg_rule[count].freq_info,
4413 REG_RULE_START_FREQ);
4414 reg_rule_ptr[count].end_freq =
4415 le32_get_bits(wmi_reg_rule[count].freq_info,
4416 REG_RULE_END_FREQ);
4417 reg_rule_ptr[count].max_bw =
4418 le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
4419 REG_RULE_MAX_BW);
4420 reg_rule_ptr[count].reg_power =
4421 le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
4422 REG_RULE_REG_PWR);
4423 reg_rule_ptr[count].ant_gain =
4424 le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
4425 REG_RULE_ANT_GAIN);
4426 reg_rule_ptr[count].flags =
4427 le32_get_bits(wmi_reg_rule[count].flag_info,
4428 REG_RULE_FLAGS);
4429 reg_rule_ptr[count].psd_flag =
4430 le32_get_bits(wmi_reg_rule[count].psd_power_info,
4431 REG_RULE_PSD_INFO);
4432 reg_rule_ptr[count].psd_eirp =
4433 le32_get_bits(wmi_reg_rule[count].psd_power_info,
4434 REG_RULE_PSD_EIRP);
4435 }
4436
4437 return reg_rule_ptr;
4438}
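
/* Each WMI reg rule packs several fields into single __le32 words, and
 * le32_get_bits() converts to CPU order and extracts the field selected
 * by the given REG_RULE_* mask. A hedged illustration (the GENMASK value
 * is an assumption for the example, not the driver's definition):
 *
 *	u32 start = le32_get_bits(cpu_to_le32(0x17602328), GENMASK(15, 0));
 *	// start == 0x2328 (9000), i.e. the low 16 bits of freq_info
 */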
4439
4440static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
4441 struct sk_buff *skb,
4442 struct ath12k_reg_info *reg_info)
4443{
4444 const void **tb;
4445 const struct wmi_reg_chan_list_cc_ext_event *ev;
4446 struct ath12k_wmi_reg_rule_ext_params *ext_wmi_reg_rule;
4447 u32 num_2g_reg_rules, num_5g_reg_rules;
4448 u32 num_6g_reg_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
4449 u32 num_6g_reg_rules_cl[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
4450 u32 total_reg_rules = 0;
4451 int ret, i, j;
4452
4453 ath12k_dbg(ab, ATH12K_DBG_WMI, "processing regulatory ext channel list\n");
4454
4455 tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
4456 if (IS_ERR(tb)) {
4457 ret = PTR_ERR(tb);
4458 ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
4459 return ret;
4460 }
4461
4462 ev = tb[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT];
4463 if (!ev) {
4464 ath12k_warn(ab, "failed to fetch reg chan list ext update ev\n");
4465 kfree(tb);
4466 return -EPROTO;
4467 }
4468
4469 reg_info->num_2g_reg_rules = le32_to_cpu(ev->num_2g_reg_rules);
4470 reg_info->num_5g_reg_rules = le32_to_cpu(ev->num_5g_reg_rules);
4471 reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP] =
4472 le32_to_cpu(ev->num_6g_reg_rules_ap_lpi);
4473 reg_info->num_6g_reg_rules_ap[WMI_REG_STD_POWER_AP] =
4474 le32_to_cpu(ev->num_6g_reg_rules_ap_sp);
4475 reg_info->num_6g_reg_rules_ap[WMI_REG_VLP_AP] =
4476 le32_to_cpu(ev->num_6g_reg_rules_ap_vlp);
4477
4478 for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
4479 reg_info->num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] =
4480 le32_to_cpu(ev->num_6g_reg_rules_cl_lpi[i]);
4481 reg_info->num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] =
4482 le32_to_cpu(ev->num_6g_reg_rules_cl_sp[i]);
4483 reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] =
4484 le32_to_cpu(ev->num_6g_reg_rules_cl_vlp[i]);
4485 }
4486
4487 num_2g_reg_rules = reg_info->num_2g_reg_rules;
4488 total_reg_rules += num_2g_reg_rules;
4489 num_5g_reg_rules = reg_info->num_5g_reg_rules;
4490 total_reg_rules += num_5g_reg_rules;
4491
4492 if (num_2g_reg_rules > MAX_REG_RULES || num_5g_reg_rules > MAX_REG_RULES) {
4493 ath12k_warn(ab, "Num reg rules for 2G/5G exceeds max limit (num_2g_reg_rules: %d num_5g_reg_rules: %d max_rules: %d)\n",
4494 num_2g_reg_rules, num_5g_reg_rules, MAX_REG_RULES);
4495 kfree(tb);
4496 return -EINVAL;
4497 }
4498
4499 for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
4500 num_6g_reg_rules_ap[i] = reg_info->num_6g_reg_rules_ap[i];
4501
4502 if (num_6g_reg_rules_ap[i] > MAX_6G_REG_RULES) {
4503 ath12k_warn(ab, "Num 6G reg rules for AP mode(%d) exceeds max limit (num_6g_reg_rules_ap: %d, max_rules: %d)\n",
4504 i, num_6g_reg_rules_ap[i], MAX_6G_REG_RULES);
4505 kfree(tb);
4506 return -EINVAL;
4507 }
4508
4509 total_reg_rules += num_6g_reg_rules_ap[i];
4510 }
4511
4512 for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
4513 num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] =
4514 reg_info->num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i];
4515 total_reg_rules += num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i];
4516
4517 num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] =
4518 reg_info->num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i];
4519 total_reg_rules += num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i];
4520
4521 num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] =
4522 reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i];
4523 total_reg_rules += num_6g_reg_rules_cl[WMI_REG_VLP_AP][i];
4524
4525 if (num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] > MAX_6G_REG_RULES ||
4526 num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] > MAX_6G_REG_RULES ||
4527 num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] > MAX_6G_REG_RULES) {
4528 ath12k_warn(ab, "Num 6g client reg rules exceeds max limit, for client(type: %d)\n",
4529 i);
4530 kfree(tb);
4531 return -EINVAL;
4532 }
4533 }
4534
4535 if (!total_reg_rules) {
4536 ath12k_warn(ab, "No reg rules available\n");
4537 kfree(tb);
4538 return -EINVAL;
4539 }
4540
4541 memcpy(reg_info->alpha2, &ev->alpha2, REG_ALPHA2_LEN);
4542
4543 /* FIXME: Currently FW includes 6G reg rule also in 5G rule
4544 * list for country US.
4545 * Having same 6G reg rule in 5G and 6G rules list causes
4546 * intersect check to be true, and same rules will be shown
4547 * multiple times in iw cmd. So added hack below to avoid
4548 * parsing 6G rule from 5G reg rule list, and this can be
4549 * removed later, after FW updates to remove 6G reg rule
4550 * from 5G rules list.
4551 */
4552 if (memcmp(reg_info->alpha2, "US", 2) == 0) {
4553 reg_info->num_5g_reg_rules = REG_US_5G_NUM_REG_RULES;
4554 num_5g_reg_rules = reg_info->num_5g_reg_rules;
4555 }
4556
4557 reg_info->dfs_region = le32_to_cpu(ev->dfs_region);
4558 reg_info->phybitmap = le32_to_cpu(ev->phybitmap);
4559 reg_info->num_phy = le32_to_cpu(ev->num_phy);
4560 reg_info->phy_id = le32_to_cpu(ev->phy_id);
4561 reg_info->ctry_code = le32_to_cpu(ev->country_id);
4562 reg_info->reg_dmn_pair = le32_to_cpu(ev->domain_code);
4563
4564 switch (le32_to_cpu(ev->status_code)) {
4565 case WMI_REG_SET_CC_STATUS_PASS:
4566 reg_info->status_code = REG_SET_CC_STATUS_PASS;
4567 break;
4568 case WMI_REG_CURRENT_ALPHA2_NOT_FOUND:
4569 reg_info->status_code = REG_CURRENT_ALPHA2_NOT_FOUND;
4570 break;
4571 case WMI_REG_INIT_ALPHA2_NOT_FOUND:
4572 reg_info->status_code = REG_INIT_ALPHA2_NOT_FOUND;
4573 break;
4574 case WMI_REG_SET_CC_CHANGE_NOT_ALLOWED:
4575 reg_info->status_code = REG_SET_CC_CHANGE_NOT_ALLOWED;
4576 break;
4577 case WMI_REG_SET_CC_STATUS_NO_MEMORY:
4578 reg_info->status_code = REG_SET_CC_STATUS_NO_MEMORY;
4579 break;
4580 case WMI_REG_SET_CC_STATUS_FAIL:
4581 reg_info->status_code = REG_SET_CC_STATUS_FAIL;
4582 break;
4583 }
4584
4585 reg_info->is_ext_reg_event = true;
4586
4587 reg_info->min_bw_2g = le32_to_cpu(ev->min_bw_2g);
4588 reg_info->max_bw_2g = le32_to_cpu(ev->max_bw_2g);
4589 reg_info->min_bw_5g = le32_to_cpu(ev->min_bw_5g);
4590 reg_info->max_bw_5g = le32_to_cpu(ev->max_bw_5g);
4591 reg_info->min_bw_6g_ap[WMI_REG_INDOOR_AP] = le32_to_cpu(ev->min_bw_6g_ap_lpi);
4592 reg_info->max_bw_6g_ap[WMI_REG_INDOOR_AP] = le32_to_cpu(ev->max_bw_6g_ap_lpi);
4593 reg_info->min_bw_6g_ap[WMI_REG_STD_POWER_AP] = le32_to_cpu(ev->min_bw_6g_ap_sp);
4594 reg_info->max_bw_6g_ap[WMI_REG_STD_POWER_AP] = le32_to_cpu(ev->max_bw_6g_ap_sp);
4595 reg_info->min_bw_6g_ap[WMI_REG_VLP_AP] = le32_to_cpu(ev->min_bw_6g_ap_vlp);
4596 reg_info->max_bw_6g_ap[WMI_REG_VLP_AP] = le32_to_cpu(ev->max_bw_6g_ap_vlp);
4597
4598 for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
4599 reg_info->min_bw_6g_client[WMI_REG_INDOOR_AP][i] =
4600 le32_to_cpu(ev->min_bw_6g_client_lpi[i]);
4601 reg_info->max_bw_6g_client[WMI_REG_INDOOR_AP][i] =
4602 le32_to_cpu(ev->max_bw_6g_client_lpi[i]);
4603 reg_info->min_bw_6g_client[WMI_REG_STD_POWER_AP][i] =
4604 le32_to_cpu(ev->min_bw_6g_client_sp[i]);
4605 reg_info->max_bw_6g_client[WMI_REG_STD_POWER_AP][i] =
4606 le32_to_cpu(ev->max_bw_6g_client_sp[i]);
4607 reg_info->min_bw_6g_client[WMI_REG_VLP_AP][i] =
4608 le32_to_cpu(ev->min_bw_6g_client_vlp[i]);
4609 reg_info->max_bw_6g_client[WMI_REG_VLP_AP][i] =
4610 le32_to_cpu(ev->max_bw_6g_client_vlp[i]);
4611 }
4612
4613 ath12k_dbg(ab, ATH12K_DBG_WMI,
4614 "%s:cc_ext %s dfs %d BW: min_2g %d max_2g %d min_5g %d max_5g %d phy_bitmap 0x%x",
4615 __func__, reg_info->alpha2, reg_info->dfs_region,
4616 reg_info->min_bw_2g, reg_info->max_bw_2g,
4617 reg_info->min_bw_5g, reg_info->max_bw_5g,
4618 reg_info->phybitmap);
4619
4620 ath12k_dbg(ab, ATH12K_DBG_WMI,
4621 "num_2g_reg_rules %d num_5g_reg_rules %d",
4622 num_2g_reg_rules, num_5g_reg_rules);
4623
4624 ath12k_dbg(ab, ATH12K_DBG_WMI,
4625 "num_6g_reg_rules_ap_lpi: %d num_6g_reg_rules_ap_sp: %d num_6g_reg_rules_ap_vlp: %d",
4626 num_6g_reg_rules_ap[WMI_REG_INDOOR_AP],
4627 num_6g_reg_rules_ap[WMI_REG_STD_POWER_AP],
4628 num_6g_reg_rules_ap[WMI_REG_VLP_AP]);
4629
4630 ath12k_dbg(ab, ATH12K_DBG_WMI,
4631 "6g Regular client: num_6g_reg_rules_lpi: %d num_6g_reg_rules_sp: %d num_6g_reg_rules_vlp: %d",
4632 num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][WMI_REG_DEFAULT_CLIENT],
4633 num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][WMI_REG_DEFAULT_CLIENT],
4634 num_6g_reg_rules_cl[WMI_REG_VLP_AP][WMI_REG_DEFAULT_CLIENT]);
4635
4636 ath12k_dbg(ab, ATH12K_DBG_WMI,
4637 "6g Subordinate client: num_6g_reg_rules_lpi: %d num_6g_reg_rules_sp: %d num_6g_reg_rules_vlp: %d",
4638 num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][WMI_REG_SUBORDINATE_CLIENT],
4639 num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][WMI_REG_SUBORDINATE_CLIENT],
4640 num_6g_reg_rules_cl[WMI_REG_VLP_AP][WMI_REG_SUBORDINATE_CLIENT]);
4641
4642 ext_wmi_reg_rule =
4643 (struct ath12k_wmi_reg_rule_ext_params *)((u8 *)ev
4644 + sizeof(*ev)
4645 + sizeof(struct wmi_tlv));
4646
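	/* The rules follow the fixed event layout: 2 GHz, then 5 GHz,
	 * then 6 GHz AP rules per power type (LPI, SP, VLP), then 6 GHz
	 * client rules per AP power type and client type; keep
	 * advancing ext_wmi_reg_rule by the count just consumed.
	 */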
4647 if (num_2g_reg_rules) {
4648 reg_info->reg_rules_2g_ptr =
4649 create_ext_reg_rules_from_wmi(num_2g_reg_rules,
4650 ext_wmi_reg_rule);
4651
4652 if (!reg_info->reg_rules_2g_ptr) {
4653 kfree(tb);
4654 ath12k_warn(ab, "Unable to Allocate memory for 2g rules\n");
4655 return -ENOMEM;
4656 }
4657 }
4658
4659 if (num_5g_reg_rules) {
4660 ext_wmi_reg_rule += num_2g_reg_rules;
4661 reg_info->reg_rules_5g_ptr =
4662 create_ext_reg_rules_from_wmi(num_5g_reg_rules,
4663 ext_wmi_reg_rule);
4664
4665 if (!reg_info->reg_rules_5g_ptr) {
4666 kfree(tb);
4667 ath12k_warn(ab, "Unable to Allocate memory for 5g rules\n");
4668 return -ENOMEM;
4669 }
4670 }
4671
4672 ext_wmi_reg_rule += num_5g_reg_rules;
4673
4674 for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
4675 reg_info->reg_rules_6g_ap_ptr[i] =
4676 create_ext_reg_rules_from_wmi(num_6g_reg_rules_ap[i],
4677 ext_wmi_reg_rule);
4678
4679 if (!reg_info->reg_rules_6g_ap_ptr[i]) {
4680 kfree(tb);
4681 ath12k_warn(ab, "Unable to Allocate memory for 6g ap rules\n");
4682 return -ENOMEM;
4683 }
4684
4685 ext_wmi_reg_rule += num_6g_reg_rules_ap[i];
4686 }
4687
4688 for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++) {
4689 for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
4690 reg_info->reg_rules_6g_client_ptr[j][i] =
4691 create_ext_reg_rules_from_wmi(num_6g_reg_rules_cl[j][i],
4692 ext_wmi_reg_rule);
4693
4694 if (!reg_info->reg_rules_6g_client_ptr[j][i]) {
4695 kfree(tb);
4696 ath12k_warn(ab, "Unable to Allocate memory for 6g client rules\n");
4697 return -ENOMEM;
4698 }
4699
4700 ext_wmi_reg_rule += num_6g_reg_rules_cl[j][i];
4701 }
4702 }
4703
4704 reg_info->client_type = le32_to_cpu(ev->client_type);
4705 reg_info->rnr_tpe_usable = ev->rnr_tpe_usable;
4706 reg_info->unspecified_ap_usable = ev->unspecified_ap_usable;
4707 reg_info->domain_code_6g_ap[WMI_REG_INDOOR_AP] =
4708 le32_to_cpu(ev->domain_code_6g_ap_lpi);
4709 reg_info->domain_code_6g_ap[WMI_REG_STD_POWER_AP] =
4710 le32_to_cpu(ev->domain_code_6g_ap_sp);
4711 reg_info->domain_code_6g_ap[WMI_REG_VLP_AP] =
4712 le32_to_cpu(ev->domain_code_6g_ap_vlp);
4713
4714 for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
4715 reg_info->domain_code_6g_client[WMI_REG_INDOOR_AP][i] =
4716 le32_to_cpu(ev->domain_code_6g_client_lpi[i]);
4717 reg_info->domain_code_6g_client[WMI_REG_STD_POWER_AP][i] =
4718 le32_to_cpu(ev->domain_code_6g_client_sp[i]);
4719 reg_info->domain_code_6g_client[WMI_REG_VLP_AP][i] =
4720 le32_to_cpu(ev->domain_code_6g_client_vlp[i]);
4721 }
4722
4723 reg_info->domain_code_6g_super_id = le32_to_cpu(ev->domain_code_6g_super_id);
4724
4725 ath12k_dbg(ab, ATH12K_DBG_WMI, "6g client_type: %d domain_code_6g_super_id: %d",
4726 reg_info->client_type, reg_info->domain_code_6g_super_id);
4727
4728 ath12k_dbg(ab, ATH12K_DBG_WMI, "processed regulatory ext channel list\n");
4729
4730 kfree(tb);
4731 return 0;
4732}
4733
4734static int ath12k_pull_peer_del_resp_ev(struct ath12k_base *ab, struct sk_buff *skb,
4735 struct wmi_peer_delete_resp_event *peer_del_resp)
4736{
4737 const void **tb;
4738 const struct wmi_peer_delete_resp_event *ev;
4739 int ret;
4740
4741 tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
4742 if (IS_ERR(tb)) {
4743 ret = PTR_ERR(tb);
4744 ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
4745 return ret;
4746 }
4747
4748 ev = tb[WMI_TAG_PEER_DELETE_RESP_EVENT];
4749 if (!ev) {
4750 ath12k_warn(ab, "failed to fetch peer delete resp ev");
4751 kfree(tb);
4752 return -EPROTO;
4753 }
4754
4755 memset(peer_del_resp, 0, sizeof(*peer_del_resp));
4756
4757 peer_del_resp->vdev_id = ev->vdev_id;
4758 ether_addr_copy(peer_del_resp->peer_macaddr.addr,
4759 ev->peer_macaddr.addr);
4760
4761 kfree(tb);
4762 return 0;
4763}
4764
4765static int ath12k_pull_vdev_del_resp_ev(struct ath12k_base *ab,
4766 struct sk_buff *skb,
4767 u32 *vdev_id)
4768{
4769 const void **tb;
4770 const struct wmi_vdev_delete_resp_event *ev;
4771 int ret;
4772
4773 tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
4774 if (IS_ERR(tb)) {
4775 ret = PTR_ERR(tb);
4776 ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
4777 return ret;
4778 }
4779
4780 ev = tb[WMI_TAG_VDEV_DELETE_RESP_EVENT];
4781 if (!ev) {
4782 ath12k_warn(ab, "failed to fetch vdev delete resp ev");
4783 kfree(tb);
4784 return -EPROTO;
4785 }
4786
4787 *vdev_id = le32_to_cpu(ev->vdev_id);
4788
4789 kfree(tb);
4790 return 0;
4791}
4792
4793static int ath12k_pull_bcn_tx_status_ev(struct ath12k_base *ab, void *evt_buf,
4794 u32 len, u32 *vdev_id,
4795 u32 *tx_status)
4796{
4797 const void **tb;
4798 const struct wmi_bcn_tx_status_event *ev;
4799 int ret;
4800
4801 tb = ath12k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
4802 if (IS_ERR(tb)) {
4803 ret = PTR_ERR(tb);
4804 ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
4805 return ret;
4806 }
4807
4808 ev = tb[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT];
4809 if (!ev) {
4810 ath12k_warn(ab, "failed to fetch bcn tx status ev");
4811 kfree(tb);
4812 return -EPROTO;
4813 }
4814
4815 *vdev_id = le32_to_cpu(ev->vdev_id);
4816 *tx_status = le32_to_cpu(ev->tx_status);
4817
4818 kfree(tb);
4819 return 0;
4820}
4821
4822static int ath12k_pull_vdev_stopped_param_tlv(struct ath12k_base *ab, struct sk_buff *skb,
4823 u32 *vdev_id)
4824{
4825 const void **tb;
4826 const struct wmi_vdev_stopped_event *ev;
4827 int ret;
4828
4829 tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
4830 if (IS_ERR(tb)) {
4831 ret = PTR_ERR(tb);
4832 ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
4833 return ret;
4834 }
4835
4836 ev = tb[WMI_TAG_VDEV_STOPPED_EVENT];
4837 if (!ev) {
4838 ath12k_warn(ab, "failed to fetch vdev stop ev");
4839 kfree(tb);
4840 return -EPROTO;
4841 }
4842
4843 *vdev_id = le32_to_cpu(ev->vdev_id);
4844
4845 kfree(tb);
4846 return 0;
4847}
4848
4849static int ath12k_wmi_tlv_mgmt_rx_parse(struct ath12k_base *ab,
4850 u16 tag, u16 len,
4851 const void *ptr, void *data)
4852{
4853 struct wmi_tlv_mgmt_rx_parse *parse = data;
4854
4855 switch (tag) {
4856 case WMI_TAG_MGMT_RX_HDR:
4857 parse->fixed = ptr;
4858 break;
4859 case WMI_TAG_ARRAY_BYTE:
4860 if (!parse->frame_buf_done) {
4861 parse->frame_buf = ptr;
4862 parse->frame_buf_done = true;
4863 }
4864 break;
4865 }
4866 return 0;
4867}
4868
4869static int ath12k_pull_mgmt_rx_params_tlv(struct ath12k_base *ab,
4870 struct sk_buff *skb,
4871 struct ath12k_wmi_mgmt_rx_arg *hdr)
4872{
4873 struct wmi_tlv_mgmt_rx_parse parse = { };
4874 const struct ath12k_wmi_mgmt_rx_params *ev;
4875 const u8 *frame;
4876 int i, ret;
4877
4878 ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
4879 ath12k_wmi_tlv_mgmt_rx_parse,
4880 &parse);
4881 if (ret) {
4882 ath12k_warn(ab, "failed to parse mgmt rx tlv %d\n", ret);
4883 return ret;
4884 }
4885
4886 ev = parse.fixed;
4887 frame = parse.frame_buf;
4888
4889 if (!ev || !frame) {
4890 ath12k_warn(ab, "failed to fetch mgmt rx hdr");
4891 return -EPROTO;
4892 }
4893
4894 hdr->pdev_id = le32_to_cpu(ev->pdev_id);
4895 hdr->chan_freq = le32_to_cpu(ev->chan_freq);
4896 hdr->channel = le32_to_cpu(ev->channel);
4897 hdr->snr = le32_to_cpu(ev->snr);
4898 hdr->rate = le32_to_cpu(ev->rate);
4899 hdr->phy_mode = le32_to_cpu(ev->phy_mode);
4900 hdr->buf_len = le32_to_cpu(ev->buf_len);
4901 hdr->status = le32_to_cpu(ev->status);
4902 hdr->flags = le32_to_cpu(ev->flags);
4903 hdr->rssi = a_sle32_to_cpu(ev->rssi);
4904 hdr->tsf_delta = le32_to_cpu(ev->tsf_delta);
4905
4906 for (i = 0; i < ATH_MAX_ANTENNA; i++)
4907 hdr->rssi_ctl[i] = le32_to_cpu(ev->rssi_ctl[i]);
4908
4909 if (skb->len < (frame - skb->data) + hdr->buf_len) {
4910 ath12k_warn(ab, "invalid length in mgmt rx hdr ev");
4911 return -EPROTO;
4912 }
4913
	/* Shift the sk_buff so its data pointer lands on `frame` and its
	 * length covers exactly the reported payload: empty the skb,
	 * move the tail to the frame start, pull data up to it, then
	 * extend by buf_len.
	 */
4915 skb_trim(skb, 0);
4916 skb_put(skb, frame - skb->data);
4917 skb_pull(skb, frame - skb->data);
4918 skb_put(skb, hdr->buf_len);
4919
4920 return 0;
4921}
4922
4923static int wmi_process_mgmt_tx_comp(struct ath12k *ar, u32 desc_id,
4924 u32 status)
4925{
4926 struct sk_buff *msdu;
4927 struct ieee80211_tx_info *info;
4928 struct ath12k_skb_cb *skb_cb;
4929 int num_mgmt;
4930
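	/* desc_id keys the frame into the per-radio tx idr; look it up
	 * and remove it under the lock so a duplicate completion cannot
	 * free the same msdu twice.
	 */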
4931 spin_lock_bh(&ar->txmgmt_idr_lock);
4932 msdu = idr_find(&ar->txmgmt_idr, desc_id);
4933
4934 if (!msdu) {
4935 ath12k_warn(ar->ab, "received mgmt tx compl for invalid msdu_id: %d\n",
4936 desc_id);
4937 spin_unlock_bh(&ar->txmgmt_idr_lock);
4938 return -ENOENT;
4939 }
4940
4941 idr_remove(&ar->txmgmt_idr, desc_id);
4942 spin_unlock_bh(&ar->txmgmt_idr_lock);
4943
4944 skb_cb = ATH12K_SKB_CB(msdu);
4945 dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
4946
4947 info = IEEE80211_SKB_CB(msdu);
4948 if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status)
4949 info->flags |= IEEE80211_TX_STAT_ACK;
4950
4951 ieee80211_tx_status_irqsafe(ar->hw, msdu);
4952
4953 num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
4954
	/* WARN if we receive this event without any preceding mgmt tx */
	WARN_ON_ONCE(num_mgmt < 0);
4958
4959 if (!num_mgmt)
4960 wake_up(&ar->txmgmt_empty_waitq);
4961
4962 return 0;
4963}
4964
4965static int ath12k_pull_mgmt_tx_compl_param_tlv(struct ath12k_base *ab,
4966 struct sk_buff *skb,
4967 struct wmi_mgmt_tx_compl_event *param)
4968{
4969 const void **tb;
4970 const struct wmi_mgmt_tx_compl_event *ev;
4971 int ret;
4972
4973 tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
4974 if (IS_ERR(tb)) {
4975 ret = PTR_ERR(tb);
4976 ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
4977 return ret;
4978 }
4979
4980 ev = tb[WMI_TAG_MGMT_TX_COMPL_EVENT];
4981 if (!ev) {
4982 ath12k_warn(ab, "failed to fetch mgmt tx compl ev");
4983 kfree(tb);
4984 return -EPROTO;
4985 }
4986
4987 param->pdev_id = ev->pdev_id;
4988 param->desc_id = ev->desc_id;
4989 param->status = ev->status;
4990
4991 kfree(tb);
4992 return 0;
4993}
4994
4995static void ath12k_wmi_event_scan_started(struct ath12k *ar)
4996{
4997 lockdep_assert_held(&ar->data_lock);
4998
4999 switch (ar->scan.state) {
5000 case ATH12K_SCAN_IDLE:
5001 case ATH12K_SCAN_RUNNING:
5002 case ATH12K_SCAN_ABORTING:
5003 ath12k_warn(ar->ab, "received scan started event in an invalid scan state: %s (%d)\n",
5004 ath12k_scan_state_str(ar->scan.state),
5005 ar->scan.state);
5006 break;
5007 case ATH12K_SCAN_STARTING:
5008 ar->scan.state = ATH12K_SCAN_RUNNING;
5009 complete(&ar->scan.started);
5010 break;
5011 }
5012}
5013
5014static void ath12k_wmi_event_scan_start_failed(struct ath12k *ar)
5015{
5016 lockdep_assert_held(&ar->data_lock);
5017
5018 switch (ar->scan.state) {
5019 case ATH12K_SCAN_IDLE:
5020 case ATH12K_SCAN_RUNNING:
5021 case ATH12K_SCAN_ABORTING:
5022 ath12k_warn(ar->ab, "received scan start failed event in an invalid scan state: %s (%d)\n",
5023 ath12k_scan_state_str(ar->scan.state),
5024 ar->scan.state);
5025 break;
5026 case ATH12K_SCAN_STARTING:
5027 complete(&ar->scan.started);
5028 __ath12k_mac_scan_finish(ar);
5029 break;
5030 }
5031}
5032
5033static void ath12k_wmi_event_scan_completed(struct ath12k *ar)
5034{
5035 lockdep_assert_held(&ar->data_lock);
5036
5037 switch (ar->scan.state) {
5038 case ATH12K_SCAN_IDLE:
5039 case ATH12K_SCAN_STARTING:
5040 /* One suspected reason scan can be completed while starting is
5041 * if firmware fails to deliver all scan events to the host,
5042 * e.g. when transport pipe is full. This has been observed
5043 * with spectral scan phyerr events starving wmi transport
5044 * pipe. In such case the "scan completed" event should be (and
5045 * is) ignored by the host as it may be just firmware's scan
5046 * state machine recovering.
5047 */
5048 ath12k_warn(ar->ab, "received scan completed event in an invalid scan state: %s (%d)\n",
5049 ath12k_scan_state_str(ar->scan.state),
5050 ar->scan.state);
5051 break;
5052 case ATH12K_SCAN_RUNNING:
5053 case ATH12K_SCAN_ABORTING:
5054 __ath12k_mac_scan_finish(ar);
5055 break;
5056 }
5057}
5058
5059static void ath12k_wmi_event_scan_bss_chan(struct ath12k *ar)
5060{
5061 lockdep_assert_held(&ar->data_lock);
5062
5063 switch (ar->scan.state) {
5064 case ATH12K_SCAN_IDLE:
5065 case ATH12K_SCAN_STARTING:
5066 ath12k_warn(ar->ab, "received scan bss chan event in an invalid scan state: %s (%d)\n",
5067 ath12k_scan_state_str(ar->scan.state),
5068 ar->scan.state);
5069 break;
5070 case ATH12K_SCAN_RUNNING:
5071 case ATH12K_SCAN_ABORTING:
5072 ar->scan_channel = NULL;
5073 break;
5074 }
5075}
5076
5077static void ath12k_wmi_event_scan_foreign_chan(struct ath12k *ar, u32 freq)
5078{
5079 lockdep_assert_held(&ar->data_lock);
5080
5081 switch (ar->scan.state) {
5082 case ATH12K_SCAN_IDLE:
5083 case ATH12K_SCAN_STARTING:
5084 ath12k_warn(ar->ab, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
5085 ath12k_scan_state_str(ar->scan.state),
5086 ar->scan.state);
5087 break;
5088 case ATH12K_SCAN_RUNNING:
5089 case ATH12K_SCAN_ABORTING:
5090 ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
5091 break;
5092 }
5093}
5094
5095static const char *
5096ath12k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
5097 enum wmi_scan_completion_reason reason)
5098{
5099 switch (type) {
5100 case WMI_SCAN_EVENT_STARTED:
5101 return "started";
5102 case WMI_SCAN_EVENT_COMPLETED:
5103 switch (reason) {
5104 case WMI_SCAN_REASON_COMPLETED:
5105 return "completed";
5106 case WMI_SCAN_REASON_CANCELLED:
5107 return "completed [cancelled]";
5108 case WMI_SCAN_REASON_PREEMPTED:
5109 return "completed [preempted]";
5110 case WMI_SCAN_REASON_TIMEDOUT:
5111 return "completed [timedout]";
5112 case WMI_SCAN_REASON_INTERNAL_FAILURE:
5113 return "completed [internal err]";
5114 case WMI_SCAN_REASON_MAX:
5115 break;
5116 }
5117 return "completed [unknown]";
5118 case WMI_SCAN_EVENT_BSS_CHANNEL:
5119 return "bss channel";
5120 case WMI_SCAN_EVENT_FOREIGN_CHAN:
5121 return "foreign channel";
5122 case WMI_SCAN_EVENT_DEQUEUED:
5123 return "dequeued";
5124 case WMI_SCAN_EVENT_PREEMPTED:
5125 return "preempted";
5126 case WMI_SCAN_EVENT_START_FAILED:
5127 return "start failed";
5128 case WMI_SCAN_EVENT_RESTARTED:
5129 return "restarted";
5130 case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
5131 return "foreign channel exit";
5132 default:
5133 return "unknown";
5134 }
5135}
5136
static int ath12k_pull_scan_ev(struct ath12k_base *ab, struct sk_buff *skb,
			       struct wmi_scan_event *scan_evt_param)
{
	const void **tb;
	const struct wmi_scan_event *ev;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_SCAN_EVENT];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch scan ev");
		kfree(tb);
		return -EPROTO;
	}

	scan_evt_param->event_type = ev->event_type;
	scan_evt_param->reason = ev->reason;
	scan_evt_param->channel_freq = ev->channel_freq;
	scan_evt_param->scan_req_id = ev->scan_req_id;
	scan_evt_param->scan_id = ev->scan_id;
	scan_evt_param->vdev_id = ev->vdev_id;
	scan_evt_param->tsf_timestamp = ev->tsf_timestamp;

	kfree(tb);
	return 0;
}

static int ath12k_pull_peer_sta_kickout_ev(struct ath12k_base *ab, struct sk_buff *skb,
					   struct wmi_peer_sta_kickout_arg *arg)
{
	const void **tb;
	const struct wmi_peer_sta_kickout_event *ev;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_PEER_STA_KICKOUT_EVENT];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch peer sta kickout ev");
		kfree(tb);
		return -EPROTO;
	}

	arg->mac_addr = ev->peer_macaddr.addr;

	kfree(tb);
	return 0;
}

static int ath12k_pull_roam_ev(struct ath12k_base *ab, struct sk_buff *skb,
			       struct wmi_roam_event *roam_ev)
{
	const void **tb;
	const struct wmi_roam_event *ev;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_ROAM_EVENT];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch roam ev");
		kfree(tb);
		return -EPROTO;
	}

	roam_ev->vdev_id = ev->vdev_id;
	roam_ev->reason = ev->reason;
	roam_ev->rssi = ev->rssi;

	kfree(tb);
	return 0;
}

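/* Map a channel center frequency to a flat index across all advertised
 * bands, in band order. For example (hypothetical layout), if the 2 GHz
 * band exposes 14 channels, the first 5 GHz channel maps to index 14.
 * An unknown frequency yields an index one past the last channel, which
 * callers reject with their ARRAY_SIZE() bounds check.
 */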
static int freq_to_idx(struct ath12k *ar, int freq)
{
	struct ieee80211_supported_band *sband;
	int band, ch, idx = 0;

	for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
		if (!ar->mac.sbands[band].channels)
			continue;

		sband = ar->hw->wiphy->bands[band];
		if (!sband)
			continue;

		for (ch = 0; ch < sband->n_channels; ch++, idx++)
			if (sband->channels[ch].center_freq == freq)
				goto exit;
	}

exit:
	return idx;
}

static int ath12k_pull_chan_info_ev(struct ath12k_base *ab, u8 *evt_buf,
				    u32 len, struct wmi_chan_info_event *ch_info_ev)
{
	const void **tb;
	const struct wmi_chan_info_event *ev;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_CHAN_INFO_EVENT];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch chan info ev");
		kfree(tb);
		return -EPROTO;
	}

	ch_info_ev->err_code = ev->err_code;
	ch_info_ev->freq = ev->freq;
	ch_info_ev->cmd_flags = ev->cmd_flags;
	ch_info_ev->noise_floor = ev->noise_floor;
	ch_info_ev->rx_clear_count = ev->rx_clear_count;
	ch_info_ev->cycle_count = ev->cycle_count;
	ch_info_ev->chan_tx_pwr_range = ev->chan_tx_pwr_range;
	ch_info_ev->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
	ch_info_ev->rx_frame_count = ev->rx_frame_count;
	ch_info_ev->tx_frame_cnt = ev->tx_frame_cnt;
	ch_info_ev->mac_clk_mhz = ev->mac_clk_mhz;
	ch_info_ev->vdev_id = ev->vdev_id;

	kfree(tb);
	return 0;
}

static int
ath12k_pull_pdev_bss_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb,
				  struct wmi_pdev_bss_chan_info_event *bss_ch_info_ev)
{
	const void **tb;
	const struct wmi_pdev_bss_chan_info_event *ev;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch pdev bss chan info ev");
		kfree(tb);
		return -EPROTO;
	}

	bss_ch_info_ev->pdev_id = ev->pdev_id;
	bss_ch_info_ev->freq = ev->freq;
	bss_ch_info_ev->noise_floor = ev->noise_floor;
	bss_ch_info_ev->rx_clear_count_low = ev->rx_clear_count_low;
	bss_ch_info_ev->rx_clear_count_high = ev->rx_clear_count_high;
	bss_ch_info_ev->cycle_count_low = ev->cycle_count_low;
	bss_ch_info_ev->cycle_count_high = ev->cycle_count_high;
	bss_ch_info_ev->tx_cycle_count_low = ev->tx_cycle_count_low;
	bss_ch_info_ev->tx_cycle_count_high = ev->tx_cycle_count_high;
	bss_ch_info_ev->rx_cycle_count_low = ev->rx_cycle_count_low;
	bss_ch_info_ev->rx_cycle_count_high = ev->rx_cycle_count_high;
	bss_ch_info_ev->rx_bss_cycle_count_low = ev->rx_bss_cycle_count_low;
	bss_ch_info_ev->rx_bss_cycle_count_high = ev->rx_bss_cycle_count_high;

	kfree(tb);
	return 0;
}

static int
ath12k_pull_vdev_install_key_compl_ev(struct ath12k_base *ab, struct sk_buff *skb,
				      struct wmi_vdev_install_key_complete_arg *arg)
{
	const void **tb;
	const struct wmi_vdev_install_key_compl_event *ev;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch vdev install key compl ev");
		kfree(tb);
		return -EPROTO;
	}

	arg->vdev_id = le32_to_cpu(ev->vdev_id);
	arg->macaddr = ev->peer_macaddr.addr;
	arg->key_idx = le32_to_cpu(ev->key_idx);
	arg->key_flags = le32_to_cpu(ev->key_flags);
	arg->status = le32_to_cpu(ev->status);

	kfree(tb);
	return 0;
}

static int ath12k_pull_peer_assoc_conf_ev(struct ath12k_base *ab, struct sk_buff *skb,
					  struct wmi_peer_assoc_conf_arg *peer_assoc_conf)
{
	const void **tb;
	const struct wmi_peer_assoc_conf_event *ev;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_PEER_ASSOC_CONF_EVENT];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch peer assoc conf ev");
		kfree(tb);
		return -EPROTO;
	}

	peer_assoc_conf->vdev_id = le32_to_cpu(ev->vdev_id);
	peer_assoc_conf->macaddr = ev->peer_macaddr.addr;

	kfree(tb);
	return 0;
}

static int
ath12k_pull_pdev_temp_ev(struct ath12k_base *ab, u8 *evt_buf,
			 u32 len, struct wmi_pdev_temperature_event *ev)
{
	const void **tb;
	const struct wmi_pdev_temperature_event *parsed_ev;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	parsed_ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
	if (!parsed_ev) {
		ath12k_warn(ab, "failed to fetch pdev temp ev");
		kfree(tb);
		return -EPROTO;
	}

	/* Copy the event out before freeing the TLV table. Previously only
	 * the local pointer was reassigned, so the caller's structure was
	 * never filled in.
	 */
	*ev = *parsed_ev;

	kfree(tb);
	return 0;
}

static void ath12k_wmi_op_ep_tx_credits(struct ath12k_base *ab)
{
	/* try to send pending beacons first. they take priority */
	wake_up(&ab->wmi_ab.tx_credits_wq);
}

static void ath12k_wmi_htc_tx_complete(struct ath12k_base *ab,
				       struct sk_buff *skb)
{
	dev_kfree_skb(skb);
}

static bool ath12k_reg_is_world_alpha(char *alpha)
{
	if (alpha[0] == '0' && alpha[1] == '0')
		return true;

	if (alpha[0] == 'n' && alpha[1] == 'a')
		return true;

	return false;
}

static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct ath12k_reg_info *reg_info = NULL;
	struct ieee80211_regdomain *regd = NULL;
	bool intersect = false;
	int ret = 0, pdev_idx, i, j;
	struct ath12k *ar;

	reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC);
	if (!reg_info) {
		ret = -ENOMEM;
		goto fallback;
	}

	ret = ath12k_pull_reg_chan_list_ext_update_ev(ab, skb, reg_info);
	if (ret) {
		ath12k_warn(ab, "failed to extract regulatory info from received event\n");
		goto fallback;
	}

	if (reg_info->status_code != REG_SET_CC_STATUS_PASS) {
		/* In case of failure to set the requested country,
		 * firmware retains the current regd. Log a warning
		 * and return from here.
		 */
		ath12k_warn(ab, "Failed to set the requested Country regulatory setting\n");
		goto mem_free;
	}

	pdev_idx = reg_info->phy_id;

	if (pdev_idx >= ab->num_radios) {
		/* Process the event for phy0 only if single_pdev_only
		 * is true. If pdev_idx is valid but not 0, discard the
		 * event. Otherwise, fall back.
		 */
		if (ab->hw_params->single_pdev_only &&
		    pdev_idx < ab->hw_params->num_rxmda_per_pdev)
			goto mem_free;
		else
			goto fallback;
	}

	/* Avoid multiple overwrites to the default regd during core
	 * stop-start after mac registration.
	 */
	if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] &&
	    !memcmp(ab->default_regd[pdev_idx]->alpha2,
		    reg_info->alpha2, 2))
		goto mem_free;

	/* Intersect new rules with the default regd if a new country setting
	 * was requested, i.e. a default regd was already set during
	 * initialization and the regd coming from this event has valid
	 * country info.
	 */
	if (ab->default_regd[pdev_idx] &&
	    !ath12k_reg_is_world_alpha((char *)
		ab->default_regd[pdev_idx]->alpha2) &&
	    !ath12k_reg_is_world_alpha((char *)reg_info->alpha2))
		intersect = true;
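	/* For example (hypothetical codes): a default regd built for "US"
	 * at init time combined with an event carrying "DE" intersects the
	 * two rule sets, while a world code ("00" or "na") on either side
	 * skips the intersection.
	 */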

	regd = ath12k_reg_build_regd(ab, reg_info, intersect);
	if (!regd) {
		ath12k_warn(ab, "failed to build regd from reg_info\n");
		goto fallback;
	}

	spin_lock(&ab->base_lock);
	if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
		/* Once mac is registered, ar is valid and all CC events from
		 * firmware are currently considered to be received due to
		 * user requests.
		 * Free the previously built regd before assigning the newly
		 * generated regd to ar. NULL pointer handling is taken care
		 * of by kfree itself.
		 */
		ar = ab->pdevs[pdev_idx].ar;
		kfree(ab->new_regd[pdev_idx]);
		ab->new_regd[pdev_idx] = regd;
		queue_work(ab->workqueue, &ar->regd_update_work);
	} else {
		/* Multiple events for the same *ar are not expected. But we
		 * can still clear any previously stored default_regd if we
		 * are receiving this event for the same radio by mistake.
		 * NULL pointer handling is taken care of by kfree itself.
		 */
		kfree(ab->default_regd[pdev_idx]);
		/* This regd would be applied during mac registration */
		ab->default_regd[pdev_idx] = regd;
	}
	ab->dfs_region = reg_info->dfs_region;
	spin_unlock(&ab->base_lock);

	goto mem_free;

fallback:
	/* Fall back to the older regd (by sending the previous country
	 * setting again) if firmware has succeeded and we failed to process
	 * here. The regdomain should be uniform across driver and firmware.
	 * Since the firmware has processed the command and sent a success
	 * status, we expect this function to succeed as well. If it doesn't,
	 * CTRY needs to be reverted at the firmware and the old
	 * SCAN_CHAN_LIST cmd needs to be sent.
	 */
	/* TODO: This is rare, but still should also be handled */
	WARN_ON(1);
mem_free:
	if (reg_info) {
		kfree(reg_info->reg_rules_2g_ptr);
		kfree(reg_info->reg_rules_5g_ptr);
		if (reg_info->is_ext_reg_event) {
			for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++)
				kfree(reg_info->reg_rules_6g_ap_ptr[i]);

			for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++)
				for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++)
					kfree(reg_info->reg_rules_6g_client_ptr[j][i]);
		}
		kfree(reg_info);
	}
	return ret;
}

static int ath12k_wmi_rdy_parse(struct ath12k_base *ab, u16 tag, u16 len,
				const void *ptr, void *data)
{
	struct ath12k_wmi_rdy_parse *rdy_parse = data;
	struct wmi_ready_event fixed_param;
	struct ath12k_wmi_mac_addr_params *addr_list;
	struct ath12k_pdev *pdev;
	u32 num_mac_addr;
	int i;

	switch (tag) {
	case WMI_TAG_READY_EVENT:
		memset(&fixed_param, 0, sizeof(fixed_param));
		memcpy(&fixed_param, (struct wmi_ready_event *)ptr,
		       min_t(u16, sizeof(fixed_param), len));
		ab->wlan_init_status = le32_to_cpu(fixed_param.ready_event_min.status);
		rdy_parse->num_extra_mac_addr =
			le32_to_cpu(fixed_param.ready_event_min.num_extra_mac_addr);

		ether_addr_copy(ab->mac_addr,
				fixed_param.ready_event_min.mac_addr.addr);
		ab->pktlog_defs_checksum = le32_to_cpu(fixed_param.pktlog_defs_checksum);
		ab->wmi_ready = true;
		break;
	case WMI_TAG_ARRAY_FIXED_STRUCT:
		addr_list = (struct ath12k_wmi_mac_addr_params *)ptr;
		num_mac_addr = rdy_parse->num_extra_mac_addr;

		if (!(ab->num_radios > 1 && num_mac_addr >= ab->num_radios))
			break;

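		/* Assign the extra MAC addresses to the pdevs in order; e.g.
		 * on a (hypothetical) dual-radio chip reporting two extra
		 * addresses, pdev 0 gets addr_list[0] and pdev 1 gets
		 * addr_list[1].
		 */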
		for (i = 0; i < ab->num_radios; i++) {
			pdev = &ab->pdevs[i];
			ether_addr_copy(pdev->mac_addr, addr_list[i].addr);
		}
		ab->pdevs_macaddr_valid = true;
		break;
	default:
		break;
	}

	return 0;
}

static int ath12k_ready_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct ath12k_wmi_rdy_parse rdy_parse = { };
	int ret;

	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
				  ath12k_wmi_rdy_parse, &rdy_parse);
	if (ret) {
		ath12k_warn(ab, "failed to parse tlv %d\n", ret);
		return ret;
	}

	complete(&ab->wmi_ab.unified_ready);
	return 0;
}

static void ath12k_peer_delete_resp_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct wmi_peer_delete_resp_event peer_del_resp;
	struct ath12k *ar;

	if (ath12k_pull_peer_del_resp_ev(ab, skb, &peer_del_resp) != 0) {
		ath12k_warn(ab, "failed to extract peer delete resp");
		return;
	}

	rcu_read_lock();
	ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(peer_del_resp.vdev_id));
	if (!ar) {
		ath12k_warn(ab, "invalid vdev id in peer delete resp ev %d",
			    peer_del_resp.vdev_id);
		rcu_read_unlock();
		return;
	}

	complete(&ar->peer_delete_done);
	rcu_read_unlock();
	ath12k_dbg(ab, ATH12K_DBG_WMI, "peer delete resp for vdev id %d addr %pM\n",
		   peer_del_resp.vdev_id, peer_del_resp.peer_macaddr.addr);
}

static void ath12k_vdev_delete_resp_event(struct ath12k_base *ab,
					  struct sk_buff *skb)
{
	struct ath12k *ar;
	u32 vdev_id = 0;

	if (ath12k_pull_vdev_del_resp_ev(ab, skb, &vdev_id) != 0) {
		ath12k_warn(ab, "failed to extract vdev delete resp");
		return;
	}

	rcu_read_lock();
	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
	if (!ar) {
		ath12k_warn(ab, "invalid vdev id in vdev delete resp ev %d",
			    vdev_id);
		rcu_read_unlock();
		return;
	}

	complete(&ar->vdev_delete_done);

	rcu_read_unlock();

	ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev delete resp for vdev id %d\n",
		   vdev_id);
}

static const char *ath12k_wmi_vdev_resp_print(u32 vdev_resp_status)
{
	switch (vdev_resp_status) {
	case WMI_VDEV_START_RESPONSE_INVALID_VDEVID:
		return "invalid vdev id";
	case WMI_VDEV_START_RESPONSE_NOT_SUPPORTED:
		return "not supported";
	case WMI_VDEV_START_RESPONSE_DFS_VIOLATION:
		return "dfs violation";
	case WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN:
		return "invalid regdomain";
	default:
		return "unknown";
	}
}

static void ath12k_vdev_start_resp_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct wmi_vdev_start_resp_event vdev_start_resp;
	struct ath12k *ar;
	u32 status;

	if (ath12k_pull_vdev_start_resp_tlv(ab, skb, &vdev_start_resp) != 0) {
		ath12k_warn(ab, "failed to extract vdev start resp");
		return;
	}

	rcu_read_lock();
	ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(vdev_start_resp.vdev_id));
	if (!ar) {
		ath12k_warn(ab, "invalid vdev id in vdev start resp ev %d",
			    vdev_start_resp.vdev_id);
		rcu_read_unlock();
		return;
	}

	ar->last_wmi_vdev_start_status = 0;

	status = le32_to_cpu(vdev_start_resp.status);

	if (WARN_ON_ONCE(status)) {
		ath12k_warn(ab, "vdev start resp error status %d (%s)\n",
			    status, ath12k_wmi_vdev_resp_print(status));
		ar->last_wmi_vdev_start_status = status;
	}

	complete(&ar->vdev_setup_done);

	rcu_read_unlock();

	ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev start resp for vdev id %d",
		   vdev_start_resp.vdev_id);
}

static void ath12k_bcn_tx_status_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	u32 vdev_id, tx_status;

	if (ath12k_pull_bcn_tx_status_ev(ab, skb->data, skb->len,
					 &vdev_id, &tx_status) != 0) {
		ath12k_warn(ab, "failed to extract bcn tx status");
		return;
	}
}

static void ath12k_vdev_stopped_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct ath12k *ar;
	u32 vdev_id = 0;

	if (ath12k_pull_vdev_stopped_param_tlv(ab, skb, &vdev_id) != 0) {
		ath12k_warn(ab, "failed to extract vdev stopped event");
		return;
	}

	rcu_read_lock();
	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
	if (!ar) {
		ath12k_warn(ab, "invalid vdev id in vdev stopped ev %d",
			    vdev_id);
		rcu_read_unlock();
		return;
	}

	complete(&ar->vdev_setup_done);

	rcu_read_unlock();

	ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev stopped for vdev id %d", vdev_id);
}

static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct ath12k_wmi_mgmt_rx_arg rx_ev = {0};
	struct ath12k *ar;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr;
	u16 fc;
	struct ieee80211_supported_band *sband;

	if (ath12k_pull_mgmt_rx_params_tlv(ab, skb, &rx_ev) != 0) {
		ath12k_warn(ab, "failed to extract mgmt rx event");
		dev_kfree_skb(skb);
		return;
	}

	memset(status, 0, sizeof(*status));

	ath12k_dbg(ab, ATH12K_DBG_MGMT, "mgmt rx event status %08x\n",
		   rx_ev.status);

	rcu_read_lock();
	ar = ath12k_mac_get_ar_by_pdev_id(ab, rx_ev.pdev_id);

	if (!ar) {
		ath12k_warn(ab, "invalid pdev_id %d in mgmt_rx_event\n",
			    rx_ev.pdev_id);
		dev_kfree_skb(skb);
		goto exit;
	}

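	/* Drop the frame if CAC is running (no frames may be forwarded
	 * while checking the channel for radar) or if firmware flagged a
	 * decryption, key-cache-miss or CRC error on it.
	 */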
	if ((test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) ||
	    (rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT |
			     WMI_RX_STATUS_ERR_KEY_CACHE_MISS |
			     WMI_RX_STATUS_ERR_CRC))) {
		dev_kfree_skb(skb);
		goto exit;
	}

	if (rx_ev.status & WMI_RX_STATUS_ERR_MIC)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (rx_ev.chan_freq >= ATH12K_MIN_6G_FREQ) {
		status->band = NL80211_BAND_6GHZ;
	} else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) {
		status->band = NL80211_BAND_2GHZ;
	} else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH12K_MAX_5G_CHAN) {
		status->band = NL80211_BAND_5GHZ;
	} else {
		/* Shouldn't happen unless list of advertised channels to
		 * mac80211 has been changed.
		 */
		WARN_ON_ONCE(1);
		dev_kfree_skb(skb);
		goto exit;
	}

	if (rx_ev.phy_mode == MODE_11B &&
	    (status->band == NL80211_BAND_5GHZ || status->band == NL80211_BAND_6GHZ))
		ath12k_dbg(ab, ATH12K_DBG_WMI,
			   "wmi mgmt rx 11b (CCK) on 5/6GHz, band = %d\n", status->band);

	sband = &ar->mac.sbands[status->band];

	status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
						      status->band);
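	/* Firmware reports only an SNR; adding the assumed default noise
	 * floor (ATH12K_DEFAULT_NOISE_FLOOR, nominally -95 dBm) converts it
	 * to an absolute signal level in dBm for mac80211.
	 */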
	status->signal = rx_ev.snr + ATH12K_DEFAULT_NOISE_FLOOR;
	status->rate_idx = ath12k_mac_bitrate_to_idx(sband, rx_ev.rate / 100);

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = le16_to_cpu(hdr->frame_control);

	/* Firmware is guaranteed to report all essential management frames via
	 * WMI while it can deliver some extra via HTT. Since there can be
	 * duplicates split the reporting wrt monitor/sniffing.
	 */
	status->flag |= RX_FLAG_SKIP_MONITOR;

	/* In case of PMF, FW delivers decrypted frames with Protected Bit set
	 * including group privacy action frames.
	 */
	if (ieee80211_has_protected(hdr->frame_control)) {
		status->flag |= RX_FLAG_DECRYPTED;

		if (!ieee80211_is_robust_mgmt_frame(skb)) {
			status->flag |= RX_FLAG_IV_STRIPPED |
					RX_FLAG_MMIC_STRIPPED;
			hdr->frame_control = __cpu_to_le16(fc &
					     ~IEEE80211_FCTL_PROTECTED);
		}
	}

	/* TODO: Pending handle beacon implementation
	 * if (ieee80211_is_beacon(hdr->frame_control))
	 *	ath12k_mac_handle_beacon(ar, skb);
	 */

	ath12k_dbg(ab, ATH12K_DBG_MGMT,
		   "event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
		   skb, skb->len,
		   fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);

	ath12k_dbg(ab, ATH12K_DBG_MGMT,
		   "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
		   status->freq, status->band, status->signal,
		   status->rate_idx);

	ieee80211_rx_ni(ar->hw, skb);

exit:
	rcu_read_unlock();
}

static void ath12k_mgmt_tx_compl_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct wmi_mgmt_tx_compl_event tx_compl_param = {0};
	struct ath12k *ar;

	if (ath12k_pull_mgmt_tx_compl_param_tlv(ab, skb, &tx_compl_param) != 0) {
		ath12k_warn(ab, "failed to extract mgmt tx compl event");
		return;
	}

	rcu_read_lock();
	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(tx_compl_param.pdev_id));
	if (!ar) {
		ath12k_warn(ab, "invalid pdev id %d in mgmt_tx_compl_event\n",
			    tx_compl_param.pdev_id);
		goto exit;
	}

	wmi_process_mgmt_tx_comp(ar, le32_to_cpu(tx_compl_param.desc_id),
				 le32_to_cpu(tx_compl_param.status));

	ath12k_dbg(ab, ATH12K_DBG_MGMT,
		   "mgmt tx compl ev pdev_id %d, desc_id %d, status %d",
		   tx_compl_param.pdev_id, tx_compl_param.desc_id,
		   tx_compl_param.status);

exit:
	rcu_read_unlock();
}

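/* Find the radio whose scan state machine is in @state for @vdev_id.
 * Used when an event arrives for a vdev that is no longer in the active
 * interface list, e.g. for a scan cancelled during interface teardown.
 */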
static struct ath12k *ath12k_get_ar_on_scan_state(struct ath12k_base *ab,
						  u32 vdev_id,
						  enum ath12k_scan_state state)
{
	int i;
	struct ath12k_pdev *pdev;
	struct ath12k *ar;

	for (i = 0; i < ab->num_radios; i++) {
		pdev = rcu_dereference(ab->pdevs_active[i]);
		if (pdev && pdev->ar) {
			ar = pdev->ar;

			spin_lock_bh(&ar->data_lock);
			if (ar->scan.state == state &&
			    ar->scan.vdev_id == vdev_id) {
				spin_unlock_bh(&ar->data_lock);
				return ar;
			}
			spin_unlock_bh(&ar->data_lock);
		}
	}
	return NULL;
}

static void ath12k_scan_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct ath12k *ar;
	struct wmi_scan_event scan_ev = {0};

	if (ath12k_pull_scan_ev(ab, skb, &scan_ev) != 0) {
		ath12k_warn(ab, "failed to extract scan event");
		return;
	}

	rcu_read_lock();

	/* In case the scan was cancelled, e.g. during interface teardown,
	 * the interface will not be found among the active interfaces.
	 * In such scenarios, instead iterate over the active pdevs and
	 * search for the 'ar' whose scan is ABORTING and whose aborting
	 * scan's vdev id matches this event's info.
	 */
	if (le32_to_cpu(scan_ev.event_type) == WMI_SCAN_EVENT_COMPLETED &&
	    le32_to_cpu(scan_ev.reason) == WMI_SCAN_REASON_CANCELLED) {
		ar = ath12k_get_ar_on_scan_state(ab, le32_to_cpu(scan_ev.vdev_id),
						 ATH12K_SCAN_ABORTING);
		if (!ar)
			ar = ath12k_get_ar_on_scan_state(ab, le32_to_cpu(scan_ev.vdev_id),
							 ATH12K_SCAN_RUNNING);
	} else {
		ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(scan_ev.vdev_id));
	}

	if (!ar) {
		ath12k_warn(ab, "Received scan event for unknown vdev");
		rcu_read_unlock();
		return;
	}

	spin_lock_bh(&ar->data_lock);

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
		   ath12k_wmi_event_scan_type_str(le32_to_cpu(scan_ev.event_type),
						  le32_to_cpu(scan_ev.reason)),
		   le32_to_cpu(scan_ev.event_type),
		   le32_to_cpu(scan_ev.reason),
		   le32_to_cpu(scan_ev.channel_freq),
		   le32_to_cpu(scan_ev.scan_req_id),
		   le32_to_cpu(scan_ev.scan_id),
		   le32_to_cpu(scan_ev.vdev_id),
		   ath12k_scan_state_str(ar->scan.state), ar->scan.state);

	switch (le32_to_cpu(scan_ev.event_type)) {
	case WMI_SCAN_EVENT_STARTED:
		ath12k_wmi_event_scan_started(ar);
		break;
	case WMI_SCAN_EVENT_COMPLETED:
		ath12k_wmi_event_scan_completed(ar);
		break;
	case WMI_SCAN_EVENT_BSS_CHANNEL:
		ath12k_wmi_event_scan_bss_chan(ar);
		break;
	case WMI_SCAN_EVENT_FOREIGN_CHAN:
		ath12k_wmi_event_scan_foreign_chan(ar, le32_to_cpu(scan_ev.channel_freq));
		break;
	case WMI_SCAN_EVENT_START_FAILED:
		ath12k_warn(ab, "received scan start failure event\n");
		ath12k_wmi_event_scan_start_failed(ar);
		break;
	case WMI_SCAN_EVENT_DEQUEUED:
		__ath12k_mac_scan_finish(ar);
		break;
	case WMI_SCAN_EVENT_PREEMPTED:
	case WMI_SCAN_EVENT_RESTARTED:
	case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
	default:
		break;
	}

	spin_unlock_bh(&ar->data_lock);

	rcu_read_unlock();
}

static void ath12k_peer_sta_kickout_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct wmi_peer_sta_kickout_arg arg = {};
	struct ieee80211_sta *sta;
	struct ath12k_peer *peer;
	struct ath12k *ar;

	if (ath12k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) {
		ath12k_warn(ab, "failed to extract peer sta kickout event");
		return;
	}

	rcu_read_lock();

	spin_lock_bh(&ab->base_lock);

	peer = ath12k_peer_find_by_addr(ab, arg.mac_addr);

	if (!peer) {
		ath12k_warn(ab, "peer not found %pM\n",
			    arg.mac_addr);
		goto exit;
	}

	ar = ath12k_mac_get_ar_by_vdev_id(ab, peer->vdev_id);
	if (!ar) {
		ath12k_warn(ab, "invalid vdev id in peer sta kickout ev %d",
			    peer->vdev_id);
		goto exit;
	}

	sta = ieee80211_find_sta_by_ifaddr(ar->hw,
					   arg.mac_addr, NULL);
	if (!sta) {
		ath12k_warn(ab, "Spurious quick kickout for STA %pM\n",
			    arg.mac_addr);
		goto exit;
	}

	ath12k_dbg(ab, ATH12K_DBG_WMI, "peer sta kickout event %pM",
		   arg.mac_addr);

	ieee80211_report_low_ack(sta, 10);

exit:
	spin_unlock_bh(&ab->base_lock);
	rcu_read_unlock();
}

static void ath12k_roam_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct wmi_roam_event roam_ev = {};
	struct ath12k *ar;

	if (ath12k_pull_roam_ev(ab, skb, &roam_ev) != 0) {
		ath12k_warn(ab, "failed to extract roam event");
		return;
	}

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "wmi roam event vdev %u reason 0x%08x rssi %d\n",
		   roam_ev.vdev_id, roam_ev.reason, roam_ev.rssi);

	rcu_read_lock();
	ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(roam_ev.vdev_id));
	if (!ar) {
		ath12k_warn(ab, "invalid vdev id in roam ev %d",
			    roam_ev.vdev_id);
		rcu_read_unlock();
		return;
	}

	if (le32_to_cpu(roam_ev.reason) >= WMI_ROAM_REASON_MAX)
		ath12k_warn(ab, "ignoring unknown roam event reason %d on vdev %i\n",
			    roam_ev.reason, roam_ev.vdev_id);

	switch (le32_to_cpu(roam_ev.reason)) {
	case WMI_ROAM_REASON_BEACON_MISS:
		/* TODO: Pending beacon miss and connection_loss_work
		 * implementation
		 * ath12k_mac_handle_beacon_miss(ar, vdev_id);
		 */
		break;
	case WMI_ROAM_REASON_BETTER_AP:
	case WMI_ROAM_REASON_LOW_RSSI:
	case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
	case WMI_ROAM_REASON_HO_FAILED:
		ath12k_warn(ab, "ignoring not implemented roam event reason %d on vdev %i\n",
			    roam_ev.reason, roam_ev.vdev_id);
		break;
	}

	rcu_read_unlock();
}

static void ath12k_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct wmi_chan_info_event ch_info_ev = {0};
	struct ath12k *ar;
	struct survey_info *survey;
	int idx;
	/* HW channel counters frequency value in hertz */
	u32 cc_freq_hz = ab->cc_freq_hz;

	if (ath12k_pull_chan_info_ev(ab, skb->data, skb->len, &ch_info_ev) != 0) {
		ath12k_warn(ab, "failed to extract chan info event");
		return;
	}

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "chan info vdev_id %d err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d mac_clk_mhz %d\n",
		   ch_info_ev.vdev_id, ch_info_ev.err_code, ch_info_ev.freq,
		   ch_info_ev.cmd_flags, ch_info_ev.noise_floor,
		   ch_info_ev.rx_clear_count, ch_info_ev.cycle_count,
		   ch_info_ev.mac_clk_mhz);

	if (le32_to_cpu(ch_info_ev.cmd_flags) == WMI_CHAN_INFO_END_RESP) {
		ath12k_dbg(ab, ATH12K_DBG_WMI, "chan info report completed\n");
		return;
	}

	rcu_read_lock();
	ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(ch_info_ev.vdev_id));
	if (!ar) {
		ath12k_warn(ab, "invalid vdev id in chan info ev %d",
			    ch_info_ev.vdev_id);
		rcu_read_unlock();
		return;
	}
	spin_lock_bh(&ar->data_lock);

	switch (ar->scan.state) {
	case ATH12K_SCAN_IDLE:
	case ATH12K_SCAN_STARTING:
		ath12k_warn(ab, "received chan info event without a scan request, ignoring\n");
		goto exit;
	case ATH12K_SCAN_RUNNING:
	case ATH12K_SCAN_ABORTING:
		break;
	}

	idx = freq_to_idx(ar, le32_to_cpu(ch_info_ev.freq));
	if (idx >= ARRAY_SIZE(ar->survey)) {
		ath12k_warn(ab, "chan info: invalid frequency %d (idx %d out of bounds)\n",
			    ch_info_ev.freq, idx);
		goto exit;
	}

	/* If firmware provides the MAC clock frequency in MHz, override the
	 * initialized channel counter frequency. The value is kept as
	 * MHz * 1000 (i.e. counts per millisecond), so the survey times
	 * computed below effectively come out in milliseconds.
	 */
	if (ch_info_ev.mac_clk_mhz)
		cc_freq_hz = (le32_to_cpu(ch_info_ev.mac_clk_mhz) * 1000);

	if (le32_to_cpu(ch_info_ev.cmd_flags) == WMI_CHAN_INFO_START_RESP) {
		survey = &ar->survey[idx];
		memset(survey, 0, sizeof(*survey));
		survey->noise = le32_to_cpu(ch_info_ev.noise_floor);
		survey->filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME |
				 SURVEY_INFO_TIME_BUSY;
		survey->time = div_u64(le32_to_cpu(ch_info_ev.cycle_count), cc_freq_hz);
		survey->time_busy = div_u64(le32_to_cpu(ch_info_ev.rx_clear_count),
					    cc_freq_hz);
	}
exit:
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();
}

static void
ath12k_pdev_bss_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct wmi_pdev_bss_chan_info_event bss_ch_info_ev = {};
	struct survey_info *survey;
	struct ath12k *ar;
	u32 cc_freq_hz = ab->cc_freq_hz;
	u64 busy, total, tx, rx, rx_bss;
	int idx;

	if (ath12k_pull_pdev_bss_chan_info_ev(ab, skb, &bss_ch_info_ev) != 0) {
		ath12k_warn(ab, "failed to extract pdev bss chan info event");
		return;
	}

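	/* Each cycle counter arrives split into two 32-bit halves;
	 * recombine them, e.g. (hypothetically) high = 0x1 and low = 0x2
	 * yield the 64-bit value 0x100000002.
	 */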
	busy = (u64)(le32_to_cpu(bss_ch_info_ev.rx_clear_count_high)) << 32 |
		le32_to_cpu(bss_ch_info_ev.rx_clear_count_low);

	total = (u64)(le32_to_cpu(bss_ch_info_ev.cycle_count_high)) << 32 |
		le32_to_cpu(bss_ch_info_ev.cycle_count_low);

	tx = (u64)(le32_to_cpu(bss_ch_info_ev.tx_cycle_count_high)) << 32 |
		le32_to_cpu(bss_ch_info_ev.tx_cycle_count_low);

	rx = (u64)(le32_to_cpu(bss_ch_info_ev.rx_cycle_count_high)) << 32 |
		le32_to_cpu(bss_ch_info_ev.rx_cycle_count_low);

	rx_bss = (u64)(le32_to_cpu(bss_ch_info_ev.rx_bss_cycle_count_high)) << 32 |
		le32_to_cpu(bss_ch_info_ev.rx_bss_cycle_count_low);

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "pdev bss chan info:\n pdev_id: %d freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n",
		   bss_ch_info_ev.pdev_id, bss_ch_info_ev.freq,
		   bss_ch_info_ev.noise_floor, busy, total,
		   tx, rx, rx_bss);

	rcu_read_lock();
	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(bss_ch_info_ev.pdev_id));

	if (!ar) {
		ath12k_warn(ab, "invalid pdev id %d in bss_chan_info event\n",
			    bss_ch_info_ev.pdev_id);
		rcu_read_unlock();
		return;
	}

	spin_lock_bh(&ar->data_lock);
	idx = freq_to_idx(ar, le32_to_cpu(bss_ch_info_ev.freq));
	if (idx >= ARRAY_SIZE(ar->survey)) {
		ath12k_warn(ab, "bss chan info: invalid frequency %d (idx %d out of bounds)\n",
			    bss_ch_info_ev.freq, idx);
		goto exit;
	}

	survey = &ar->survey[idx];

	survey->noise = le32_to_cpu(bss_ch_info_ev.noise_floor);
	survey->time = div_u64(total, cc_freq_hz);
	survey->time_busy = div_u64(busy, cc_freq_hz);
	survey->time_rx = div_u64(rx_bss, cc_freq_hz);
	survey->time_tx = div_u64(tx, cc_freq_hz);
	survey->filled |= (SURVEY_INFO_NOISE_DBM |
			   SURVEY_INFO_TIME |
			   SURVEY_INFO_TIME_BUSY |
			   SURVEY_INFO_TIME_RX |
			   SURVEY_INFO_TIME_TX);
exit:
	spin_unlock_bh(&ar->data_lock);
	complete(&ar->bss_survey_done);

	rcu_read_unlock();
}

static void ath12k_vdev_install_key_compl_event(struct ath12k_base *ab,
						struct sk_buff *skb)
{
	struct wmi_vdev_install_key_complete_arg install_key_compl = {0};
	struct ath12k *ar;

	if (ath12k_pull_vdev_install_key_compl_ev(ab, skb, &install_key_compl) != 0) {
		ath12k_warn(ab, "failed to extract install key compl event");
		return;
	}

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "vdev install key ev idx %d flags %08x macaddr %pM status %d\n",
		   install_key_compl.key_idx, install_key_compl.key_flags,
		   install_key_compl.macaddr, install_key_compl.status);

	rcu_read_lock();
	ar = ath12k_mac_get_ar_by_vdev_id(ab, install_key_compl.vdev_id);
	if (!ar) {
		ath12k_warn(ab, "invalid vdev id in install key compl ev %d",
			    install_key_compl.vdev_id);
		rcu_read_unlock();
		return;
	}

	ar->install_key_status = 0;

	if (install_key_compl.status != WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS) {
		ath12k_warn(ab, "install key failed for %pM status %d\n",
			    install_key_compl.macaddr, install_key_compl.status);
		ar->install_key_status = install_key_compl.status;
	}

	complete(&ar->install_key_done);
	rcu_read_unlock();
}

static int ath12k_wmi_tlv_services_parser(struct ath12k_base *ab,
					  u16 tag, u16 len,
					  const void *ptr,
					  void *data)
{
	const struct wmi_service_available_event *ev;
	u32 *wmi_ext2_service_bitmap;
	int i, j;
	u16 expected_len;

	expected_len = WMI_SERVICE_SEGMENT_BM_SIZE32 * sizeof(u32);
	if (len < expected_len) {
		ath12k_warn(ab, "invalid length %d for the WMI services available tag 0x%x\n",
			    len, tag);
		return -EINVAL;
	}

	switch (tag) {
	case WMI_TAG_SERVICE_AVAILABLE_EVENT:
		ev = (struct wmi_service_available_event *)ptr;
		for (i = 0, j = WMI_MAX_SERVICE;
		     i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE;
		     i++) {
			do {
				if (le32_to_cpu(ev->wmi_service_segment_bitmap[i]) &
				    BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
					set_bit(j, ab->wmi_ab.svc_map);
			} while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
		}
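		/* Mapping sketch (assuming WMI_MAX_SERVICE is a multiple of
		 * 32): extended service bit j = WMI_MAX_SERVICE + 40 lives
		 * in segment word i = 40 / 32 = 1 at bit position
		 * j % 32 = 8; the do/while walks all 32 bits of word i
		 * while j keeps counting across words.
		 */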

		ath12k_dbg(ab, ATH12K_DBG_WMI,
			   "wmi_ext_service_bitmap 0x%x 0x%x 0x%x 0x%x",
			   ev->wmi_service_segment_bitmap[0],
			   ev->wmi_service_segment_bitmap[1],
			   ev->wmi_service_segment_bitmap[2],
			   ev->wmi_service_segment_bitmap[3]);
		break;
	case WMI_TAG_ARRAY_UINT32:
		wmi_ext2_service_bitmap = (u32 *)ptr;
		for (i = 0, j = WMI_MAX_EXT_SERVICE;
		     i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT2_SERVICE;
		     i++) {
			do {
				if (wmi_ext2_service_bitmap[i] &
				    BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
					set_bit(j, ab->wmi_ab.svc_map);
			} while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
		}

		ath12k_dbg(ab, ATH12K_DBG_WMI,
			   "wmi_ext2_service_bitmap 0x%04x 0x%04x 0x%04x 0x%04x",
			   wmi_ext2_service_bitmap[0], wmi_ext2_service_bitmap[1],
			   wmi_ext2_service_bitmap[2], wmi_ext2_service_bitmap[3]);
		break;
	}
	return 0;
}

static int ath12k_service_available_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	int ret;

	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
				  ath12k_wmi_tlv_services_parser,
				  NULL);
	return ret;
}

static void ath12k_peer_assoc_conf_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct wmi_peer_assoc_conf_arg peer_assoc_conf = {0};
	struct ath12k *ar;

	if (ath12k_pull_peer_assoc_conf_ev(ab, skb, &peer_assoc_conf) != 0) {
		ath12k_warn(ab, "failed to extract peer assoc conf event");
		return;
	}

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "peer assoc conf ev vdev id %d macaddr %pM\n",
		   peer_assoc_conf.vdev_id, peer_assoc_conf.macaddr);

	rcu_read_lock();
	ar = ath12k_mac_get_ar_by_vdev_id(ab, peer_assoc_conf.vdev_id);

	if (!ar) {
		ath12k_warn(ab, "invalid vdev id in peer assoc conf ev %d",
			    peer_assoc_conf.vdev_id);
		rcu_read_unlock();
		return;
	}

	complete(&ar->peer_assoc_done);
	rcu_read_unlock();
}

static void ath12k_update_stats_event(struct ath12k_base *ab, struct sk_buff *skb)
{
}

/* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the scanned
 * frequency is not part of the BDF CTL (Conformance Test Limits) table
 * entries.
 */
static void ath12k_pdev_ctl_failsafe_check_event(struct ath12k_base *ab,
						 struct sk_buff *skb)
{
	const void **tb;
	const struct wmi_pdev_ctl_failsafe_chk_event *ev;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return;
	}

	ev = tb[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch pdev ctl failsafe check ev");
		kfree(tb);
		return;
	}

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "pdev ctl failsafe check ev status %d\n",
		   ev->ctl_failsafe_status);

	/* If ctl_failsafe_status is set to 1, FW caps the transmit power
	 * at 10 dBm; otherwise the CTL power entry in the BDF is used.
	 */
	if (ev->ctl_failsafe_status != 0)
		ath12k_warn(ab, "pdev ctl failsafe failure status %d",
			    ev->ctl_failsafe_status);

	kfree(tb);
}

static void
ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab,
					  const struct ath12k_wmi_pdev_csa_event *ev,
					  const u32 *vdev_ids)
{
	int i;
	struct ath12k_vif *arvif;

	/* Finish CSA once the switch count reaches zero */
	if (ev->current_switch_count)
		return;

	rcu_read_lock();
	for (i = 0; i < le32_to_cpu(ev->num_vdevs); i++) {
		arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_ids[i]);

		if (!arvif) {
			ath12k_warn(ab, "Recvd csa status for unknown vdev %d",
				    vdev_ids[i]);
			continue;
		}

		if (arvif->is_up && arvif->vif->bss_conf.csa_active)
			ieee80211_csa_finish(arvif->vif);
	}
	rcu_read_unlock();
}

static void
ath12k_wmi_pdev_csa_switch_count_status_event(struct ath12k_base *ab,
					      struct sk_buff *skb)
{
	const void **tb;
	const struct ath12k_wmi_pdev_csa_event *ev;
	const u32 *vdev_ids;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return;
	}

	ev = tb[WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT];
	vdev_ids = tb[WMI_TAG_ARRAY_UINT32];

	if (!ev || !vdev_ids) {
		ath12k_warn(ab, "failed to fetch pdev csa switch count ev");
		kfree(tb);
		return;
	}

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "pdev csa switch count %d for pdev %d, num_vdevs %d",
		   ev->current_switch_count, ev->pdev_id,
		   ev->num_vdevs);

	ath12k_wmi_process_csa_switch_count_event(ab, ev, vdev_ids);

	kfree(tb);
}

static void
ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	const void **tb;
	const struct ath12k_wmi_pdev_radar_event *ev;
	struct ath12k *ar;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return;
	}

	ev = tb[WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT];

	if (!ev) {
		ath12k_warn(ab, "failed to fetch pdev dfs radar detected ev");
		kfree(tb);
		return;
	}

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "pdev dfs radar detected on pdev %d, detection mode %d, chan freq %d, chan_width %d, detector id %d, seg id %d, timestamp %d, chirp %d, freq offset %d, sidx %d",
		   ev->pdev_id, ev->detection_mode, ev->chan_freq, ev->chan_width,
		   ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp,
		   ev->freq_offset, ev->sidx);

	rcu_read_lock();

	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev->pdev_id));

	if (!ar) {
		ath12k_warn(ab, "radar detected in invalid pdev %d\n",
			    ev->pdev_id);
		goto exit;
	}

	ath12k_dbg(ar->ab, ATH12K_DBG_REG, "DFS Radar Detected in pdev %d\n",
		   ev->pdev_id);

	if (ar->dfs_block_radar_events)
		ath12k_info(ab, "DFS Radar detected, but ignored as requested\n");
	else
		ieee80211_radar_detected(ar->hw);

exit:
	rcu_read_unlock();

	kfree(tb);
}

static void
ath12k_wmi_pdev_temperature_event(struct ath12k_base *ab,
				  struct sk_buff *skb)
{
	struct ath12k *ar;
	struct wmi_pdev_temperature_event ev = {0};

	if (ath12k_pull_pdev_temp_ev(ab, skb->data, skb->len, &ev) != 0) {
		ath12k_warn(ab, "failed to extract pdev temperature event");
		return;
	}

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "pdev temperature ev temp %d pdev_id %d\n", ev.temp, ev.pdev_id);

	rcu_read_lock();

	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev.pdev_id));
	if (!ar) {
		ath12k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev.pdev_id);
		goto exit;
	}

exit:
	rcu_read_unlock();
}

static void ath12k_fils_discovery_event(struct ath12k_base *ab,
					struct sk_buff *skb)
{
	const void **tb;
	const struct wmi_fils_discovery_event *ev;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab,
			    "failed to parse FILS discovery event tlv %d\n",
			    ret);
		return;
	}

	ev = tb[WMI_TAG_HOST_SWFDA_EVENT];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch FILS discovery event\n");
		kfree(tb);
		return;
	}

	ath12k_warn(ab,
		    "FILS discovery frame expected from host for vdev_id: %u, transmission scheduled at %u, next TBTT: %u\n",
		    ev->vdev_id, ev->fils_tt, ev->tbtt);

	kfree(tb);
}

static void ath12k_probe_resp_tx_status_event(struct ath12k_base *ab,
					      struct sk_buff *skb)
{
	const void **tb;
	const struct wmi_probe_resp_tx_status_event *ev;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab,
			    "failed to parse probe response transmission status event tlv: %d\n",
			    ret);
		return;
	}

	ev = tb[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT];
	if (!ev) {
		ath12k_warn(ab,
			    "failed to fetch probe response transmission status event");
		kfree(tb);
		return;
	}

	if (ev->tx_status)
		ath12k_warn(ab,
			    "Probe response transmission failed for vdev_id %u, status %u\n",
			    ev->vdev_id, ev->tx_status);

	kfree(tb);
}

static void ath12k_rfkill_state_change_event(struct ath12k_base *ab,
					     struct sk_buff *skb)
{
	const struct wmi_rfkill_state_change_event *ev;
	const void **tb;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return;
	}

	ev = tb[WMI_TAG_RFKILL_EVENT];
	if (!ev) {
		kfree(tb);
		return;
	}

	ath12k_dbg(ab, ATH12K_DBG_MAC,
		   "wmi tlv rfkill state change gpio %d type %d radio_state %d\n",
		   le32_to_cpu(ev->gpio_pin_num),
		   le32_to_cpu(ev->int_type),
		   le32_to_cpu(ev->radio_state));

	spin_lock_bh(&ab->base_lock);
	ab->rfkill_radio_on = (ev->radio_state == cpu_to_le32(WMI_RFKILL_RADIO_STATE_ON));
	spin_unlock_bh(&ab->base_lock);

	queue_work(ab->workqueue, &ab->rfkill_work);
	kfree(tb);
}

static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct wmi_cmd_hdr *cmd_hdr;
	enum wmi_tlv_event_id id;

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	id = le32_get_bits(cmd_hdr->cmd_id, WMI_CMD_HDR_CMD_ID);

	if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr)))
		goto out;

	switch (id) {
	/* Process all the WMI events here */
	case WMI_SERVICE_READY_EVENTID:
		ath12k_service_ready_event(ab, skb);
		break;
	case WMI_SERVICE_READY_EXT_EVENTID:
		ath12k_service_ready_ext_event(ab, skb);
		break;
	case WMI_SERVICE_READY_EXT2_EVENTID:
		ath12k_service_ready_ext2_event(ab, skb);
		break;
	case WMI_REG_CHAN_LIST_CC_EXT_EVENTID:
		ath12k_reg_chan_list_event(ab, skb);
		break;
	case WMI_READY_EVENTID:
		ath12k_ready_event(ab, skb);
		break;
	case WMI_PEER_DELETE_RESP_EVENTID:
		ath12k_peer_delete_resp_event(ab, skb);
		break;
	case WMI_VDEV_START_RESP_EVENTID:
		ath12k_vdev_start_resp_event(ab, skb);
		break;
	case WMI_OFFLOAD_BCN_TX_STATUS_EVENTID:
		ath12k_bcn_tx_status_event(ab, skb);
		break;
	case WMI_VDEV_STOPPED_EVENTID:
		ath12k_vdev_stopped_event(ab, skb);
		break;
	case WMI_MGMT_RX_EVENTID:
		ath12k_mgmt_rx_event(ab, skb);
		/* mgmt_rx_event() owns the skb now! */
		return;
	case WMI_MGMT_TX_COMPLETION_EVENTID:
		ath12k_mgmt_tx_compl_event(ab, skb);
		break;
	case WMI_SCAN_EVENTID:
		ath12k_scan_event(ab, skb);
		break;
	case WMI_PEER_STA_KICKOUT_EVENTID:
		ath12k_peer_sta_kickout_event(ab, skb);
		break;
	case WMI_ROAM_EVENTID:
		ath12k_roam_event(ab, skb);
		break;
	case WMI_CHAN_INFO_EVENTID:
		ath12k_chan_info_event(ab, skb);
		break;
	case WMI_PDEV_BSS_CHAN_INFO_EVENTID:
		ath12k_pdev_bss_chan_info_event(ab, skb);
		break;
	case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
		ath12k_vdev_install_key_compl_event(ab, skb);
		break;
	case WMI_SERVICE_AVAILABLE_EVENTID:
		ath12k_service_available_event(ab, skb);
		break;
	case WMI_PEER_ASSOC_CONF_EVENTID:
		ath12k_peer_assoc_conf_event(ab, skb);
		break;
	case WMI_UPDATE_STATS_EVENTID:
		ath12k_update_stats_event(ab, skb);
		break;
	case WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID:
		ath12k_pdev_ctl_failsafe_check_event(ab, skb);
		break;
	case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID:
		ath12k_wmi_pdev_csa_switch_count_status_event(ab, skb);
		break;
	case WMI_PDEV_TEMPERATURE_EVENTID:
		ath12k_wmi_pdev_temperature_event(ab, skb);
		break;
	case WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID:
		ath12k_wmi_pdev_dma_ring_buf_release_event(ab, skb);
		break;
	case WMI_HOST_FILS_DISCOVERY_EVENTID:
		ath12k_fils_discovery_event(ab, skb);
		break;
	case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID:
		ath12k_probe_resp_tx_status_event(ab, skb);
		break;
	case WMI_RFKILL_STATE_CHANGE_EVENTID:
		ath12k_rfkill_state_change_event(ab, skb);
		break;
	/* add Unsupported events here */
	case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
	case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
	case WMI_TWT_ENABLE_EVENTID:
	case WMI_TWT_DISABLE_EVENTID:
	case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
		ath12k_dbg(ab, ATH12K_DBG_WMI,
			   "ignoring unsupported event 0x%x\n", id);
		break;
	case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID:
		ath12k_wmi_pdev_dfs_radar_detected_event(ab, skb);
		break;
	case WMI_VDEV_DELETE_RESP_EVENTID:
		ath12k_vdev_delete_resp_event(ab, skb);
		break;
	/* TODO: Add remaining events */
	default:
		ath12k_dbg(ab, ATH12K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
		break;
	}

out:
	dev_kfree_skb(skb);
}

static int ath12k_connect_pdev_htc_service(struct ath12k_base *ab,
					   u32 pdev_idx)
{
	int status;
	u32 svc_id[] = { ATH12K_HTC_SVC_ID_WMI_CONTROL,
			 ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1,
			 ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2 };
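	/* The pdev index selects the control endpoint: 0 maps to
	 * WMI_CONTROL, 1 to WMI_CONTROL_MAC1 and 2 to WMI_CONTROL_MAC2.
	 */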
	struct ath12k_htc_svc_conn_req conn_req = {};
	struct ath12k_htc_svc_conn_resp conn_resp = {};

	/* these fields are the same for all service endpoints */
	conn_req.ep_ops.ep_tx_complete = ath12k_wmi_htc_tx_complete;
	conn_req.ep_ops.ep_rx_complete = ath12k_wmi_op_rx;
	conn_req.ep_ops.ep_tx_credits = ath12k_wmi_op_ep_tx_credits;

	/* connect to control service */
	conn_req.service_id = svc_id[pdev_idx];

	status = ath12k_htc_connect_service(&ab->htc, &conn_req, &conn_resp);
	if (status) {
		ath12k_warn(ab, "failed to connect to WMI CONTROL service status: %d\n",
			    status);
		return status;
	}

	ab->wmi_ab.wmi_endpoint_id[pdev_idx] = conn_resp.eid;
	ab->wmi_ab.wmi[pdev_idx].eid = conn_resp.eid;
	ab->wmi_ab.max_msg_len[pdev_idx] = conn_resp.max_msg_len;

	return 0;
}

static int
ath12k_wmi_send_unit_test_cmd(struct ath12k *ar,
			      struct wmi_unit_test_cmd ut_cmd,
			      u32 *test_args)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_unit_test_cmd *cmd;
	struct sk_buff *skb;
	struct wmi_tlv *tlv;
	void *ptr;
	u32 *ut_cmd_args;
	int buf_len, arg_len;
	int ret;
	int i;

	arg_len = sizeof(u32) * le32_to_cpu(ut_cmd.num_args);
	buf_len = sizeof(ut_cmd) + arg_len + TLV_HDR_SIZE;
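	/* Resulting command buffer layout:
	 *
	 *   [ wmi_unit_test_cmd ][ TLV header ][ num_args x u32 args ]
	 */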

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_unit_test_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_UNIT_TEST_CMD,
						 sizeof(ut_cmd));

	cmd->vdev_id = ut_cmd.vdev_id;
	cmd->module_id = ut_cmd.module_id;
	cmd->num_args = ut_cmd.num_args;
	cmd->diag_token = ut_cmd.diag_token;

	ptr = skb->data + sizeof(ut_cmd);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, arg_len);

	ptr += TLV_HDR_SIZE;

	ut_cmd_args = ptr;
	for (i = 0; i < le32_to_cpu(ut_cmd.num_args); i++)
		ut_cmd_args[i] = test_args[i];

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI unit test : module %d vdev %d n_args %d token %d\n",
		   cmd->module_id, cmd->vdev_id, cmd->num_args,
		   cmd->diag_token);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_UNIT_TEST_CMDID);

	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_UNIT_TEST CMD :%d\n",
			    ret);
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_simulate_radar(struct ath12k *ar)
{
	struct ath12k_vif *arvif;
	u32 dfs_args[DFS_MAX_TEST_ARGS];
	struct wmi_unit_test_cmd wmi_ut;
	bool arvif_found = false;

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->is_started && arvif->vdev_type == WMI_VDEV_TYPE_AP) {
			arvif_found = true;
			break;
		}
	}

	if (!arvif_found)
		return -EINVAL;

	dfs_args[DFS_TEST_CMDID] = 0;
	dfs_args[DFS_TEST_PDEV_ID] = ar->pdev->pdev_id;
	/* Currently we could pass segment_id (b0 - b1), chirp (b2) and
	 * freq offset (b3 - b10) to the unit test. For simulation
	 * purposes this can be set to 0, which is valid.
	 */
	dfs_args[DFS_TEST_RADAR_PARAM] = 0;
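	/* Illustration only (hypothetical values): per the b0-b1/b2/b3-b10
	 * layout above, segment 1, chirping, with a frequency offset of 10
	 * would be encoded as (1 << 0) | (1 << 2) | (10 << 3).
	 */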

	wmi_ut.vdev_id = cpu_to_le32(arvif->vdev_id);
	wmi_ut.module_id = cpu_to_le32(DFS_UNIT_TEST_MODULE);
	wmi_ut.num_args = cpu_to_le32(DFS_MAX_TEST_ARGS);
	wmi_ut.diag_token = cpu_to_le32(DFS_UNIT_TEST_TOKEN);

	ath12k_dbg(ar->ab, ATH12K_DBG_REG, "Triggering Radar Simulation\n");

	return ath12k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args);
}

int ath12k_wmi_connect(struct ath12k_base *ab)
{
	u32 i;
	u8 wmi_ep_count;

	wmi_ep_count = ab->htc.wmi_ep_count;
	if (wmi_ep_count > ab->hw_params->max_radios)
		return -1;

	for (i = 0; i < wmi_ep_count; i++)
		ath12k_connect_pdev_htc_service(ab, i);

	return 0;
}

static void ath12k_wmi_pdev_detach(struct ath12k_base *ab, u8 pdev_id)
{
	if (WARN_ON(pdev_id >= MAX_RADIOS))
		return;

	/* TODO: Deinit any pdev specific wmi resource */
}

int ath12k_wmi_pdev_attach(struct ath12k_base *ab,
			   u8 pdev_id)
{
	struct ath12k_wmi_pdev *wmi_handle;

	if (pdev_id >= ab->hw_params->max_radios)
		return -EINVAL;

	wmi_handle = &ab->wmi_ab.wmi[pdev_id];

	wmi_handle->wmi_ab = &ab->wmi_ab;

	ab->wmi_ab.ab = ab;
	/* TODO: Init remaining resource specific to pdev */

	return 0;
}

int ath12k_wmi_attach(struct ath12k_base *ab)
{
	int ret;

	ret = ath12k_wmi_pdev_attach(ab, 0);
	if (ret)
		return ret;

	ab->wmi_ab.ab = ab;
	ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX;

	/* It's overwritten when service_ext_ready is handled */
	if (ab->hw_params->single_pdev_only)
		ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE;

	/* TODO: Init remaining wmi soc resources required */
	init_completion(&ab->wmi_ab.service_ready);
	init_completion(&ab->wmi_ab.unified_ready);

	return 0;
}

void ath12k_wmi_detach(struct ath12k_base *ab)
{
	int i;

	/* TODO: Deinit wmi resource specific to SOC as required */

	for (i = 0; i < ab->htc.wmi_ep_count; i++)
		ath12k_wmi_pdev_detach(ab, i);

	ath12k_wmi_free_dbring_caps(ab);
}