1/* SPDX-License-Identifier: ISC */
2/*
3 * Copyright (c) 2005-2011 Atheros Communications Inc.
4 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
5 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
6 */
7
8#ifndef _WMI_OPS_H_
9#define _WMI_OPS_H_
10
11struct ath10k;
12struct sk_buff;
13
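/*
 * Per-ABI WMI dispatch table. The pull_* ops parse event buffers received
 * from firmware into host argument structures; the gen_* ops build command
 * skbuffs that the inline wrappers below hand to ath10k_wmi_cmd_send().
 * An op left NULL by a given WMI implementation makes the corresponding
 * wrapper return -EOPNOTSUPP.
 */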
14struct wmi_ops {
15 void (*rx)(struct ath10k *ar, struct sk_buff *skb);
16 void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);
17 void (*map_svc_ext)(const __le32 *in, unsigned long *out, size_t len);
18
19 int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
20 struct wmi_scan_ev_arg *arg);
21 int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
22 struct wmi_mgmt_rx_ev_arg *arg);
23 int (*pull_mgmt_tx_compl)(struct ath10k *ar, struct sk_buff *skb,
24 struct wmi_tlv_mgmt_tx_compl_ev_arg *arg);
25 int (*pull_mgmt_tx_bundle_compl)(
26 struct ath10k *ar, struct sk_buff *skb,
27 struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg *arg);
28 int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
29 struct wmi_ch_info_ev_arg *arg);
30 int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
31 struct wmi_vdev_start_ev_arg *arg);
32 int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
33 struct wmi_peer_kick_ev_arg *arg);
34 int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
35 struct wmi_swba_ev_arg *arg);
36 int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
37 struct wmi_phyerr_hdr_arg *arg);
38 int (*pull_phyerr)(struct ath10k *ar, const void *phyerr_buf,
39 int left_len, struct wmi_phyerr_ev_arg *arg);
40 int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
41 struct wmi_svc_rdy_ev_arg *arg);
42 int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
43 struct wmi_rdy_ev_arg *arg);
44 int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
45 struct ath10k_fw_stats *stats);
46 int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
47 struct wmi_roam_ev_arg *arg);
48 int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
49 struct wmi_wow_ev_arg *arg);
50 int (*pull_echo_ev)(struct ath10k *ar, struct sk_buff *skb,
51 struct wmi_echo_ev_arg *arg);
52 int (*pull_dfs_status_ev)(struct ath10k *ar, struct sk_buff *skb,
53 struct wmi_dfs_status_ev_arg *arg);
54 int (*pull_svc_avail)(struct ath10k *ar, struct sk_buff *skb,
55 struct wmi_svc_avail_ev_arg *arg);
56
57 enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);
58
59 struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
60 struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
61 struct sk_buff *(*gen_pdev_set_base_macaddr)(struct ath10k *ar,
62 const u8 macaddr[ETH_ALEN]);
63 struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
64 u16 rd5g, u16 ctl2g, u16 ctl5g,
65 enum wmi_dfs_region dfs_reg);
66 struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
67 u32 value);
68 struct sk_buff *(*gen_init)(struct ath10k *ar);
69 struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
70 const struct wmi_start_scan_arg *arg);
71 struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
72 const struct wmi_stop_scan_arg *arg);
73 struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
74 enum wmi_vdev_type type,
75 enum wmi_vdev_subtype subtype,
76 const u8 macaddr[ETH_ALEN]);
77 struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
78 struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
79 const struct wmi_vdev_start_request_arg *arg,
80 bool restart);
81 struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
82 struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
83 const u8 *bssid);
84 struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
85 struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
86 u32 param_id, u32 param_value);
87 struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
88 const struct wmi_vdev_install_key_arg *arg);
89 struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
90 const struct wmi_vdev_spectral_conf_arg *arg);
91 struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
92 u32 trigger, u32 enable);
93 struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
94 const struct wmi_wmm_params_all_arg *arg);
95 struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
96 const u8 peer_addr[ETH_ALEN],
97 enum wmi_peer_type peer_type);
98 struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
99 const u8 peer_addr[ETH_ALEN]);
100 struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
101 const u8 peer_addr[ETH_ALEN],
102 u32 tid_bitmap);
103 struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
104 const u8 *peer_addr,
105 enum wmi_peer_param param_id,
106 u32 param_value);
107 struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
108 const struct wmi_peer_assoc_complete_arg *arg);
109 struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
110 enum wmi_sta_ps_mode psmode);
111 struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
112 enum wmi_sta_powersave_param param_id,
113 u32 value);
114 struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
115 const u8 *mac,
116 enum wmi_ap_ps_peer_param param_id,
117 u32 value);
118 struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
119 const struct wmi_scan_chan_list_arg *arg);
120 struct sk_buff *(*gen_scan_prob_req_oui)(struct ath10k *ar,
121 u32 prob_req_oui);
122 struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
123 const void *bcn, size_t bcn_len,
124 u32 bcn_paddr, bool dtim_zero,
125 bool deliver_cab);
126 struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
127 const struct wmi_wmm_params_all_arg *arg);
128 struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
129 struct sk_buff *(*gen_request_peer_stats_info)(struct ath10k *ar,
130 u32 vdev_id,
131 enum
132 wmi_peer_stats_info_request_type
133 type,
134 u8 *addr,
135 u32 reset);
136 struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
137 enum wmi_force_fw_hang_type type,
138 u32 delay_ms);
139 struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
140 struct sk_buff *(*gen_mgmt_tx_send)(struct ath10k *ar,
141 struct sk_buff *skb,
142 dma_addr_t paddr);
143 int (*cleanup_mgmt_tx_send)(struct ath10k *ar, struct sk_buff *msdu);
144 struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable,
145 u32 log_level);
146 struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
147 struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
148 struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
149 u32 period, u32 duration,
150 u32 next_offset,
151 u32 enabled);
152 struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
153 struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
154 const u8 *mac);
155 struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
156 const u8 *mac, u32 tid, u32 buf_size);
157 struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
158 const u8 *mac, u32 tid,
159 u32 status);
160 struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
161 const u8 *mac, u32 tid, u32 initiator,
162 u32 reason);
163 struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
164 u32 tim_ie_offset, struct sk_buff *bcn,
165 u32 prb_caps, u32 prb_erp,
166 void *prb_ies, size_t prb_ies_len);
167 struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
168 struct sk_buff *bcn);
169 struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
170 const u8 *p2p_ie);
171 struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
172 const u8 peer_addr[ETH_ALEN],
173 const struct wmi_sta_uapsd_auto_trig_arg *args,
174 u32 num_ac);
175 struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
176 const struct wmi_sta_keepalive_arg *arg);
177 struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
178 struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
179 enum wmi_wow_wakeup_event event,
180 u32 enable);
181 struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
182 struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
183 u32 pattern_id,
184 const u8 *pattern,
185 const u8 *mask,
186 int pattern_len,
187 int pattern_offset);
188 struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
189 u32 pattern_id);
190 struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
191 u32 vdev_id,
192 enum wmi_tdls_state state);
193 struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
194 const struct wmi_tdls_peer_update_cmd_arg *arg,
195 const struct wmi_tdls_peer_capab_arg *cap,
196 const struct wmi_channel_arg *chan);
197 struct sk_buff *(*gen_radar_found)
198 (struct ath10k *ar,
199 const struct ath10k_radar_found_info *arg);
200 struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
201 struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar,
202 u32 param);
203 void (*fw_stats_fill)(struct ath10k *ar,
204 struct ath10k_fw_stats *fw_stats,
205 char *buf);
206 struct sk_buff *(*gen_pdev_enable_adaptive_cca)(struct ath10k *ar,
207 u8 enable,
208 u32 detect_level,
209 u32 detect_margin);
210 struct sk_buff *(*ext_resource_config)(struct ath10k *ar,
211 enum wmi_host_platform_type type,
212 u32 fw_feature_bitmap);
213 int (*get_vdev_subtype)(struct ath10k *ar,
214 enum wmi_vdev_subtype subtype);
215 struct sk_buff *(*gen_wow_config_pno)(struct ath10k *ar,
216 u32 vdev_id,
217 struct wmi_pno_scan_req *pno_scan);
218 struct sk_buff *(*gen_pdev_bss_chan_info_req)
219 (struct ath10k *ar,
220 enum wmi_bss_survey_req_type type);
221 struct sk_buff *(*gen_echo)(struct ath10k *ar, u32 value);
222 struct sk_buff *(*gen_pdev_get_tpc_table_cmdid)(struct ath10k *ar,
223 u32 param);
224 struct sk_buff *(*gen_bb_timing)
225 (struct ath10k *ar,
226 const struct wmi_bb_timing_cfg_arg *arg);
227 struct sk_buff *(*gen_per_peer_per_tid_cfg)(struct ath10k *ar,
228 const struct wmi_per_peer_per_tid_cfg_arg *arg);
229
230};
231
232int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
233
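/*
 * The inline wrappers below all follow the same pattern: return -EOPNOTSUPP
 * when the active WMI implementation lacks the op, otherwise invoke it; for
 * command generators this means building the command buffer and submitting
 * it via ath10k_wmi_cmd_send() with the command id registered in ar->wmi.cmd.
 */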
234static inline int
235ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
236{
237 if (WARN_ON_ONCE(!ar->wmi.ops->rx))
238 return -EOPNOTSUPP;
239
240 ar->wmi.ops->rx(ar, skb);
241 return 0;
242}
243
244static inline int
245ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
246 size_t len)
247{
248 if (!ar->wmi.ops->map_svc)
249 return -EOPNOTSUPP;
250
251 ar->wmi.ops->map_svc(in, out, len);
252 return 0;
253}
254
255static inline int
256ath10k_wmi_map_svc_ext(struct ath10k *ar, const __le32 *in, unsigned long *out,
257 size_t len)
258{
259 if (!ar->wmi.ops->map_svc_ext)
260 return -EOPNOTSUPP;
261
262 ar->wmi.ops->map_svc_ext(in, out, len);
263 return 0;
264}
265
266static inline int
267ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
268 struct wmi_scan_ev_arg *arg)
269{
270 if (!ar->wmi.ops->pull_scan)
271 return -EOPNOTSUPP;
272
273 return ar->wmi.ops->pull_scan(ar, skb, arg);
274}
275
276static inline int
277ath10k_wmi_pull_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb,
278 struct wmi_tlv_mgmt_tx_compl_ev_arg *arg)
279{
280 if (!ar->wmi.ops->pull_mgmt_tx_compl)
281 return -EOPNOTSUPP;
282
283 return ar->wmi.ops->pull_mgmt_tx_compl(ar, skb, arg);
284}
285
286static inline int
287ath10k_wmi_pull_mgmt_tx_bundle_compl(struct ath10k *ar, struct sk_buff *skb,
288 struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg *arg)
289{
290 if (!ar->wmi.ops->pull_mgmt_tx_bundle_compl)
291 return -EOPNOTSUPP;
292
293 return ar->wmi.ops->pull_mgmt_tx_bundle_compl(ar, skb, arg);
294}
295
296static inline int
297ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
298 struct wmi_mgmt_rx_ev_arg *arg)
299{
300 if (!ar->wmi.ops->pull_mgmt_rx)
301 return -EOPNOTSUPP;
302
303 return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
304}
305
306static inline int
307ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
308 struct wmi_ch_info_ev_arg *arg)
309{
310 if (!ar->wmi.ops->pull_ch_info)
311 return -EOPNOTSUPP;
312
313 return ar->wmi.ops->pull_ch_info(ar, skb, arg);
314}
315
316static inline int
317ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
318 struct wmi_vdev_start_ev_arg *arg)
319{
320 if (!ar->wmi.ops->pull_vdev_start)
321 return -EOPNOTSUPP;
322
323 return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
324}
325
326static inline int
327ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
328 struct wmi_peer_kick_ev_arg *arg)
329{
330 if (!ar->wmi.ops->pull_peer_kick)
331 return -EOPNOTSUPP;
332
333 return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
334}
335
336static inline int
337ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
338 struct wmi_swba_ev_arg *arg)
339{
340 if (!ar->wmi.ops->pull_swba)
341 return -EOPNOTSUPP;
342
343 return ar->wmi.ops->pull_swba(ar, skb, arg);
344}
345
346static inline int
347ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb,
348 struct wmi_phyerr_hdr_arg *arg)
349{
350 if (!ar->wmi.ops->pull_phyerr_hdr)
351 return -EOPNOTSUPP;
352
353 return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg);
354}
355
356static inline int
357ath10k_wmi_pull_phyerr(struct ath10k *ar, const void *phyerr_buf,
358 int left_len, struct wmi_phyerr_ev_arg *arg)
359{
360 if (!ar->wmi.ops->pull_phyerr)
361 return -EOPNOTSUPP;
362
363 return ar->wmi.ops->pull_phyerr(ar, phyerr_buf, left_len, arg);
364}
365
366static inline int
367ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
368 struct wmi_svc_rdy_ev_arg *arg)
369{
370 if (!ar->wmi.ops->pull_svc_rdy)
371 return -EOPNOTSUPP;
372
373 return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
374}
375
376static inline int
377ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
378 struct wmi_rdy_ev_arg *arg)
379{
380 if (!ar->wmi.ops->pull_rdy)
381 return -EOPNOTSUPP;
382
383 return ar->wmi.ops->pull_rdy(ar, skb, arg);
384}
385
386static inline int
387ath10k_wmi_pull_svc_avail(struct ath10k *ar, struct sk_buff *skb,
388 struct wmi_svc_avail_ev_arg *arg)
389{
390 if (!ar->wmi.ops->pull_svc_avail)
391 return -EOPNOTSUPP;
392 return ar->wmi.ops->pull_svc_avail(ar, skb, arg);
393}
394
395static inline int
396ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
397 struct ath10k_fw_stats *stats)
398{
399 if (!ar->wmi.ops->pull_fw_stats)
400 return -EOPNOTSUPP;
401
402 return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
403}
404
405static inline int
406ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
407 struct wmi_roam_ev_arg *arg)
408{
409 if (!ar->wmi.ops->pull_roam_ev)
410 return -EOPNOTSUPP;
411
412 return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
413}
414
415static inline int
416ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
417 struct wmi_wow_ev_arg *arg)
418{
419 if (!ar->wmi.ops->pull_wow_event)
420 return -EOPNOTSUPP;
421
422 return ar->wmi.ops->pull_wow_event(ar, skb, arg);
423}
424
425static inline int
426ath10k_wmi_pull_echo_ev(struct ath10k *ar, struct sk_buff *skb,
427 struct wmi_echo_ev_arg *arg)
428{
429 if (!ar->wmi.ops->pull_echo_ev)
430 return -EOPNOTSUPP;
431
432 return ar->wmi.ops->pull_echo_ev(ar, skb, arg);
433}
434
435static inline int
436ath10k_wmi_pull_dfs_status(struct ath10k *ar, struct sk_buff *skb,
437 struct wmi_dfs_status_ev_arg *arg)
438{
439 if (!ar->wmi.ops->pull_dfs_status_ev)
440 return -EOPNOTSUPP;
441
442 return ar->wmi.ops->pull_dfs_status_ev(ar, skb, arg);
443}
444
445static inline enum wmi_txbf_conf
446ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
447{
448 if (!ar->wmi.ops->get_txbf_conf_scheme)
449 return WMI_TXBF_CONF_UNSUPPORTED;
450
451 return ar->wmi.ops->get_txbf_conf_scheme(ar);
452}
453
454static inline int
455ath10k_wmi_cleanup_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu)
456{
457 if (!ar->wmi.ops->cleanup_mgmt_tx_send)
458 return -EOPNOTSUPP;
459
460 return ar->wmi.ops->cleanup_mgmt_tx_send(ar, msdu);
461}
462
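/* Management tx by reference: the caller supplies a DMA address (paddr) for
 * the frame, so only that address need travel in the WMI command, whereas
 * ath10k_wmi_mgmt_tx() below hands the frame skb itself to the op.
 */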
463static inline int
464ath10k_wmi_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
465 dma_addr_t paddr)
466{
467 struct sk_buff *skb;
468 int ret;
469
470 if (!ar->wmi.ops->gen_mgmt_tx_send)
471 return -EOPNOTSUPP;
472
473 skb = ar->wmi.ops->gen_mgmt_tx_send(ar, msdu, paddr);
474 if (IS_ERR(skb))
475 return PTR_ERR(skb);
476
477 ret = ath10k_wmi_cmd_send(ar, skb,
478 ar->wmi.cmd->mgmt_tx_send_cmdid);
479 if (ret)
480 return ret;
481
482 return 0;
483}
484
485static inline int
486ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
487{
488 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
489 struct sk_buff *skb;
490 int ret;
491
492 if (!ar->wmi.ops->gen_mgmt_tx)
493 return -EOPNOTSUPP;
494
495 skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
496 if (IS_ERR(skb))
497 return PTR_ERR(skb);
498
499 ret = ath10k_wmi_cmd_send(ar, skb,
500 ar->wmi.cmd->mgmt_tx_cmdid);
501 if (ret)
502 return ret;
503
504 /* FIXME There's no ACK event for Management Tx. This probably
505 * shouldn't be called here either.
506 */
507 info->flags |= IEEE80211_TX_STAT_ACK;
508 ieee80211_tx_status_irqsafe(ar->hw, msdu);
509
510 return 0;
511}
512
513static inline int
514ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
515 u16 ctl2g, u16 ctl5g,
516 enum wmi_dfs_region dfs_reg)
517{
518 struct sk_buff *skb;
519
520 if (!ar->wmi.ops->gen_pdev_set_rd)
521 return -EOPNOTSUPP;
522
523 skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
524 dfs_reg);
525 if (IS_ERR(skb))
526 return PTR_ERR(skb);
527
528 return ath10k_wmi_cmd_send(ar, skb,
529 ar->wmi.cmd->pdev_set_regdomain_cmdid);
530}
531
532static inline int
533ath10k_wmi_pdev_set_base_macaddr(struct ath10k *ar, const u8 macaddr[ETH_ALEN])
534{
535 struct sk_buff *skb;
536
537 if (!ar->wmi.ops->gen_pdev_set_base_macaddr)
538 return -EOPNOTSUPP;
539
540 skb = ar->wmi.ops->gen_pdev_set_base_macaddr(ar, macaddr);
541 if (IS_ERR(skb))
542 return PTR_ERR(skb);
543
544 return ath10k_wmi_cmd_send(ar, skb,
545 ar->wmi.cmd->pdev_set_base_macaddr_cmdid);
546}
547
548static inline int
549ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
550{
551 struct sk_buff *skb;
552
553 if (!ar->wmi.ops->gen_pdev_suspend)
554 return -EOPNOTSUPP;
555
556 skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
557 if (IS_ERR(skb))
558 return PTR_ERR(skb);
559
560 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
561}
562
563static inline int
564ath10k_wmi_pdev_resume_target(struct ath10k *ar)
565{
566 struct sk_buff *skb;
567
568 if (!ar->wmi.ops->gen_pdev_resume)
569 return -EOPNOTSUPP;
570
571 skb = ar->wmi.ops->gen_pdev_resume(ar);
572 if (IS_ERR(skb))
573 return PTR_ERR(skb);
574
575 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
576}
577
578static inline int
579ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
580{
581 struct sk_buff *skb;
582
583 if (!ar->wmi.ops->gen_pdev_set_param)
584 return -EOPNOTSUPP;
585
586 skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
587 if (IS_ERR(skb))
588 return PTR_ERR(skb);
589
590 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
591}
592
593static inline int
594ath10k_wmi_cmd_init(struct ath10k *ar)
595{
596 struct sk_buff *skb;
597
598 if (!ar->wmi.ops->gen_init)
599 return -EOPNOTSUPP;
600
601 skb = ar->wmi.ops->gen_init(ar);
602 if (IS_ERR(skb))
603 return PTR_ERR(skb);
604
605 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
606}
607
608static inline int
609ath10k_wmi_start_scan(struct ath10k *ar,
610 const struct wmi_start_scan_arg *arg)
611{
612 struct sk_buff *skb;
613
614 if (!ar->wmi.ops->gen_start_scan)
615 return -EOPNOTSUPP;
616
617 skb = ar->wmi.ops->gen_start_scan(ar, arg);
618 if (IS_ERR(skb))
619 return PTR_ERR(skb);
620
621 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
622}
623
624static inline int
625ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
626{
627 struct sk_buff *skb;
628
629 if (!ar->wmi.ops->gen_stop_scan)
630 return -EOPNOTSUPP;
631
632 skb = ar->wmi.ops->gen_stop_scan(ar, arg);
633 if (IS_ERR(skb))
634 return PTR_ERR(skb);
635
636 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
637}
638
639static inline int
640ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
641 enum wmi_vdev_type type,
642 enum wmi_vdev_subtype subtype,
643 const u8 macaddr[ETH_ALEN])
644{
645 struct sk_buff *skb;
646
647 if (!ar->wmi.ops->gen_vdev_create)
648 return -EOPNOTSUPP;
649
650 skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
651 if (IS_ERR(skb))
652 return PTR_ERR(skb);
653
654 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
655}
656
657static inline int
658ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
659{
660 struct sk_buff *skb;
661
662 if (!ar->wmi.ops->gen_vdev_delete)
663 return -EOPNOTSUPP;
664
665 skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
666 if (IS_ERR(skb))
667 return PTR_ERR(skb);
668
669 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
670}
671
672static inline int
673ath10k_wmi_vdev_start(struct ath10k *ar,
674 const struct wmi_vdev_start_request_arg *arg)
675{
676 struct sk_buff *skb;
677
678 if (!ar->wmi.ops->gen_vdev_start)
679 return -EOPNOTSUPP;
680
681 skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
682 if (IS_ERR(skb))
683 return PTR_ERR(skb);
684
685 return ath10k_wmi_cmd_send(ar, skb,
686 ar->wmi.cmd->vdev_start_request_cmdid);
687}
688
689static inline int
690ath10k_wmi_vdev_restart(struct ath10k *ar,
691 const struct wmi_vdev_start_request_arg *arg)
692{
693 struct sk_buff *skb;
694
695 if (!ar->wmi.ops->gen_vdev_start)
696 return -EOPNOTSUPP;
697
698 skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
699 if (IS_ERR(skb))
700 return PTR_ERR(skb);
701
702 return ath10k_wmi_cmd_send(ar, skb,
703 ar->wmi.cmd->vdev_restart_request_cmdid);
704}
705
706static inline int
707ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
708{
709 struct sk_buff *skb;
710
711 if (!ar->wmi.ops->gen_vdev_stop)
712 return -EOPNOTSUPP;
713
714 skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
715 if (IS_ERR(skb))
716 return PTR_ERR(skb);
717
718 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
719}
720
721static inline int
722ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
723{
724 struct sk_buff *skb;
725
726 if (!ar->wmi.ops->gen_vdev_up)
727 return -EOPNOTSUPP;
728
729 skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
730 if (IS_ERR(skb))
731 return PTR_ERR(skb);
732
733 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
734}
735
736static inline int
737ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
738{
739 struct sk_buff *skb;
740
741 if (!ar->wmi.ops->gen_vdev_down)
742 return -EOPNOTSUPP;
743
744 skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
745 if (IS_ERR(skb))
746 return PTR_ERR(skb);
747
748 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
749}
750
751static inline int
752ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
753 u32 param_value)
754{
755 struct sk_buff *skb;
756
757 if (!ar->wmi.ops->gen_vdev_set_param)
758 return -EOPNOTSUPP;
759
760 skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
761 param_value);
762 if (IS_ERR(skb))
763 return PTR_ERR(skb);
764
765 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
766}
767
768static inline int
769ath10k_wmi_vdev_install_key(struct ath10k *ar,
770 const struct wmi_vdev_install_key_arg *arg)
771{
772 struct sk_buff *skb;
773
774 if (!ar->wmi.ops->gen_vdev_install_key)
775 return -EOPNOTSUPP;
776
777 skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
778 if (IS_ERR(skb))
779 return PTR_ERR(skb);
780
781 return ath10k_wmi_cmd_send(ar, skb,
782 ar->wmi.cmd->vdev_install_key_cmdid);
783}
784
785static inline int
786ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
787 const struct wmi_vdev_spectral_conf_arg *arg)
788{
789 struct sk_buff *skb;
790 u32 cmd_id;
791
792 if (!ar->wmi.ops->gen_vdev_spectral_conf)
793 return -EOPNOTSUPP;
794
795 skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
796 if (IS_ERR(skb))
797 return PTR_ERR(skb);
798
799 cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
800 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
801}
802
803static inline int
804ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
805 u32 enable)
806{
807 struct sk_buff *skb;
808 u32 cmd_id;
809
810 if (!ar->wmi.ops->gen_vdev_spectral_enable)
811 return -EOPNOTSUPP;
812
813 skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
814 enable);
815 if (IS_ERR(skb))
816 return PTR_ERR(skb);
817
818 cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
819 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
820}
821
822static inline int
823ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
824 const u8 peer_addr[ETH_ALEN],
825 const struct wmi_sta_uapsd_auto_trig_arg *args,
826 u32 num_ac)
827{
828 struct sk_buff *skb;
829 u32 cmd_id;
830
831 if (!ar->wmi.ops->gen_vdev_sta_uapsd)
832 return -EOPNOTSUPP;
833
834 skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
835 num_ac);
836 if (IS_ERR(skb))
837 return PTR_ERR(skb);
838
839 cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
840 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
841}
842
843static inline int
844ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
845 const struct wmi_wmm_params_all_arg *arg)
846{
847 struct sk_buff *skb;
848 u32 cmd_id;
849
	if (!ar->wmi.ops->gen_vdev_wmm_conf)
		return -EOPNOTSUPP;

850	skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
851 if (IS_ERR(skb))
852 return PTR_ERR(skb);
853
854 cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
855 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
856}
857
858static inline int
859ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
860 const u8 peer_addr[ETH_ALEN],
861 enum wmi_peer_type peer_type)
862{
863 struct sk_buff *skb;
864
865 if (!ar->wmi.ops->gen_peer_create)
866 return -EOPNOTSUPP;
867
868 skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
869 if (IS_ERR(skb))
870 return PTR_ERR(skb);
871
872 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
873}
874
875static inline int
876ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
877 const u8 peer_addr[ETH_ALEN])
878{
879 struct sk_buff *skb;
880
881 if (!ar->wmi.ops->gen_peer_delete)
882 return -EOPNOTSUPP;
883
884 skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
885 if (IS_ERR(skb))
886 return PTR_ERR(skb);
887
888 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
889}
890
891static inline int
892ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
893 const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
894{
895 struct sk_buff *skb;
896
897 if (!ar->wmi.ops->gen_peer_flush)
898 return -EOPNOTSUPP;
899
900 skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
901 if (IS_ERR(skb))
902 return PTR_ERR(skb);
903
904 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
905}
906
907static inline int
908ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
909 enum wmi_peer_param param_id, u32 param_value)
910{
911 struct sk_buff *skb;
912
913 if (!ar->wmi.ops->gen_peer_set_param)
914 return -EOPNOTSUPP;
915
916 skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
917 param_value);
918 if (IS_ERR(skb))
919 return PTR_ERR(skb);
920
921 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
922}
923
924static inline int
925ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
926 enum wmi_sta_ps_mode psmode)
927{
928 struct sk_buff *skb;
929
930 if (!ar->wmi.ops->gen_set_psmode)
931 return -EOPNOTSUPP;
932
933 skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
934 if (IS_ERR(skb))
935 return PTR_ERR(skb);
936
937 return ath10k_wmi_cmd_send(ar, skb,
938 ar->wmi.cmd->sta_powersave_mode_cmdid);
939}
940
941static inline int
942ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
943 enum wmi_sta_powersave_param param_id, u32 value)
944{
945 struct sk_buff *skb;
946
947 if (!ar->wmi.ops->gen_set_sta_ps)
948 return -EOPNOTSUPP;
949
950 skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
951 if (IS_ERR(skb))
952 return PTR_ERR(skb);
953
954 return ath10k_wmi_cmd_send(ar, skb,
955 ar->wmi.cmd->sta_powersave_param_cmdid);
956}
957
958static inline int
959ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
960 enum wmi_ap_ps_peer_param param_id, u32 value)
961{
962 struct sk_buff *skb;
963
964 if (!ar->wmi.ops->gen_set_ap_ps)
965 return -EOPNOTSUPP;
966
967 skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
968 if (IS_ERR(skb))
969 return PTR_ERR(skb);
970
971 return ath10k_wmi_cmd_send(ar, skb,
972 ar->wmi.cmd->ap_ps_peer_param_cmdid);
973}
974
975static inline int
976ath10k_wmi_scan_chan_list(struct ath10k *ar,
977 const struct wmi_scan_chan_list_arg *arg)
978{
979 struct sk_buff *skb;
980
981 if (!ar->wmi.ops->gen_scan_chan_list)
982 return -EOPNOTSUPP;
983
984 skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
985 if (IS_ERR(skb))
986 return PTR_ERR(skb);
987
988 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
989}
990
991static inline int
992ath10k_wmi_scan_prob_req_oui(struct ath10k *ar, const u8 mac_addr[ETH_ALEN])
993{
994 struct sk_buff *skb;
995 u32 prob_req_oui;
996
997 prob_req_oui = (((u32)mac_addr[0]) << 16) |
998 (((u32)mac_addr[1]) << 8) | mac_addr[2];
999
1000 if (!ar->wmi.ops->gen_scan_prob_req_oui)
1001 return -EOPNOTSUPP;
1002
1003 skb = ar->wmi.ops->gen_scan_prob_req_oui(ar, prob_req_oui);
1004 if (IS_ERR(skb))
1005 return PTR_ERR(skb);
1006
1007 return ath10k_wmi_cmd_send(ar, skb,
1008 ar->wmi.cmd->scan_prob_req_oui_cmdid);
1009}
1010
1011static inline int
1012ath10k_wmi_peer_assoc(struct ath10k *ar,
1013 const struct wmi_peer_assoc_complete_arg *arg)
1014{
1015 struct sk_buff *skb;
1016
1017 if (!ar->wmi.ops->gen_peer_assoc)
1018 return -EOPNOTSUPP;
1019
1020 skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
1021 if (IS_ERR(skb))
1022 return PTR_ERR(skb);
1023
1024 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
1025}
1026
1027static inline int
1028ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
1029 const void *bcn, size_t bcn_len,
1030 u32 bcn_paddr, bool dtim_zero,
1031 bool deliver_cab)
1032{
1033 struct sk_buff *skb;
1034 int ret;
1035
1036 if (!ar->wmi.ops->gen_beacon_dma)
1037 return -EOPNOTSUPP;
1038
1039 skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
1040 dtim_zero, deliver_cab);
1041 if (IS_ERR(skb))
1042 return PTR_ERR(skb);
1043
1044 ret = ath10k_wmi_cmd_send_nowait(ar, skb,
1045 ar->wmi.cmd->pdev_send_bcn_cmdid);
1046 if (ret) {
1047 dev_kfree_skb(skb);
1048 return ret;
1049 }
1050
1051 return 0;
1052}
1053
1054static inline int
1055ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
1056 const struct wmi_wmm_params_all_arg *arg)
1057{
1058 struct sk_buff *skb;
1059
1060 if (!ar->wmi.ops->gen_pdev_set_wmm)
1061 return -EOPNOTSUPP;
1062
1063 skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
1064 if (IS_ERR(skb))
1065 return PTR_ERR(skb);
1066
1067 return ath10k_wmi_cmd_send(ar, skb,
1068 ar->wmi.cmd->pdev_set_wmm_params_cmdid);
1069}
1070
1071static inline int
1072ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
1073{
1074 struct sk_buff *skb;
1075
1076 if (!ar->wmi.ops->gen_request_stats)
1077 return -EOPNOTSUPP;
1078
1079 skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
1080 if (IS_ERR(skb))
1081 return PTR_ERR(skb);
1082
1083 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
1084}
1085
1086static inline int
1087ath10k_wmi_request_peer_stats_info(struct ath10k *ar,
1088 u32 vdev_id,
1089 enum wmi_peer_stats_info_request_type type,
1090 u8 *addr,
1091 u32 reset)
1092{
1093 struct sk_buff *skb;
1094
1095 if (!ar->wmi.ops->gen_request_peer_stats_info)
1096 return -EOPNOTSUPP;
1097
1098 skb = ar->wmi.ops->gen_request_peer_stats_info(ar,
1099 vdev_id,
1100 type,
1101 addr,
1102 reset);
1103 if (IS_ERR(skb))
1104 return PTR_ERR(skb);
1105
1106 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_peer_stats_info_cmdid);
1107}
1108
1109static inline int
1110ath10k_wmi_force_fw_hang(struct ath10k *ar,
1111 enum wmi_force_fw_hang_type type, u32 delay_ms)
1112{
1113 struct sk_buff *skb;
1114
1115 if (!ar->wmi.ops->gen_force_fw_hang)
1116 return -EOPNOTSUPP;
1117
1118 skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
1119 if (IS_ERR(skb))
1120 return PTR_ERR(skb);
1121
1122 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
1123}
1124
1125static inline int
1126ath10k_wmi_dbglog_cfg(struct ath10k *ar, u64 module_enable, u32 log_level)
1127{
1128 struct sk_buff *skb;
1129
1130 if (!ar->wmi.ops->gen_dbglog_cfg)
1131 return -EOPNOTSUPP;
1132
1133 skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
1134 if (IS_ERR(skb))
1135 return PTR_ERR(skb);
1136
1137 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
1138}
1139
1140static inline int
1141ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
1142{
1143 struct sk_buff *skb;
1144
1145 if (!ar->wmi.ops->gen_pktlog_enable)
1146 return -EOPNOTSUPP;
1147
1148 skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
1149 if (IS_ERR(skb))
1150 return PTR_ERR(skb);
1151
1152 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid);
1153}
1154
1155static inline int
1156ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
1157{
1158 struct sk_buff *skb;
1159
1160 if (!ar->wmi.ops->gen_pktlog_disable)
1161 return -EOPNOTSUPP;
1162
1163 skb = ar->wmi.ops->gen_pktlog_disable(ar);
1164 if (IS_ERR(skb))
1165 return PTR_ERR(skb);
1166
1167 return ath10k_wmi_cmd_send(ar, skb,
1168 ar->wmi.cmd->pdev_pktlog_disable_cmdid);
1169}
1170
1171static inline int
1172ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
1173 u32 next_offset, u32 enabled)
1174{
1175 struct sk_buff *skb;
1176
1177 if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
1178 return -EOPNOTSUPP;
1179
1180 skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
1181 next_offset, enabled);
1182 if (IS_ERR(skb))
1183 return PTR_ERR(skb);
1184
1185 return ath10k_wmi_cmd_send(ar, skb,
1186 ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
1187}
1188
1189static inline int
1190ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
1191{
1192 struct sk_buff *skb;
1193
1194 if (!ar->wmi.ops->gen_pdev_get_temperature)
1195 return -EOPNOTSUPP;
1196
1197 skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
1198 if (IS_ERR(skb))
1199 return PTR_ERR(skb);
1200
1201 return ath10k_wmi_cmd_send(ar, skb,
1202 ar->wmi.cmd->pdev_get_temperature_cmdid);
1203}
1204
1205static inline int
1206ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
1207{
1208 struct sk_buff *skb;
1209
1210 if (!ar->wmi.ops->gen_addba_clear_resp)
1211 return -EOPNOTSUPP;
1212
1213 skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
1214 if (IS_ERR(skb))
1215 return PTR_ERR(skb);
1216
1217 return ath10k_wmi_cmd_send(ar, skb,
1218 ar->wmi.cmd->addba_clear_resp_cmdid);
1219}
1220
1221static inline int
1222ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
1223 u32 tid, u32 buf_size)
1224{
1225 struct sk_buff *skb;
1226
1227 if (!ar->wmi.ops->gen_addba_send)
1228 return -EOPNOTSUPP;
1229
1230 skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
1231 if (IS_ERR(skb))
1232 return PTR_ERR(skb);
1233
1234 return ath10k_wmi_cmd_send(ar, skb,
1235 ar->wmi.cmd->addba_send_cmdid);
1236}
1237
1238static inline int
1239ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
1240 u32 tid, u32 status)
1241{
1242 struct sk_buff *skb;
1243
1244 if (!ar->wmi.ops->gen_addba_set_resp)
1245 return -EOPNOTSUPP;
1246
1247 skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
1248 if (IS_ERR(skb))
1249 return PTR_ERR(skb);
1250
1251 return ath10k_wmi_cmd_send(ar, skb,
1252 ar->wmi.cmd->addba_set_resp_cmdid);
1253}
1254
1255static inline int
1256ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
1257 u32 tid, u32 initiator, u32 reason)
1258{
1259 struct sk_buff *skb;
1260
1261 if (!ar->wmi.ops->gen_delba_send)
1262 return -EOPNOTSUPP;
1263
1264 skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
1265 reason);
1266 if (IS_ERR(skb))
1267 return PTR_ERR(skb);
1268
1269 return ath10k_wmi_cmd_send(ar, skb,
1270 ar->wmi.cmd->delba_send_cmdid);
1271}
1272
1273static inline int
1274ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
1275 struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
1276 void *prb_ies, size_t prb_ies_len)
1277{
1278 struct sk_buff *skb;
1279
1280 if (!ar->wmi.ops->gen_bcn_tmpl)
1281 return -EOPNOTSUPP;
1282
1283 skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
1284 prb_caps, prb_erp, prb_ies,
1285 prb_ies_len);
1286 if (IS_ERR(skb))
1287 return PTR_ERR(skb);
1288
1289 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
1290}
1291
1292static inline int
1293ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
1294{
1295 struct sk_buff *skb;
1296
1297 if (!ar->wmi.ops->gen_prb_tmpl)
1298 return -EOPNOTSUPP;
1299
1300 skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
1301 if (IS_ERR(skb))
1302 return PTR_ERR(skb);
1303
1304 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
1305}
1306
1307static inline int
1308ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
1309{
1310 struct sk_buff *skb;
1311
1312 if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
1313 return -EOPNOTSUPP;
1314
1315 skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
1316 if (IS_ERR(skb))
1317 return PTR_ERR(skb);
1318
1319 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
1320}
1321
1322static inline int
1323ath10k_wmi_sta_keepalive(struct ath10k *ar,
1324 const struct wmi_sta_keepalive_arg *arg)
1325{
1326 struct sk_buff *skb;
1327 u32 cmd_id;
1328
1329 if (!ar->wmi.ops->gen_sta_keepalive)
1330 return -EOPNOTSUPP;
1331
1332 skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
1333 if (IS_ERR(skb))
1334 return PTR_ERR(skb);
1335
1336 cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
1337 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1338}
1339
1340static inline int
1341ath10k_wmi_wow_enable(struct ath10k *ar)
1342{
1343 struct sk_buff *skb;
1344 u32 cmd_id;
1345
1346 if (!ar->wmi.ops->gen_wow_enable)
1347 return -EOPNOTSUPP;
1348
1349 skb = ar->wmi.ops->gen_wow_enable(ar);
1350 if (IS_ERR(skb))
1351 return PTR_ERR(skb);
1352
1353 cmd_id = ar->wmi.cmd->wow_enable_cmdid;
1354 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1355}
1356
1357static inline int
1358ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
1359 enum wmi_wow_wakeup_event event,
1360 u32 enable)
1361{
1362 struct sk_buff *skb;
1363 u32 cmd_id;
1364
1365 if (!ar->wmi.ops->gen_wow_add_wakeup_event)
1366 return -EOPNOTSUPP;
1367
1368 skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
1369 if (IS_ERR(skb))
1370 return PTR_ERR(skb);
1371
1372 cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
1373 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1374}
1375
1376static inline int
1377ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
1378{
1379 struct sk_buff *skb;
1380 u32 cmd_id;
1381
1382 if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
1383 return -EOPNOTSUPP;
1384
1385 skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
1386 if (IS_ERR(skb))
1387 return PTR_ERR(skb);
1388
1389 cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
1390 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1391}
1392
1393static inline int
1394ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
1395 const u8 *pattern, const u8 *mask,
1396 int pattern_len, int pattern_offset)
1397{
1398 struct sk_buff *skb;
1399 u32 cmd_id;
1400
1401 if (!ar->wmi.ops->gen_wow_add_pattern)
1402 return -EOPNOTSUPP;
1403
1404 skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
1405 pattern, mask, pattern_len,
1406 pattern_offset);
1407 if (IS_ERR(skb))
1408 return PTR_ERR(skb);
1409
1410 cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
1411 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1412}
1413
1414static inline int
1415ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
1416{
1417 struct sk_buff *skb;
1418 u32 cmd_id;
1419
1420 if (!ar->wmi.ops->gen_wow_del_pattern)
1421 return -EOPNOTSUPP;
1422
1423 skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
1424 if (IS_ERR(skb))
1425 return PTR_ERR(skb);
1426
1427 cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
1428 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1429}
1430
1431static inline int
1432ath10k_wmi_wow_config_pno(struct ath10k *ar, u32 vdev_id,
1433 struct wmi_pno_scan_req *pno_scan)
1434{
1435 struct sk_buff *skb;
1436 u32 cmd_id;
1437
1438 if (!ar->wmi.ops->gen_wow_config_pno)
1439 return -EOPNOTSUPP;
1440
1441 skb = ar->wmi.ops->gen_wow_config_pno(ar, vdev_id, pno_scan);
1442 if (IS_ERR(skb))
1443 return PTR_ERR(skb);
1444
1445 cmd_id = ar->wmi.cmd->network_list_offload_config_cmdid;
1446 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1447}
1448
1449static inline int
1450ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
1451 enum wmi_tdls_state state)
1452{
1453 struct sk_buff *skb;
1454
1455 if (!ar->wmi.ops->gen_update_fw_tdls_state)
1456 return -EOPNOTSUPP;
1457
1458 skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
1459 if (IS_ERR(skb))
1460 return PTR_ERR(skb);
1461
1462 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
1463}
1464
1465static inline int
1466ath10k_wmi_tdls_peer_update(struct ath10k *ar,
1467 const struct wmi_tdls_peer_update_cmd_arg *arg,
1468 const struct wmi_tdls_peer_capab_arg *cap,
1469 const struct wmi_channel_arg *chan)
1470{
1471 struct sk_buff *skb;
1472
1473 if (!ar->wmi.ops->gen_tdls_peer_update)
1474 return -EOPNOTSUPP;
1475
1476 skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
1477 if (IS_ERR(skb))
1478 return PTR_ERR(skb);
1479
1480 return ath10k_wmi_cmd_send(ar, skb,
1481 ar->wmi.cmd->tdls_peer_update_cmdid);
1482}
1483
1484static inline int
1485ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
1486{
1487 struct sk_buff *skb;
1488
1489 if (!ar->wmi.ops->gen_adaptive_qcs)
1490 return -EOPNOTSUPP;
1491
1492 skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
1493 if (IS_ERR(skb))
1494 return PTR_ERR(skb);
1495
1496 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
1497}
1498
1499static inline int
1500ath10k_wmi_pdev_get_tpc_config(struct ath10k *ar, u32 param)
1501{
1502 struct sk_buff *skb;
1503
1504 if (!ar->wmi.ops->gen_pdev_get_tpc_config)
1505 return -EOPNOTSUPP;
1506
1507 skb = ar->wmi.ops->gen_pdev_get_tpc_config(ar, param);
1508
1509 if (IS_ERR(skb))
1510 return PTR_ERR(skb);
1511
1512 return ath10k_wmi_cmd_send(ar, skb,
1513 ar->wmi.cmd->pdev_get_tpc_config_cmdid);
1514}
1515
1516static inline int
1517ath10k_wmi_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats,
1518 char *buf)
1519{
1520 if (!ar->wmi.ops->fw_stats_fill)
1521 return -EOPNOTSUPP;
1522
1523 ar->wmi.ops->fw_stats_fill(ar, fw_stats, buf);
1524 return 0;
1525}
1526
1527static inline int
1528ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
1529 u32 detect_level, u32 detect_margin)
1530{
1531 struct sk_buff *skb;
1532
1533 if (!ar->wmi.ops->gen_pdev_enable_adaptive_cca)
1534 return -EOPNOTSUPP;
1535
1536 skb = ar->wmi.ops->gen_pdev_enable_adaptive_cca(ar, enable,
1537 detect_level,
1538 detect_margin);
1539
1540 if (IS_ERR(skb))
1541 return PTR_ERR(skb);
1542
1543 return ath10k_wmi_cmd_send(ar, skb,
1544 ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid);
1545}
1546
1547static inline int
1548ath10k_wmi_ext_resource_config(struct ath10k *ar,
1549 enum wmi_host_platform_type type,
1550 u32 fw_feature_bitmap)
1551{
1552 struct sk_buff *skb;
1553
1554 if (!ar->wmi.ops->ext_resource_config)
1555 return -EOPNOTSUPP;
1556
1557 skb = ar->wmi.ops->ext_resource_config(ar, type,
1558 fw_feature_bitmap);
1559
1560 if (IS_ERR(skb))
1561 return PTR_ERR(skb);
1562
1563 return ath10k_wmi_cmd_send(ar, skb,
1564 ar->wmi.cmd->ext_resource_cfg_cmdid);
1565}
1566
1567static inline int
1568ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype)
1569{
1570 if (!ar->wmi.ops->get_vdev_subtype)
1571 return -EOPNOTSUPP;
1572
1573 return ar->wmi.ops->get_vdev_subtype(ar, subtype);
1574}
1575
1576static inline int
1577ath10k_wmi_pdev_bss_chan_info_request(struct ath10k *ar,
1578 enum wmi_bss_survey_req_type type)
1579{
1580 struct ath10k_wmi *wmi = &ar->wmi;
1581 struct sk_buff *skb;
1582
1583 if (!wmi->ops->gen_pdev_bss_chan_info_req)
1584 return -EOPNOTSUPP;
1585
1586 skb = wmi->ops->gen_pdev_bss_chan_info_req(ar, type);
1587 if (IS_ERR(skb))
1588 return PTR_ERR(skb);
1589
1590 return ath10k_wmi_cmd_send(ar, skb,
1591 wmi->cmd->pdev_bss_chan_info_request_cmdid);
1592}
1593
1594static inline int
1595ath10k_wmi_echo(struct ath10k *ar, u32 value)
1596{
1597 struct ath10k_wmi *wmi = &ar->wmi;
1598 struct sk_buff *skb;
1599
1600 if (!wmi->ops->gen_echo)
1601 return -EOPNOTSUPP;
1602
1603 skb = wmi->ops->gen_echo(ar, value);
1604 if (IS_ERR(skb))
1605 return PTR_ERR(skb);
1606
1607 return ath10k_wmi_cmd_send(ar, skb, wmi->cmd->echo_cmdid);
1608}
1609
1610static inline int
1611ath10k_wmi_pdev_get_tpc_table_cmdid(struct ath10k *ar, u32 param)
1612{
1613 struct sk_buff *skb;
1614
1615 if (!ar->wmi.ops->gen_pdev_get_tpc_table_cmdid)
1616 return -EOPNOTSUPP;
1617
1618 skb = ar->wmi.ops->gen_pdev_get_tpc_table_cmdid(ar, param);
1619
1620 if (IS_ERR(skb))
1621 return PTR_ERR(skb);
1622
1623 return ath10k_wmi_cmd_send(ar, skb,
1624 ar->wmi.cmd->pdev_get_tpc_table_cmdid);
1625}
1626
1627static inline int
1628ath10k_wmi_report_radar_found(struct ath10k *ar,
1629 const struct ath10k_radar_found_info *arg)
1630{
1631 struct sk_buff *skb;
1632
1633 if (!ar->wmi.ops->gen_radar_found)
1634 return -EOPNOTSUPP;
1635
1636 skb = ar->wmi.ops->gen_radar_found(ar, arg);
1637 if (IS_ERR(skb))
1638 return PTR_ERR(skb);
1639
1640 return ath10k_wmi_cmd_send(ar, skb,
1641 ar->wmi.cmd->radar_found_cmdid);
1642}
1643
1644static inline int
1645ath10k_wmi_pdev_bb_timing(struct ath10k *ar,
1646 const struct wmi_bb_timing_cfg_arg *arg)
1647{
1648 struct sk_buff *skb;
1649
1650 if (!ar->wmi.ops->gen_bb_timing)
1651 return -EOPNOTSUPP;
1652
1653 skb = ar->wmi.ops->gen_bb_timing(ar, arg);
1654
1655 if (IS_ERR(skb))
1656 return PTR_ERR(skb);
1657
1658 return ath10k_wmi_cmd_send(ar, skb,
1659 ar->wmi.cmd->set_bb_timing_cmdid);
1660}
1661
1662static inline int
1663ath10k_wmi_set_per_peer_per_tid_cfg(struct ath10k *ar,
1664 const struct wmi_per_peer_per_tid_cfg_arg *arg)
1665{
1666 struct sk_buff *skb;
1667
1668 if (!ar->wmi.ops->gen_per_peer_per_tid_cfg)
1669 return -EOPNOTSUPP;
1670
1671 skb = ar->wmi.ops->gen_per_peer_per_tid_cfg(ar, arg);
1672 if (IS_ERR(skb))
1673 return PTR_ERR(skb);
1674
1675 return ath10k_wmi_cmd_send(ar, skb,
1676 ar->wmi.cmd->per_peer_per_tid_config_cmdid);
1677}
1678#endif
1/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
4 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
5 *
6 * Permission to use, copy, modify, and/or distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19#ifndef _WMI_OPS_H_
20#define _WMI_OPS_H_
21
22struct ath10k;
23struct sk_buff;
24
25struct wmi_ops {
26 void (*rx)(struct ath10k *ar, struct sk_buff *skb);
27 void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);
28
29 int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
30 struct wmi_scan_ev_arg *arg);
31 int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
32 struct wmi_mgmt_rx_ev_arg *arg);
33 int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
34 struct wmi_ch_info_ev_arg *arg);
35 int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
36 struct wmi_vdev_start_ev_arg *arg);
37 int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
38 struct wmi_peer_kick_ev_arg *arg);
39 int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
40 struct wmi_swba_ev_arg *arg);
41 int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
42 struct wmi_phyerr_hdr_arg *arg);
43 int (*pull_phyerr)(struct ath10k *ar, const void *phyerr_buf,
44 int left_len, struct wmi_phyerr_ev_arg *arg);
45 int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
46 struct wmi_svc_rdy_ev_arg *arg);
47 int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
48 struct wmi_rdy_ev_arg *arg);
49 int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
50 struct ath10k_fw_stats *stats);
51 int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
52 struct wmi_roam_ev_arg *arg);
53 int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
54 struct wmi_wow_ev_arg *arg);
55 int (*pull_echo_ev)(struct ath10k *ar, struct sk_buff *skb,
56 struct wmi_echo_ev_arg *arg);
57 enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);
58
59 struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
60 struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
61 struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
62 u16 rd5g, u16 ctl2g, u16 ctl5g,
63 enum wmi_dfs_region dfs_reg);
64 struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
65 u32 value);
66 struct sk_buff *(*gen_init)(struct ath10k *ar);
67 struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
68 const struct wmi_start_scan_arg *arg);
69 struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
70 const struct wmi_stop_scan_arg *arg);
71 struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
72 enum wmi_vdev_type type,
73 enum wmi_vdev_subtype subtype,
74 const u8 macaddr[ETH_ALEN]);
75 struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
76 struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
77 const struct wmi_vdev_start_request_arg *arg,
78 bool restart);
79 struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
80 struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
81 const u8 *bssid);
82 struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
83 struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
84 u32 param_id, u32 param_value);
85 struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
86 const struct wmi_vdev_install_key_arg *arg);
87 struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
88 const struct wmi_vdev_spectral_conf_arg *arg);
89 struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
90 u32 trigger, u32 enable);
91 struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
92 const struct wmi_wmm_params_all_arg *arg);
93 struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
94 const u8 peer_addr[ETH_ALEN],
95 enum wmi_peer_type peer_type);
96 struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
97 const u8 peer_addr[ETH_ALEN]);
98 struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
99 const u8 peer_addr[ETH_ALEN],
100 u32 tid_bitmap);
101 struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
102 const u8 *peer_addr,
103 enum wmi_peer_param param_id,
104 u32 param_value);
105 struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
106 const struct wmi_peer_assoc_complete_arg *arg);
107 struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
108 enum wmi_sta_ps_mode psmode);
109 struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
110 enum wmi_sta_powersave_param param_id,
111 u32 value);
112 struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
113 const u8 *mac,
114 enum wmi_ap_ps_peer_param param_id,
115 u32 value);
116 struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
117 const struct wmi_scan_chan_list_arg *arg);
118 struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
119 const void *bcn, size_t bcn_len,
120 u32 bcn_paddr, bool dtim_zero,
121 bool deliver_cab);
122 struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
123 const struct wmi_wmm_params_all_arg *arg);
124 struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
125 struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
126 enum wmi_force_fw_hang_type type,
127 u32 delay_ms);
128 struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
129 struct sk_buff *(*gen_mgmt_tx_send)(struct ath10k *ar,
130 struct sk_buff *skb,
131 dma_addr_t paddr);
132 struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable,
133 u32 log_level);
134 struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
135 struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
136 struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
137 u32 period, u32 duration,
138 u32 next_offset,
139 u32 enabled);
140 struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
141 struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
142 const u8 *mac);
143 struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
144 const u8 *mac, u32 tid, u32 buf_size);
145 struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
146 const u8 *mac, u32 tid,
147 u32 status);
148 struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
149 const u8 *mac, u32 tid, u32 initiator,
150 u32 reason);
151 struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
152 u32 tim_ie_offset, struct sk_buff *bcn,
153 u32 prb_caps, u32 prb_erp,
154 void *prb_ies, size_t prb_ies_len);
155 struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
156 struct sk_buff *bcn);
157 struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
158 const u8 *p2p_ie);
159 struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
160 const u8 peer_addr[ETH_ALEN],
161 const struct wmi_sta_uapsd_auto_trig_arg *args,
162 u32 num_ac);
163 struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
164 const struct wmi_sta_keepalive_arg *arg);
165 struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
166 struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
167 enum wmi_wow_wakeup_event event,
168 u32 enable);
169 struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
170 struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
171 u32 pattern_id,
172 const u8 *pattern,
173 const u8 *mask,
174 int pattern_len,
175 int pattern_offset);
176 struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
177 u32 pattern_id);
178 struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
179 u32 vdev_id,
180 enum wmi_tdls_state state);
181 struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
182 const struct wmi_tdls_peer_update_cmd_arg *arg,
183 const struct wmi_tdls_peer_capab_arg *cap,
184 const struct wmi_channel_arg *chan);
185 struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
186 struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar,
187 u32 param);
188 void (*fw_stats_fill)(struct ath10k *ar,
189 struct ath10k_fw_stats *fw_stats,
190 char *buf);
191 struct sk_buff *(*gen_pdev_enable_adaptive_cca)(struct ath10k *ar,
192 u8 enable,
193 u32 detect_level,
194 u32 detect_margin);
195 struct sk_buff *(*ext_resource_config)(struct ath10k *ar,
196 enum wmi_host_platform_type type,
197 u32 fw_feature_bitmap);
198 int (*get_vdev_subtype)(struct ath10k *ar,
199 enum wmi_vdev_subtype subtype);
200 struct sk_buff *(*gen_pdev_bss_chan_info_req)
201 (struct ath10k *ar,
202 enum wmi_bss_survey_req_type type);
203 struct sk_buff *(*gen_echo)(struct ath10k *ar, u32 value);
204 struct sk_buff *(*gen_pdev_get_tpc_table_cmdid)(struct ath10k *ar,
205 u32 param);
206
207};
208
209int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
210
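/*
 * The inline wrappers below share one pattern: if the bound ops table does
 * not implement the hook they return -EOPNOTSUPP, otherwise they build the
 * command skb via the gen_* hook and pass it to ath10k_wmi_cmd_send() with
 * the interface-specific command id from ar->wmi.cmd.
 *
 * A minimal caller sketch (illustrative only; pdev_param names depend on
 * the firmware interface and error handling is trimmed):
 *
 *	ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pmf_qos, 1);
 *	if (ret && ret != -EOPNOTSUPP)
 *		ath10k_warn(ar, "failed to set pdev param: %d\n", ret);
 */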
211static inline int
212ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
213{
214 if (WARN_ON_ONCE(!ar->wmi.ops->rx))
215 return -EOPNOTSUPP;
216
217 ar->wmi.ops->rx(ar, skb);
218 return 0;
219}
220
221static inline int
222ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
223 size_t len)
224{
225 if (!ar->wmi.ops->map_svc)
226 return -EOPNOTSUPP;
227
228 ar->wmi.ops->map_svc(in, out, len);
229 return 0;
230}
231
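/*
 * pull_* wrappers decode a received WMI event skb into a host-side
 * *_ev_arg structure; most decoded fields are left in __le32 form and
 * converted by the caller. A sketch of typical use (illustrative; the
 * real event handlers live in wmi.c):
 *
 *	struct wmi_scan_ev_arg arg = {};
 *	int ret;
 *
 *	ret = ath10k_wmi_pull_scan(ar, skb, &arg);
 *	if (ret)
 *		return ret;
 */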
232static inline int
233ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
234 struct wmi_scan_ev_arg *arg)
235{
236 if (!ar->wmi.ops->pull_scan)
237 return -EOPNOTSUPP;
238
239 return ar->wmi.ops->pull_scan(ar, skb, arg);
240}
241
242static inline int
243ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
244 struct wmi_mgmt_rx_ev_arg *arg)
245{
246 if (!ar->wmi.ops->pull_mgmt_rx)
247 return -EOPNOTSUPP;
248
249 return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
250}
251
252static inline int
253ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
254 struct wmi_ch_info_ev_arg *arg)
255{
256 if (!ar->wmi.ops->pull_ch_info)
257 return -EOPNOTSUPP;
258
259 return ar->wmi.ops->pull_ch_info(ar, skb, arg);
260}
261
262static inline int
263ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
264 struct wmi_vdev_start_ev_arg *arg)
265{
266 if (!ar->wmi.ops->pull_vdev_start)
267 return -EOPNOTSUPP;
268
269 return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
270}
271
272static inline int
273ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
274 struct wmi_peer_kick_ev_arg *arg)
275{
276 if (!ar->wmi.ops->pull_peer_kick)
277 return -EOPNOTSUPP;
278
279 return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
280}
281
282static inline int
283ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
284 struct wmi_swba_ev_arg *arg)
285{
286 if (!ar->wmi.ops->pull_swba)
287 return -EOPNOTSUPP;
288
289 return ar->wmi.ops->pull_swba(ar, skb, arg);
290}
291
292static inline int
293ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb,
294 struct wmi_phyerr_hdr_arg *arg)
295{
296 if (!ar->wmi.ops->pull_phyerr_hdr)
297 return -EOPNOTSUPP;
298
299 return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg);
300}
301
302static inline int
303ath10k_wmi_pull_phyerr(struct ath10k *ar, const void *phyerr_buf,
304 int left_len, struct wmi_phyerr_ev_arg *arg)
305{
306 if (!ar->wmi.ops->pull_phyerr)
307 return -EOPNOTSUPP;
308
309 return ar->wmi.ops->pull_phyerr(ar, phyerr_buf, left_len, arg);
310}
311
312static inline int
313ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
314 struct wmi_svc_rdy_ev_arg *arg)
315{
316 if (!ar->wmi.ops->pull_svc_rdy)
317 return -EOPNOTSUPP;
318
319 return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
320}
321
322static inline int
323ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
324 struct wmi_rdy_ev_arg *arg)
325{
326 if (!ar->wmi.ops->pull_rdy)
327 return -EOPNOTSUPP;
328
329 return ar->wmi.ops->pull_rdy(ar, skb, arg);
330}
331
332static inline int
333ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
334 struct ath10k_fw_stats *stats)
335{
336 if (!ar->wmi.ops->pull_fw_stats)
337 return -EOPNOTSUPP;
338
339 return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
340}
341
342static inline int
343ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
344 struct wmi_roam_ev_arg *arg)
345{
346 if (!ar->wmi.ops->pull_roam_ev)
347 return -EOPNOTSUPP;
348
349 return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
350}
351
352static inline int
353ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
354 struct wmi_wow_ev_arg *arg)
355{
356 if (!ar->wmi.ops->pull_wow_event)
357 return -EOPNOTSUPP;
358
359 return ar->wmi.ops->pull_wow_event(ar, skb, arg);
360}
361
362static inline int
363ath10k_wmi_pull_echo_ev(struct ath10k *ar, struct sk_buff *skb,
364 struct wmi_echo_ev_arg *arg)
365{
366 if (!ar->wmi.ops->pull_echo_ev)
367 return -EOPNOTSUPP;
368
369 return ar->wmi.ops->pull_echo_ev(ar, skb, arg);
370}
371
372static inline enum wmi_txbf_conf
373ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
374{
375 if (!ar->wmi.ops->get_txbf_conf_scheme)
376 return WMI_TXBF_CONF_UNSUPPORTED;
377
378 return ar->wmi.ops->get_txbf_conf_scheme(ar);
379}
380
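/*
 * Two management-frame transmit paths exist: gen_mgmt_tx_send passes the
 * frame by DMA address and relies on a later tx-completion event, while
 * gen_mgmt_tx copies the frame into the command itself and has no
 * completion event (hence the faked ACK status below). Which path is used
 * depends on the firmware interface; roughly, the TLV interface provides
 * gen_mgmt_tx_send and the non-TLV interfaces provide gen_mgmt_tx.
 */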
381static inline int
382ath10k_wmi_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
383 dma_addr_t paddr)
384{
385 struct sk_buff *skb;
386 int ret;
387
388 if (!ar->wmi.ops->gen_mgmt_tx_send)
389 return -EOPNOTSUPP;
390
391 skb = ar->wmi.ops->gen_mgmt_tx_send(ar, msdu, paddr);
392 if (IS_ERR(skb))
393 return PTR_ERR(skb);
394
395 ret = ath10k_wmi_cmd_send(ar, skb,
396 ar->wmi.cmd->mgmt_tx_send_cmdid);
397 if (ret)
398 return ret;
399
400 return 0;
401}
402
403static inline int
404ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
405{
406 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
407 struct sk_buff *skb;
408 int ret;
409
410 if (!ar->wmi.ops->gen_mgmt_tx)
411 return -EOPNOTSUPP;
412
413 skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
414 if (IS_ERR(skb))
415 return PTR_ERR(skb);
416
417 ret = ath10k_wmi_cmd_send(ar, skb,
418 ar->wmi.cmd->mgmt_tx_cmdid);
419 if (ret)
420 return ret;
421
422 /* FIXME There's no ACK event for Management Tx. This probably
423 * shouldn't be called here either.
424 */
425 info->flags |= IEEE80211_TX_STAT_ACK;
426 ieee80211_tx_status_irqsafe(ar->hw, msdu);
427
428 return 0;
429}
430
431static inline int
432ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
433 u16 ctl2g, u16 ctl5g,
434 enum wmi_dfs_region dfs_reg)
435{
436 struct sk_buff *skb;
437
438 if (!ar->wmi.ops->gen_pdev_set_rd)
439 return -EOPNOTSUPP;
440
441 skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
442 dfs_reg);
443 if (IS_ERR(skb))
444 return PTR_ERR(skb);
445
446 return ath10k_wmi_cmd_send(ar, skb,
447 ar->wmi.cmd->pdev_set_regdomain_cmdid);
448}
449
450static inline int
451ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
452{
453 struct sk_buff *skb;
454
455 if (!ar->wmi.ops->gen_pdev_suspend)
456 return -EOPNOTSUPP;
457
458 skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
459 if (IS_ERR(skb))
460 return PTR_ERR(skb);
461
462 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
463}
464
465static inline int
466ath10k_wmi_pdev_resume_target(struct ath10k *ar)
467{
468 struct sk_buff *skb;
469
470 if (!ar->wmi.ops->gen_pdev_resume)
471 return -EOPNOTSUPP;
472
473 skb = ar->wmi.ops->gen_pdev_resume(ar);
474 if (IS_ERR(skb))
475 return PTR_ERR(skb);
476
477 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
478}
479
480static inline int
481ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
482{
483 struct sk_buff *skb;
484
485 if (!ar->wmi.ops->gen_pdev_set_param)
486 return -EOPNOTSUPP;
487
488 skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
489 if (IS_ERR(skb))
490 return PTR_ERR(skb);
491
492 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
493}
494
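/*
 * gen_init builds the WMI INIT command carrying the resource configuration
 * and host memory chunks; it is normally sent once during firmware
 * bring-up, before any scan or vdev commands are issued.
 */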
495static inline int
496ath10k_wmi_cmd_init(struct ath10k *ar)
497{
498 struct sk_buff *skb;
499
500 if (!ar->wmi.ops->gen_init)
501 return -EOPNOTSUPP;
502
503 skb = ar->wmi.ops->gen_init(ar);
504 if (IS_ERR(skb))
505 return PTR_ERR(skb);
506
507 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
508}
509
510static inline int
511ath10k_wmi_start_scan(struct ath10k *ar,
512 const struct wmi_start_scan_arg *arg)
513{
514 struct sk_buff *skb;
515
516 if (!ar->wmi.ops->gen_start_scan)
517 return -EOPNOTSUPP;
518
519 skb = ar->wmi.ops->gen_start_scan(ar, arg);
520 if (IS_ERR(skb))
521 return PTR_ERR(skb);
522
523 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
524}
525
526static inline int
527ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
528{
529 struct sk_buff *skb;
530
531 if (!ar->wmi.ops->gen_stop_scan)
532 return -EOPNOTSUPP;
533
534 skb = ar->wmi.ops->gen_stop_scan(ar, arg);
535 if (IS_ERR(skb))
536 return PTR_ERR(skb);
537
538 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
539}
540
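/*
 * Vdev lifecycle helpers. The usual ordering is create -> start (or
 * restart) -> up while operating, and down -> stop -> delete on teardown.
 * A compressed sketch (illustrative; argument setup and error handling
 * omitted):
 *
 *	ath10k_wmi_vdev_create(ar, vdev_id, WMI_VDEV_TYPE_STA,
 *			       WMI_VDEV_SUBTYPE_NONE, mac);
 *	ath10k_wmi_vdev_start(ar, &start_arg);
 *	ath10k_wmi_vdev_up(ar, vdev_id, aid, bssid);
 */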
541static inline int
542ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
543 enum wmi_vdev_type type,
544 enum wmi_vdev_subtype subtype,
545 const u8 macaddr[ETH_ALEN])
546{
547 struct sk_buff *skb;
548
549 if (!ar->wmi.ops->gen_vdev_create)
550 return -EOPNOTSUPP;
551
552 skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
553 if (IS_ERR(skb))
554 return PTR_ERR(skb);
555
556 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
557}
558
559static inline int
560ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
561{
562 struct sk_buff *skb;
563
564 if (!ar->wmi.ops->gen_vdev_delete)
565 return -EOPNOTSUPP;
566
567 skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
568 if (IS_ERR(skb))
569 return PTR_ERR(skb);
570
571 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
572}
573
574static inline int
575ath10k_wmi_vdev_start(struct ath10k *ar,
576 const struct wmi_vdev_start_request_arg *arg)
577{
578 struct sk_buff *skb;
579
580 if (!ar->wmi.ops->gen_vdev_start)
581 return -EOPNOTSUPP;
582
583 skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
584 if (IS_ERR(skb))
585 return PTR_ERR(skb);
586
587 return ath10k_wmi_cmd_send(ar, skb,
588 ar->wmi.cmd->vdev_start_request_cmdid);
589}
590
591static inline int
592ath10k_wmi_vdev_restart(struct ath10k *ar,
593 const struct wmi_vdev_start_request_arg *arg)
594{
595 struct sk_buff *skb;
596
597 if (!ar->wmi.ops->gen_vdev_start)
598 return -EOPNOTSUPP;
599
600 skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
601 if (IS_ERR(skb))
602 return PTR_ERR(skb);
603
604 return ath10k_wmi_cmd_send(ar, skb,
605 ar->wmi.cmd->vdev_restart_request_cmdid);
606}
607
608static inline int
609ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
610{
611 struct sk_buff *skb;
612
613 if (!ar->wmi.ops->gen_vdev_stop)
614 return -EOPNOTSUPP;
615
616 skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
617 if (IS_ERR(skb))
618 return PTR_ERR(skb);
619
620 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
621}
622
623static inline int
624ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
625{
626 struct sk_buff *skb;
627
628 if (!ar->wmi.ops->gen_vdev_up)
629 return -EOPNOTSUPP;
630
631 skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
632 if (IS_ERR(skb))
633 return PTR_ERR(skb);
634
635 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
636}
637
638static inline int
639ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
640{
641 struct sk_buff *skb;
642
643 if (!ar->wmi.ops->gen_vdev_down)
644 return -EOPNOTSUPP;
645
646 skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
647 if (IS_ERR(skb))
648 return PTR_ERR(skb);
649
650 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
651}
652
653static inline int
654ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
655 u32 param_value)
656{
657 struct sk_buff *skb;
658
659 if (!ar->wmi.ops->gen_vdev_set_param)
660 return -EOPNOTSUPP;
661
662 skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
663 param_value);
664 if (IS_ERR(skb))
665 return PTR_ERR(skb);
666
667 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
668}
669
670static inline int
671ath10k_wmi_vdev_install_key(struct ath10k *ar,
672 const struct wmi_vdev_install_key_arg *arg)
673{
674 struct sk_buff *skb;
675
676 if (!ar->wmi.ops->gen_vdev_install_key)
677 return -EOPNOTSUPP;
678
679 skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
680 if (IS_ERR(skb))
681 return PTR_ERR(skb);
682
683 return ath10k_wmi_cmd_send(ar, skb,
684 ar->wmi.cmd->vdev_install_key_cmdid);
685}
686
687static inline int
688ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
689 const struct wmi_vdev_spectral_conf_arg *arg)
690{
691 struct sk_buff *skb;
692 u32 cmd_id;
693
694 if (!ar->wmi.ops->gen_vdev_spectral_conf)
695 return -EOPNOTSUPP;
696
697 skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
698 if (IS_ERR(skb))
699 return PTR_ERR(skb);
700
701 cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
702 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
703}
704
705static inline int
706ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
707 u32 enable)
708{
709 struct sk_buff *skb;
710 u32 cmd_id;
711
712 if (!ar->wmi.ops->gen_vdev_spectral_enable)
713 return -EOPNOTSUPP;
714
715 skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
716 enable);
717 if (IS_ERR(skb))
718 return PTR_ERR(skb);
719
720 cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
721 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
722}
723
724static inline int
725ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
726 const u8 peer_addr[ETH_ALEN],
727 const struct wmi_sta_uapsd_auto_trig_arg *args,
728 u32 num_ac)
729{
730 struct sk_buff *skb;
731 u32 cmd_id;
732
733 if (!ar->wmi.ops->gen_vdev_sta_uapsd)
734 return -EOPNOTSUPP;
735
736 skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
737 num_ac);
738 if (IS_ERR(skb))
739 return PTR_ERR(skb);
740
741 cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
742 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
743}
744
745static inline int
746ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
747 const struct wmi_wmm_params_all_arg *arg)
748{
749 struct sk_buff *skb;
750 u32 cmd_id;
751
	if (!ar->wmi.ops->gen_vdev_wmm_conf)
		return -EOPNOTSUPP;

752	skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
753 if (IS_ERR(skb))
754 return PTR_ERR(skb);
755
756 cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
757 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
758}
759
760static inline int
761ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
762 const u8 peer_addr[ETH_ALEN],
763 enum wmi_peer_type peer_type)
764{
765 struct sk_buff *skb;
766
767 if (!ar->wmi.ops->gen_peer_create)
768 return -EOPNOTSUPP;
769
770 skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
771 if (IS_ERR(skb))
772 return PTR_ERR(skb);
773
774 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
775}
776
777static inline int
778ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
779 const u8 peer_addr[ETH_ALEN])
780{
781 struct sk_buff *skb;
782
783 if (!ar->wmi.ops->gen_peer_delete)
784 return -EOPNOTSUPP;
785
786 skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
787 if (IS_ERR(skb))
788 return PTR_ERR(skb);
789
790 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
791}
792
793static inline int
794ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
795 const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
796{
797 struct sk_buff *skb;
798
799 if (!ar->wmi.ops->gen_peer_flush)
800 return -EOPNOTSUPP;
801
802 skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
803 if (IS_ERR(skb))
804 return PTR_ERR(skb);
805
806 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
807}
808
809static inline int
810ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
811 enum wmi_peer_param param_id, u32 param_value)
812{
813 struct sk_buff *skb;
814
815 if (!ar->wmi.ops->gen_peer_set_param)
816 return -EOPNOTSUPP;
817
818 skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
819 param_value);
820 if (IS_ERR(skb))
821 return PTR_ERR(skb);
822
823 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
824}
825
826static inline int
827ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
828 enum wmi_sta_ps_mode psmode)
829{
830 struct sk_buff *skb;
831
832 if (!ar->wmi.ops->gen_set_psmode)
833 return -EOPNOTSUPP;
834
835 skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
836 if (IS_ERR(skb))
837 return PTR_ERR(skb);
838
839 return ath10k_wmi_cmd_send(ar, skb,
840 ar->wmi.cmd->sta_powersave_mode_cmdid);
841}
842
843static inline int
844ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
845 enum wmi_sta_powersave_param param_id, u32 value)
846{
847 struct sk_buff *skb;
848
849 if (!ar->wmi.ops->gen_set_sta_ps)
850 return -EOPNOTSUPP;
851
852 skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
853 if (IS_ERR(skb))
854 return PTR_ERR(skb);
855
856 return ath10k_wmi_cmd_send(ar, skb,
857 ar->wmi.cmd->sta_powersave_param_cmdid);
858}
859
860static inline int
861ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
862 enum wmi_ap_ps_peer_param param_id, u32 value)
863{
864 struct sk_buff *skb;
865
866 if (!ar->wmi.ops->gen_set_ap_ps)
867 return -EOPNOTSUPP;
868
869 skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
870 if (IS_ERR(skb))
871 return PTR_ERR(skb);
872
873 return ath10k_wmi_cmd_send(ar, skb,
874 ar->wmi.cmd->ap_ps_peer_param_cmdid);
875}
876
877static inline int
878ath10k_wmi_scan_chan_list(struct ath10k *ar,
879 const struct wmi_scan_chan_list_arg *arg)
880{
881 struct sk_buff *skb;
882
883 if (!ar->wmi.ops->gen_scan_chan_list)
884 return -EOPNOTSUPP;
885
886 skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
887 if (IS_ERR(skb))
888 return PTR_ERR(skb);
889
890 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
891}
892
893static inline int
894ath10k_wmi_peer_assoc(struct ath10k *ar,
895 const struct wmi_peer_assoc_complete_arg *arg)
896{
897 struct sk_buff *skb;
898
899 if (!ar->wmi.ops->gen_peer_assoc)
900 return -EOPNOTSUPP;
901
902 skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
903 if (IS_ERR(skb))
904 return PTR_ERR(skb);
905
906 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
907}
908
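/*
 * Beacon submission differs from the other senders: it uses
 * ath10k_wmi_cmd_send_nowait() and frees the skb itself on failure,
 * presumably because it runs from the beacon/SWBA handling path where
 * blocking on command credits is not an option.
 */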
909static inline int
910ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
911 const void *bcn, size_t bcn_len,
912 u32 bcn_paddr, bool dtim_zero,
913 bool deliver_cab)
914{
915 struct sk_buff *skb;
916 int ret;
917
918 if (!ar->wmi.ops->gen_beacon_dma)
919 return -EOPNOTSUPP;
920
921 skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
922 dtim_zero, deliver_cab);
923 if (IS_ERR(skb))
924 return PTR_ERR(skb);
925
926 ret = ath10k_wmi_cmd_send_nowait(ar, skb,
927 ar->wmi.cmd->pdev_send_bcn_cmdid);
928 if (ret) {
929 dev_kfree_skb(skb);
930 return ret;
931 }
932
933 return 0;
934}
935
936static inline int
937ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
938 const struct wmi_wmm_params_all_arg *arg)
939{
940 struct sk_buff *skb;
941
942 if (!ar->wmi.ops->gen_pdev_set_wmm)
943 return -EOPNOTSUPP;
944
945 skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
946 if (IS_ERR(skb))
947 return PTR_ERR(skb);
948
949 return ath10k_wmi_cmd_send(ar, skb,
950 ar->wmi.cmd->pdev_set_wmm_params_cmdid);
951}
952
953static inline int
954ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
955{
956 struct sk_buff *skb;
957
958 if (!ar->wmi.ops->gen_request_stats)
959 return -EOPNOTSUPP;
960
961 skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
962 if (IS_ERR(skb))
963 return PTR_ERR(skb);
964
965 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
966}
967
968static inline int
969ath10k_wmi_force_fw_hang(struct ath10k *ar,
970 enum wmi_force_fw_hang_type type, u32 delay_ms)
971{
972 struct sk_buff *skb;
973
974 if (!ar->wmi.ops->gen_force_fw_hang)
975 return -EOPNOTSUPP;
976
977 skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
978 if (IS_ERR(skb))
979 return PTR_ERR(skb);
980
981 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
982}
983
984static inline int
985ath10k_wmi_dbglog_cfg(struct ath10k *ar, u64 module_enable, u32 log_level)
986{
987 struct sk_buff *skb;
988
989 if (!ar->wmi.ops->gen_dbglog_cfg)
990 return -EOPNOTSUPP;
991
992 skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
993 if (IS_ERR(skb))
994 return PTR_ERR(skb);
995
996 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
997}
998
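/*
 * Packet log control: the filter argument is a bitmask of driver pktlog
 * event types (see enum ath10k_pktlog_filter in the driver); pass the mask
 * to enable and use ath10k_wmi_pdev_pktlog_disable() to turn logging off.
 */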
999static inline int
1000ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
1001{
1002 struct sk_buff *skb;
1003
1004 if (!ar->wmi.ops->gen_pktlog_enable)
1005 return -EOPNOTSUPP;
1006
1007 skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
1008 if (IS_ERR(skb))
1009 return PTR_ERR(skb);
1010
1011 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid);
1012}
1013
1014static inline int
1015ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
1016{
1017 struct sk_buff *skb;
1018
1019 if (!ar->wmi.ops->gen_pktlog_disable)
1020 return -EOPNOTSUPP;
1021
1022 skb = ar->wmi.ops->gen_pktlog_disable(ar);
1023 if (IS_ERR(skb))
1024 return PTR_ERR(skb);
1025
1026 return ath10k_wmi_cmd_send(ar, skb,
1027 ar->wmi.cmd->pdev_pktlog_disable_cmdid);
1028}
1029
1030static inline int
1031ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
1032 u32 next_offset, u32 enabled)
1033{
1034 struct sk_buff *skb;
1035
1036 if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
1037 return -EOPNOTSUPP;
1038
1039 skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
1040 next_offset, enabled);
1041 if (IS_ERR(skb))
1042 return PTR_ERR(skb);
1043
1044 return ath10k_wmi_cmd_send(ar, skb,
1045 ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
1046}
1047
1048static inline int
1049ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
1050{
1051 struct sk_buff *skb;
1052
1053 if (!ar->wmi.ops->gen_pdev_get_temperature)
1054 return -EOPNOTSUPP;
1055
1056 skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
1057 if (IS_ERR(skb))
1058 return PTR_ERR(skb);
1059
1060 return ath10k_wmi_cmd_send(ar, skb,
1061 ar->wmi.cmd->pdev_get_temperature_cmdid);
1062}
1063
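/*
 * Manual BlockAck session control (addba/delba). In this driver these are
 * mainly exercised through the per-station debugfs interface for testing
 * aggregation behaviour rather than from the normal datapath.
 */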
1064static inline int
1065ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
1066{
1067 struct sk_buff *skb;
1068
1069 if (!ar->wmi.ops->gen_addba_clear_resp)
1070 return -EOPNOTSUPP;
1071
1072 skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
1073 if (IS_ERR(skb))
1074 return PTR_ERR(skb);
1075
1076 return ath10k_wmi_cmd_send(ar, skb,
1077 ar->wmi.cmd->addba_clear_resp_cmdid);
1078}
1079
1080static inline int
1081ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
1082 u32 tid, u32 buf_size)
1083{
1084 struct sk_buff *skb;
1085
1086 if (!ar->wmi.ops->gen_addba_send)
1087 return -EOPNOTSUPP;
1088
1089 skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
1090 if (IS_ERR(skb))
1091 return PTR_ERR(skb);
1092
1093 return ath10k_wmi_cmd_send(ar, skb,
1094 ar->wmi.cmd->addba_send_cmdid);
1095}
1096
1097static inline int
1098ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
1099 u32 tid, u32 status)
1100{
1101 struct sk_buff *skb;
1102
1103 if (!ar->wmi.ops->gen_addba_set_resp)
1104 return -EOPNOTSUPP;
1105
1106 skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
1107 if (IS_ERR(skb))
1108 return PTR_ERR(skb);
1109
1110 return ath10k_wmi_cmd_send(ar, skb,
1111 ar->wmi.cmd->addba_set_resp_cmdid);
1112}
1113
1114static inline int
1115ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
1116 u32 tid, u32 initiator, u32 reason)
1117{
1118 struct sk_buff *skb;
1119
1120 if (!ar->wmi.ops->gen_delba_send)
1121 return -EOPNOTSUPP;
1122
1123 skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
1124 reason);
1125 if (IS_ERR(skb))
1126 return PTR_ERR(skb);
1127
1128 return ath10k_wmi_cmd_send(ar, skb,
1129 ar->wmi.cmd->delba_send_cmdid);
1130}
1131
1132static inline int
1133ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
1134 struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
1135 void *prb_ies, size_t prb_ies_len)
1136{
1137 struct sk_buff *skb;
1138
1139 if (!ar->wmi.ops->gen_bcn_tmpl)
1140 return -EOPNOTSUPP;
1141
1142 skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
1143 prb_caps, prb_erp, prb_ies,
1144 prb_ies_len);
1145 if (IS_ERR(skb))
1146 return PTR_ERR(skb);
1147
1148 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
1149}
1150
1151static inline int
1152ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
1153{
1154 struct sk_buff *skb;
1155
1156 if (!ar->wmi.ops->gen_prb_tmpl)
1157 return -EOPNOTSUPP;
1158
1159 skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
1160 if (IS_ERR(skb))
1161 return PTR_ERR(skb);
1162
1163 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
1164}
1165
1166static inline int
1167ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
1168{
1169 struct sk_buff *skb;
1170
1171 if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
1172 return -EOPNOTSUPP;
1173
1174 skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
1175 if (IS_ERR(skb))
1176 return PTR_ERR(skb);
1177
1178 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
1179}
1180
1181static inline int
1182ath10k_wmi_sta_keepalive(struct ath10k *ar,
1183 const struct wmi_sta_keepalive_arg *arg)
1184{
1185 struct sk_buff *skb;
1186 u32 cmd_id;
1187
1188 if (!ar->wmi.ops->gen_sta_keepalive)
1189 return -EOPNOTSUPP;
1190
1191 skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
1192 if (IS_ERR(skb))
1193 return PTR_ERR(skb);
1194
1195 cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
1196 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1197}
1198
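/*
 * Wake-on-WLAN flow, roughly: register wakeup events and patterns for each
 * vdev, send wow_enable just before the host suspends, and send
 * wow_host_wakeup_ind on resume so the firmware releases any pending
 * events. A sketch (illustrative; vdev iteration and error handling
 * omitted):
 *
 *	ath10k_wmi_wow_add_wakeup_event(ar, vdev_id,
 *					WOW_MAGIC_PKT_RECVD_EVENT, 1);
 *	ath10k_wmi_wow_enable(ar);
 *	(system suspends here)
 *	ath10k_wmi_wow_host_wakeup_ind(ar);
 */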
1199static inline int
1200ath10k_wmi_wow_enable(struct ath10k *ar)
1201{
1202 struct sk_buff *skb;
1203 u32 cmd_id;
1204
1205 if (!ar->wmi.ops->gen_wow_enable)
1206 return -EOPNOTSUPP;
1207
1208 skb = ar->wmi.ops->gen_wow_enable(ar);
1209 if (IS_ERR(skb))
1210 return PTR_ERR(skb);
1211
1212 cmd_id = ar->wmi.cmd->wow_enable_cmdid;
1213 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1214}
1215
1216static inline int
1217ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
1218 enum wmi_wow_wakeup_event event,
1219 u32 enable)
1220{
1221 struct sk_buff *skb;
1222 u32 cmd_id;
1223
1224 if (!ar->wmi.ops->gen_wow_add_wakeup_event)
1225 return -EOPNOTSUPP;
1226
1227 skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
1228 if (IS_ERR(skb))
1229 return PTR_ERR(skb);
1230
1231 cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
1232 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1233}
1234
1235static inline int
1236ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
1237{
1238 struct sk_buff *skb;
1239 u32 cmd_id;
1240
1241 if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
1242 return -EOPNOTSUPP;
1243
1244 skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
1245 if (IS_ERR(skb))
1246 return PTR_ERR(skb);
1247
1248 cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
1249 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1250}
1251
1252static inline int
1253ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
1254 const u8 *pattern, const u8 *mask,
1255 int pattern_len, int pattern_offset)
1256{
1257 struct sk_buff *skb;
1258 u32 cmd_id;
1259
1260 if (!ar->wmi.ops->gen_wow_add_pattern)
1261 return -EOPNOTSUPP;
1262
1263 skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
1264 pattern, mask, pattern_len,
1265 pattern_offset);
1266 if (IS_ERR(skb))
1267 return PTR_ERR(skb);
1268
1269 cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
1270 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1271}
1272
1273static inline int
1274ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
1275{
1276 struct sk_buff *skb;
1277 u32 cmd_id;
1278
1279 if (!ar->wmi.ops->gen_wow_del_pattern)
1280 return -EOPNOTSUPP;
1281
1282 skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
1283 if (IS_ERR(skb))
1284 return PTR_ERR(skb);
1285
1286 cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
1287 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1288}
1289
1290static inline int
1291ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
1292 enum wmi_tdls_state state)
1293{
1294 struct sk_buff *skb;
1295
1296 if (!ar->wmi.ops->gen_update_fw_tdls_state)
1297 return -EOPNOTSUPP;
1298
1299 skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
1300 if (IS_ERR(skb))
1301 return PTR_ERR(skb);
1302
1303 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
1304}
1305
1306static inline int
1307ath10k_wmi_tdls_peer_update(struct ath10k *ar,
1308 const struct wmi_tdls_peer_update_cmd_arg *arg,
1309 const struct wmi_tdls_peer_capab_arg *cap,
1310 const struct wmi_channel_arg *chan)
1311{
1312 struct sk_buff *skb;
1313
1314 if (!ar->wmi.ops->gen_tdls_peer_update)
1315 return -EOPNOTSUPP;
1316
1317 skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
1318 if (IS_ERR(skb))
1319 return PTR_ERR(skb);
1320
1321 return ath10k_wmi_cmd_send(ar, skb,
1322 ar->wmi.cmd->tdls_peer_update_cmdid);
1323}
1324
1325static inline int
1326ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
1327{
1328 struct sk_buff *skb;
1329
1330 if (!ar->wmi.ops->gen_adaptive_qcs)
1331 return -EOPNOTSUPP;
1332
1333 skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
1334 if (IS_ERR(skb))
1335 return PTR_ERR(skb);
1336
1337 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
1338}
1339
1340static inline int
1341ath10k_wmi_pdev_get_tpc_config(struct ath10k *ar, u32 param)
1342{
1343 struct sk_buff *skb;
1344
1345 if (!ar->wmi.ops->gen_pdev_get_tpc_config)
1346 return -EOPNOTSUPP;
1347
1348 skb = ar->wmi.ops->gen_pdev_get_tpc_config(ar, param);
1349
1350 if (IS_ERR(skb))
1351 return PTR_ERR(skb);
1352
1353 return ath10k_wmi_cmd_send(ar, skb,
1354 ar->wmi.cmd->pdev_get_tpc_config_cmdid);
1355}
1356
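/*
 * fw_stats_fill formats an already-parsed ath10k_fw_stats snapshot into a
 * text buffer for debugfs; the caller provides the buffer and is expected
 * to size it appropriately.
 */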
1357static inline int
1358ath10k_wmi_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats,
1359 char *buf)
1360{
1361 if (!ar->wmi.ops->fw_stats_fill)
1362 return -EOPNOTSUPP;
1363
1364 ar->wmi.ops->fw_stats_fill(ar, fw_stats, buf);
1365 return 0;
1366}
1367
1368static inline int
1369ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
1370 u32 detect_level, u32 detect_margin)
1371{
1372 struct sk_buff *skb;
1373
1374 if (!ar->wmi.ops->gen_pdev_enable_adaptive_cca)
1375 return -EOPNOTSUPP;
1376
1377 skb = ar->wmi.ops->gen_pdev_enable_adaptive_cca(ar, enable,
1378 detect_level,
1379 detect_margin);
1380
1381 if (IS_ERR(skb))
1382 return PTR_ERR(skb);
1383
1384 return ath10k_wmi_cmd_send(ar, skb,
1385 ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid);
1386}
1387
1388static inline int
1389ath10k_wmi_ext_resource_config(struct ath10k *ar,
1390 enum wmi_host_platform_type type,
1391 u32 fw_feature_bitmap)
1392{
1393 struct sk_buff *skb;
1394
1395 if (!ar->wmi.ops->ext_resource_config)
1396 return -EOPNOTSUPP;
1397
1398 skb = ar->wmi.ops->ext_resource_config(ar, type,
1399 fw_feature_bitmap);
1400
1401 if (IS_ERR(skb))
1402 return PTR_ERR(skb);
1403
1404 return ath10k_wmi_cmd_send(ar, skb,
1405 ar->wmi.cmd->ext_resource_cfg_cmdid);
1406}
1407
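/*
 * get_vdev_subtype maps the generic wmi_vdev_subtype value onto the
 * firmware-interface-specific encoding; a negative return (including
 * -EOPNOTSUPP) means the subtype cannot be expressed on this firmware.
 */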
1408static inline int
1409ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype)
1410{
1411 if (!ar->wmi.ops->get_vdev_subtype)
1412 return -EOPNOTSUPP;
1413
1414 return ar->wmi.ops->get_vdev_subtype(ar, subtype);
1415}
1416
1417static inline int
1418ath10k_wmi_pdev_bss_chan_info_request(struct ath10k *ar,
1419 enum wmi_bss_survey_req_type type)
1420{
1421 struct ath10k_wmi *wmi = &ar->wmi;
1422 struct sk_buff *skb;
1423
1424 if (!wmi->ops->gen_pdev_bss_chan_info_req)
1425 return -EOPNOTSUPP;
1426
1427 skb = wmi->ops->gen_pdev_bss_chan_info_req(ar, type);
1428 if (IS_ERR(skb))
1429 return PTR_ERR(skb);
1430
1431 return ath10k_wmi_cmd_send(ar, skb,
1432 wmi->cmd->pdev_bss_chan_info_request_cmdid);
1433}
1434
1435static inline int
1436ath10k_wmi_echo(struct ath10k *ar, u32 value)
1437{
1438 struct ath10k_wmi *wmi = &ar->wmi;
1439 struct sk_buff *skb;
1440
1441 if (!wmi->ops->gen_echo)
1442 return -EOPNOTSUPP;
1443
1444 skb = wmi->ops->gen_echo(ar, value);
1445 if (IS_ERR(skb))
1446 return PTR_ERR(skb);
1447
1448 return ath10k_wmi_cmd_send(ar, skb, wmi->cmd->echo_cmdid);
1449}
1450
1451static inline int
1452ath10k_wmi_pdev_get_tpc_table_cmdid(struct ath10k *ar, u32 param)
1453{
1454 struct sk_buff *skb;
1455
1456 if (!ar->wmi.ops->gen_pdev_get_tpc_table_cmdid)
1457 return -EOPNOTSUPP;
1458
1459 skb = ar->wmi.ops->gen_pdev_get_tpc_table_cmdid(ar, param);
1460
1461 if (IS_ERR(skb))
1462 return PTR_ERR(skb);
1463
1464 return ath10k_wmi_cmd_send(ar, skb,
1465 ar->wmi.cmd->pdev_get_tpc_table_cmdid);
1466}
1467
1468#endif