1// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2/*
3 * Copyright (C) 2014 Intel Mobile Communications GmbH
4 * Copyright (C) 2017 Intel Deutschland GmbH
5 * Copyright (C) 2018-2020, 2022-2023 Intel Corporation
6 */
7#include <linux/etherdevice.h>
8#include "mvm.h"
9#include "time-event.h"
10#include "iwl-io.h"
11#include "iwl-prph.h"
12
/*
 * Convert 802.11 Time Units (1 TU = 1024 usec) to usec / msec.
 * Argument is parenthesized so expressions like TU_TO_US(a + b) expand
 * correctly.
 */
#define TU_TO_US(x) ((x) * 1024)
#define TU_TO_MS(x) (TU_TO_US(x) / 1000)
15
16void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm)
17{
18 struct ieee80211_sta *sta;
19 struct iwl_mvm_sta *mvmsta;
20 int i;
21
22 lockdep_assert_held(&mvm->mutex);
23
24 for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
25 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
26 lockdep_is_held(&mvm->mutex));
27 if (!sta || IS_ERR(sta) || !sta->tdls)
28 continue;
29
30 mvmsta = iwl_mvm_sta_from_mac80211(sta);
31 ieee80211_tdls_oper_request(mvmsta->vif, sta->addr,
32 NL80211_TDLS_TEARDOWN,
33 WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED,
34 GFP_KERNEL);
35 }
36}
37
38int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
39{
40 struct ieee80211_sta *sta;
41 struct iwl_mvm_sta *mvmsta;
42 int count = 0;
43 int i;
44
45 lockdep_assert_held(&mvm->mutex);
46
47 for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
48 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
49 lockdep_is_held(&mvm->mutex));
50 if (!sta || IS_ERR(sta) || !sta->tdls)
51 continue;
52
53 if (vif) {
54 mvmsta = iwl_mvm_sta_from_mac80211(sta);
55 if (mvmsta->vif != vif)
56 continue;
57 }
58
59 count++;
60 }
61
62 return count;
63}
64
65static void iwl_mvm_tdls_config(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
66{
67 struct iwl_rx_packet *pkt;
68 struct iwl_tdls_config_res *resp;
69 struct iwl_tdls_config_cmd tdls_cfg_cmd = {};
70 struct iwl_host_cmd cmd = {
71 .id = TDLS_CONFIG_CMD,
72 .flags = CMD_WANT_SKB,
73 .data = { &tdls_cfg_cmd, },
74 .len = { sizeof(struct iwl_tdls_config_cmd), },
75 };
76 struct ieee80211_sta *sta;
77 int ret, i, cnt;
78 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
79
80 lockdep_assert_held(&mvm->mutex);
81
82 tdls_cfg_cmd.id_and_color =
83 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
84 tdls_cfg_cmd.tx_to_ap_tid = IWL_MVM_TDLS_FW_TID;
85 tdls_cfg_cmd.tx_to_ap_ssn = cpu_to_le16(0); /* not used for now */
86
87 /* for now the Tx cmd is empty and unused */
88
89 /* populate TDLS peer data */
90 cnt = 0;
91 for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
92 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
93 lockdep_is_held(&mvm->mutex));
94 if (IS_ERR_OR_NULL(sta) || !sta->tdls)
95 continue;
96
97 tdls_cfg_cmd.sta_info[cnt].sta_id = i;
98 tdls_cfg_cmd.sta_info[cnt].tx_to_peer_tid =
99 IWL_MVM_TDLS_FW_TID;
100 tdls_cfg_cmd.sta_info[cnt].tx_to_peer_ssn = cpu_to_le16(0);
101 tdls_cfg_cmd.sta_info[cnt].is_initiator =
102 cpu_to_le32(sta->tdls_initiator ? 1 : 0);
103
104 cnt++;
105 }
106
107 tdls_cfg_cmd.tdls_peer_count = cnt;
108 IWL_DEBUG_TDLS(mvm, "send TDLS config to FW for %d peers\n", cnt);
109
110 ret = iwl_mvm_send_cmd(mvm, &cmd);
111 if (WARN_ON_ONCE(ret))
112 return;
113
114 pkt = cmd.resp_pkt;
115
116 WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp));
117
118 /* we don't really care about the response at this point */
119
120 iwl_free_resp(&cmd);
121}
122
123void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
124 bool sta_added)
125{
126 int tdls_sta_cnt = iwl_mvm_tdls_sta_count(mvm, vif);
127
128 /* when the first peer joins, send a power update first */
129 if (tdls_sta_cnt == 1 && sta_added)
130 iwl_mvm_power_update_mac(mvm);
131
132 /* Configure the FW with TDLS peer info only if TDLS channel switch
133 * capability is set.
134 * TDLS config data is used currently only in TDLS channel switch code.
135 * Supposed to serve also TDLS buffer station which is not implemneted
136 * yet in FW*/
137 if (fw_has_capa(&mvm->fw->ucode_capa,
138 IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH))
139 iwl_mvm_tdls_config(mvm, vif);
140
141 /* when the last peer leaves, send a power update last */
142 if (tdls_sta_cnt == 0 && !sta_added)
143 iwl_mvm_power_update_mac(mvm);
144}
145
146void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
147 struct ieee80211_vif *vif,
148 unsigned int link_id)
149{
150 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
151 u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int;
152
153 /* Protect the session to hear the TDLS setup response on the channel */
154 mutex_lock(&mvm->mutex);
155 if (fw_has_capa(&mvm->fw->ucode_capa,
156 IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
157 iwl_mvm_schedule_session_protection(mvm, vif, duration,
158 duration, true, link_id);
159 else
160 iwl_mvm_protect_session(mvm, vif, duration,
161 duration, 100, true);
162 mutex_unlock(&mvm->mutex);
163}
164
165static const char *
166iwl_mvm_tdls_cs_state_str(enum iwl_mvm_tdls_cs_state state)
167{
168 switch (state) {
169 case IWL_MVM_TDLS_SW_IDLE:
170 return "IDLE";
171 case IWL_MVM_TDLS_SW_REQ_SENT:
172 return "REQ SENT";
173 case IWL_MVM_TDLS_SW_RESP_RCVD:
174 return "RESP RECEIVED";
175 case IWL_MVM_TDLS_SW_REQ_RCVD:
176 return "REQ RECEIVED";
177 case IWL_MVM_TDLS_SW_ACTIVE:
178 return "ACTIVE";
179 }
180
181 return NULL;
182}
183
184static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm,
185 enum iwl_mvm_tdls_cs_state state)
186{
187 if (mvm->tdls_cs.state == state)
188 return;
189
190 IWL_DEBUG_TDLS(mvm, "TDLS channel switch state: %s -> %s\n",
191 iwl_mvm_tdls_cs_state_str(mvm->tdls_cs.state),
192 iwl_mvm_tdls_cs_state_str(state));
193 mvm->tdls_cs.state = state;
194
195 /* we only send requests to our switching peer - update sent time */
196 if (state == IWL_MVM_TDLS_SW_REQ_SENT)
197 mvm->tdls_cs.peer.sent_timestamp = iwl_mvm_get_systime(mvm);
198
199 if (state == IWL_MVM_TDLS_SW_IDLE)
200 mvm->tdls_cs.cur_sta_id = IWL_MVM_INVALID_STA;
201}
202
203void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
204{
205 struct iwl_rx_packet *pkt = rxb_addr(rxb);
206 struct iwl_tdls_channel_switch_notif *notif = (void *)pkt->data;
207 struct ieee80211_sta *sta;
208 unsigned int delay;
209 struct iwl_mvm_sta *mvmsta;
210 struct ieee80211_vif *vif;
211 u32 sta_id = le32_to_cpu(notif->sta_id);
212
213 lockdep_assert_held(&mvm->mutex);
214
215 /* can fail sometimes */
216 if (!le32_to_cpu(notif->status)) {
217 iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
218 return;
219 }
220
221 if (WARN_ON(sta_id >= mvm->fw->ucode_capa.num_stations))
222 return;
223
224 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
225 lockdep_is_held(&mvm->mutex));
226 /* the station may not be here, but if it is, it must be a TDLS peer */
227 if (IS_ERR_OR_NULL(sta) || WARN_ON(!sta->tdls))
228 return;
229
230 mvmsta = iwl_mvm_sta_from_mac80211(sta);
231 vif = mvmsta->vif;
232
233 /*
234 * Update state and possibly switch again after this is over (DTIM).
235 * Also convert TU to msec.
236 */
237 delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
238 mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
239 msecs_to_jiffies(delay));
240
241 iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_ACTIVE);
242}
243
/*
 * Validate a requested TDLS channel-switch action against the current
 * channel-switch state machine.
 *
 * @type: the requested switch action
 * @peer: MAC address of the peer involved in the action
 * @peer_initiator: true if the peer is the TDLS link initiator
 * @timestamp: frame timestamp, compared against peer.sent_timestamp to
 *             detect stale responses
 *
 * Returns 0 if the action is permitted in the current state, -EBUSY or
 * -EINVAL otherwise (logged via IWL_DEBUG_TDLS).
 */
static int
iwl_mvm_tdls_check_action(struct iwl_mvm *mvm,
			  enum iwl_tdls_channel_switch_type type,
			  const u8 *peer, bool peer_initiator, u32 timestamp)
{
	bool same_peer = false;
	int ret = 0;

	/* get the existing peer if it's there */
	if (mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE &&
	    mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) {
		struct ieee80211_sta *sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
				lockdep_is_held(&mvm->mutex));
		if (!IS_ERR_OR_NULL(sta))
			same_peer = ether_addr_equal(peer, sta->addr);
	}

	switch (mvm->tdls_cs.state) {
	case IWL_MVM_TDLS_SW_IDLE:
		/*
		 * might be spurious packet from the peer after the switch is
		 * already done
		 */
		if (type == TDLS_MOVE_CH)
			ret = -EINVAL;
		break;
	case IWL_MVM_TDLS_SW_REQ_SENT:
		/* only allow requests from the same peer */
		if (!same_peer)
			ret = -EBUSY;
		else if (type == TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH &&
			 !peer_initiator)
			/*
			 * We received a ch-switch request while an outgoing
			 * one is pending. Allow it if the peer is the link
			 * initiator.
			 */
			ret = -EBUSY;
		else if (type == TDLS_SEND_CHAN_SW_REQ)
			/* wait for idle before sending another request */
			ret = -EBUSY;
		else if (timestamp <= mvm->tdls_cs.peer.sent_timestamp)
			/* we got a stale response - ignore it */
			ret = -EINVAL;
		break;
	case IWL_MVM_TDLS_SW_RESP_RCVD:
		/*
		 * we are waiting for the FW to give an "active" notification,
		 * so ignore requests in the meantime
		 */
		ret = -EBUSY;
		break;
	case IWL_MVM_TDLS_SW_REQ_RCVD:
		/* as above, allow the link initiator to proceed */
		if (type == TDLS_SEND_CHAN_SW_REQ) {
			if (!same_peer)
				ret = -EBUSY;
			else if (peer_initiator) /* they are the initiator */
				ret = -EBUSY;
		} else if (type == TDLS_MOVE_CH) {
			ret = -EINVAL;
		}
		break;
	case IWL_MVM_TDLS_SW_ACTIVE:
		/*
		 * the only valid request when active is a request to return
		 * to the base channel by the current off-channel peer
		 */
		if (type != TDLS_MOVE_CH || !same_peer)
			ret = -EBUSY;
		break;
	}

	if (ret)
		IWL_DEBUG_TDLS(mvm,
			       "Invalid TDLS action %d state %d peer %pM same_peer %d initiator %d\n",
			       type, mvm->tdls_cs.state, peer, same_peer,
			       peer_initiator);

	return ret;
}
326
327static int
328iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
329 struct ieee80211_vif *vif,
330 enum iwl_tdls_channel_switch_type type,
331 const u8 *peer, bool peer_initiator,
332 u8 oper_class,
333 struct cfg80211_chan_def *chandef,
334 u32 timestamp, u16 switch_time,
335 u16 switch_timeout, struct sk_buff *skb,
336 u32 ch_sw_tm_ie)
337{
338 struct ieee80211_sta *sta;
339 struct iwl_mvm_sta *mvmsta;
340 struct ieee80211_tx_info *info;
341 struct ieee80211_hdr *hdr;
342 struct iwl_tdls_channel_switch_cmd cmd = {0};
343 struct iwl_tdls_channel_switch_cmd_tail *tail =
344 iwl_mvm_chan_info_cmd_tail(mvm, &cmd.ci);
345 u16 len = sizeof(cmd) - iwl_mvm_chan_info_padding(mvm);
346 int ret;
347
348 lockdep_assert_held(&mvm->mutex);
349
350 ret = iwl_mvm_tdls_check_action(mvm, type, peer, peer_initiator,
351 timestamp);
352 if (ret)
353 return ret;
354
355 if (!skb || WARN_ON(skb->len > IWL_TDLS_CH_SW_FRAME_MAX_SIZE)) {
356 ret = -EINVAL;
357 goto out;
358 }
359
360 cmd.switch_type = type;
361 tail->timing.frame_timestamp = cpu_to_le32(timestamp);
362 tail->timing.switch_time = cpu_to_le32(switch_time);
363 tail->timing.switch_timeout = cpu_to_le32(switch_timeout);
364
365 rcu_read_lock();
366 sta = ieee80211_find_sta(vif, peer);
367 if (!sta) {
368 rcu_read_unlock();
369 ret = -ENOENT;
370 goto out;
371 }
372 mvmsta = iwl_mvm_sta_from_mac80211(sta);
373 cmd.peer_sta_id = cpu_to_le32(mvmsta->deflink.sta_id);
374
375 if (!chandef) {
376 if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
377 mvm->tdls_cs.peer.chandef.chan) {
378 /* actually moving to the channel */
379 chandef = &mvm->tdls_cs.peer.chandef;
380 } else if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_ACTIVE &&
381 type == TDLS_MOVE_CH) {
382 /* we need to return to base channel */
383 struct ieee80211_chanctx_conf *chanctx =
384 rcu_dereference(vif->bss_conf.chanctx_conf);
385
386 if (WARN_ON_ONCE(!chanctx)) {
387 rcu_read_unlock();
388 goto out;
389 }
390
391 chandef = &chanctx->def;
392 }
393 }
394
395 if (chandef)
396 iwl_mvm_set_chan_info_chandef(mvm, &cmd.ci, chandef);
397
398 /* keep quota calculation simple for now - 50% of DTIM for TDLS */
399 tail->timing.max_offchan_duration =
400 cpu_to_le32(TU_TO_US(vif->bss_conf.dtim_period *
401 vif->bss_conf.beacon_int) / 2);
402
403 /* Switch time is the first element in the switch-timing IE. */
404 tail->frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2);
405
406 info = IEEE80211_SKB_CB(skb);
407 hdr = (void *)skb->data;
408 if (info->control.hw_key) {
409 if (info->control.hw_key->cipher != WLAN_CIPHER_SUITE_CCMP) {
410 rcu_read_unlock();
411 ret = -EINVAL;
412 goto out;
413 }
414 iwl_mvm_set_tx_cmd_ccmp(info, &tail->frame.tx_cmd);
415 }
416
417 iwl_mvm_set_tx_cmd(mvm, skb, &tail->frame.tx_cmd, info,
418 mvmsta->deflink.sta_id);
419
420 iwl_mvm_set_tx_cmd_rate(mvm, &tail->frame.tx_cmd, info, sta,
421 hdr->frame_control);
422 rcu_read_unlock();
423
424 memcpy(tail->frame.data, skb->data, skb->len);
425
426 ret = iwl_mvm_send_cmd_pdu(mvm, TDLS_CHANNEL_SWITCH_CMD, 0, len, &cmd);
427 if (ret) {
428 IWL_ERR(mvm, "Failed to send TDLS_CHANNEL_SWITCH cmd: %d\n",
429 ret);
430 goto out;
431 }
432
433 /* channel switch has started, update state */
434 if (type != TDLS_MOVE_CH) {
435 mvm->tdls_cs.cur_sta_id = mvmsta->deflink.sta_id;
436 iwl_mvm_tdls_update_cs_state(mvm,
437 type == TDLS_SEND_CHAN_SW_REQ ?
438 IWL_MVM_TDLS_SW_REQ_SENT :
439 IWL_MVM_TDLS_SW_REQ_RCVD);
440 } else {
441 iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_RESP_RCVD);
442 }
443
444out:
445
446 /* channel switch failed - we are idle */
447 if (ret)
448 iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
449
450 return ret;
451}
452
453void iwl_mvm_tdls_ch_switch_work(struct work_struct *work)
454{
455 struct iwl_mvm *mvm;
456 struct ieee80211_sta *sta;
457 struct iwl_mvm_sta *mvmsta;
458 struct ieee80211_vif *vif;
459 unsigned int delay;
460 int ret;
461
462 mvm = container_of(work, struct iwl_mvm, tdls_cs.dwork.work);
463 mutex_lock(&mvm->mutex);
464
465 /* called after an active channel switch has finished or timed-out */
466 iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
467
468 /* station might be gone, in that case do nothing */
469 if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA)
470 goto out;
471
472 sta = rcu_dereference_protected(
473 mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
474 lockdep_is_held(&mvm->mutex));
475 /* the station may not be here, but if it is, it must be a TDLS peer */
476 if (!sta || IS_ERR(sta) || WARN_ON(!sta->tdls))
477 goto out;
478
479 mvmsta = iwl_mvm_sta_from_mac80211(sta);
480 vif = mvmsta->vif;
481 ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
482 TDLS_SEND_CHAN_SW_REQ,
483 sta->addr,
484 mvm->tdls_cs.peer.initiator,
485 mvm->tdls_cs.peer.op_class,
486 &mvm->tdls_cs.peer.chandef,
487 0, 0, 0,
488 mvm->tdls_cs.peer.skb,
489 mvm->tdls_cs.peer.ch_sw_tm_ie);
490 if (ret)
491 IWL_ERR(mvm, "Not sending TDLS channel switch: %d\n", ret);
492
493 /* retry after a DTIM if we failed sending now */
494 delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
495 schedule_delayed_work(&mvm->tdls_cs.dwork, msecs_to_jiffies(delay));
496out:
497 mutex_unlock(&mvm->mutex);
498}
499
/*
 * mac80211 callback: start a TDLS channel switch with @sta.
 *
 * Sends the initial channel-switch request to the firmware and records the
 * peer (chandef, operating class, copied template frame) so the periodic
 * worker can re-issue the switch. Only a single channel-switch peer is
 * supported at a time.
 *
 * Returns 0 on success or a negative error code.
 */
int
iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
			    struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u8 oper_class,
			    struct cfg80211_chan_def *chandef,
			    struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_sta *mvmsta;
	unsigned int delay;
	int ret;

	mutex_lock(&mvm->mutex);

	IWL_DEBUG_TDLS(mvm, "TDLS channel switch with %pM ch %d width %d\n",
		       sta->addr, chandef->chan->center_freq, chandef->width);

	/* we only support a single peer for channel switching */
	if (mvm->tdls_cs.peer.sta_id != IWL_MVM_INVALID_STA) {
		IWL_DEBUG_TDLS(mvm,
			       "Existing peer. Can't start switch with %pM\n",
			       sta->addr);
		ret = -EBUSY;
		goto out;
	}

	ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
						 TDLS_SEND_CHAN_SW_REQ,
						 sta->addr, sta->tdls_initiator,
						 oper_class, chandef, 0, 0, 0,
						 tmpl_skb, ch_sw_tm_ie);
	if (ret)
		goto out;

	/*
	 * Mark the peer as "in tdls switch" for this vif. We only allow a
	 * single such peer per vif.
	 */
	mvm->tdls_cs.peer.skb = skb_copy(tmpl_skb, GFP_KERNEL);
	if (!mvm->tdls_cs.peer.skb) {
		/*
		 * NOTE(review): the FW command was already sent above, so on
		 * ENOMEM here a switch is in flight without a recorded peer -
		 * confirm whether additional cleanup is needed.
		 */
		ret = -ENOMEM;
		goto out;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	mvm->tdls_cs.peer.sta_id = mvmsta->deflink.sta_id;
	mvm->tdls_cs.peer.chandef = *chandef;
	mvm->tdls_cs.peer.initiator = sta->tdls_initiator;
	mvm->tdls_cs.peer.op_class = oper_class;
	mvm->tdls_cs.peer.ch_sw_tm_ie = ch_sw_tm_ie;

	/*
	 * Wait for 2 DTIM periods before attempting the next switch. The next
	 * switch will be made sooner if the current one completes before that.
	 */
	delay = 2 * TU_TO_MS(vif->bss_conf.dtim_period *
			     vif->bss_conf.beacon_int);
	mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
			 msecs_to_jiffies(delay));

out:
	mutex_unlock(&mvm->mutex);
	return ret;
}
564
/*
 * mac80211 callback: cancel an ongoing TDLS channel switch with @sta.
 *
 * Clears the recorded channel-switch peer. If the phy is currently
 * off-channel because of this peer, sleeps one DTIM interval (after
 * dropping the mutex) to let it return to the base channel, then flushes
 * the channel-switch worker.
 */
void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					struct ieee80211_sta *sta)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct ieee80211_sta *cur_sta;
	bool wait_for_phy = false;

	mutex_lock(&mvm->mutex);

	IWL_DEBUG_TDLS(mvm, "TDLS cancel channel switch with %pM\n", sta->addr);

	/* we only support a single peer for channel switching */
	if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA) {
		IWL_DEBUG_TDLS(mvm, "No ch switch peer - %pM\n", sta->addr);
		goto out;
	}

	cur_sta = rcu_dereference_protected(
			mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
			lockdep_is_held(&mvm->mutex));
	/* make sure it's the same peer */
	if (cur_sta != sta)
		goto out;

	/*
	 * If we're currently in a switch because of the now canceled peer,
	 * wait a DTIM here to make sure the phy is back on the base channel.
	 * We can't otherwise force it.
	 */
	if (mvm->tdls_cs.cur_sta_id == mvm->tdls_cs.peer.sta_id &&
	    mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE)
		wait_for_phy = true;

	/* forget the peer and free its copied template frame */
	mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
	dev_kfree_skb(mvm->tdls_cs.peer.skb);
	mvm->tdls_cs.peer.skb = NULL;

out:
	mutex_unlock(&mvm->mutex);

	/* make sure the phy is on the base channel */
	if (wait_for_phy)
		msleep(TU_TO_MS(vif->bss_conf.dtim_period *
				vif->bss_conf.beacon_int));

	/* flush the channel switch state */
	flush_delayed_work(&mvm->tdls_cs.dwork);

	IWL_DEBUG_TDLS(mvm, "TDLS ending channel switch with %pM\n", sta->addr);
}
616
617void
618iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
619 struct ieee80211_vif *vif,
620 struct ieee80211_tdls_ch_sw_params *params)
621{
622 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
623 enum iwl_tdls_channel_switch_type type;
624 unsigned int delay;
625 const char *action_str =
626 params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ?
627 "REQ" : "RESP";
628
629 mutex_lock(&mvm->mutex);
630
631 IWL_DEBUG_TDLS(mvm,
632 "Received TDLS ch switch action %s from %pM status %d\n",
633 action_str, params->sta->addr, params->status);
634
635 /*
636 * we got a non-zero status from a peer we were switching to - move to
637 * the idle state and retry again later
638 */
639 if (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE &&
640 params->status != 0 &&
641 mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
642 mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) {
643 struct ieee80211_sta *cur_sta;
644
645 /* make sure it's the same peer */
646 cur_sta = rcu_dereference_protected(
647 mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
648 lockdep_is_held(&mvm->mutex));
649 if (cur_sta == params->sta) {
650 iwl_mvm_tdls_update_cs_state(mvm,
651 IWL_MVM_TDLS_SW_IDLE);
652 goto retry;
653 }
654 }
655
656 type = (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST) ?
657 TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH : TDLS_MOVE_CH;
658
659 iwl_mvm_tdls_config_channel_switch(mvm, vif, type, params->sta->addr,
660 params->sta->tdls_initiator, 0,
661 params->chandef, params->timestamp,
662 params->switch_time,
663 params->switch_timeout,
664 params->tmpl_skb,
665 params->ch_sw_tm_ie);
666
667retry:
668 /* register a timeout in case we don't succeed in switching */
669 delay = vif->bss_conf.dtim_period * vif->bss_conf.beacon_int *
670 1024 / 1000;
671 mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
672 msecs_to_jiffies(delay));
673 mutex_unlock(&mvm->mutex);
674}
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2014 Intel Mobile Communications GmbH
9 * Copyright(c) 2017 Intel Deutschland GmbH
10 * Copyright(C) 2018 - 2019 Intel Corporation
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as
14 * published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called COPYING.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <linuxwifi@intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 * BSD LICENSE
29 *
30 * Copyright(c) 2014 Intel Mobile Communications GmbH
31 * Copyright(c) 2017 Intel Deutschland GmbH
32 * Copyright(C) 2018 - 2019 Intel Corporation
33 * All rights reserved.
34 *
35 * Redistribution and use in source and binary forms, with or without
36 * modification, are permitted provided that the following conditions
37 * are met:
38 *
39 * * Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * * Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in
43 * the documentation and/or other materials provided with the
44 * distribution.
45 * * Neither the name Intel Corporation nor the names of its
46 * contributors may be used to endorse or promote products derived
47 * from this software without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
50 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
51 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
52 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
53 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
54 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
55 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
56 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
57 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
58 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
59 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
60 *
61 *****************************************************************************/
62
63#include <linux/etherdevice.h>
64#include "mvm.h"
65#include "time-event.h"
66#include "iwl-io.h"
67#include "iwl-prph.h"
68
69#define TU_TO_US(x) (x * 1024)
70#define TU_TO_MS(x) (TU_TO_US(x) / 1000)
71
72void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm)
73{
74 struct ieee80211_sta *sta;
75 struct iwl_mvm_sta *mvmsta;
76 int i;
77
78 lockdep_assert_held(&mvm->mutex);
79
80 for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
81 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
82 lockdep_is_held(&mvm->mutex));
83 if (!sta || IS_ERR(sta) || !sta->tdls)
84 continue;
85
86 mvmsta = iwl_mvm_sta_from_mac80211(sta);
87 ieee80211_tdls_oper_request(mvmsta->vif, sta->addr,
88 NL80211_TDLS_TEARDOWN,
89 WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED,
90 GFP_KERNEL);
91 }
92}
93
94int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
95{
96 struct ieee80211_sta *sta;
97 struct iwl_mvm_sta *mvmsta;
98 int count = 0;
99 int i;
100
101 lockdep_assert_held(&mvm->mutex);
102
103 for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
104 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
105 lockdep_is_held(&mvm->mutex));
106 if (!sta || IS_ERR(sta) || !sta->tdls)
107 continue;
108
109 if (vif) {
110 mvmsta = iwl_mvm_sta_from_mac80211(sta);
111 if (mvmsta->vif != vif)
112 continue;
113 }
114
115 count++;
116 }
117
118 return count;
119}
120
121static void iwl_mvm_tdls_config(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
122{
123 struct iwl_rx_packet *pkt;
124 struct iwl_tdls_config_res *resp;
125 struct iwl_tdls_config_cmd tdls_cfg_cmd = {};
126 struct iwl_host_cmd cmd = {
127 .id = TDLS_CONFIG_CMD,
128 .flags = CMD_WANT_SKB,
129 .data = { &tdls_cfg_cmd, },
130 .len = { sizeof(struct iwl_tdls_config_cmd), },
131 };
132 struct ieee80211_sta *sta;
133 int ret, i, cnt;
134 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
135
136 lockdep_assert_held(&mvm->mutex);
137
138 tdls_cfg_cmd.id_and_color =
139 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
140 tdls_cfg_cmd.tx_to_ap_tid = IWL_MVM_TDLS_FW_TID;
141 tdls_cfg_cmd.tx_to_ap_ssn = cpu_to_le16(0); /* not used for now */
142
143 /* for now the Tx cmd is empty and unused */
144
145 /* populate TDLS peer data */
146 cnt = 0;
147 for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
148 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
149 lockdep_is_held(&mvm->mutex));
150 if (IS_ERR_OR_NULL(sta) || !sta->tdls)
151 continue;
152
153 tdls_cfg_cmd.sta_info[cnt].sta_id = i;
154 tdls_cfg_cmd.sta_info[cnt].tx_to_peer_tid =
155 IWL_MVM_TDLS_FW_TID;
156 tdls_cfg_cmd.sta_info[cnt].tx_to_peer_ssn = cpu_to_le16(0);
157 tdls_cfg_cmd.sta_info[cnt].is_initiator =
158 cpu_to_le32(sta->tdls_initiator ? 1 : 0);
159
160 cnt++;
161 }
162
163 tdls_cfg_cmd.tdls_peer_count = cnt;
164 IWL_DEBUG_TDLS(mvm, "send TDLS config to FW for %d peers\n", cnt);
165
166 ret = iwl_mvm_send_cmd(mvm, &cmd);
167 if (WARN_ON_ONCE(ret))
168 return;
169
170 pkt = cmd.resp_pkt;
171
172 WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp));
173
174 /* we don't really care about the response at this point */
175
176 iwl_free_resp(&cmd);
177}
178
179void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
180 bool sta_added)
181{
182 int tdls_sta_cnt = iwl_mvm_tdls_sta_count(mvm, vif);
183
184 /* when the first peer joins, send a power update first */
185 if (tdls_sta_cnt == 1 && sta_added)
186 iwl_mvm_power_update_mac(mvm);
187
188 /* Configure the FW with TDLS peer info only if TDLS channel switch
189 * capability is set.
190 * TDLS config data is used currently only in TDLS channel switch code.
191 * Supposed to serve also TDLS buffer station which is not implemneted
192 * yet in FW*/
193 if (fw_has_capa(&mvm->fw->ucode_capa,
194 IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH))
195 iwl_mvm_tdls_config(mvm, vif);
196
197 /* when the last peer leaves, send a power update last */
198 if (tdls_sta_cnt == 0 && !sta_added)
199 iwl_mvm_power_update_mac(mvm);
200}
201
202void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
203 struct ieee80211_vif *vif)
204{
205 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
206 u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int;
207
208 /* Protect the session to hear the TDLS setup response on the channel */
209 mutex_lock(&mvm->mutex);
210 if (fw_has_capa(&mvm->fw->ucode_capa,
211 IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
212 iwl_mvm_schedule_session_protection(mvm, vif, duration,
213 duration, true);
214 else
215 iwl_mvm_protect_session(mvm, vif, duration,
216 duration, 100, true);
217 mutex_unlock(&mvm->mutex);
218}
219
220static const char *
221iwl_mvm_tdls_cs_state_str(enum iwl_mvm_tdls_cs_state state)
222{
223 switch (state) {
224 case IWL_MVM_TDLS_SW_IDLE:
225 return "IDLE";
226 case IWL_MVM_TDLS_SW_REQ_SENT:
227 return "REQ SENT";
228 case IWL_MVM_TDLS_SW_RESP_RCVD:
229 return "RESP RECEIVED";
230 case IWL_MVM_TDLS_SW_REQ_RCVD:
231 return "REQ RECEIVED";
232 case IWL_MVM_TDLS_SW_ACTIVE:
233 return "ACTIVE";
234 }
235
236 return NULL;
237}
238
239static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm,
240 enum iwl_mvm_tdls_cs_state state)
241{
242 if (mvm->tdls_cs.state == state)
243 return;
244
245 IWL_DEBUG_TDLS(mvm, "TDLS channel switch state: %s -> %s\n",
246 iwl_mvm_tdls_cs_state_str(mvm->tdls_cs.state),
247 iwl_mvm_tdls_cs_state_str(state));
248 mvm->tdls_cs.state = state;
249
250 /* we only send requests to our switching peer - update sent time */
251 if (state == IWL_MVM_TDLS_SW_REQ_SENT)
252 mvm->tdls_cs.peer.sent_timestamp = iwl_mvm_get_systime(mvm);
253
254 if (state == IWL_MVM_TDLS_SW_IDLE)
255 mvm->tdls_cs.cur_sta_id = IWL_MVM_INVALID_STA;
256}
257
258void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
259{
260 struct iwl_rx_packet *pkt = rxb_addr(rxb);
261 struct iwl_tdls_channel_switch_notif *notif = (void *)pkt->data;
262 struct ieee80211_sta *sta;
263 unsigned int delay;
264 struct iwl_mvm_sta *mvmsta;
265 struct ieee80211_vif *vif;
266 u32 sta_id = le32_to_cpu(notif->sta_id);
267
268 lockdep_assert_held(&mvm->mutex);
269
270 /* can fail sometimes */
271 if (!le32_to_cpu(notif->status)) {
272 iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
273 return;
274 }
275
276 if (WARN_ON(sta_id >= IWL_MVM_STATION_COUNT))
277 return;
278
279 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
280 lockdep_is_held(&mvm->mutex));
281 /* the station may not be here, but if it is, it must be a TDLS peer */
282 if (IS_ERR_OR_NULL(sta) || WARN_ON(!sta->tdls))
283 return;
284
285 mvmsta = iwl_mvm_sta_from_mac80211(sta);
286 vif = mvmsta->vif;
287
288 /*
289 * Update state and possibly switch again after this is over (DTIM).
290 * Also convert TU to msec.
291 */
292 delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
293 mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
294 msecs_to_jiffies(delay));
295
296 iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_ACTIVE);
297}
298
299static int
300iwl_mvm_tdls_check_action(struct iwl_mvm *mvm,
301 enum iwl_tdls_channel_switch_type type,
302 const u8 *peer, bool peer_initiator, u32 timestamp)
303{
304 bool same_peer = false;
305 int ret = 0;
306
307 /* get the existing peer if it's there */
308 if (mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE &&
309 mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) {
310 struct ieee80211_sta *sta = rcu_dereference_protected(
311 mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
312 lockdep_is_held(&mvm->mutex));
313 if (!IS_ERR_OR_NULL(sta))
314 same_peer = ether_addr_equal(peer, sta->addr);
315 }
316
317 switch (mvm->tdls_cs.state) {
318 case IWL_MVM_TDLS_SW_IDLE:
319 /*
320 * might be spurious packet from the peer after the switch is
321 * already done
322 */
323 if (type == TDLS_MOVE_CH)
324 ret = -EINVAL;
325 break;
326 case IWL_MVM_TDLS_SW_REQ_SENT:
327 /* only allow requests from the same peer */
328 if (!same_peer)
329 ret = -EBUSY;
330 else if (type == TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH &&
331 !peer_initiator)
332 /*
333 * We received a ch-switch request while an outgoing
334 * one is pending. Allow it if the peer is the link
335 * initiator.
336 */
337 ret = -EBUSY;
338 else if (type == TDLS_SEND_CHAN_SW_REQ)
339 /* wait for idle before sending another request */
340 ret = -EBUSY;
341 else if (timestamp <= mvm->tdls_cs.peer.sent_timestamp)
342 /* we got a stale response - ignore it */
343 ret = -EINVAL;
344 break;
345 case IWL_MVM_TDLS_SW_RESP_RCVD:
346 /*
347 * we are waiting for the FW to give an "active" notification,
348 * so ignore requests in the meantime
349 */
350 ret = -EBUSY;
351 break;
352 case IWL_MVM_TDLS_SW_REQ_RCVD:
353 /* as above, allow the link initiator to proceed */
354 if (type == TDLS_SEND_CHAN_SW_REQ) {
355 if (!same_peer)
356 ret = -EBUSY;
357 else if (peer_initiator) /* they are the initiator */
358 ret = -EBUSY;
359 } else if (type == TDLS_MOVE_CH) {
360 ret = -EINVAL;
361 }
362 break;
363 case IWL_MVM_TDLS_SW_ACTIVE:
364 /*
365 * the only valid request when active is a request to return
366 * to the base channel by the current off-channel peer
367 */
368 if (type != TDLS_MOVE_CH || !same_peer)
369 ret = -EBUSY;
370 break;
371 }
372
373 if (ret)
374 IWL_DEBUG_TDLS(mvm,
375 "Invalid TDLS action %d state %d peer %pM same_peer %d initiator %d\n",
376 type, mvm->tdls_cs.state, peer, same_peer,
377 peer_initiator);
378
379 return ret;
380}
381
382static int
383iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
384 struct ieee80211_vif *vif,
385 enum iwl_tdls_channel_switch_type type,
386 const u8 *peer, bool peer_initiator,
387 u8 oper_class,
388 struct cfg80211_chan_def *chandef,
389 u32 timestamp, u16 switch_time,
390 u16 switch_timeout, struct sk_buff *skb,
391 u32 ch_sw_tm_ie)
392{
393 struct ieee80211_sta *sta;
394 struct iwl_mvm_sta *mvmsta;
395 struct ieee80211_tx_info *info;
396 struct ieee80211_hdr *hdr;
397 struct iwl_tdls_channel_switch_cmd cmd = {0};
398 struct iwl_tdls_channel_switch_cmd_tail *tail =
399 iwl_mvm_chan_info_cmd_tail(mvm, &cmd.ci);
400 u16 len = sizeof(cmd) - iwl_mvm_chan_info_padding(mvm);
401 int ret;
402
403 lockdep_assert_held(&mvm->mutex);
404
405 ret = iwl_mvm_tdls_check_action(mvm, type, peer, peer_initiator,
406 timestamp);
407 if (ret)
408 return ret;
409
410 if (!skb || WARN_ON(skb->len > IWL_TDLS_CH_SW_FRAME_MAX_SIZE)) {
411 ret = -EINVAL;
412 goto out;
413 }
414
415 cmd.switch_type = type;
416 tail->timing.frame_timestamp = cpu_to_le32(timestamp);
417 tail->timing.switch_time = cpu_to_le32(switch_time);
418 tail->timing.switch_timeout = cpu_to_le32(switch_timeout);
419
420 rcu_read_lock();
421 sta = ieee80211_find_sta(vif, peer);
422 if (!sta) {
423 rcu_read_unlock();
424 ret = -ENOENT;
425 goto out;
426 }
427 mvmsta = iwl_mvm_sta_from_mac80211(sta);
428 cmd.peer_sta_id = cpu_to_le32(mvmsta->sta_id);
429
430 if (!chandef) {
431 if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
432 mvm->tdls_cs.peer.chandef.chan) {
433 /* actually moving to the channel */
434 chandef = &mvm->tdls_cs.peer.chandef;
435 } else if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_ACTIVE &&
436 type == TDLS_MOVE_CH) {
437 /* we need to return to base channel */
438 struct ieee80211_chanctx_conf *chanctx =
439 rcu_dereference(vif->chanctx_conf);
440
441 if (WARN_ON_ONCE(!chanctx)) {
442 rcu_read_unlock();
443 goto out;
444 }
445
446 chandef = &chanctx->def;
447 }
448 }
449
450 if (chandef)
451 iwl_mvm_set_chan_info_chandef(mvm, &cmd.ci, chandef);
452
453 /* keep quota calculation simple for now - 50% of DTIM for TDLS */
454 tail->timing.max_offchan_duration =
455 cpu_to_le32(TU_TO_US(vif->bss_conf.dtim_period *
456 vif->bss_conf.beacon_int) / 2);
457
458 /* Switch time is the first element in the switch-timing IE. */
459 tail->frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2);
460
461 info = IEEE80211_SKB_CB(skb);
462 hdr = (void *)skb->data;
463 if (info->control.hw_key) {
464 if (info->control.hw_key->cipher != WLAN_CIPHER_SUITE_CCMP) {
465 rcu_read_unlock();
466 ret = -EINVAL;
467 goto out;
468 }
469 iwl_mvm_set_tx_cmd_ccmp(info, &tail->frame.tx_cmd);
470 }
471
472 iwl_mvm_set_tx_cmd(mvm, skb, &tail->frame.tx_cmd, info,
473 mvmsta->sta_id);
474
475 iwl_mvm_set_tx_cmd_rate(mvm, &tail->frame.tx_cmd, info, sta,
476 hdr->frame_control);
477 rcu_read_unlock();
478
479 memcpy(tail->frame.data, skb->data, skb->len);
480
481 ret = iwl_mvm_send_cmd_pdu(mvm, TDLS_CHANNEL_SWITCH_CMD, 0, len, &cmd);
482 if (ret) {
483 IWL_ERR(mvm, "Failed to send TDLS_CHANNEL_SWITCH cmd: %d\n",
484 ret);
485 goto out;
486 }
487
488 /* channel switch has started, update state */
489 if (type != TDLS_MOVE_CH) {
490 mvm->tdls_cs.cur_sta_id = mvmsta->sta_id;
491 iwl_mvm_tdls_update_cs_state(mvm,
492 type == TDLS_SEND_CHAN_SW_REQ ?
493 IWL_MVM_TDLS_SW_REQ_SENT :
494 IWL_MVM_TDLS_SW_REQ_RCVD);
495 } else {
496 iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_RESP_RCVD);
497 }
498
499out:
500
501 /* channel switch failed - we are idle */
502 if (ret)
503 iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
504
505 return ret;
506}
507
508void iwl_mvm_tdls_ch_switch_work(struct work_struct *work)
509{
510 struct iwl_mvm *mvm;
511 struct ieee80211_sta *sta;
512 struct iwl_mvm_sta *mvmsta;
513 struct ieee80211_vif *vif;
514 unsigned int delay;
515 int ret;
516
517 mvm = container_of(work, struct iwl_mvm, tdls_cs.dwork.work);
518 mutex_lock(&mvm->mutex);
519
520 /* called after an active channel switch has finished or timed-out */
521 iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
522
523 /* station might be gone, in that case do nothing */
524 if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA)
525 goto out;
526
527 sta = rcu_dereference_protected(
528 mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
529 lockdep_is_held(&mvm->mutex));
530 /* the station may not be here, but if it is, it must be a TDLS peer */
531 if (!sta || IS_ERR(sta) || WARN_ON(!sta->tdls))
532 goto out;
533
534 mvmsta = iwl_mvm_sta_from_mac80211(sta);
535 vif = mvmsta->vif;
536 ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
537 TDLS_SEND_CHAN_SW_REQ,
538 sta->addr,
539 mvm->tdls_cs.peer.initiator,
540 mvm->tdls_cs.peer.op_class,
541 &mvm->tdls_cs.peer.chandef,
542 0, 0, 0,
543 mvm->tdls_cs.peer.skb,
544 mvm->tdls_cs.peer.ch_sw_tm_ie);
545 if (ret)
546 IWL_ERR(mvm, "Not sending TDLS channel switch: %d\n", ret);
547
548 /* retry after a DTIM if we failed sending now */
549 delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
550 schedule_delayed_work(&mvm->tdls_cs.dwork, msecs_to_jiffies(delay));
551out:
552 mutex_unlock(&mvm->mutex);
553}
554
555int
556iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
557 struct ieee80211_vif *vif,
558 struct ieee80211_sta *sta, u8 oper_class,
559 struct cfg80211_chan_def *chandef,
560 struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie)
561{
562 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
563 struct iwl_mvm_sta *mvmsta;
564 unsigned int delay;
565 int ret;
566
567 mutex_lock(&mvm->mutex);
568
569 IWL_DEBUG_TDLS(mvm, "TDLS channel switch with %pM ch %d width %d\n",
570 sta->addr, chandef->chan->center_freq, chandef->width);
571
572 /* we only support a single peer for channel switching */
573 if (mvm->tdls_cs.peer.sta_id != IWL_MVM_INVALID_STA) {
574 IWL_DEBUG_TDLS(mvm,
575 "Existing peer. Can't start switch with %pM\n",
576 sta->addr);
577 ret = -EBUSY;
578 goto out;
579 }
580
581 ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
582 TDLS_SEND_CHAN_SW_REQ,
583 sta->addr, sta->tdls_initiator,
584 oper_class, chandef, 0, 0, 0,
585 tmpl_skb, ch_sw_tm_ie);
586 if (ret)
587 goto out;
588
589 /*
590 * Mark the peer as "in tdls switch" for this vif. We only allow a
591 * single such peer per vif.
592 */
593 mvm->tdls_cs.peer.skb = skb_copy(tmpl_skb, GFP_KERNEL);
594 if (!mvm->tdls_cs.peer.skb) {
595 ret = -ENOMEM;
596 goto out;
597 }
598
599 mvmsta = iwl_mvm_sta_from_mac80211(sta);
600 mvm->tdls_cs.peer.sta_id = mvmsta->sta_id;
601 mvm->tdls_cs.peer.chandef = *chandef;
602 mvm->tdls_cs.peer.initiator = sta->tdls_initiator;
603 mvm->tdls_cs.peer.op_class = oper_class;
604 mvm->tdls_cs.peer.ch_sw_tm_ie = ch_sw_tm_ie;
605
606 /*
607 * Wait for 2 DTIM periods before attempting the next switch. The next
608 * switch will be made sooner if the current one completes before that.
609 */
610 delay = 2 * TU_TO_MS(vif->bss_conf.dtim_period *
611 vif->bss_conf.beacon_int);
612 mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
613 msecs_to_jiffies(delay));
614
615out:
616 mutex_unlock(&mvm->mutex);
617 return ret;
618}
619
620void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
621 struct ieee80211_vif *vif,
622 struct ieee80211_sta *sta)
623{
624 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
625 struct ieee80211_sta *cur_sta;
626 bool wait_for_phy = false;
627
628 mutex_lock(&mvm->mutex);
629
630 IWL_DEBUG_TDLS(mvm, "TDLS cancel channel switch with %pM\n", sta->addr);
631
632 /* we only support a single peer for channel switching */
633 if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA) {
634 IWL_DEBUG_TDLS(mvm, "No ch switch peer - %pM\n", sta->addr);
635 goto out;
636 }
637
638 cur_sta = rcu_dereference_protected(
639 mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
640 lockdep_is_held(&mvm->mutex));
641 /* make sure it's the same peer */
642 if (cur_sta != sta)
643 goto out;
644
645 /*
646 * If we're currently in a switch because of the now canceled peer,
647 * wait a DTIM here to make sure the phy is back on the base channel.
648 * We can't otherwise force it.
649 */
650 if (mvm->tdls_cs.cur_sta_id == mvm->tdls_cs.peer.sta_id &&
651 mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE)
652 wait_for_phy = true;
653
654 mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
655 dev_kfree_skb(mvm->tdls_cs.peer.skb);
656 mvm->tdls_cs.peer.skb = NULL;
657
658out:
659 mutex_unlock(&mvm->mutex);
660
661 /* make sure the phy is on the base channel */
662 if (wait_for_phy)
663 msleep(TU_TO_MS(vif->bss_conf.dtim_period *
664 vif->bss_conf.beacon_int));
665
666 /* flush the channel switch state */
667 flush_delayed_work(&mvm->tdls_cs.dwork);
668
669 IWL_DEBUG_TDLS(mvm, "TDLS ending channel switch with %pM\n", sta->addr);
670}
671
672void
673iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
674 struct ieee80211_vif *vif,
675 struct ieee80211_tdls_ch_sw_params *params)
676{
677 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
678 enum iwl_tdls_channel_switch_type type;
679 unsigned int delay;
680 const char *action_str =
681 params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ?
682 "REQ" : "RESP";
683
684 mutex_lock(&mvm->mutex);
685
686 IWL_DEBUG_TDLS(mvm,
687 "Received TDLS ch switch action %s from %pM status %d\n",
688 action_str, params->sta->addr, params->status);
689
690 /*
691 * we got a non-zero status from a peer we were switching to - move to
692 * the idle state and retry again later
693 */
694 if (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE &&
695 params->status != 0 &&
696 mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
697 mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) {
698 struct ieee80211_sta *cur_sta;
699
700 /* make sure it's the same peer */
701 cur_sta = rcu_dereference_protected(
702 mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
703 lockdep_is_held(&mvm->mutex));
704 if (cur_sta == params->sta) {
705 iwl_mvm_tdls_update_cs_state(mvm,
706 IWL_MVM_TDLS_SW_IDLE);
707 goto retry;
708 }
709 }
710
711 type = (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST) ?
712 TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH : TDLS_MOVE_CH;
713
714 iwl_mvm_tdls_config_channel_switch(mvm, vif, type, params->sta->addr,
715 params->sta->tdls_initiator, 0,
716 params->chandef, params->timestamp,
717 params->switch_time,
718 params->switch_timeout,
719 params->tmpl_skb,
720 params->ch_sw_tm_ie);
721
722retry:
723 /* register a timeout in case we don't succeed in switching */
724 delay = vif->bss_conf.dtim_period * vif->bss_conf.beacon_int *
725 1024 / 1000;
726 mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
727 msecs_to_jiffies(delay));
728 mutex_unlock(&mvm->mutex);
729}