v6.8
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2014 Intel Mobile Communications GmbH
 * Copyright (C) 2017 Intel Deutschland GmbH
 * Copyright (C) 2018-2020, 2022-2023 Intel Corporation
 */
#include <linux/etherdevice.h>
#include "mvm.h"
#include "time-event.h"
#include "iwl-io.h"
#include "iwl-prph.h"

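/*
 * A TU (time unit) is 1024 usec in IEEE 802.11, so converting TUs to
 * microseconds is a multiplication by 1024, and converting to
 * milliseconds adds a division by 1000.
 */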
#define TU_TO_US(x) ((x) * 1024)
#define TU_TO_MS(x) (TU_TO_US(x) / 1000)

void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (!sta || IS_ERR(sta) || !sta->tdls)
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);
		ieee80211_tdls_oper_request(mvmsta->vif, sta->addr,
				NL80211_TDLS_TEARDOWN,
				WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED,
				GFP_KERNEL);
	}
}

int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	int count = 0;
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (!sta || IS_ERR(sta) || !sta->tdls)
			continue;

		if (vif) {
			mvmsta = iwl_mvm_sta_from_mac80211(sta);
			if (mvmsta->vif != vif)
				continue;
		}

		count++;
	}

	return count;
}

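/*
 * Send the TDLS_CONFIG_CMD to the firmware: it carries the MAC id/color of
 * the vif and one entry (station id, TID, initiator flag) per connected
 * TDLS peer. The response payload is not used beyond a length sanity check.
 */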
static void iwl_mvm_tdls_config(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_rx_packet *pkt;
	struct iwl_tdls_config_res *resp;
	struct iwl_tdls_config_cmd tdls_cfg_cmd = {};
	struct iwl_host_cmd cmd = {
		.id = TDLS_CONFIG_CMD,
		.flags = CMD_WANT_SKB,
		.data = { &tdls_cfg_cmd, },
		.len = { sizeof(struct iwl_tdls_config_cmd), },
	};
	struct ieee80211_sta *sta;
	int ret, i, cnt;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);

	tdls_cfg_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
	tdls_cfg_cmd.tx_to_ap_tid = IWL_MVM_TDLS_FW_TID;
	tdls_cfg_cmd.tx_to_ap_ssn = cpu_to_le16(0); /* not used for now */

	/* for now the Tx cmd is empty and unused */

	/* populate TDLS peer data */
	cnt = 0;
	for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta) || !sta->tdls)
			continue;

		tdls_cfg_cmd.sta_info[cnt].sta_id = i;
		tdls_cfg_cmd.sta_info[cnt].tx_to_peer_tid =
							IWL_MVM_TDLS_FW_TID;
		tdls_cfg_cmd.sta_info[cnt].tx_to_peer_ssn = cpu_to_le16(0);
		tdls_cfg_cmd.sta_info[cnt].is_initiator =
				cpu_to_le32(sta->tdls_initiator ? 1 : 0);

		cnt++;
	}

	tdls_cfg_cmd.tdls_peer_count = cnt;
	IWL_DEBUG_TDLS(mvm, "send TDLS config to FW for %d peers\n", cnt);

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (WARN_ON_ONCE(ret))
		return;

	pkt = cmd.resp_pkt;

	WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp));

	/* we don't really care about the response at this point */

	iwl_free_resp(&cmd);
}

void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			       bool sta_added)
{
	int tdls_sta_cnt = iwl_mvm_tdls_sta_count(mvm, vif);

	/* when the first peer joins, send a power update first */
	if (tdls_sta_cnt == 1 && sta_added)
		iwl_mvm_power_update_mac(mvm);

	/* Configure the FW with TDLS peer info only if TDLS channel switch
	 * capability is set.
	 * TDLS config data is currently used only in the TDLS channel switch
	 * code. It is also supposed to serve the TDLS buffer station feature,
	 * which is not yet implemented in FW.
	 */
	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH))
		iwl_mvm_tdls_config(mvm, vif);

	/* when the last peer leaves, send a power update last */
	if (tdls_sta_cnt == 0 && !sta_added)
		iwl_mvm_power_update_mac(mvm);
}

void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
					   struct ieee80211_vif *vif,
					   unsigned int link_id)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int;

	/* Protect the session to hear the TDLS setup response on the channel */
	mutex_lock(&mvm->mutex);
	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
		iwl_mvm_schedule_session_protection(mvm, vif, duration,
						    duration, true, link_id);
	else
		iwl_mvm_protect_session(mvm, vif, duration,
					duration, 100, true);
	mutex_unlock(&mvm->mutex);
}

static const char *
iwl_mvm_tdls_cs_state_str(enum iwl_mvm_tdls_cs_state state)
{
	switch (state) {
	case IWL_MVM_TDLS_SW_IDLE:
		return "IDLE";
	case IWL_MVM_TDLS_SW_REQ_SENT:
		return "REQ SENT";
	case IWL_MVM_TDLS_SW_RESP_RCVD:
		return "RESP RECEIVED";
	case IWL_MVM_TDLS_SW_REQ_RCVD:
		return "REQ RECEIVED";
	case IWL_MVM_TDLS_SW_ACTIVE:
		return "ACTIVE";
	}

	return NULL;
}

static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm,
					 enum iwl_mvm_tdls_cs_state state)
{
	if (mvm->tdls_cs.state == state)
		return;

	IWL_DEBUG_TDLS(mvm, "TDLS channel switch state: %s -> %s\n",
		       iwl_mvm_tdls_cs_state_str(mvm->tdls_cs.state),
		       iwl_mvm_tdls_cs_state_str(state));
	mvm->tdls_cs.state = state;

	/* we only send requests to our switching peer - update sent time */
	if (state == IWL_MVM_TDLS_SW_REQ_SENT)
		mvm->tdls_cs.peer.sent_timestamp = iwl_mvm_get_systime(mvm);

	if (state == IWL_MVM_TDLS_SW_IDLE)
		mvm->tdls_cs.cur_sta_id = IWL_MVM_INVALID_STA;
}

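/*
 * Handle the firmware's TDLS channel-switch notification: a zero status
 * means the switch did not start, so fall back to idle; otherwise mark the
 * switch as active and re-arm the delayed work one DTIM interval later.
 */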
void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_tdls_channel_switch_notif *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	unsigned int delay;
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_vif *vif;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	lockdep_assert_held(&mvm->mutex);

	/* can fail sometimes */
	if (!le32_to_cpu(notif->status)) {
		iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
		return;
	}

	if (WARN_ON(sta_id >= mvm->fw->ucode_capa.num_stations))
		return;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));
	/* the station may not be here, but if it is, it must be a TDLS peer */
	if (IS_ERR_OR_NULL(sta) || WARN_ON(!sta->tdls))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	vif = mvmsta->vif;

	/*
	 * Update state and possibly switch again after this is over (DTIM).
	 * Also convert TU to msec.
	 */
	delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
	mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
			 msecs_to_jiffies(delay));

	iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_ACTIVE);
}

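/*
 * Validate a requested channel-switch action against the current state.
 * Roughly: when idle anything but a bare move back to the base channel is
 * allowed; with a request pending only the same peer may answer, and a
 * crossing request is honored only if the peer is the link initiator;
 * while waiting for the firmware's "active" notification new actions are
 * rejected; when active only a move back to the base channel by the
 * current off-channel peer is accepted.
 */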
static int
iwl_mvm_tdls_check_action(struct iwl_mvm *mvm,
			  enum iwl_tdls_channel_switch_type type,
			  const u8 *peer, bool peer_initiator, u32 timestamp)
{
	bool same_peer = false;
	int ret = 0;

	/* get the existing peer if it's there */
	if (mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE &&
	    mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) {
		struct ieee80211_sta *sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
				lockdep_is_held(&mvm->mutex));
		if (!IS_ERR_OR_NULL(sta))
			same_peer = ether_addr_equal(peer, sta->addr);
	}

	switch (mvm->tdls_cs.state) {
	case IWL_MVM_TDLS_SW_IDLE:
		/*
		 * might be spurious packet from the peer after the switch is
		 * already done
		 */
		if (type == TDLS_MOVE_CH)
			ret = -EINVAL;
		break;
	case IWL_MVM_TDLS_SW_REQ_SENT:
		/* only allow requests from the same peer */
		if (!same_peer)
			ret = -EBUSY;
		else if (type == TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH &&
			 !peer_initiator)
			/*
			 * We received a ch-switch request while an outgoing
			 * one is pending. Allow it if the peer is the link
			 * initiator.
			 */
			ret = -EBUSY;
		else if (type == TDLS_SEND_CHAN_SW_REQ)
			/* wait for idle before sending another request */
			ret = -EBUSY;
		else if (timestamp <= mvm->tdls_cs.peer.sent_timestamp)
			/* we got a stale response - ignore it */
			ret = -EINVAL;
		break;
	case IWL_MVM_TDLS_SW_RESP_RCVD:
		/*
		 * we are waiting for the FW to give an "active" notification,
		 * so ignore requests in the meantime
		 */
		ret = -EBUSY;
		break;
	case IWL_MVM_TDLS_SW_REQ_RCVD:
		/* as above, allow the link initiator to proceed */
		if (type == TDLS_SEND_CHAN_SW_REQ) {
			if (!same_peer)
				ret = -EBUSY;
			else if (peer_initiator) /* they are the initiator */
				ret = -EBUSY;
		} else if (type == TDLS_MOVE_CH) {
			ret = -EINVAL;
		}
		break;
	case IWL_MVM_TDLS_SW_ACTIVE:
		/*
		 * the only valid request when active is a request to return
		 * to the base channel by the current off-channel peer
		 */
		if (type != TDLS_MOVE_CH || !same_peer)
			ret = -EBUSY;
		break;
	}

	if (ret)
		IWL_DEBUG_TDLS(mvm,
			       "Invalid TDLS action %d state %d peer %pM same_peer %d initiator %d\n",
			       type, mvm->tdls_cs.state, peer, same_peer,
			       peer_initiator);

	return ret;
}

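/*
 * Build and send the TDLS_CHANNEL_SWITCH_CMD: it carries the switch type,
 * the timing parameters, the target channel and the channel-switch template
 * frame together with its Tx command. On success the local channel-switch
 * state is advanced to match the requested action.
 */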
static int
iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
				   struct ieee80211_vif *vif,
				   enum iwl_tdls_channel_switch_type type,
				   const u8 *peer, bool peer_initiator,
				   u8 oper_class,
				   struct cfg80211_chan_def *chandef,
				   u32 timestamp, u16 switch_time,
				   u16 switch_timeout, struct sk_buff *skb,
				   u32 ch_sw_tm_ie)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_tx_info *info;
	struct ieee80211_hdr *hdr;
	struct iwl_tdls_channel_switch_cmd cmd = {0};
	struct iwl_tdls_channel_switch_cmd_tail *tail =
		iwl_mvm_chan_info_cmd_tail(mvm, &cmd.ci);
	u16 len = sizeof(cmd) - iwl_mvm_chan_info_padding(mvm);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_tdls_check_action(mvm, type, peer, peer_initiator,
					timestamp);
	if (ret)
		return ret;

	if (!skb || WARN_ON(skb->len > IWL_TDLS_CH_SW_FRAME_MAX_SIZE)) {
		ret = -EINVAL;
		goto out;
	}

	cmd.switch_type = type;
	tail->timing.frame_timestamp = cpu_to_le32(timestamp);
	tail->timing.switch_time = cpu_to_le32(switch_time);
	tail->timing.switch_timeout = cpu_to_le32(switch_timeout);

	rcu_read_lock();
	sta = ieee80211_find_sta(vif, peer);
	if (!sta) {
		rcu_read_unlock();
		ret = -ENOENT;
		goto out;
	}
	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	cmd.peer_sta_id = cpu_to_le32(mvmsta->deflink.sta_id);

	if (!chandef) {
		if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
		    mvm->tdls_cs.peer.chandef.chan) {
			/* actually moving to the channel */
			chandef = &mvm->tdls_cs.peer.chandef;
		} else if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_ACTIVE &&
			   type == TDLS_MOVE_CH) {
			/* we need to return to base channel */
			struct ieee80211_chanctx_conf *chanctx =
					rcu_dereference(vif->bss_conf.chanctx_conf);

			if (WARN_ON_ONCE(!chanctx)) {
				rcu_read_unlock();
				goto out;
			}

			chandef = &chanctx->def;
		}
	}

	if (chandef)
		iwl_mvm_set_chan_info_chandef(mvm, &cmd.ci, chandef);

	/* keep quota calculation simple for now - 50% of DTIM for TDLS */
	tail->timing.max_offchan_duration =
			cpu_to_le32(TU_TO_US(vif->bss_conf.dtim_period *
					     vif->bss_conf.beacon_int) / 2);

	/* Switch time is the first element in the switch-timing IE. */
	tail->frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2);

	info = IEEE80211_SKB_CB(skb);
	hdr = (void *)skb->data;
	if (info->control.hw_key) {
		if (info->control.hw_key->cipher != WLAN_CIPHER_SUITE_CCMP) {
			rcu_read_unlock();
			ret = -EINVAL;
			goto out;
		}
		iwl_mvm_set_tx_cmd_ccmp(info, &tail->frame.tx_cmd);
	}

	iwl_mvm_set_tx_cmd(mvm, skb, &tail->frame.tx_cmd, info,
			   mvmsta->deflink.sta_id);

	iwl_mvm_set_tx_cmd_rate(mvm, &tail->frame.tx_cmd, info, sta,
				hdr->frame_control);
	rcu_read_unlock();

	memcpy(tail->frame.data, skb->data, skb->len);

	ret = iwl_mvm_send_cmd_pdu(mvm, TDLS_CHANNEL_SWITCH_CMD, 0, len, &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to send TDLS_CHANNEL_SWITCH cmd: %d\n",
			ret);
		goto out;
	}

	/* channel switch has started, update state */
	if (type != TDLS_MOVE_CH) {
		mvm->tdls_cs.cur_sta_id = mvmsta->deflink.sta_id;
		iwl_mvm_tdls_update_cs_state(mvm,
					     type == TDLS_SEND_CHAN_SW_REQ ?
					     IWL_MVM_TDLS_SW_REQ_SENT :
					     IWL_MVM_TDLS_SW_REQ_RCVD);
	} else {
		iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_RESP_RCVD);
	}

out:

	/* channel switch failed - we are idle */
	if (ret)
		iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);

	return ret;
}

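/*
 * Delayed work that runs after an active channel switch has finished or
 * timed out: it returns the state machine to idle and, if a switching peer
 * is still registered, sends the next switch request, re-arming itself one
 * DTIM interval later.
 */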
void iwl_mvm_tdls_ch_switch_work(struct work_struct *work)
{
	struct iwl_mvm *mvm;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_vif *vif;
	unsigned int delay;
	int ret;

	mvm = container_of(work, struct iwl_mvm, tdls_cs.dwork.work);
	mutex_lock(&mvm->mutex);

	/* called after an active channel switch has finished or timed-out */
	iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);

	/* station might be gone, in that case do nothing */
	if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA)
		goto out;

	sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
				lockdep_is_held(&mvm->mutex));
	/* the station may not be here, but if it is, it must be a TDLS peer */
	if (!sta || IS_ERR(sta) || WARN_ON(!sta->tdls))
		goto out;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	vif = mvmsta->vif;
	ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
						 TDLS_SEND_CHAN_SW_REQ,
						 sta->addr,
						 mvm->tdls_cs.peer.initiator,
						 mvm->tdls_cs.peer.op_class,
						 &mvm->tdls_cs.peer.chandef,
						 0, 0, 0,
						 mvm->tdls_cs.peer.skb,
						 mvm->tdls_cs.peer.ch_sw_tm_ie);
	if (ret)
		IWL_ERR(mvm, "Not sending TDLS channel switch: %d\n", ret);

	/* retry after a DTIM if we failed sending now */
	delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
	schedule_delayed_work(&mvm->tdls_cs.dwork, msecs_to_jiffies(delay));
out:
	mutex_unlock(&mvm->mutex);
}

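/*
 * mac80211 entry point for starting a TDLS channel switch with a peer. Only
 * one switching peer is supported at a time; the template frame and switch
 * parameters are saved so the periodic work can keep re-sending the request.
 */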
int
iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
			    struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u8 oper_class,
			    struct cfg80211_chan_def *chandef,
			    struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_sta *mvmsta;
	unsigned int delay;
	int ret;

	mutex_lock(&mvm->mutex);

	IWL_DEBUG_TDLS(mvm, "TDLS channel switch with %pM ch %d width %d\n",
		       sta->addr, chandef->chan->center_freq, chandef->width);

	/* we only support a single peer for channel switching */
	if (mvm->tdls_cs.peer.sta_id != IWL_MVM_INVALID_STA) {
		IWL_DEBUG_TDLS(mvm,
			       "Existing peer. Can't start switch with %pM\n",
			       sta->addr);
		ret = -EBUSY;
		goto out;
	}

	ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
						 TDLS_SEND_CHAN_SW_REQ,
						 sta->addr, sta->tdls_initiator,
						 oper_class, chandef, 0, 0, 0,
						 tmpl_skb, ch_sw_tm_ie);
	if (ret)
		goto out;

	/*
	 * Mark the peer as "in tdls switch" for this vif. We only allow a
	 * single such peer per vif.
	 */
	mvm->tdls_cs.peer.skb = skb_copy(tmpl_skb, GFP_KERNEL);
	if (!mvm->tdls_cs.peer.skb) {
		ret = -ENOMEM;
		goto out;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	mvm->tdls_cs.peer.sta_id = mvmsta->deflink.sta_id;
	mvm->tdls_cs.peer.chandef = *chandef;
	mvm->tdls_cs.peer.initiator = sta->tdls_initiator;
	mvm->tdls_cs.peer.op_class = oper_class;
	mvm->tdls_cs.peer.ch_sw_tm_ie = ch_sw_tm_ie;

	/*
	 * Wait for 2 DTIM periods before attempting the next switch. The next
	 * switch will be made sooner if the current one completes before that.
	 */
	delay = 2 * TU_TO_MS(vif->bss_conf.dtim_period *
			     vif->bss_conf.beacon_int);
	mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
			 msecs_to_jiffies(delay));

out:
	mutex_unlock(&mvm->mutex);
	return ret;
}

void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					struct ieee80211_sta *sta)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct ieee80211_sta *cur_sta;
	bool wait_for_phy = false;

	mutex_lock(&mvm->mutex);

	IWL_DEBUG_TDLS(mvm, "TDLS cancel channel switch with %pM\n", sta->addr);

	/* we only support a single peer for channel switching */
	if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA) {
		IWL_DEBUG_TDLS(mvm, "No ch switch peer - %pM\n", sta->addr);
		goto out;
	}

	cur_sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
				lockdep_is_held(&mvm->mutex));
	/* make sure it's the same peer */
	if (cur_sta != sta)
		goto out;

	/*
	 * If we're currently in a switch because of the now canceled peer,
	 * wait a DTIM here to make sure the phy is back on the base channel.
	 * We can't otherwise force it.
	 */
	if (mvm->tdls_cs.cur_sta_id == mvm->tdls_cs.peer.sta_id &&
	    mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE)
		wait_for_phy = true;

	mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
	dev_kfree_skb(mvm->tdls_cs.peer.skb);
	mvm->tdls_cs.peer.skb = NULL;

out:
	mutex_unlock(&mvm->mutex);

	/* make sure the phy is on the base channel */
	if (wait_for_phy)
		msleep(TU_TO_MS(vif->bss_conf.dtim_period *
				vif->bss_conf.beacon_int));

	/* flush the channel switch state */
	flush_delayed_work(&mvm->tdls_cs.dwork);

	IWL_DEBUG_TDLS(mvm, "TDLS ending channel switch with %pM\n", sta->addr);
}

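/*
 * Handle a TDLS channel-switch request/response frame received from a peer:
 * a failed response from the peer we are switching to drops the state back
 * to idle; otherwise the matching TDLS_CHANNEL_SWITCH_CMD is sent and a
 * timeout is armed in case the switch never completes.
 */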
void
iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif,
				 struct ieee80211_tdls_ch_sw_params *params)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	enum iwl_tdls_channel_switch_type type;
	unsigned int delay;
	const char *action_str =
		params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ?
		"REQ" : "RESP";

	mutex_lock(&mvm->mutex);

	IWL_DEBUG_TDLS(mvm,
		       "Received TDLS ch switch action %s from %pM status %d\n",
		       action_str, params->sta->addr, params->status);

	/*
	 * we got a non-zero status from a peer we were switching to - move to
	 * the idle state and retry again later
	 */
	if (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE &&
	    params->status != 0 &&
	    mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
	    mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) {
		struct ieee80211_sta *cur_sta;

		/* make sure it's the same peer */
		cur_sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
				lockdep_is_held(&mvm->mutex));
		if (cur_sta == params->sta) {
			iwl_mvm_tdls_update_cs_state(mvm,
						     IWL_MVM_TDLS_SW_IDLE);
			goto retry;
		}
	}

	type = (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST) ?
	       TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH : TDLS_MOVE_CH;

	iwl_mvm_tdls_config_channel_switch(mvm, vif, type, params->sta->addr,
					   params->sta->tdls_initiator, 0,
					   params->chandef, params->timestamp,
					   params->switch_time,
					   params->switch_timeout,
					   params->tmpl_skb,
					   params->ch_sw_tm_ie);

retry:
	/* register a timeout in case we don't succeed in switching */
	delay = vif->bss_conf.dtim_period * vif->bss_conf.beacon_int *
		1024 / 1000;
	mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
			 msecs_to_jiffies(delay));
	mutex_unlock(&mvm->mutex);
}