v6.8
   1// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
   2/*
   3 * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
   4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
   5 * Copyright (C) 2017 Intel Deutschland GmbH
   6 */
   7#include <linux/jiffies.h>
   8#include <net/mac80211.h>
   9
  10#include "fw/notif-wait.h"
  11#include "iwl-trans.h"
  12#include "fw-api.h"
  13#include "time-event.h"
  14#include "mvm.h"
  15#include "iwl-io.h"
  16#include "iwl-prph.h"
  17
  18/*
  19 * For the high priority TE use a time event type that has similar priority to
  20 * the FW's action scan priority.
  21 */
  22#define IWL_MVM_ROC_TE_TYPE_NORMAL TE_P2P_DEVICE_DISCOVERABLE
  23#define IWL_MVM_ROC_TE_TYPE_MGMT_TX TE_P2P_CLIENT_ASSOC
  24
  25void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
  26			   struct iwl_mvm_time_event_data *te_data)
  27{
  28	lockdep_assert_held(&mvm->time_event_lock);
  29
  30	if (!te_data || !te_data->vif)
  31		return;
  32
  33	list_del(&te_data->list);
  34
  35	/*
  36	 * the list is only used for AUX ROC events so make sure it is always
  37	 * initialized
  38	 */
  39	INIT_LIST_HEAD(&te_data->list);
  40
  41	te_data->running = false;
  42	te_data->uid = 0;
  43	te_data->id = TE_MAX;
  44	te_data->vif = NULL;
  45	te_data->link_id = -1;
  46}
  47
  48void iwl_mvm_roc_done_wk(struct work_struct *wk)
  49{
  50	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);
  51
  52	/*
  53	 * Clear the ROC_RUNNING status bit.
  54	 * This will cause the TX path to drop offchannel transmissions.
  55	 * That would also be done by mac80211, but it is racy, in particular
  56	 * in the case that the time event actually completed in the firmware
  57	 * (which is handled in iwl_mvm_te_handle_notif).
  58	 */
  59	clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
  60
  61	synchronize_net();
  62
  63	/*
  64	 * Flush the offchannel queue -- this is called when the time
  65	 * event finishes or is canceled, so that frames queued for it
  66	 * won't get stuck on the queue and be transmitted in the next
  67	 * time event.
  68	 */
  69
  70	mutex_lock(&mvm->mutex);
  71	if (test_and_clear_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status)) {
  72		struct iwl_mvm_vif *mvmvif;
  73
  74		/*
  75		 * NB: access to this pointer would be racy, but the flush bit
  76		 * can only be set when we had a P2P-Device VIF, and we have a
  77		 * flush of this work in iwl_mvm_prepare_mac_removal() so it's
  78		 * not really racy.
  79		 */
  80
  81		if (!WARN_ON(!mvm->p2p_device_vif)) {
  82			struct ieee80211_vif *vif = mvm->p2p_device_vif;
  83
  84			mvmvif = iwl_mvm_vif_from_mac80211(vif);
  85			iwl_mvm_flush_sta(mvm, mvmvif->deflink.bcast_sta.sta_id,
  86					  mvmvif->deflink.bcast_sta.tfd_queue_msk);
  87
  88			if (mvm->mld_api_is_used) {
  89				iwl_mvm_mld_rm_bcast_sta(mvm, vif,
  90							 &vif->bss_conf);
  91
  92				iwl_mvm_link_changed(mvm, vif, &vif->bss_conf,
  93						     LINK_CONTEXT_MODIFY_ACTIVE,
  94						     false);
  95			} else {
  96				iwl_mvm_rm_p2p_bcast_sta(mvm, vif);
  97				iwl_mvm_binding_remove_vif(mvm, vif);
  98			}
  99
 100			/* Do not remove the PHY context as removing and adding
 101			 * a PHY context has timing overheads. Leaving it
 102			 * configured in FW would be useful in case the next ROC
  103			 * is on the same channel.
 104			 */
 105		}
 106	}
 107
 108	/*
 109	 * Clear the ROC_AUX_RUNNING status bit.
 110	 * This will cause the TX path to drop offchannel transmissions.
 111	 * That would also be done by mac80211, but it is racy, in particular
 112	 * in the case that the time event actually completed in the firmware
 113	 * (which is handled in iwl_mvm_te_handle_notif).
 114	 */
 115	if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) {
 116		/* do the same in case of hot spot 2.0 */
 117		iwl_mvm_flush_sta(mvm, mvm->aux_sta.sta_id,
 118				  mvm->aux_sta.tfd_queue_msk);
 119
 120		if (mvm->mld_api_is_used) {
 121			iwl_mvm_mld_rm_aux_sta(mvm);
 122			goto out_unlock;
 123		}
 124
  125		/* In newer versions of this command an aux station is added only
  126		 * when a dedicated tx queue is used, and it needs to be removed
  127		 * at the end of use */
 128		if (iwl_mvm_has_new_station_api(mvm->fw))
 129			iwl_mvm_rm_aux_sta(mvm);
 130	}
 131
 132out_unlock:
 133	mutex_unlock(&mvm->mutex);
 134}
 135
 136static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
 137{
 138	/*
 139	 * Of course, our status bit is just as racy as mac80211, so in
 140	 * addition, fire off the work struct which will drop all frames
 141	 * from the hardware queues that made it through the race. First
 142	 * it will of course synchronize the TX path to make sure that
 143	 * any *new* TX will be rejected.
 144	 */
 145	schedule_work(&mvm->roc_done_wk);
 146}
 147
 148static void iwl_mvm_csa_noa_start(struct iwl_mvm *mvm)
 149{
 150	struct ieee80211_vif *csa_vif;
 151
 152	rcu_read_lock();
 153
 154	csa_vif = rcu_dereference(mvm->csa_vif);
 155	if (!csa_vif || !csa_vif->bss_conf.csa_active)
 156		goto out_unlock;
 157
 158	IWL_DEBUG_TE(mvm, "CSA NOA started\n");
 159
 160	/*
 161	 * CSA NoA is started but we still have beacons to
 162	 * transmit on the current channel.
 163	 * So we just do nothing here and the switch
 164	 * will be performed on the last TBTT.
 165	 */
 166	if (!ieee80211_beacon_cntdwn_is_complete(csa_vif)) {
 167		IWL_WARN(mvm, "CSA NOA started too early\n");
 168		goto out_unlock;
 169	}
 170
 171	ieee80211_csa_finish(csa_vif);
 172
 173	rcu_read_unlock();
 174
 175	RCU_INIT_POINTER(mvm->csa_vif, NULL);
 176
 177	return;
 178
 179out_unlock:
 180	rcu_read_unlock();
 181}
 182
 183static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
 184					struct ieee80211_vif *vif,
 185					const char *errmsg)
 186{
 187	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 188
 189	if (vif->type != NL80211_IFTYPE_STATION)
 190		return false;
 191
 192	if (!mvmvif->csa_bcn_pending && vif->cfg.assoc &&
 193	    vif->bss_conf.dtim_period)
 194		return false;
 195	if (errmsg)
 196		IWL_ERR(mvm, "%s\n", errmsg);
 197
 198	if (mvmvif->csa_bcn_pending) {
 199		struct iwl_mvm_sta *mvmsta;
 200
 201		rcu_read_lock();
 202		mvmsta = iwl_mvm_sta_from_staid_rcu(mvm,
 203						    mvmvif->deflink.ap_sta_id);
 204		if (!WARN_ON(!mvmsta))
 205			iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);
 206		rcu_read_unlock();
 207	}
 208
 209	if (vif->cfg.assoc) {
 210		/*
 211		 * When not associated, this will be called from
 212		 * iwl_mvm_event_mlme_callback_ini()
 213		 */
 214		iwl_dbg_tlv_time_point(&mvm->fwrt,
 215				       IWL_FW_INI_TIME_POINT_ASSOC_FAILED,
 216				       NULL);
 217	}
 218
 219	iwl_mvm_connection_loss(mvm, vif, errmsg);
 220	return true;
 221}
 222
 223static void
 224iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm,
 225			     struct iwl_mvm_time_event_data *te_data,
 226			     struct iwl_time_event_notif *notif)
 227{
 228	struct ieee80211_vif *vif = te_data->vif;
 229	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 230
 231	if (!notif->status)
 232		IWL_DEBUG_TE(mvm, "CSA time event failed to start\n");
 233
 234	switch (te_data->vif->type) {
 235	case NL80211_IFTYPE_AP:
 236		if (!notif->status)
 237			mvmvif->csa_failed = true;
 238		iwl_mvm_csa_noa_start(mvm);
 239		break;
 240	case NL80211_IFTYPE_STATION:
 241		if (!notif->status) {
 242			iwl_mvm_connection_loss(mvm, vif,
 243						"CSA TE failed to start");
 244			break;
 245		}
 246		iwl_mvm_csa_client_absent(mvm, te_data->vif);
 247		cancel_delayed_work(&mvmvif->csa_work);
 248		ieee80211_chswitch_done(te_data->vif, true, 0);
 249		break;
 250	default:
 251		/* should never happen */
 252		WARN_ON_ONCE(1);
 253		break;
 254	}
 255
 256	/* we don't need it anymore */
 257	iwl_mvm_te_clear_data(mvm, te_data);
 258}
 259
 260static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm,
 261				     struct iwl_time_event_notif *notif,
 262				     struct iwl_mvm_time_event_data *te_data)
 263{
 264	struct iwl_fw_dbg_trigger_tlv *trig;
 265	struct iwl_fw_dbg_trigger_time_event *te_trig;
 266	int i;
 267
 268	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt,
 269				     ieee80211_vif_to_wdev(te_data->vif),
 270				     FW_DBG_TRIGGER_TIME_EVENT);
 271	if (!trig)
 272		return;
 273
 274	te_trig = (void *)trig->data;
 275
 276	for (i = 0; i < ARRAY_SIZE(te_trig->time_events); i++) {
 277		u32 trig_te_id = le32_to_cpu(te_trig->time_events[i].id);
 278		u32 trig_action_bitmap =
 279			le32_to_cpu(te_trig->time_events[i].action_bitmap);
 280		u32 trig_status_bitmap =
 281			le32_to_cpu(te_trig->time_events[i].status_bitmap);
 282
 283		if (trig_te_id != te_data->id ||
 284		    !(trig_action_bitmap & le32_to_cpu(notif->action)) ||
 285		    !(trig_status_bitmap & BIT(le32_to_cpu(notif->status))))
 286			continue;
 287
 288		iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
 289					"Time event %d Action 0x%x received status: %d",
 290					te_data->id,
 291					le32_to_cpu(notif->action),
 292					le32_to_cpu(notif->status));
 293		break;
 294	}
 295}
 296
 297static void iwl_mvm_p2p_roc_finished(struct iwl_mvm *mvm)
 298{
 299	/*
 300	 * If the IWL_MVM_STATUS_NEED_FLUSH_P2P is already set, then the
 301	 * roc_done_wk is already scheduled or running, so don't schedule it
 302	 * again to avoid a race where the roc_done_wk clears this bit after
 303	 * it is set here, affecting the next run of the roc_done_wk.
 304	 */
 305	if (!test_and_set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status))
 306		iwl_mvm_roc_finished(mvm);
 307}
 308
 309/*
 310 * Handles a FW notification for an event that is known to the driver.
 311 *
 312 * @mvm: the mvm component
 313 * @te_data: the time event data
  314 * @notif: the notification data corresponding to the time event data.
 315 */
 316static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
 317				    struct iwl_mvm_time_event_data *te_data,
 318				    struct iwl_time_event_notif *notif)
 319{
 320	lockdep_assert_held(&mvm->time_event_lock);
 321
 322	IWL_DEBUG_TE(mvm, "Handle time event notif - UID = 0x%x action %d\n",
 323		     le32_to_cpu(notif->unique_id),
 324		     le32_to_cpu(notif->action));
 325
 326	iwl_mvm_te_check_trigger(mvm, notif, te_data);
 327
 328	/*
 329	 * The FW sends the start/end time event notifications even for events
 330	 * that it fails to schedule. This is indicated in the status field of
 331	 * the notification. This happens in cases that the scheduler cannot
 332	 * find a schedule that can handle the event (for example requesting a
  333	 * P2P Device discoverability, while there are other higher priority
 334	 * events in the system).
 335	 */
 336	if (!le32_to_cpu(notif->status)) {
 337		const char *msg;
 338
 339		if (notif->action & cpu_to_le32(TE_V2_NOTIF_HOST_EVENT_START))
 340			msg = "Time Event start notification failure";
 341		else
 342			msg = "Time Event end notification failure";
 343
 344		IWL_DEBUG_TE(mvm, "%s\n", msg);
 345
 346		if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, msg)) {
 347			iwl_mvm_te_clear_data(mvm, te_data);
 348			return;
 349		}
 350	}
 351
 352	if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_END) {
 353		IWL_DEBUG_TE(mvm,
 354			     "TE ended - current time %lu, estimated end %lu\n",
 355			     jiffies, te_data->end_jiffies);
 356
 357		switch (te_data->vif->type) {
 358		case NL80211_IFTYPE_P2P_DEVICE:
 359			ieee80211_remain_on_channel_expired(mvm->hw);
 360			iwl_mvm_p2p_roc_finished(mvm);
 361			break;
 362		case NL80211_IFTYPE_STATION:
 363			/*
 364			 * If we are switching channel, don't disconnect
 365			 * if the time event is already done. Beacons can
 366			 * be delayed a bit after the switch.
 367			 */
 368			if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) {
 369				IWL_DEBUG_TE(mvm,
 370					     "No beacon heard and the CS time event is over, don't disconnect\n");
 371				break;
 372			}
 373
 374			/*
 375			 * By now, we should have finished association
 376			 * and know the dtim period.
 377			 */
 378			iwl_mvm_te_check_disconnect(mvm, te_data->vif,
 379				!te_data->vif->cfg.assoc ?
 380				"Not associated and the time event is over already..." :
 381				"No beacon heard and the time event is over already...");
 382			break;
 383		default:
 384			break;
 385		}
 386
 387		iwl_mvm_te_clear_data(mvm, te_data);
 388	} else if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_START) {
 389		te_data->running = true;
 390		te_data->end_jiffies = TU_TO_EXP_TIME(te_data->duration);
 391
 392		if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
 393			set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
 394			ieee80211_ready_on_channel(mvm->hw);
 395		} else if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) {
 396			iwl_mvm_te_handle_notify_csa(mvm, te_data, notif);
 397		}
 398	} else {
 399		IWL_WARN(mvm, "Got TE with unknown action\n");
 400	}
 401}
 402
 403void iwl_mvm_rx_roc_notif(struct iwl_mvm *mvm,
 404			  struct iwl_rx_cmd_buffer *rxb)
 405{
 406	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 407	struct iwl_roc_notif *notif = (void *)pkt->data;
 408
 409	if (le32_to_cpu(notif->success) && le32_to_cpu(notif->started) &&
 410	    le32_to_cpu(notif->activity) == ROC_ACTIVITY_HOTSPOT) {
 411		set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
 412		ieee80211_ready_on_channel(mvm->hw);
 413	} else {
 414		iwl_mvm_roc_finished(mvm);
 415		ieee80211_remain_on_channel_expired(mvm->hw);
 416	}
 417}
 418
 419/*
  420 * Handle an Aux ROC time event
 421 */
 422static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
 423					   struct iwl_time_event_notif *notif)
 424{
 425	struct iwl_mvm_time_event_data *aux_roc_te = NULL, *te_data;
 426
 427	list_for_each_entry(te_data, &mvm->aux_roc_te_list, list) {
 428		if (le32_to_cpu(notif->unique_id) == te_data->uid) {
 429			aux_roc_te = te_data;
 430			break;
 431		}
 432	}
  433	if (!aux_roc_te) /* Not an Aux ROC time event */
 434		return -EINVAL;
 435
 436	iwl_mvm_te_check_trigger(mvm, notif, te_data);
 437
 438	IWL_DEBUG_TE(mvm,
 439		     "Aux ROC time event notification  - UID = 0x%x action %d (error = %d)\n",
 440		     le32_to_cpu(notif->unique_id),
 441		     le32_to_cpu(notif->action), le32_to_cpu(notif->status));
 442
 443	if (!le32_to_cpu(notif->status) ||
 444	    le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_END) {
 445		/* End TE, notify mac80211 */
 446		ieee80211_remain_on_channel_expired(mvm->hw);
 447		iwl_mvm_roc_finished(mvm); /* flush aux queue */
 448		list_del(&te_data->list); /* remove from list */
 449		te_data->running = false;
 450		te_data->vif = NULL;
 451		te_data->uid = 0;
 452		te_data->id = TE_MAX;
 453	} else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) {
 454		set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
 455		te_data->running = true;
 456		ieee80211_ready_on_channel(mvm->hw); /* Start TE */
 457	} else {
 458		IWL_DEBUG_TE(mvm,
 459			     "ERROR: Unknown Aux ROC Time Event (action = %d)\n",
 460			     le32_to_cpu(notif->action));
 461		return -EINVAL;
 462	}
 463
 464	return 0;
 465}
 466
 467/*
 468 * The Rx handler for time event notifications
 469 */
 470void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
 471				 struct iwl_rx_cmd_buffer *rxb)
 472{
 473	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 474	struct iwl_time_event_notif *notif = (void *)pkt->data;
 475	struct iwl_mvm_time_event_data *te_data, *tmp;
 476
 477	IWL_DEBUG_TE(mvm, "Time event notification - UID = 0x%x action %d\n",
 478		     le32_to_cpu(notif->unique_id),
 479		     le32_to_cpu(notif->action));
 480
 481	spin_lock_bh(&mvm->time_event_lock);
 482	/* This time event is triggered for Aux ROC request */
 483	if (!iwl_mvm_aux_roc_te_handle_notif(mvm, notif))
 484		goto unlock;
 485
 486	list_for_each_entry_safe(te_data, tmp, &mvm->time_event_list, list) {
 487		if (le32_to_cpu(notif->unique_id) == te_data->uid)
 488			iwl_mvm_te_handle_notif(mvm, te_data, notif);
 489	}
 490unlock:
 491	spin_unlock_bh(&mvm->time_event_lock);
 492}
 493
 494static bool iwl_mvm_te_notif(struct iwl_notif_wait_data *notif_wait,
 495			     struct iwl_rx_packet *pkt, void *data)
 496{
 497	struct iwl_mvm *mvm =
 498		container_of(notif_wait, struct iwl_mvm, notif_wait);
 499	struct iwl_mvm_time_event_data *te_data = data;
 500	struct iwl_time_event_notif *resp;
 501	int resp_len = iwl_rx_packet_payload_len(pkt);
 502
 503	if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_NOTIFICATION))
 504		return true;
 505
 506	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
 507		IWL_ERR(mvm, "Invalid TIME_EVENT_NOTIFICATION response\n");
 508		return true;
 509	}
 510
 511	resp = (void *)pkt->data;
 512
 513	/* te_data->uid is already set in the TIME_EVENT_CMD response */
 514	if (le32_to_cpu(resp->unique_id) != te_data->uid)
 515		return false;
 516
 517	IWL_DEBUG_TE(mvm, "TIME_EVENT_NOTIFICATION response - UID = 0x%x\n",
 518		     te_data->uid);
 519	if (!resp->status)
 520		IWL_ERR(mvm,
 521			"TIME_EVENT_NOTIFICATION received but not executed\n");
 522
 523	return true;
 524}
 525
 526static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait,
 527					struct iwl_rx_packet *pkt, void *data)
 528{
 529	struct iwl_mvm *mvm =
 530		container_of(notif_wait, struct iwl_mvm, notif_wait);
 531	struct iwl_mvm_time_event_data *te_data = data;
 532	struct iwl_time_event_resp *resp;
 533	int resp_len = iwl_rx_packet_payload_len(pkt);
 534
 535	if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_CMD))
 536		return true;
 537
 538	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
 539		IWL_ERR(mvm, "Invalid TIME_EVENT_CMD response\n");
 540		return true;
 541	}
 542
 543	resp = (void *)pkt->data;
 544
 545	/* we should never get a response to another TIME_EVENT_CMD here */
 546	if (WARN_ON_ONCE(le32_to_cpu(resp->id) != te_data->id))
 547		return false;
 548
 549	te_data->uid = le32_to_cpu(resp->unique_id);
 550	IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
 551		     te_data->uid);
 552	return true;
 553}
 554
 555static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
 556				       struct ieee80211_vif *vif,
 557				       struct iwl_mvm_time_event_data *te_data,
 558				       struct iwl_time_event_cmd *te_cmd)
 559{
 560	static const u16 time_event_response[] = { TIME_EVENT_CMD };
 561	struct iwl_notification_wait wait_time_event;
 562	int ret;
 563
 564	lockdep_assert_held(&mvm->mutex);
 565
 566	IWL_DEBUG_TE(mvm, "Add new TE, duration %d TU\n",
 567		     le32_to_cpu(te_cmd->duration));
 568
 569	spin_lock_bh(&mvm->time_event_lock);
 570	if (WARN_ON(te_data->id != TE_MAX)) {
 571		spin_unlock_bh(&mvm->time_event_lock);
 572		return -EIO;
 573	}
 574	te_data->vif = vif;
 575	te_data->duration = le32_to_cpu(te_cmd->duration);
 576	te_data->id = le32_to_cpu(te_cmd->id);
 577	list_add_tail(&te_data->list, &mvm->time_event_list);
 578	spin_unlock_bh(&mvm->time_event_lock);
 579
 580	/*
 581	 * Use a notification wait, which really just processes the
 582	 * command response and doesn't wait for anything, in order
 583	 * to be able to process the response and get the UID inside
 584	 * the RX path. Using CMD_WANT_SKB doesn't work because it
 585	 * stores the buffer and then wakes up this thread, by which
 586	 * time another notification (that the time event started)
 587	 * might already be processed unsuccessfully.
 588	 */
 589	iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
 590				   time_event_response,
 591				   ARRAY_SIZE(time_event_response),
 592				   iwl_mvm_time_event_response, te_data);
 593
 594	ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
 595					    sizeof(*te_cmd), te_cmd);
 596	if (ret) {
 597		IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret);
 598		iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
 599		goto out_clear_te;
 600	}
 601
 602	/* No need to wait for anything, so just pass 1 (0 isn't valid) */
 603	ret = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
 604	/* should never fail */
 605	WARN_ON_ONCE(ret);
 606
 607	if (ret) {
 608 out_clear_te:
 609		spin_lock_bh(&mvm->time_event_lock);
 610		iwl_mvm_te_clear_data(mvm, te_data);
 611		spin_unlock_bh(&mvm->time_event_lock);
 612	}
 613	return ret;
 614}
 615
 616void iwl_mvm_protect_session(struct iwl_mvm *mvm,
 617			     struct ieee80211_vif *vif,
 618			     u32 duration, u32 min_duration,
 619			     u32 max_delay, bool wait_for_notif)
 620{
 621	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 622	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
 623	const u16 te_notif_response[] = { TIME_EVENT_NOTIFICATION };
 624	struct iwl_notification_wait wait_te_notif;
 625	struct iwl_time_event_cmd time_cmd = {};
 626
 627	lockdep_assert_held(&mvm->mutex);
 628
 629	if (te_data->running &&
 630	    time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) {
 631		IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
 632			     jiffies_to_msecs(te_data->end_jiffies - jiffies));
 633		return;
 634	}
 635
 636	if (te_data->running) {
 637		IWL_DEBUG_TE(mvm, "extend 0x%x: only %u ms left\n",
 638			     te_data->uid,
 639			     jiffies_to_msecs(te_data->end_jiffies - jiffies));
 640		/*
 641		 * we don't have enough time
 642		 * cancel the current TE and issue a new one
 643		 * Of course it would be better to remove the old one only
 644		 * when the new one is added, but we don't care if we are off
 645		 * channel for a bit. All we need to do, is not to return
 646		 * before we actually begin to be on the channel.
 647		 */
 648		iwl_mvm_stop_session_protection(mvm, vif);
 649	}
 650
 651	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
 652	time_cmd.id_and_color =
 653		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
 654	time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC);
 655
 656	time_cmd.apply_time = cpu_to_le32(0);
 657
 658	time_cmd.max_frags = TE_V2_FRAG_NONE;
 659	time_cmd.max_delay = cpu_to_le32(max_delay);
  660	/* TODO: why do we need to set interval = bi if it is not periodic? */
 661	time_cmd.interval = cpu_to_le32(1);
 662	time_cmd.duration = cpu_to_le32(duration);
 663	time_cmd.repeat = 1;
 664	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
 665				      TE_V2_NOTIF_HOST_EVENT_END |
 666				      TE_V2_START_IMMEDIATELY);
 667
 668	if (!wait_for_notif) {
 669		iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
 670		return;
 671	}
 672
 673	/*
 674	 * Create notification_wait for the TIME_EVENT_NOTIFICATION to use
 675	 * right after we send the time event
 676	 */
 677	iwl_init_notification_wait(&mvm->notif_wait, &wait_te_notif,
 678				   te_notif_response,
 679				   ARRAY_SIZE(te_notif_response),
 680				   iwl_mvm_te_notif, te_data);
 681
 682	/* If TE was sent OK - wait for the notification that started */
 683	if (iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd)) {
 684		IWL_ERR(mvm, "Failed to add TE to protect session\n");
 685		iwl_remove_notification(&mvm->notif_wait, &wait_te_notif);
 686	} else if (iwl_wait_notification(&mvm->notif_wait, &wait_te_notif,
 687					 TU_TO_JIFFIES(max_delay))) {
 688		IWL_ERR(mvm, "Failed to protect session until TE\n");
 689	}
 690}
 691
 692/* Determine whether mac or link id should be used, and validate the link id */
 693static int iwl_mvm_get_session_prot_id(struct iwl_mvm *mvm,
 694				       struct ieee80211_vif *vif,
 695				       u32 link_id)
 696{
 697	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 698	int ver = iwl_fw_lookup_cmd_ver(mvm->fw,
 699					WIDE_ID(MAC_CONF_GROUP,
 700						SESSION_PROTECTION_CMD), 1);
 701
 702	if (ver < 2)
 703		return mvmvif->id;
 704
 705	if (WARN(link_id < 0 || !mvmvif->link[link_id],
 706		 "Invalid link ID for session protection: %u\n", link_id))
 707		return -EINVAL;
 708
 709	if (WARN(ieee80211_vif_is_mld(vif) &&
 710		 !(vif->active_links & BIT(link_id)),
 711		 "Session Protection on an inactive link: %u\n", link_id))
 712		return -EINVAL;
 713
 714	return mvmvif->link[link_id]->fw_link_id;
 715}
 716
 717static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
 718					      struct ieee80211_vif *vif,
 719					      u32 id, u32 link_id)
 720{
 721	int mac_link_id = iwl_mvm_get_session_prot_id(mvm, vif, link_id);
 722	struct iwl_mvm_session_prot_cmd cmd = {
 723		.id_and_color = cpu_to_le32(mac_link_id),
 724		.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
 725		.conf_id = cpu_to_le32(id),
 726	};
 727	int ret;
 728
 729	if (mac_link_id < 0)
 730		return;
 731
 732	ret = iwl_mvm_send_cmd_pdu(mvm,
 733				   WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
 734				   0, sizeof(cmd), &cmd);
 735	if (ret)
 736		IWL_ERR(mvm,
 737			"Couldn't send the SESSION_PROTECTION_CMD: %d\n", ret);
 738}
 739
 740static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
 741					struct iwl_mvm_time_event_data *te_data,
 742					u32 *uid)
 743{
 744	u32 id;
 745	struct ieee80211_vif *vif = te_data->vif;
 746	struct iwl_mvm_vif *mvmvif;
 747	enum nl80211_iftype iftype;
 748	unsigned int link_id;
 749
 750	if (!vif)
 751		return false;
 752
 753	mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
 754	iftype = te_data->vif->type;
 755
 756	/*
 757	 * It is possible that by the time we got to this point the time
 758	 * event was already removed.
 759	 */
 760	spin_lock_bh(&mvm->time_event_lock);
 761
 762	/* Save time event uid before clearing its data */
 763	*uid = te_data->uid;
 764	id = te_data->id;
 765	link_id = te_data->link_id;
 766
 767	/*
 768	 * The clear_data function handles time events that were already removed
 769	 */
 770	iwl_mvm_te_clear_data(mvm, te_data);
 771	spin_unlock_bh(&mvm->time_event_lock);
 772
 773	/* When session protection is used, the te_data->id field
 774	 * is reused to save session protection's configuration.
 775	 * For AUX ROC, HOT_SPOT_CMD is used and the te_data->id field is set
 776	 * to HOT_SPOT_CMD.
 777	 */
 778	if (fw_has_capa(&mvm->fw->ucode_capa,
 779			IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD) &&
 780	    id != HOT_SPOT_CMD) {
 781		if (mvmvif && id < SESSION_PROTECT_CONF_MAX_ID) {
 782			/* Session protection is still ongoing. Cancel it */
 783			iwl_mvm_cancel_session_protection(mvm, vif, id,
 784							  link_id);
 785			if (iftype == NL80211_IFTYPE_P2P_DEVICE) {
 786				iwl_mvm_p2p_roc_finished(mvm);
 787			}
 788		}
 789		return false;
 790	} else {
 791		/* It is possible that by the time we try to remove it, the
  792		 * time event has already ended and been removed. In such a case
 793		 * there is no need to send a removal command.
 794		 */
 795		if (id == TE_MAX) {
 796			IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", *uid);
 797			return false;
 798		}
 799	}
 800
 801	return true;
 802}
 803
 804/*
  805 * Explicit request to remove an aux ROC time event. The removal of a time
 806 * event needs to be synchronized with the flow of a time event's end
 807 * notification, which also removes the time event from the op mode
 808 * data structures.
 809 */
 810static void iwl_mvm_remove_aux_roc_te(struct iwl_mvm *mvm,
 811				      struct iwl_mvm_vif *mvmvif,
 812				      struct iwl_mvm_time_event_data *te_data)
 813{
 814	struct iwl_hs20_roc_req aux_cmd = {};
 815	u16 len = sizeof(aux_cmd) - iwl_mvm_chan_info_padding(mvm);
 816
 817	u32 uid;
 818	int ret;
 819
 820	if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
 821		return;
 822
 823	aux_cmd.event_unique_id = cpu_to_le32(uid);
 824	aux_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
 825	aux_cmd.id_and_color =
 826		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
 827	IWL_DEBUG_TE(mvm, "Removing BSS AUX ROC TE 0x%x\n",
 828		     le32_to_cpu(aux_cmd.event_unique_id));
 829	ret = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0,
 830				   len, &aux_cmd);
 831
 832	if (WARN_ON(ret))
 833		return;
 834}
 835
 836/*
 837 * Explicit request to remove a time event. The removal of a time event needs to
 838 * be synchronized with the flow of a time event's end notification, which also
 839 * removes the time event from the op mode data structures.
 840 */
 841void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
 842			       struct iwl_mvm_vif *mvmvif,
 843			       struct iwl_mvm_time_event_data *te_data)
 844{
 845	struct iwl_time_event_cmd time_cmd = {};
 846	u32 uid;
 847	int ret;
 848
 849	if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
 850		return;
 851
 852	/* When we remove a TE, the UID is to be set in the id field */
 853	time_cmd.id = cpu_to_le32(uid);
 854	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
 855	time_cmd.id_and_color =
 856		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
 857
 858	IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id));
 859	ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
 860				   sizeof(time_cmd), &time_cmd);
 861	if (ret)
 862		IWL_ERR(mvm, "Couldn't remove the time event\n");
 863}
 864
 865void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
 866				     struct ieee80211_vif *vif)
 867{
 868	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 869	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
 870	u32 id;
 871
 872	lockdep_assert_held(&mvm->mutex);
 873
 874	spin_lock_bh(&mvm->time_event_lock);
 875	id = te_data->id;
 876	spin_unlock_bh(&mvm->time_event_lock);
 877
 878	if (fw_has_capa(&mvm->fw->ucode_capa,
 879			IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
 880		if (id != SESSION_PROTECT_CONF_ASSOC) {
 881			IWL_DEBUG_TE(mvm,
 882				     "don't remove session protection id=%u\n",
 883				     id);
 884			return;
 885		}
 886	} else if (id != TE_BSS_STA_AGGRESSIVE_ASSOC) {
 887		IWL_DEBUG_TE(mvm,
 888			     "don't remove TE with id=%u (not session protection)\n",
 889			     id);
 890		return;
 891	}
 892
 893	iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
 894}
 895
 896void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
 897				      struct iwl_rx_cmd_buffer *rxb)
 898{
 899	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 900	struct iwl_mvm_session_prot_notif *notif = (void *)pkt->data;
 901	unsigned int ver =
 902		iwl_fw_lookup_cmd_ver(mvm->fw,
 903				      WIDE_ID(MAC_CONF_GROUP,
 904					      SESSION_PROTECTION_CMD), 2);
 905	int id = le32_to_cpu(notif->mac_link_id);
 906	struct ieee80211_vif *vif;
 907	struct iwl_mvm_vif *mvmvif;
 908	unsigned int notif_link_id;
 909
 910	rcu_read_lock();
 911
 912	if (ver <= 2) {
 913		vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true);
 914	} else {
 915		struct ieee80211_bss_conf *link_conf =
 916			iwl_mvm_rcu_fw_link_id_to_link_conf(mvm, id, true);
 917
 918		if (!link_conf)
 919			goto out_unlock;
 920
 921		notif_link_id = link_conf->link_id;
 922		vif = link_conf->vif;
 923	}
 924
 925	if (!vif)
 926		goto out_unlock;
 927
 928	mvmvif = iwl_mvm_vif_from_mac80211(vif);
 929
 930	if (WARN(ver > 2 && mvmvif->time_event_data.link_id >= 0 &&
 931		 mvmvif->time_event_data.link_id != notif_link_id,
  932		 "SESSION_PROTECTION_NOTIF was received for link %u, while the current time event is on link %u\n",
 933		 notif_link_id, mvmvif->time_event_data.link_id))
 934		goto out_unlock;
 935
 936	/* The vif is not a P2P_DEVICE, maintain its time_event_data */
 937	if (vif->type != NL80211_IFTYPE_P2P_DEVICE) {
 938		struct iwl_mvm_time_event_data *te_data =
 939			&mvmvif->time_event_data;
 940
 941		if (!le32_to_cpu(notif->status)) {
 942			iwl_mvm_te_check_disconnect(mvm, vif,
 943						    "Session protection failure");
 944			spin_lock_bh(&mvm->time_event_lock);
 945			iwl_mvm_te_clear_data(mvm, te_data);
 946			spin_unlock_bh(&mvm->time_event_lock);
 947		}
 948
 949		if (le32_to_cpu(notif->start)) {
 950			spin_lock_bh(&mvm->time_event_lock);
 951			te_data->running = le32_to_cpu(notif->start);
 952			te_data->end_jiffies =
 953				TU_TO_EXP_TIME(te_data->duration);
 954			spin_unlock_bh(&mvm->time_event_lock);
 955		} else {
 956			/*
 957			 * By now, we should have finished association
 958			 * and know the dtim period.
 959			 */
 960			iwl_mvm_te_check_disconnect(mvm, vif,
 961						    !vif->cfg.assoc ?
 962						    "Not associated and the session protection is over already..." :
 963						    "No beacon heard and the session protection is over already...");
 964			spin_lock_bh(&mvm->time_event_lock);
 965			iwl_mvm_te_clear_data(mvm, te_data);
 966			spin_unlock_bh(&mvm->time_event_lock);
 967		}
 968
 969		goto out_unlock;
 970	}
 971
 972	if (!le32_to_cpu(notif->status) || !le32_to_cpu(notif->start)) {
 973		/* End TE, notify mac80211 */
 974		mvmvif->time_event_data.id = SESSION_PROTECT_CONF_MAX_ID;
 975		mvmvif->time_event_data.link_id = -1;
 976		iwl_mvm_p2p_roc_finished(mvm);
 977		ieee80211_remain_on_channel_expired(mvm->hw);
 978	} else if (le32_to_cpu(notif->start)) {
 979		if (WARN_ON(mvmvif->time_event_data.id !=
 980				le32_to_cpu(notif->conf_id)))
 981			goto out_unlock;
 982		set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
 983		ieee80211_ready_on_channel(mvm->hw); /* Start TE */
 984	}
 985
 986 out_unlock:
 987	rcu_read_unlock();
 988}
 989
 990static int
 991iwl_mvm_start_p2p_roc_session_protection(struct iwl_mvm *mvm,
 992					 struct ieee80211_vif *vif,
 993					 int duration,
 994					 enum ieee80211_roc_type type)
 995{
 996	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 997	struct iwl_mvm_session_prot_cmd cmd = {
 998		.id_and_color =
 999			cpu_to_le32(iwl_mvm_get_session_prot_id(mvm, vif, 0)),
1000		.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
1001		.duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
1002	};
1003
1004	lockdep_assert_held(&mvm->mutex);
1005
1006	/* The time_event_data.id field is reused to save session
1007	 * protection's configuration.
1008	 */
1009
1010	mvmvif->time_event_data.link_id = 0;
1011
1012	switch (type) {
1013	case IEEE80211_ROC_TYPE_NORMAL:
1014		mvmvif->time_event_data.id =
1015			SESSION_PROTECT_CONF_P2P_DEVICE_DISCOV;
1016		break;
1017	case IEEE80211_ROC_TYPE_MGMT_TX:
1018		mvmvif->time_event_data.id =
1019			SESSION_PROTECT_CONF_P2P_GO_NEGOTIATION;
1020		break;
1021	default:
1022		WARN_ONCE(1, "Got an invalid ROC type\n");
1023		return -EINVAL;
1024	}
1025
1026	cmd.conf_id = cpu_to_le32(mvmvif->time_event_data.id);
1027	return iwl_mvm_send_cmd_pdu(mvm,
1028				    WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
1029				    0, sizeof(cmd), &cmd);
1030}
1031
1032int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1033			  int duration, enum ieee80211_roc_type type)
1034{
1035	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1036	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
1037	struct iwl_time_event_cmd time_cmd = {};
1038
1039	lockdep_assert_held(&mvm->mutex);
1040	if (te_data->running) {
1041		IWL_WARN(mvm, "P2P_DEVICE remain on channel already running\n");
1042		return -EBUSY;
1043	}
1044
1045	if (fw_has_capa(&mvm->fw->ucode_capa,
1046			IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
1047		return iwl_mvm_start_p2p_roc_session_protection(mvm, vif,
1048								duration,
1049								type);
1050
1051	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
1052	time_cmd.id_and_color =
1053		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
1054
1055	switch (type) {
1056	case IEEE80211_ROC_TYPE_NORMAL:
1057		time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_NORMAL);
1058		break;
1059	case IEEE80211_ROC_TYPE_MGMT_TX:
1060		time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_MGMT_TX);
1061		break;
1062	default:
1063		WARN_ONCE(1, "Got an invalid ROC type\n");
1064		return -EINVAL;
1065	}
1066
1067	time_cmd.apply_time = cpu_to_le32(0);
1068	time_cmd.interval = cpu_to_le32(1);
1069
1070	/*
1071	 * The P2P Device TEs can have lower priority than other events
 1072	 * that are being scheduled by the driver/fw, and thus they might not be
 1073	 * scheduled. To improve the chances of them being scheduled, allow them
1074	 * to be fragmented, and in addition allow them to be delayed.
1075	 */
1076	time_cmd.max_frags = min(MSEC_TO_TU(duration)/50, TE_V2_FRAG_ENDLESS);
1077	time_cmd.max_delay = cpu_to_le32(MSEC_TO_TU(duration/2));
1078	time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration));
1079	time_cmd.repeat = 1;
1080	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
1081				      TE_V2_NOTIF_HOST_EVENT_END |
1082				      TE_V2_START_IMMEDIATELY);
1083
1084	return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
1085}
1086
1087static struct iwl_mvm_time_event_data *iwl_mvm_get_roc_te(struct iwl_mvm *mvm)
1088{
1089	struct iwl_mvm_time_event_data *te_data;
1090
1091	lockdep_assert_held(&mvm->mutex);
1092
1093	spin_lock_bh(&mvm->time_event_lock);
1094
1095	/*
1096	 * Iterate over the list of time events and find the time event that is
1097	 * associated with a P2P_DEVICE interface.
1098	 * This assumes that a P2P_DEVICE interface can have only a single time
 1099	 * event at any given time and this time event corresponds to a ROC
 1100	 * request.
1101	 */
1102	list_for_each_entry(te_data, &mvm->time_event_list, list) {
1103		if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE)
1104			goto out;
1105	}
1106
 1107	/* There can be at most one AUX ROC time event; we just use the
1108	 * list to simplify/unify code. Remove it if it exists.
1109	 */
1110	te_data = list_first_entry_or_null(&mvm->aux_roc_te_list,
1111					   struct iwl_mvm_time_event_data,
1112					   list);
1113out:
1114	spin_unlock_bh(&mvm->time_event_lock);
1115	return te_data;
1116}
1117
1118void iwl_mvm_cleanup_roc_te(struct iwl_mvm *mvm)
1119{
1120	struct iwl_mvm_time_event_data *te_data;
1121	u32 uid;
1122
1123	te_data = iwl_mvm_get_roc_te(mvm);
1124	if (te_data)
1125		__iwl_mvm_remove_time_event(mvm, te_data, &uid);
1126}
1127
1128static void iwl_mvm_roc_rm_cmd(struct iwl_mvm *mvm, u32 activity)
1129{
1130	int ret;
1131	struct iwl_roc_req roc_cmd = {
1132		.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
1133		.activity = cpu_to_le32(activity),
1134	};
1135
1136	lockdep_assert_held(&mvm->mutex);
1137	ret = iwl_mvm_send_cmd_pdu(mvm,
1138				   WIDE_ID(MAC_CONF_GROUP, ROC_CMD),
1139				   0, sizeof(roc_cmd), &roc_cmd);
1140	WARN_ON(ret);
1141}
1142
1143static void iwl_mvm_roc_station_remove(struct iwl_mvm *mvm,
1144				       struct iwl_mvm_vif *mvmvif)
1145{
1146	u32 cmd_id = WIDE_ID(MAC_CONF_GROUP, ROC_CMD);
1147	u8 fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
1148					  IWL_FW_CMD_VER_UNKNOWN);
1149
1150	if (fw_ver == IWL_FW_CMD_VER_UNKNOWN)
1151		iwl_mvm_remove_aux_roc_te(mvm, mvmvif,
1152					  &mvmvif->hs_time_event_data);
1153	else if (fw_ver == 3)
1154		iwl_mvm_roc_rm_cmd(mvm, ROC_ACTIVITY_HOTSPOT);
1155	else
1156		IWL_ERR(mvm, "ROC command version %d mismatch!\n", fw_ver);
1157}
1158
1159void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1160{
1161	struct iwl_mvm_vif *mvmvif;
1162	struct iwl_mvm_time_event_data *te_data;
1163
1164	if (fw_has_capa(&mvm->fw->ucode_capa,
1165			IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
1166		mvmvif = iwl_mvm_vif_from_mac80211(vif);
1167
1168		if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1169			iwl_mvm_cancel_session_protection(mvm, vif,
1170							  mvmvif->time_event_data.id,
1171							  mvmvif->time_event_data.link_id);
1172			iwl_mvm_p2p_roc_finished(mvm);
1173		} else {
1174			iwl_mvm_roc_station_remove(mvm, mvmvif);
1175			iwl_mvm_roc_finished(mvm);
1176		}
1177
1178		return;
1179	}
1180
1181	te_data = iwl_mvm_get_roc_te(mvm);
1182	if (!te_data) {
1183		IWL_WARN(mvm, "No remain on channel event\n");
1184		return;
1185	}
1186
1187	mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
1188
1189	if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1190		iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
1191		iwl_mvm_p2p_roc_finished(mvm);
1192	} else {
1193		iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data);
1194		iwl_mvm_roc_finished(mvm);
1195	}
1196}
1197
1198void iwl_mvm_remove_csa_period(struct iwl_mvm *mvm,
1199			       struct ieee80211_vif *vif)
1200{
1201	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1202	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
1203	u32 id;
1204
1205	lockdep_assert_held(&mvm->mutex);
1206
1207	spin_lock_bh(&mvm->time_event_lock);
1208	id = te_data->id;
1209	spin_unlock_bh(&mvm->time_event_lock);
1210
1211	if (id != TE_CHANNEL_SWITCH_PERIOD)
1212		return;
1213
1214	iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
1215}
1216
1217int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
1218				struct ieee80211_vif *vif,
1219				u32 duration, u32 apply_time)
1220{
1221	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1222	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
1223	struct iwl_time_event_cmd time_cmd = {};
1224
1225	lockdep_assert_held(&mvm->mutex);
1226
1227	if (te_data->running) {
1228		u32 id;
1229
1230		spin_lock_bh(&mvm->time_event_lock);
1231		id = te_data->id;
1232		spin_unlock_bh(&mvm->time_event_lock);
1233
1234		if (id == TE_CHANNEL_SWITCH_PERIOD) {
1235			IWL_DEBUG_TE(mvm, "CS period is already scheduled\n");
1236			return -EBUSY;
1237		}
1238
1239		/*
1240		 * Remove the session protection time event to allow the
1241		 * channel switch. If we got here, we just heard a beacon so
1242		 * the session protection is not needed anymore anyway.
1243		 */
1244		iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
1245	}
1246
1247	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
1248	time_cmd.id_and_color =
1249		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
1250	time_cmd.id = cpu_to_le32(TE_CHANNEL_SWITCH_PERIOD);
1251	time_cmd.apply_time = cpu_to_le32(apply_time);
1252	time_cmd.max_frags = TE_V2_FRAG_NONE;
1253	time_cmd.duration = cpu_to_le32(duration);
1254	time_cmd.repeat = 1;
1255	time_cmd.interval = cpu_to_le32(1);
1256	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
1257				      TE_V2_ABSENCE);
1258	if (!apply_time)
1259		time_cmd.policy |= cpu_to_le16(TE_V2_START_IMMEDIATELY);
1260
1261	return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
1262}
1263
1264static bool iwl_mvm_session_prot_notif(struct iwl_notif_wait_data *notif_wait,
1265				       struct iwl_rx_packet *pkt, void *data)
1266{
1267	struct iwl_mvm *mvm =
1268		container_of(notif_wait, struct iwl_mvm, notif_wait);
1269	struct iwl_mvm_session_prot_notif *resp;
1270	int resp_len = iwl_rx_packet_payload_len(pkt);
1271
1272	if (WARN_ON(pkt->hdr.cmd != SESSION_PROTECTION_NOTIF ||
1273		    pkt->hdr.group_id != MAC_CONF_GROUP))
1274		return true;
1275
1276	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
1277		IWL_ERR(mvm, "Invalid SESSION_PROTECTION_NOTIF response\n");
1278		return true;
1279	}
1280
1281	resp = (void *)pkt->data;
1282
1283	if (!resp->status)
1284		IWL_ERR(mvm,
1285			"TIME_EVENT_NOTIFICATION received but not executed\n");
1286
1287	return true;
1288}
1289
1290void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
1291					 struct ieee80211_vif *vif,
1292					 u32 duration, u32 min_duration,
1293					 bool wait_for_notif,
1294					 unsigned int link_id)
1295{
1296	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1297	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
1298	const u16 notif[] = { WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF) };
1299	struct iwl_notification_wait wait_notif;
1300	int mac_link_id = iwl_mvm_get_session_prot_id(mvm, vif, link_id);
1301	struct iwl_mvm_session_prot_cmd cmd = {
1302		.id_and_color = cpu_to_le32(mac_link_id),
1303		.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
1304		.conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC),
1305		.duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
1306	};
1307
1308	if (mac_link_id < 0)
1309		return;
1310
1311	lockdep_assert_held(&mvm->mutex);
1312
1313	spin_lock_bh(&mvm->time_event_lock);
1314	if (te_data->running && te_data->link_id == link_id &&
1315	    time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) {
1316		IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
1317			     jiffies_to_msecs(te_data->end_jiffies - jiffies));
1318		spin_unlock_bh(&mvm->time_event_lock);
1319
1320		return;
1321	}
1322
1323	iwl_mvm_te_clear_data(mvm, te_data);
1324	/*
1325	 * The time_event_data.id field is reused to save session
1326	 * protection's configuration.
1327	 */
1328	te_data->id = le32_to_cpu(cmd.conf_id);
1329	te_data->duration = le32_to_cpu(cmd.duration_tu);
1330	te_data->vif = vif;
1331	te_data->link_id = link_id;
1332	spin_unlock_bh(&mvm->time_event_lock);
1333
1334	IWL_DEBUG_TE(mvm, "Add new session protection, duration %d TU\n",
1335		     le32_to_cpu(cmd.duration_tu));
1336
1337	if (!wait_for_notif) {
1338		if (iwl_mvm_send_cmd_pdu(mvm,
1339					 WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
1340					 0, sizeof(cmd), &cmd)) {
1341			goto send_cmd_err;
1342		}
1343
1344		return;
1345	}
1346
1347	iwl_init_notification_wait(&mvm->notif_wait, &wait_notif,
1348				   notif, ARRAY_SIZE(notif),
1349				   iwl_mvm_session_prot_notif, NULL);
1350
1351	if (iwl_mvm_send_cmd_pdu(mvm,
1352				 WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
1353				 0, sizeof(cmd), &cmd)) {
1354		iwl_remove_notification(&mvm->notif_wait, &wait_notif);
1355		goto send_cmd_err;
1356	} else if (iwl_wait_notification(&mvm->notif_wait, &wait_notif,
1357					 TU_TO_JIFFIES(100))) {
1358		IWL_ERR(mvm,
1359			"Failed to protect session until session protection\n");
1360	}
1361	return;
1362
1363send_cmd_err:
1364	IWL_ERR(mvm,
1365		"Couldn't send the SESSION_PROTECTION_CMD\n");
1366	spin_lock_bh(&mvm->time_event_lock);
1367	iwl_mvm_te_clear_data(mvm, te_data);
1368	spin_unlock_bh(&mvm->time_event_lock);
1369}
v4.17
  1/******************************************************************************
  2 *
  3 * This file is provided under a dual BSD/GPLv2 license.  When using or
  4 * redistributing this file, you may do so under either license.
  5 *
  6 * GPL LICENSE SUMMARY
  7 *
  8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 10 * Copyright(c) 2017 Intel Deutschland GmbH
 11 * Copyright(c) 2018 Intel Corporation
 12 *
 13 * This program is free software; you can redistribute it and/or modify
 14 * it under the terms of version 2 of the GNU General Public License as
 15 * published by the Free Software Foundation.
 16 *
 17 * This program is distributed in the hope that it will be useful, but
 18 * WITHOUT ANY WARRANTY; without even the implied warranty of
 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 20 * General Public License for more details.
 21 *
 22 * The full GNU General Public License is included in this distribution
 23 * in the file called COPYING.
 24 *
 25 * Contact Information:
 26 *  Intel Linux Wireless <linuxwifi@intel.com>
 27 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 28 *
 29 * BSD LICENSE
 30 *
 31 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 32 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 33 * Copyright(c) 2017 Intel Deutschland GmbH
 34 * Copyright(c) 2018 Intel Corporation
 35 * All rights reserved.
 36 *
 37 * Redistribution and use in source and binary forms, with or without
 38 * modification, are permitted provided that the following conditions
 39 * are met:
 40 *
 41 *  * Redistributions of source code must retain the above copyright
 42 *    notice, this list of conditions and the following disclaimer.
 43 *  * Redistributions in binary form must reproduce the above copyright
 44 *    notice, this list of conditions and the following disclaimer in
 45 *    the documentation and/or other materials provided with the
 46 *    distribution.
 47 *  * Neither the name Intel Corporation nor the names of its
 48 *    contributors may be used to endorse or promote products derived
 49 *    from this software without specific prior written permission.
 50 *
 51 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 52 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 53 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 54 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 55 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 56 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 57 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 58 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 59 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 60 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 61 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 62 *
 63 *****************************************************************************/
 64
 65#include <linux/jiffies.h>
 66#include <net/mac80211.h>
 67
 68#include "fw/notif-wait.h"
 69#include "iwl-trans.h"
 70#include "fw-api.h"
 71#include "time-event.h"
 72#include "mvm.h"
 73#include "iwl-io.h"
 74#include "iwl-prph.h"
 75
 76/*
 77 * For the high priority TE use a time event type that has similar priority to
 78 * the FW's action scan priority.
 79 */
 80#define IWL_MVM_ROC_TE_TYPE_NORMAL TE_P2P_DEVICE_DISCOVERABLE
 81#define IWL_MVM_ROC_TE_TYPE_MGMT_TX TE_P2P_CLIENT_ASSOC
 82
 83void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
 84			   struct iwl_mvm_time_event_data *te_data)
 85{
 86	lockdep_assert_held(&mvm->time_event_lock);
 87
 88	if (!te_data->vif)
 89		return;
 90
 91	list_del(&te_data->list);
 
 
 
 
 
 
 
 92	te_data->running = false;
 93	te_data->uid = 0;
 94	te_data->id = TE_MAX;
 95	te_data->vif = NULL;
 
 96}
 97
 98void iwl_mvm_roc_done_wk(struct work_struct *wk)
 99{
100	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);
101
102	/*
103	 * Clear the ROC_RUNNING /ROC_AUX_RUNNING status bit.
104	 * This will cause the TX path to drop offchannel transmissions.
105	 * That would also be done by mac80211, but it is racy, in particular
106	 * in the case that the time event actually completed in the firmware
107	 * (which is handled in iwl_mvm_te_handle_notif).
108	 */
109	if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status))
110		iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);
111	if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
112		iwl_mvm_unref(mvm, IWL_MVM_REF_ROC_AUX);
113
114	synchronize_net();
115
116	/*
117	 * Flush the offchannel queue -- this is called when the time
118	 * event finishes or is canceled, so that frames queued for it
119	 * won't get stuck on the queue and be transmitted in the next
120	 * time event.
121	 * We have to send the command asynchronously since this cannot
122	 * be under the mutex for locking reasons, but that's not an
123	 * issue as it will have to complete before the next command is
124	 * executed, and a new time event means a new command.
125	 */
126	iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true, CMD_ASYNC);
127
128	/* Do the same for the P2P device queue (STA) */
129	if (test_and_clear_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status)) {
130		struct iwl_mvm_vif *mvmvif;
131
132		/*
133		 * NB: access to this pointer would be racy, but the flush bit
134		 * can only be set when we had a P2P-Device VIF, and we have a
135		 * flush of this work in iwl_mvm_prepare_mac_removal() so it's
136		 * not really racy.
137		 */
138
139		if (!WARN_ON(!mvm->p2p_device_vif)) {
140			mvmvif = iwl_mvm_vif_from_mac80211(mvm->p2p_device_vif);
141			iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true,
142					  CMD_ASYNC);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
143		}
144	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
145}
146
147static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
148{
149	/*
150	 * Of course, our status bit is just as racy as mac80211, so in
151	 * addition, fire off the work struct which will drop all frames
152	 * from the hardware queues that made it through the race. First
153	 * it will of course synchronize the TX path to make sure that
154	 * any *new* TX will be rejected.
155	 */
156	schedule_work(&mvm->roc_done_wk);
157}
158
159static void iwl_mvm_csa_noa_start(struct iwl_mvm *mvm)
160{
161	struct ieee80211_vif *csa_vif;
162
163	rcu_read_lock();
164
165	csa_vif = rcu_dereference(mvm->csa_vif);
166	if (!csa_vif || !csa_vif->csa_active)
167		goto out_unlock;
168
169	IWL_DEBUG_TE(mvm, "CSA NOA started\n");
170
171	/*
172	 * CSA NoA is started but we still have beacons to
173	 * transmit on the current channel.
174	 * So we just do nothing here and the switch
175	 * will be performed on the last TBTT.
176	 */
177	if (!ieee80211_csa_is_complete(csa_vif)) {
178		IWL_WARN(mvm, "CSA NOA started too early\n");
179		goto out_unlock;
180	}
181
182	ieee80211_csa_finish(csa_vif);
183
184	rcu_read_unlock();
185
186	RCU_INIT_POINTER(mvm->csa_vif, NULL);
187
188	return;
189
190out_unlock:
191	rcu_read_unlock();
192}
193
194static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
195					struct ieee80211_vif *vif,
196					const char *errmsg)
197{
198	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
199
200	if (vif->type != NL80211_IFTYPE_STATION)
201		return false;
202
203	if (!mvmvif->csa_bcn_pending && vif->bss_conf.assoc &&
204	    vif->bss_conf.dtim_period)
205		return false;
206	if (errmsg)
207		IWL_ERR(mvm, "%s\n", errmsg);
208
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
209	iwl_mvm_connection_loss(mvm, vif, errmsg);
210	return true;
211}
212
213static void
214iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm,
215			     struct iwl_mvm_time_event_data *te_data,
216			     struct iwl_time_event_notif *notif)
217{
218	struct ieee80211_vif *vif = te_data->vif;
219	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
220
221	if (!notif->status)
222		IWL_DEBUG_TE(mvm, "CSA time event failed to start\n");
223
224	switch (te_data->vif->type) {
225	case NL80211_IFTYPE_AP:
226		if (!notif->status)
227			mvmvif->csa_failed = true;
228		iwl_mvm_csa_noa_start(mvm);
229		break;
230	case NL80211_IFTYPE_STATION:
231		if (!notif->status) {
232			iwl_mvm_connection_loss(mvm, vif,
233						"CSA TE failed to start");
234			break;
235		}
236		iwl_mvm_csa_client_absent(mvm, te_data->vif);
237		ieee80211_chswitch_done(te_data->vif, true);
 
238		break;
239	default:
240		/* should never happen */
241		WARN_ON_ONCE(1);
242		break;
243	}
244
245	/* we don't need it anymore */
246	iwl_mvm_te_clear_data(mvm, te_data);
247}
248
249static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm,
250				     struct iwl_time_event_notif *notif,
251				     struct iwl_mvm_time_event_data *te_data)
252{
253	struct iwl_fw_dbg_trigger_tlv *trig;
254	struct iwl_fw_dbg_trigger_time_event *te_trig;
255	int i;
256
257	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT))
 
 
 
258		return;
259
260	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT);
261	te_trig = (void *)trig->data;
262
263	if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
264					   ieee80211_vif_to_wdev(te_data->vif),
265					   trig))
266		return;
267
268	for (i = 0; i < ARRAY_SIZE(te_trig->time_events); i++) {
269		u32 trig_te_id = le32_to_cpu(te_trig->time_events[i].id);
270		u32 trig_action_bitmap =
271			le32_to_cpu(te_trig->time_events[i].action_bitmap);
272		u32 trig_status_bitmap =
273			le32_to_cpu(te_trig->time_events[i].status_bitmap);
274
275		if (trig_te_id != te_data->id ||
276		    !(trig_action_bitmap & le32_to_cpu(notif->action)) ||
277		    !(trig_status_bitmap & BIT(le32_to_cpu(notif->status))))
278			continue;
279
280		iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
281					"Time event %d Action 0x%x received status: %d",
282					te_data->id,
283					le32_to_cpu(notif->action),
284					le32_to_cpu(notif->status));
285		break;
286	}
287}
288
 
 
 
 
 
 
 
 
 
 
 
 
289/*
290 * Handles a FW notification for an event that is known to the driver.
291 *
292 * @mvm: the mvm component
293 * @te_data: the time event data
294 * @notif: the notification data corresponding the time event data.
295 */
296static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
297				    struct iwl_mvm_time_event_data *te_data,
298				    struct iwl_time_event_notif *notif)
299{
300	lockdep_assert_held(&mvm->time_event_lock);
301
302	IWL_DEBUG_TE(mvm, "Handle time event notif - UID = 0x%x action %d\n",
303		     le32_to_cpu(notif->unique_id),
304		     le32_to_cpu(notif->action));
305
306	iwl_mvm_te_check_trigger(mvm, notif, te_data);
307
308	/*
309	 * The FW sends the start/end time event notifications even for events
310	 * that it fails to schedule. This is indicated in the status field of
311	 * the notification. This happens in cases where the scheduler cannot
312	 * find a schedule that can handle the event (for example requesting
313	 * P2P Device discoverability while there are other higher priority
314	 * events in the system).
315	 */
316	if (!le32_to_cpu(notif->status)) {
317		const char *msg;
318
319		if (notif->action & cpu_to_le32(TE_V2_NOTIF_HOST_EVENT_START))
320			msg = "Time Event start notification failure";
321		else
322			msg = "Time Event end notification failure";
323
324		IWL_DEBUG_TE(mvm, "%s\n", msg);
325
326		if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, msg)) {
327			iwl_mvm_te_clear_data(mvm, te_data);
328			return;
329		}
330	}
331
332	if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_END) {
333		IWL_DEBUG_TE(mvm,
334			     "TE ended - current time %lu, estimated end %lu\n",
335			     jiffies, te_data->end_jiffies);
336
337		switch (te_data->vif->type) {
338		case NL80211_IFTYPE_P2P_DEVICE:
339			ieee80211_remain_on_channel_expired(mvm->hw);
340			iwl_mvm_roc_finished(mvm);
341			break;
342		case NL80211_IFTYPE_STATION:
343			/*
344			 * By now, we should have finished association
345			 * and know the dtim period.
346			 */
347			iwl_mvm_te_check_disconnect(mvm, te_data->vif,
348				"No beacon heard and the time event is over already...");
349			break;
350		default:
351			break;
352		}
353
354		iwl_mvm_te_clear_data(mvm, te_data);
355	} else if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_START) {
356		te_data->running = true;
357		te_data->end_jiffies = TU_TO_EXP_TIME(te_data->duration);
358
359		if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
360			set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
361			iwl_mvm_ref(mvm, IWL_MVM_REF_ROC);
362			ieee80211_ready_on_channel(mvm->hw);
363		} else if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) {
364			iwl_mvm_te_handle_notify_csa(mvm, te_data, notif);
365		}
366	} else {
367		IWL_WARN(mvm, "Got TE with unknown action\n");
368	}
369}
370
371/*
372 * Handle an Aux ROC time event
373 */
374static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
375					   struct iwl_time_event_notif *notif)
376{
377	struct iwl_mvm_time_event_data *te_data, *tmp;
378	bool aux_roc_te = false;
379
380	list_for_each_entry_safe(te_data, tmp, &mvm->aux_roc_te_list, list) {
381		if (le32_to_cpu(notif->unique_id) == te_data->uid) {
382			aux_roc_te = true;
383			break;
384		}
385	}
386	if (!aux_roc_te) /* Not an Aux ROC time event */
387		return -EINVAL;
388
389	iwl_mvm_te_check_trigger(mvm, notif, te_data);
390
391	IWL_DEBUG_TE(mvm,
392		     "Aux ROC time event notification  - UID = 0x%x action %d (error = %d)\n",
393		     le32_to_cpu(notif->unique_id),
394		     le32_to_cpu(notif->action), le32_to_cpu(notif->status));
395
396	if (!le32_to_cpu(notif->status) ||
397	    le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_END) {
398		/* End TE, notify mac80211 */
399		ieee80211_remain_on_channel_expired(mvm->hw);
400		iwl_mvm_roc_finished(mvm); /* flush aux queue */
401		list_del(&te_data->list); /* remove from list */
402		te_data->running = false;
403		te_data->vif = NULL;
404		te_data->uid = 0;
405		te_data->id = TE_MAX;
406	} else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) {
407		set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
408		te_data->running = true;
409		iwl_mvm_ref(mvm, IWL_MVM_REF_ROC_AUX);
410		ieee80211_ready_on_channel(mvm->hw); /* Start TE */
411	} else {
412		IWL_DEBUG_TE(mvm,
413			     "ERROR: Unknown Aux ROC Time Event (action = %d)\n",
414			     le32_to_cpu(notif->action));
415		return -EINVAL;
416	}
417
418	return 0;
419}
420
421/*
422 * The Rx handler for time event notifications
423 */
424void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
425				 struct iwl_rx_cmd_buffer *rxb)
426{
427	struct iwl_rx_packet *pkt = rxb_addr(rxb);
428	struct iwl_time_event_notif *notif = (void *)pkt->data;
429	struct iwl_mvm_time_event_data *te_data, *tmp;
430
431	IWL_DEBUG_TE(mvm, "Time event notification - UID = 0x%x action %d\n",
432		     le32_to_cpu(notif->unique_id),
433		     le32_to_cpu(notif->action));
434
435	spin_lock_bh(&mvm->time_event_lock);
436	/* This time event is triggered for an Aux ROC request */
437	if (!iwl_mvm_aux_roc_te_handle_notif(mvm, notif))
438		goto unlock;
439
440	list_for_each_entry_safe(te_data, tmp, &mvm->time_event_list, list) {
441		if (le32_to_cpu(notif->unique_id) == te_data->uid)
442			iwl_mvm_te_handle_notif(mvm, te_data, notif);
443	}
444unlock:
445	spin_unlock_bh(&mvm->time_event_lock);
446}
447
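/*
 * Notification-wait callback for TIME_EVENT_NOTIFICATION: matches the
 * notification against the UID of the time event we just added and
 * complains if the firmware failed to actually start it.
 */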
448static bool iwl_mvm_te_notif(struct iwl_notif_wait_data *notif_wait,
449			     struct iwl_rx_packet *pkt, void *data)
450{
451	struct iwl_mvm *mvm =
452		container_of(notif_wait, struct iwl_mvm, notif_wait);
453	struct iwl_mvm_time_event_data *te_data = data;
454	struct iwl_time_event_notif *resp;
455	int resp_len = iwl_rx_packet_payload_len(pkt);
456
457	if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_NOTIFICATION))
458		return true;
459
460	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
461		IWL_ERR(mvm, "Invalid TIME_EVENT_NOTIFICATION response\n");
462		return true;
463	}
464
465	resp = (void *)pkt->data;
466
467	/* te_data->uid is already set in the TIME_EVENT_CMD response */
468	if (le32_to_cpu(resp->unique_id) != te_data->uid)
469		return false;
470
471	IWL_DEBUG_TE(mvm, "TIME_EVENT_NOTIFICATION response - UID = 0x%x\n",
472		     te_data->uid);
473	if (!resp->status)
474		IWL_ERR(mvm,
475			"TIME_EVENT_NOTIFICATION received but not executed\n");
476
477	return true;
478}
479
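/*
 * Notification-wait callback for the TIME_EVENT_CMD response: stores the
 * unique ID that the firmware assigned to the newly added time event.
 */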
480static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait,
481					struct iwl_rx_packet *pkt, void *data)
482{
483	struct iwl_mvm *mvm =
484		container_of(notif_wait, struct iwl_mvm, notif_wait);
485	struct iwl_mvm_time_event_data *te_data = data;
486	struct iwl_time_event_resp *resp;
487	int resp_len = iwl_rx_packet_payload_len(pkt);
488
489	if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_CMD))
490		return true;
491
492	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
493		IWL_ERR(mvm, "Invalid TIME_EVENT_CMD response\n");
494		return true;
495	}
496
497	resp = (void *)pkt->data;
498
499	/* we should never get a response to another TIME_EVENT_CMD here */
500	if (WARN_ON_ONCE(le32_to_cpu(resp->id) != te_data->id))
501		return false;
502
503	te_data->uid = le32_to_cpu(resp->unique_id);
504	IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
505		     te_data->uid);
506	return true;
507}
508
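/*
 * Add the time event to the op mode list and send TIME_EVENT_CMD to the
 * firmware, capturing the assigned UID from the command response via a
 * notification wait. On failure the time event data is cleared again.
 */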
509static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
510				       struct ieee80211_vif *vif,
511				       struct iwl_mvm_time_event_data *te_data,
512				       struct iwl_time_event_cmd *te_cmd)
513{
514	static const u16 time_event_response[] = { TIME_EVENT_CMD };
515	struct iwl_notification_wait wait_time_event;
516	int ret;
517
518	lockdep_assert_held(&mvm->mutex);
519
520	IWL_DEBUG_TE(mvm, "Add new TE, duration %d TU\n",
521		     le32_to_cpu(te_cmd->duration));
522
523	spin_lock_bh(&mvm->time_event_lock);
524	if (WARN_ON(te_data->id != TE_MAX)) {
525		spin_unlock_bh(&mvm->time_event_lock);
526		return -EIO;
527	}
528	te_data->vif = vif;
529	te_data->duration = le32_to_cpu(te_cmd->duration);
530	te_data->id = le32_to_cpu(te_cmd->id);
531	list_add_tail(&te_data->list, &mvm->time_event_list);
532	spin_unlock_bh(&mvm->time_event_lock);
533
534	/*
535	 * Use a notification wait, which really just processes the
536	 * command response and doesn't wait for anything, in order
537	 * to be able to process the response and get the UID inside
538	 * the RX path. Using CMD_WANT_SKB doesn't work because it
539	 * stores the buffer and then wakes up this thread, by which
540	 * time another notification (that the time event started)
541	 * might already be processed unsuccessfully.
542	 */
543	iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
544				   time_event_response,
545				   ARRAY_SIZE(time_event_response),
546				   iwl_mvm_time_event_response, te_data);
547
548	ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
549					    sizeof(*te_cmd), te_cmd);
550	if (ret) {
551		IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret);
552		iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
553		goto out_clear_te;
554	}
555
556	/* No need to wait for anything, so just pass 1 (0 isn't valid) */
557	ret = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
558	/* should never fail */
559	WARN_ON_ONCE(ret);
560
561	if (ret) {
562 out_clear_te:
563		spin_lock_bh(&mvm->time_event_lock);
564		iwl_mvm_te_clear_data(mvm, te_data);
565		spin_unlock_bh(&mvm->time_event_lock);
566	}
567	return ret;
568}
569
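/*
 * Schedule a TE_BSS_STA_AGGRESSIVE_ASSOC time event to keep the device on
 * the BSS channel (e.g. during association). If a previous session
 * protection is still running but does not cover min_duration, it is
 * removed and a new one is added. When wait_for_notif is set, also wait
 * until the firmware reports that the time event actually started.
 */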
570void iwl_mvm_protect_session(struct iwl_mvm *mvm,
571			     struct ieee80211_vif *vif,
572			     u32 duration, u32 min_duration,
573			     u32 max_delay, bool wait_for_notif)
574{
575	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
576	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
577	const u16 te_notif_response[] = { TIME_EVENT_NOTIFICATION };
578	struct iwl_notification_wait wait_te_notif;
579	struct iwl_time_event_cmd time_cmd = {};
580
581	lockdep_assert_held(&mvm->mutex);
582
583	if (te_data->running &&
584	    time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) {
585		IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
586			     jiffies_to_msecs(te_data->end_jiffies - jiffies));
587		return;
588	}
589
590	if (te_data->running) {
591		IWL_DEBUG_TE(mvm, "extend 0x%x: only %u ms left\n",
592			     te_data->uid,
593			     jiffies_to_msecs(te_data->end_jiffies - jiffies));
594		/*
595		 * We don't have enough time;
596		 * cancel the current TE and issue a new one.
597		 * Of course it would be better to remove the old one only
598		 * when the new one is added, but we don't care if we are off
599		 * channel for a bit. All we need to do is not to return
600		 * before we actually begin to be on the channel.
601		 */
602		iwl_mvm_stop_session_protection(mvm, vif);
603	}
604
605	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
606	time_cmd.id_and_color =
607		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
608	time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC);
609
610	time_cmd.apply_time = cpu_to_le32(0);
611
612	time_cmd.max_frags = TE_V2_FRAG_NONE;
613	time_cmd.max_delay = cpu_to_le32(max_delay);
614	/* TODO: why do we need to set interval = bi if it is not periodic? */
615	time_cmd.interval = cpu_to_le32(1);
616	time_cmd.duration = cpu_to_le32(duration);
617	time_cmd.repeat = 1;
618	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
619				      TE_V2_NOTIF_HOST_EVENT_END |
620				      TE_V2_START_IMMEDIATELY);
621
622	if (!wait_for_notif) {
623		iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
624		return;
625	}
626
627	/*
628	 * Create notification_wait for the TIME_EVENT_NOTIFICATION to use
629	 * right after we send the time event
630	 */
631	iwl_init_notification_wait(&mvm->notif_wait, &wait_te_notif,
632				   te_notif_response,
633				   ARRAY_SIZE(te_notif_response),
634				   iwl_mvm_te_notif, te_data);
635
636	/* If TE was sent OK - wait for the notification that it started */
637	if (iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd)) {
638		IWL_ERR(mvm, "Failed to add TE to protect session\n");
639		iwl_remove_notification(&mvm->notif_wait, &wait_te_notif);
640	} else if (iwl_wait_notification(&mvm->notif_wait, &wait_te_notif,
641					 TU_TO_JIFFIES(max_delay))) {
642		IWL_ERR(mvm, "Failed to protect session until TE\n");
643	}
644}
645
646static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
647					struct iwl_mvm_time_event_data *te_data,
648					u32 *uid)
649{
650	u32 id;
651
652	/*
653	 * It is possible that by the time we got to this point the time
654	 * event was already removed.
655	 */
656	spin_lock_bh(&mvm->time_event_lock);
657
658	/* Save time event uid before clearing its data */
659	*uid = te_data->uid;
660	id = te_data->id;
661
662	/*
663	 * The clear_data function handles time events that were already removed
664	 */
665	iwl_mvm_te_clear_data(mvm, te_data);
666	spin_unlock_bh(&mvm->time_event_lock);
667
668	/*
669	 * It is possible that by the time we try to remove it, the time event
670	 * has already ended and removed. In such a case there is no need to
671	 * send a removal command.
672	 */
673	if (id == TE_MAX) {
674		IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", *uid);
675		return false;
676	}
677
678	return true;
679}
680
681/*
682 * Explicit request to remove an aux ROC time event. The removal of a time
683 * event needs to be synchronized with the flow of a time event's end
684 * notification, which also removes the time event from the op mode
685 * data structures.
686 */
687static void iwl_mvm_remove_aux_roc_te(struct iwl_mvm *mvm,
688				      struct iwl_mvm_vif *mvmvif,
689				      struct iwl_mvm_time_event_data *te_data)
690{
691	struct iwl_hs20_roc_req aux_cmd = {};
692	u32 uid;
693	int ret;
694
695	if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
696		return;
697
698	aux_cmd.event_unique_id = cpu_to_le32(uid);
699	aux_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
700	aux_cmd.id_and_color =
701		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
702	IWL_DEBUG_TE(mvm, "Removing BSS AUX ROC TE 0x%x\n",
703		     le32_to_cpu(aux_cmd.event_unique_id));
704	ret = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0,
705				   sizeof(aux_cmd), &aux_cmd);
706
707	if (WARN_ON(ret))
708		return;
709}
710
711/*
712 * Explicit request to remove a time event. The removal of a time event needs to
713 * be synchronized with the flow of a time event's end notification, which also
714 * removes the time event from the op mode data structures.
715 */
716void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
717			       struct iwl_mvm_vif *mvmvif,
718			       struct iwl_mvm_time_event_data *te_data)
719{
720	struct iwl_time_event_cmd time_cmd = {};
721	u32 uid;
722	int ret;
723
724	if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
725		return;
726
727	/* When we remove a TE, the UID is to be set in the id field */
728	time_cmd.id = cpu_to_le32(uid);
729	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
730	time_cmd.id_and_color =
731		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
732
733	IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id));
734	ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
735				   sizeof(time_cmd), &time_cmd);
736	if (WARN_ON(ret))
737		return;
738}
739
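/*
 * Remove the session protection time event, but only if the currently
 * scheduled time event is indeed TE_BSS_STA_AGGRESSIVE_ASSOC.
 */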
740void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
741				     struct ieee80211_vif *vif)
742{
743	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
744	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
745	u32 id;
746
747	lockdep_assert_held(&mvm->mutex);
748
749	spin_lock_bh(&mvm->time_event_lock);
750	id = te_data->id;
751	spin_unlock_bh(&mvm->time_event_lock);
752
753	if (id != TE_BSS_STA_AGGRESSIVE_ASSOC) {
754		IWL_DEBUG_TE(mvm,
755			     "don't remove TE with id=%u (not session protection)\n",
756			     id);
757		return;
758	}
759
760	iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
761}
762
763int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
764			  int duration, enum ieee80211_roc_type type)
765{
766	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
767	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
768	struct iwl_time_event_cmd time_cmd = {};
769
770	lockdep_assert_held(&mvm->mutex);
771	if (te_data->running) {
772		IWL_WARN(mvm, "P2P_DEVICE remain on channel already running\n");
773		return -EBUSY;
774	}
775
776	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
777	time_cmd.id_and_color =
778		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
779
780	switch (type) {
781	case IEEE80211_ROC_TYPE_NORMAL:
782		time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_NORMAL);
783		break;
784	case IEEE80211_ROC_TYPE_MGMT_TX:
785		time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_MGMT_TX);
786		break;
787	default:
788		WARN_ONCE(1, "Got an invalid ROC type\n");
789		return -EINVAL;
790	}
791
792	time_cmd.apply_time = cpu_to_le32(0);
793	time_cmd.interval = cpu_to_le32(1);
794
795	/*
796	 * The P2P Device TEs can have lower priority than other events
797	 * that are being scheduled by the driver/fw, and thus they might not be
798	 * scheduled. To improve the chances of them being scheduled, allow them
799	 * to be fragmented, and in addition allow them to be delayed.
800	 */
801	time_cmd.max_frags = min(MSEC_TO_TU(duration)/50, TE_V2_FRAG_ENDLESS);
802	time_cmd.max_delay = cpu_to_le32(MSEC_TO_TU(duration/2));
803	time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration));
804	time_cmd.repeat = 1;
805	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
806				      TE_V2_NOTIF_HOST_EVENT_END |
807				      TE_V2_START_IMMEDIATELY);
808
809	return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
810}
811
812static struct iwl_mvm_time_event_data *iwl_mvm_get_roc_te(struct iwl_mvm *mvm)
813{
814	struct iwl_mvm_time_event_data *te_data;
815
816	lockdep_assert_held(&mvm->mutex);
817
818	spin_lock_bh(&mvm->time_event_lock);
819
820	/*
821	 * Iterate over the list of time events and find the time event that is
822	 * associated with a P2P_DEVICE interface.
823	 * This assumes that a P2P_DEVICE interface can have only a single time
824	 * event at any given time and this time event corresponds to a ROC
825	 * request.
826	 */
827	list_for_each_entry(te_data, &mvm->time_event_list, list) {
828		if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE)
829			goto out;
830	}
831
832	/* There can only be at most one AUX ROC time event; we just use the
833	 * list to simplify/unify code. Remove it if it exists.
834	 */
835	te_data = list_first_entry_or_null(&mvm->aux_roc_te_list,
836					   struct iwl_mvm_time_event_data,
837					   list);
838out:
839	spin_unlock_bh(&mvm->time_event_lock);
840	return te_data;
841}
842
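/*
 * Clear the driver state of a pending ROC time event, if any, without
 * sending a removal command to the firmware.
 */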
843void iwl_mvm_cleanup_roc_te(struct iwl_mvm *mvm)
844{
845	struct iwl_mvm_time_event_data *te_data;
846	u32 uid;
847
848	te_data = iwl_mvm_get_roc_te(mvm);
849	if (te_data)
850		__iwl_mvm_remove_time_event(mvm, te_data, &uid);
851}
852
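/*
 * Stop an ongoing ROC session: find its time event, remove it from the
 * firmware (P2P_DEVICE or AUX ROC flavour) and finish the ROC flow.
 */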
853void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
854{
855	struct iwl_mvm_vif *mvmvif;
856	struct iwl_mvm_time_event_data *te_data;
857
858	te_data = iwl_mvm_get_roc_te(mvm);
859	if (!te_data) {
860		IWL_WARN(mvm, "No remain on channel event\n");
861		return;
862	}
863
864	mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
865
866	if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
867		iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
868		set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
869	} else {
870		iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data);
871	}
872
873	iwl_mvm_roc_finished(mvm);
874}
875
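/*
 * Schedule a TE_CHANNEL_SWITCH_PERIOD time event starting at apply_time
 * (or immediately if apply_time is 0). Any running session protection
 * time event is removed first to make room for the channel switch.
 */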
876int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
877				struct ieee80211_vif *vif,
878				u32 duration, u32 apply_time)
879{
880	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
881	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
882	struct iwl_time_event_cmd time_cmd = {};
883
884	lockdep_assert_held(&mvm->mutex);
885
886	if (te_data->running) {
887		u32 id;
888
889		spin_lock_bh(&mvm->time_event_lock);
890		id = te_data->id;
891		spin_unlock_bh(&mvm->time_event_lock);
892
893		if (id == TE_CHANNEL_SWITCH_PERIOD) {
894			IWL_DEBUG_TE(mvm, "CS period is already scheduled\n");
895			return -EBUSY;
896		}
897
898		/*
899		 * Remove the session protection time event to allow the
900		 * channel switch. If we got here, we just heard a beacon so
901		 * the session protection is not needed anymore anyway.
902		 */
903		iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
904	}
905
906	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
907	time_cmd.id_and_color =
908		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
909	time_cmd.id = cpu_to_le32(TE_CHANNEL_SWITCH_PERIOD);
910	time_cmd.apply_time = cpu_to_le32(apply_time);
911	time_cmd.max_frags = TE_V2_FRAG_NONE;
912	time_cmd.duration = cpu_to_le32(duration);
913	time_cmd.repeat = 1;
914	time_cmd.interval = cpu_to_le32(1);
915	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
916				      TE_V2_ABSENCE);
917	if (!apply_time)
918		time_cmd.policy |= cpu_to_le16(TE_V2_START_IMMEDIATELY);
919
920	return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
921}