1// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2/*
3 * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
5 * Copyright (C) 2017 Intel Deutschland GmbH
6 */
7#include <linux/jiffies.h>
8#include <net/mac80211.h>
9
10#include "fw/notif-wait.h"
11#include "iwl-trans.h"
12#include "fw-api.h"
13#include "time-event.h"
14#include "mvm.h"
15#include "iwl-io.h"
16#include "iwl-prph.h"
17
18/*
19 * For the high priority TE use a time event type that has similar priority to
20 * the FW's action scan priority.
21 */
22#define IWL_MVM_ROC_TE_TYPE_NORMAL TE_P2P_DEVICE_DISCOVERABLE
23#define IWL_MVM_ROC_TE_TYPE_MGMT_TX TE_P2P_CLIENT_ASSOC
24
25void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
26 struct iwl_mvm_time_event_data *te_data)
27{
28 lockdep_assert_held(&mvm->time_event_lock);
29
30 if (!te_data || !te_data->vif)
31 return;
32
33 list_del(&te_data->list);
34
35 /*
36 * the list is only used for AUX ROC events so make sure it is always
37 * initialized
38 */
39 INIT_LIST_HEAD(&te_data->list);
40
41 te_data->running = false;
42 te_data->uid = 0;
43 te_data->id = TE_MAX;
44 te_data->vif = NULL;
45 te_data->link_id = -1;
46}
47
/*
 * Deferred teardown after a remain-on-channel (ROC) session ends.
 * Runs in process context so it can take mvm->mutex, flush the
 * offchannel frames and send station-removal commands to the firmware.
 */
void iwl_mvm_roc_done_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);

	/*
	 * Clear the ROC_RUNNING status bit.
	 * This will cause the TX path to drop offchannel transmissions.
	 * That would also be done by mac80211, but it is racy, in particular
	 * in the case that the time event actually completed in the firmware
	 * (which is handled in iwl_mvm_te_handle_notif).
	 */
	clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);

	/* make sure any in-flight TX has observed the cleared bit */
	synchronize_net();

	/*
	 * Flush the offchannel queue -- this is called when the time
	 * event finishes or is canceled, so that frames queued for it
	 * won't get stuck on the queue and be transmitted in the next
	 * time event.
	 */

	mutex_lock(&mvm->mutex);
	if (test_and_clear_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status)) {
		struct iwl_mvm_vif *mvmvif;

		/*
		 * NB: access to this pointer would be racy, but the flush bit
		 * can only be set when we had a P2P-Device VIF, and we have a
		 * flush of this work in iwl_mvm_prepare_mac_removal() so it's
		 * not really racy.
		 */

		if (!WARN_ON(!mvm->p2p_device_vif)) {
			struct ieee80211_vif *vif = mvm->p2p_device_vif;

			/* drop any frames still queued for the P2P bcast sta */
			mvmvif = iwl_mvm_vif_from_mac80211(vif);
			iwl_mvm_flush_sta(mvm, mvmvif->deflink.bcast_sta.sta_id,
					  mvmvif->deflink.bcast_sta.tfd_queue_msk);

			if (mvm->mld_api_is_used) {
				iwl_mvm_mld_rm_bcast_sta(mvm, vif,
							 &vif->bss_conf);

				/* deactivate the link used for the ROC */
				iwl_mvm_link_changed(mvm, vif, &vif->bss_conf,
						     LINK_CONTEXT_MODIFY_ACTIVE,
						     false);
			} else {
				iwl_mvm_rm_p2p_bcast_sta(mvm, vif);
				iwl_mvm_binding_remove_vif(mvm, vif);
			}

			/* Do not remove the PHY context as removing and adding
			 * a PHY context has timing overheads. Leaving it
			 * configured in FW would be useful in case the next ROC
			 * is with the same channel.
			 */
		}
	}

	/*
	 * Clear the ROC_AUX_RUNNING status bit.
	 * This will cause the TX path to drop offchannel transmissions.
	 * That would also be done by mac80211, but it is racy, in particular
	 * in the case that the time event actually completed in the firmware
	 * (which is handled in iwl_mvm_te_handle_notif).
	 */
	if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) {
		/* do the same in case of hot spot 2.0 */
		iwl_mvm_flush_sta(mvm, mvm->aux_sta.sta_id,
				  mvm->aux_sta.tfd_queue_msk);

		/* the MLD API has its own AUX station removal flow */
		if (mvm->mld_api_is_used) {
			iwl_mvm_mld_rm_aux_sta(mvm);
			goto out_unlock;
		}

		/* In newer version of this command an aux station is added only
		 * in cases of dedicated tx queue and need to be removed in end
		 * of use */
		if (iwl_mvm_has_new_station_api(mvm->fw))
			iwl_mvm_rm_aux_sta(mvm);
	}

	/* also reached by fallthrough when no AUX ROC was running */
out_unlock:
	mutex_unlock(&mvm->mutex);
}
135
136static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
137{
138 /*
139 * Of course, our status bit is just as racy as mac80211, so in
140 * addition, fire off the work struct which will drop all frames
141 * from the hardware queues that made it through the race. First
142 * it will of course synchronize the TX path to make sure that
143 * any *new* TX will be rejected.
144 */
145 schedule_work(&mvm->roc_done_wk);
146}
147
/*
 * Called when the firmware's CSA NoA time event starts on an AP vif:
 * finish the channel switch once the beacon countdown has completed,
 * otherwise leave the switch to be performed on the last TBTT.
 */
static void iwl_mvm_csa_noa_start(struct iwl_mvm *mvm)
{
	struct ieee80211_vif *csa_vif;

	rcu_read_lock();

	csa_vif = rcu_dereference(mvm->csa_vif);
	if (!csa_vif || !csa_vif->bss_conf.csa_active)
		goto out_unlock;

	IWL_DEBUG_TE(mvm, "CSA NOA started\n");

	/*
	 * CSA NoA is started but we still have beacons to
	 * transmit on the current channel.
	 * So we just do nothing here and the switch
	 * will be performed on the last TBTT.
	 */
	if (!ieee80211_beacon_cntdwn_is_complete(csa_vif)) {
		IWL_WARN(mvm, "CSA NOA started too early\n");
		goto out_unlock;
	}

	/* complete the switch while still holding the RCU reference */
	ieee80211_csa_finish(csa_vif);

	rcu_read_unlock();

	/* the switch is done, drop our reference to the vif */
	RCU_INIT_POINTER(mvm->csa_vif, NULL);

	return;

out_unlock:
	rcu_read_unlock();
}
182
/*
 * Decide whether a failed/expired time event must be treated as a
 * connection loss on a station vif.
 *
 * Returns true when a disconnect was triggered, false when the vif is
 * not a station or is safely associated (dtim period already known and
 * no CSA beacon pending).
 */
static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
					struct ieee80211_vif *vif,
					const char *errmsg)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (vif->type != NL80211_IFTYPE_STATION)
		return false;

	/* associated, dtim known and no CSA beacon pending: all good */
	if (!mvmvif->csa_bcn_pending && vif->cfg.assoc &&
	    vif->bss_conf.dtim_period)
		return false;
	if (errmsg)
		IWL_ERR(mvm, "%s\n", errmsg);

	if (mvmvif->csa_bcn_pending) {
		struct iwl_mvm_sta *mvmsta;

		rcu_read_lock();
		mvmsta = iwl_mvm_sta_from_staid_rcu(mvm,
						    mvmvif->deflink.ap_sta_id);
		/* re-enable TX to the AP before reporting the loss */
		if (!WARN_ON(!mvmsta))
			iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);
		rcu_read_unlock();
	}

	if (vif->cfg.assoc) {
		/*
		 * When not associated, this will be called from
		 * iwl_mvm_event_mlme_callback_ini()
		 */
		iwl_dbg_tlv_time_point(&mvm->fwrt,
				       IWL_FW_INI_TIME_POINT_ASSOC_FAILED,
				       NULL);
	}

	iwl_mvm_connection_loss(mvm, vif, errmsg);
	return true;
}
222
/*
 * Handle the start notification of the channel-switch time event:
 * on an AP this starts the NoA flow, on a station it completes the
 * switch; a failed start on a station is treated as connection loss.
 */
static void
iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm,
			     struct iwl_mvm_time_event_data *te_data,
			     struct iwl_time_event_notif *notif)
{
	struct ieee80211_vif *vif = te_data->vif;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (!notif->status)
		IWL_DEBUG_TE(mvm, "CSA time event failed to start\n");

	switch (te_data->vif->type) {
	case NL80211_IFTYPE_AP:
		/* remember the failure; the NoA flow still runs */
		if (!notif->status)
			mvmvif->csa_failed = true;
		iwl_mvm_csa_noa_start(mvm);
		break;
	case NL80211_IFTYPE_STATION:
		if (!notif->status) {
			iwl_mvm_connection_loss(mvm, vif,
						"CSA TE failed to start");
			break;
		}
		/* unblock TX to the AP and finish the switch */
		iwl_mvm_csa_client_absent(mvm, te_data->vif);
		cancel_delayed_work(&mvmvif->csa_work);
		ieee80211_chswitch_done(te_data->vif, true, 0);
		break;
	default:
		/* should never happen */
		WARN_ON_ONCE(1);
		break;
	}

	/* we don't need it anymore */
	iwl_mvm_te_clear_data(mvm, te_data);
}
259
260static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm,
261 struct iwl_time_event_notif *notif,
262 struct iwl_mvm_time_event_data *te_data)
263{
264 struct iwl_fw_dbg_trigger_tlv *trig;
265 struct iwl_fw_dbg_trigger_time_event *te_trig;
266 int i;
267
268 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt,
269 ieee80211_vif_to_wdev(te_data->vif),
270 FW_DBG_TRIGGER_TIME_EVENT);
271 if (!trig)
272 return;
273
274 te_trig = (void *)trig->data;
275
276 for (i = 0; i < ARRAY_SIZE(te_trig->time_events); i++) {
277 u32 trig_te_id = le32_to_cpu(te_trig->time_events[i].id);
278 u32 trig_action_bitmap =
279 le32_to_cpu(te_trig->time_events[i].action_bitmap);
280 u32 trig_status_bitmap =
281 le32_to_cpu(te_trig->time_events[i].status_bitmap);
282
283 if (trig_te_id != te_data->id ||
284 !(trig_action_bitmap & le32_to_cpu(notif->action)) ||
285 !(trig_status_bitmap & BIT(le32_to_cpu(notif->status))))
286 continue;
287
288 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
289 "Time event %d Action 0x%x received status: %d",
290 te_data->id,
291 le32_to_cpu(notif->action),
292 le32_to_cpu(notif->status));
293 break;
294 }
295}
296
297static void iwl_mvm_p2p_roc_finished(struct iwl_mvm *mvm)
298{
299 /*
300 * If the IWL_MVM_STATUS_NEED_FLUSH_P2P is already set, then the
301 * roc_done_wk is already scheduled or running, so don't schedule it
302 * again to avoid a race where the roc_done_wk clears this bit after
303 * it is set here, affecting the next run of the roc_done_wk.
304 */
305 if (!test_and_set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status))
306 iwl_mvm_roc_finished(mvm);
307}
308
309/*
310 * Handles a FW notification for an event that is known to the driver.
311 *
312 * @mvm: the mvm component
313 * @te_data: the time event data
314 * @notif: the notification data corresponding the time event data.
315 */
316static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
317 struct iwl_mvm_time_event_data *te_data,
318 struct iwl_time_event_notif *notif)
319{
320 lockdep_assert_held(&mvm->time_event_lock);
321
322 IWL_DEBUG_TE(mvm, "Handle time event notif - UID = 0x%x action %d\n",
323 le32_to_cpu(notif->unique_id),
324 le32_to_cpu(notif->action));
325
326 iwl_mvm_te_check_trigger(mvm, notif, te_data);
327
328 /*
329 * The FW sends the start/end time event notifications even for events
330 * that it fails to schedule. This is indicated in the status field of
331 * the notification. This happens in cases that the scheduler cannot
332 * find a schedule that can handle the event (for example requesting a
333 * P2P Device discoveribility, while there are other higher priority
334 * events in the system).
335 */
336 if (!le32_to_cpu(notif->status)) {
337 const char *msg;
338
339 if (notif->action & cpu_to_le32(TE_V2_NOTIF_HOST_EVENT_START))
340 msg = "Time Event start notification failure";
341 else
342 msg = "Time Event end notification failure";
343
344 IWL_DEBUG_TE(mvm, "%s\n", msg);
345
346 if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, msg)) {
347 iwl_mvm_te_clear_data(mvm, te_data);
348 return;
349 }
350 }
351
352 if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_END) {
353 IWL_DEBUG_TE(mvm,
354 "TE ended - current time %lu, estimated end %lu\n",
355 jiffies, te_data->end_jiffies);
356
357 switch (te_data->vif->type) {
358 case NL80211_IFTYPE_P2P_DEVICE:
359 ieee80211_remain_on_channel_expired(mvm->hw);
360 iwl_mvm_p2p_roc_finished(mvm);
361 break;
362 case NL80211_IFTYPE_STATION:
363 /*
364 * If we are switching channel, don't disconnect
365 * if the time event is already done. Beacons can
366 * be delayed a bit after the switch.
367 */
368 if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) {
369 IWL_DEBUG_TE(mvm,
370 "No beacon heard and the CS time event is over, don't disconnect\n");
371 break;
372 }
373
374 /*
375 * By now, we should have finished association
376 * and know the dtim period.
377 */
378 iwl_mvm_te_check_disconnect(mvm, te_data->vif,
379 !te_data->vif->cfg.assoc ?
380 "Not associated and the time event is over already..." :
381 "No beacon heard and the time event is over already...");
382 break;
383 default:
384 break;
385 }
386
387 iwl_mvm_te_clear_data(mvm, te_data);
388 } else if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_START) {
389 te_data->running = true;
390 te_data->end_jiffies = TU_TO_EXP_TIME(te_data->duration);
391
392 if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
393 set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
394 ieee80211_ready_on_channel(mvm->hw);
395 } else if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) {
396 iwl_mvm_te_handle_notify_csa(mvm, te_data, notif);
397 }
398 } else {
399 IWL_WARN(mvm, "Got TE with unknown action\n");
400 }
401}
402
403void iwl_mvm_rx_roc_notif(struct iwl_mvm *mvm,
404 struct iwl_rx_cmd_buffer *rxb)
405{
406 struct iwl_rx_packet *pkt = rxb_addr(rxb);
407 struct iwl_roc_notif *notif = (void *)pkt->data;
408
409 if (le32_to_cpu(notif->success) && le32_to_cpu(notif->started) &&
410 le32_to_cpu(notif->activity) == ROC_ACTIVITY_HOTSPOT) {
411 set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
412 ieee80211_ready_on_channel(mvm->hw);
413 } else {
414 iwl_mvm_roc_finished(mvm);
415 ieee80211_remain_on_channel_expired(mvm->hw);
416 }
417}
418
419/*
420 * Handle A Aux ROC time event
421 */
422static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
423 struct iwl_time_event_notif *notif)
424{
425 struct iwl_mvm_time_event_data *aux_roc_te = NULL, *te_data;
426
427 list_for_each_entry(te_data, &mvm->aux_roc_te_list, list) {
428 if (le32_to_cpu(notif->unique_id) == te_data->uid) {
429 aux_roc_te = te_data;
430 break;
431 }
432 }
433 if (!aux_roc_te) /* Not a Aux ROC time event */
434 return -EINVAL;
435
436 iwl_mvm_te_check_trigger(mvm, notif, te_data);
437
438 IWL_DEBUG_TE(mvm,
439 "Aux ROC time event notification - UID = 0x%x action %d (error = %d)\n",
440 le32_to_cpu(notif->unique_id),
441 le32_to_cpu(notif->action), le32_to_cpu(notif->status));
442
443 if (!le32_to_cpu(notif->status) ||
444 le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_END) {
445 /* End TE, notify mac80211 */
446 ieee80211_remain_on_channel_expired(mvm->hw);
447 iwl_mvm_roc_finished(mvm); /* flush aux queue */
448 list_del(&te_data->list); /* remove from list */
449 te_data->running = false;
450 te_data->vif = NULL;
451 te_data->uid = 0;
452 te_data->id = TE_MAX;
453 } else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) {
454 set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
455 te_data->running = true;
456 ieee80211_ready_on_channel(mvm->hw); /* Start TE */
457 } else {
458 IWL_DEBUG_TE(mvm,
459 "ERROR: Unknown Aux ROC Time Event (action = %d)\n",
460 le32_to_cpu(notif->action));
461 return -EINVAL;
462 }
463
464 return 0;
465}
466
467/*
468 * The Rx handler for time event notifications
469 */
470void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
471 struct iwl_rx_cmd_buffer *rxb)
472{
473 struct iwl_rx_packet *pkt = rxb_addr(rxb);
474 struct iwl_time_event_notif *notif = (void *)pkt->data;
475 struct iwl_mvm_time_event_data *te_data, *tmp;
476
477 IWL_DEBUG_TE(mvm, "Time event notification - UID = 0x%x action %d\n",
478 le32_to_cpu(notif->unique_id),
479 le32_to_cpu(notif->action));
480
481 spin_lock_bh(&mvm->time_event_lock);
482 /* This time event is triggered for Aux ROC request */
483 if (!iwl_mvm_aux_roc_te_handle_notif(mvm, notif))
484 goto unlock;
485
486 list_for_each_entry_safe(te_data, tmp, &mvm->time_event_list, list) {
487 if (le32_to_cpu(notif->unique_id) == te_data->uid)
488 iwl_mvm_te_handle_notif(mvm, te_data, notif);
489 }
490unlock:
491 spin_unlock_bh(&mvm->time_event_lock);
492}
493
494static bool iwl_mvm_te_notif(struct iwl_notif_wait_data *notif_wait,
495 struct iwl_rx_packet *pkt, void *data)
496{
497 struct iwl_mvm *mvm =
498 container_of(notif_wait, struct iwl_mvm, notif_wait);
499 struct iwl_mvm_time_event_data *te_data = data;
500 struct iwl_time_event_notif *resp;
501 int resp_len = iwl_rx_packet_payload_len(pkt);
502
503 if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_NOTIFICATION))
504 return true;
505
506 if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
507 IWL_ERR(mvm, "Invalid TIME_EVENT_NOTIFICATION response\n");
508 return true;
509 }
510
511 resp = (void *)pkt->data;
512
513 /* te_data->uid is already set in the TIME_EVENT_CMD response */
514 if (le32_to_cpu(resp->unique_id) != te_data->uid)
515 return false;
516
517 IWL_DEBUG_TE(mvm, "TIME_EVENT_NOTIFICATION response - UID = 0x%x\n",
518 te_data->uid);
519 if (!resp->status)
520 IWL_ERR(mvm,
521 "TIME_EVENT_NOTIFICATION received but not executed\n");
522
523 return true;
524}
525
526static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait,
527 struct iwl_rx_packet *pkt, void *data)
528{
529 struct iwl_mvm *mvm =
530 container_of(notif_wait, struct iwl_mvm, notif_wait);
531 struct iwl_mvm_time_event_data *te_data = data;
532 struct iwl_time_event_resp *resp;
533 int resp_len = iwl_rx_packet_payload_len(pkt);
534
535 if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_CMD))
536 return true;
537
538 if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
539 IWL_ERR(mvm, "Invalid TIME_EVENT_CMD response\n");
540 return true;
541 }
542
543 resp = (void *)pkt->data;
544
545 /* we should never get a response to another TIME_EVENT_CMD here */
546 if (WARN_ON_ONCE(le32_to_cpu(resp->id) != te_data->id))
547 return false;
548
549 te_data->uid = le32_to_cpu(resp->unique_id);
550 IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
551 te_data->uid);
552 return true;
553}
554
/*
 * Add a new time event to the firmware and record it in
 * mvm->time_event_list, filling in the firmware-assigned UID from the
 * command response.
 *
 * Returns 0 on success or a negative errno; on failure the time event
 * data is cleared again.
 */
static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct iwl_mvm_time_event_data *te_data,
				       struct iwl_time_event_cmd *te_cmd)
{
	static const u16 time_event_response[] = { TIME_EVENT_CMD };
	struct iwl_notification_wait wait_time_event;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_TE(mvm, "Add new TE, duration %d TU\n",
		     le32_to_cpu(te_cmd->duration));

	spin_lock_bh(&mvm->time_event_lock);
	/* id != TE_MAX means the slot is already in use */
	if (WARN_ON(te_data->id != TE_MAX)) {
		spin_unlock_bh(&mvm->time_event_lock);
		return -EIO;
	}
	te_data->vif = vif;
	te_data->duration = le32_to_cpu(te_cmd->duration);
	te_data->id = le32_to_cpu(te_cmd->id);
	list_add_tail(&te_data->list, &mvm->time_event_list);
	spin_unlock_bh(&mvm->time_event_lock);

	/*
	 * Use a notification wait, which really just processes the
	 * command response and doesn't wait for anything, in order
	 * to be able to process the response and get the UID inside
	 * the RX path. Using CMD_WANT_SKB doesn't work because it
	 * stores the buffer and then wakes up this thread, by which
	 * time another notification (that the time event started)
	 * might already be processed unsuccessfully.
	 */
	iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
				   time_event_response,
				   ARRAY_SIZE(time_event_response),
				   iwl_mvm_time_event_response, te_data);

	ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
				   sizeof(*te_cmd), te_cmd);
	if (ret) {
		IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret);
		iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
		goto out_clear_te;
	}

	/* No need to wait for anything, so just pass 1 (0 isn't valid) */
	ret = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
	/* should never fail */
	WARN_ON_ONCE(ret);

	/*
	 * NOTE: out_clear_te lives inside this if; the send-failure path
	 * above jumps directly into it, which is valid C.
	 */
	if (ret) {
 out_clear_te:
		spin_lock_bh(&mvm->time_event_lock);
		iwl_mvm_te_clear_data(mvm, te_data);
		spin_unlock_bh(&mvm->time_event_lock);
	}
	return ret;
}
615
/*
 * Schedule a TE_BSS_STA_AGGRESSIVE_ASSOC time event that keeps the
 * device on the BSS channel for @duration TU (used e.g. around
 * association). If a protection event is already running with at least
 * @min_duration left, it is kept; otherwise it is replaced. When
 * @wait_for_notif is set, block until the firmware reports that the
 * event actually started (at most @max_delay TU).
 */
void iwl_mvm_protect_session(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     u32 duration, u32 min_duration,
			     u32 max_delay, bool wait_for_notif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
	const u16 te_notif_response[] = { TIME_EVENT_NOTIFICATION };
	struct iwl_notification_wait wait_te_notif;
	struct iwl_time_event_cmd time_cmd = {};

	lockdep_assert_held(&mvm->mutex);

	if (te_data->running &&
	    time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) {
		IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
			     jiffies_to_msecs(te_data->end_jiffies - jiffies));
		return;
	}

	if (te_data->running) {
		IWL_DEBUG_TE(mvm, "extend 0x%x: only %u ms left\n",
			     te_data->uid,
			     jiffies_to_msecs(te_data->end_jiffies - jiffies));
		/*
		 * we don't have enough time
		 * cancel the current TE and issue a new one
		 * Of course it would be better to remove the old one only
		 * when the new one is added, but we don't care if we are off
		 * channel for a bit. All we need to do, is not to return
		 * before we actually begin to be on the channel.
		 */
		iwl_mvm_stop_session_protection(mvm, vif);
	}

	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
	time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC);

	/* start as soon as the scheduler allows (no fixed apply time) */
	time_cmd.apply_time = cpu_to_le32(0);

	time_cmd.max_frags = TE_V2_FRAG_NONE;
	time_cmd.max_delay = cpu_to_le32(max_delay);
	/* TODO: why do we need to interval = bi if it is not periodic? */
	time_cmd.interval = cpu_to_le32(1);
	time_cmd.duration = cpu_to_le32(duration);
	time_cmd.repeat = 1;
	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
				      TE_V2_NOTIF_HOST_EVENT_END |
				      TE_V2_START_IMMEDIATELY);

	if (!wait_for_notif) {
		iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
		return;
	}

	/*
	 * Create notification_wait for the TIME_EVENT_NOTIFICATION to use
	 * right after we send the time event
	 */
	iwl_init_notification_wait(&mvm->notif_wait, &wait_te_notif,
				   te_notif_response,
				   ARRAY_SIZE(te_notif_response),
				   iwl_mvm_te_notif, te_data);

	/* If TE was sent OK - wait for the notification that started */
	if (iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd)) {
		IWL_ERR(mvm, "Failed to add TE to protect session\n");
		iwl_remove_notification(&mvm->notif_wait, &wait_te_notif);
	} else if (iwl_wait_notification(&mvm->notif_wait, &wait_te_notif,
					 TU_TO_JIFFIES(max_delay))) {
		IWL_ERR(mvm, "Failed to protect session until TE\n");
	}
}
691
692/* Determine whether mac or link id should be used, and validate the link id */
693static int iwl_mvm_get_session_prot_id(struct iwl_mvm *mvm,
694 struct ieee80211_vif *vif,
695 u32 link_id)
696{
697 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
698 int ver = iwl_fw_lookup_cmd_ver(mvm->fw,
699 WIDE_ID(MAC_CONF_GROUP,
700 SESSION_PROTECTION_CMD), 1);
701
702 if (ver < 2)
703 return mvmvif->id;
704
705 if (WARN(link_id < 0 || !mvmvif->link[link_id],
706 "Invalid link ID for session protection: %u\n", link_id))
707 return -EINVAL;
708
709 if (WARN(ieee80211_vif_is_mld(vif) &&
710 !(vif->active_links & BIT(link_id)),
711 "Session Protection on an inactive link: %u\n", link_id))
712 return -EINVAL;
713
714 return mvmvif->link[link_id]->fw_link_id;
715}
716
717static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
718 struct ieee80211_vif *vif,
719 u32 id, u32 link_id)
720{
721 int mac_link_id = iwl_mvm_get_session_prot_id(mvm, vif, link_id);
722 struct iwl_mvm_session_prot_cmd cmd = {
723 .id_and_color = cpu_to_le32(mac_link_id),
724 .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
725 .conf_id = cpu_to_le32(id),
726 };
727 int ret;
728
729 if (mac_link_id < 0)
730 return;
731
732 ret = iwl_mvm_send_cmd_pdu(mvm,
733 WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
734 0, sizeof(cmd), &cmd);
735 if (ret)
736 IWL_ERR(mvm,
737 "Couldn't send the SESSION_PROTECTION_CMD: %d\n", ret);
738}
739
740static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
741 struct iwl_mvm_time_event_data *te_data,
742 u32 *uid)
743{
744 u32 id;
745 struct ieee80211_vif *vif = te_data->vif;
746 struct iwl_mvm_vif *mvmvif;
747 enum nl80211_iftype iftype;
748 unsigned int link_id;
749
750 if (!vif)
751 return false;
752
753 mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
754 iftype = te_data->vif->type;
755
756 /*
757 * It is possible that by the time we got to this point the time
758 * event was already removed.
759 */
760 spin_lock_bh(&mvm->time_event_lock);
761
762 /* Save time event uid before clearing its data */
763 *uid = te_data->uid;
764 id = te_data->id;
765 link_id = te_data->link_id;
766
767 /*
768 * The clear_data function handles time events that were already removed
769 */
770 iwl_mvm_te_clear_data(mvm, te_data);
771 spin_unlock_bh(&mvm->time_event_lock);
772
773 /* When session protection is used, the te_data->id field
774 * is reused to save session protection's configuration.
775 * For AUX ROC, HOT_SPOT_CMD is used and the te_data->id field is set
776 * to HOT_SPOT_CMD.
777 */
778 if (fw_has_capa(&mvm->fw->ucode_capa,
779 IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD) &&
780 id != HOT_SPOT_CMD) {
781 if (mvmvif && id < SESSION_PROTECT_CONF_MAX_ID) {
782 /* Session protection is still ongoing. Cancel it */
783 iwl_mvm_cancel_session_protection(mvm, vif, id,
784 link_id);
785 if (iftype == NL80211_IFTYPE_P2P_DEVICE) {
786 iwl_mvm_p2p_roc_finished(mvm);
787 }
788 }
789 return false;
790 } else {
791 /* It is possible that by the time we try to remove it, the
792 * time event has already ended and removed. In such a case
793 * there is no need to send a removal command.
794 */
795 if (id == TE_MAX) {
796 IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", *uid);
797 return false;
798 }
799 }
800
801 return true;
802}
803
804/*
805 * Explicit request to remove a aux roc time event. The removal of a time
806 * event needs to be synchronized with the flow of a time event's end
807 * notification, which also removes the time event from the op mode
808 * data structures.
809 */
810static void iwl_mvm_remove_aux_roc_te(struct iwl_mvm *mvm,
811 struct iwl_mvm_vif *mvmvif,
812 struct iwl_mvm_time_event_data *te_data)
813{
814 struct iwl_hs20_roc_req aux_cmd = {};
815 u16 len = sizeof(aux_cmd) - iwl_mvm_chan_info_padding(mvm);
816
817 u32 uid;
818 int ret;
819
820 if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
821 return;
822
823 aux_cmd.event_unique_id = cpu_to_le32(uid);
824 aux_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
825 aux_cmd.id_and_color =
826 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
827 IWL_DEBUG_TE(mvm, "Removing BSS AUX ROC TE 0x%x\n",
828 le32_to_cpu(aux_cmd.event_unique_id));
829 ret = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0,
830 len, &aux_cmd);
831
832 if (WARN_ON(ret))
833 return;
834}
835
836/*
837 * Explicit request to remove a time event. The removal of a time event needs to
838 * be synchronized with the flow of a time event's end notification, which also
839 * removes the time event from the op mode data structures.
840 */
841void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
842 struct iwl_mvm_vif *mvmvif,
843 struct iwl_mvm_time_event_data *te_data)
844{
845 struct iwl_time_event_cmd time_cmd = {};
846 u32 uid;
847 int ret;
848
849 if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
850 return;
851
852 /* When we remove a TE, the UID is to be set in the id field */
853 time_cmd.id = cpu_to_le32(uid);
854 time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
855 time_cmd.id_and_color =
856 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
857
858 IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id));
859 ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
860 sizeof(time_cmd), &time_cmd);
861 if (ret)
862 IWL_ERR(mvm, "Couldn't remove the time event\n");
863}
864
865void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
866 struct ieee80211_vif *vif)
867{
868 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
869 struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
870 u32 id;
871
872 lockdep_assert_held(&mvm->mutex);
873
874 spin_lock_bh(&mvm->time_event_lock);
875 id = te_data->id;
876 spin_unlock_bh(&mvm->time_event_lock);
877
878 if (fw_has_capa(&mvm->fw->ucode_capa,
879 IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
880 if (id != SESSION_PROTECT_CONF_ASSOC) {
881 IWL_DEBUG_TE(mvm,
882 "don't remove session protection id=%u\n",
883 id);
884 return;
885 }
886 } else if (id != TE_BSS_STA_AGGRESSIVE_ASSOC) {
887 IWL_DEBUG_TE(mvm,
888 "don't remove TE with id=%u (not session protection)\n",
889 id);
890 return;
891 }
892
893 iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
894}
895
/*
 * Rx handler for SESSION_PROTECTION_NOTIF: track start/end of session
 * protection. For cmd ver <= 2 the notification id is a MAC id, for
 * newer versions it is a FW link id. P2P-Device vifs map the events to
 * mac80211's remain-on-channel flow; other vifs update their
 * time_event_data and may trigger a disconnect check.
 */
void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
				      struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_session_prot_notif *notif = (void *)pkt->data;
	unsigned int ver =
		iwl_fw_lookup_cmd_ver(mvm->fw,
				      WIDE_ID(MAC_CONF_GROUP,
					      SESSION_PROTECTION_CMD), 2);
	int id = le32_to_cpu(notif->mac_link_id);
	struct ieee80211_vif *vif;
	struct iwl_mvm_vif *mvmvif;
	/* only assigned (and only used) when ver > 2 */
	unsigned int notif_link_id;

	rcu_read_lock();

	if (ver <= 2) {
		vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true);
	} else {
		struct ieee80211_bss_conf *link_conf =
			iwl_mvm_rcu_fw_link_id_to_link_conf(mvm, id, true);

		if (!link_conf)
			goto out_unlock;

		notif_link_id = link_conf->link_id;
		vif = link_conf->vif;
	}

	if (!vif)
		goto out_unlock;

	mvmvif = iwl_mvm_vif_from_mac80211(vif);

	/* the ver > 2 guard keeps notif_link_id from being read when unset */
	if (WARN(ver > 2 && mvmvif->time_event_data.link_id >= 0 &&
		 mvmvif->time_event_data.link_id != notif_link_id,
		 "SESION_PROTECTION_NOTIF was received for link %u, while the current time event is on link %u\n",
		 notif_link_id, mvmvif->time_event_data.link_id))
		goto out_unlock;

	/* The vif is not a P2P_DEVICE, maintain its time_event_data */
	if (vif->type != NL80211_IFTYPE_P2P_DEVICE) {
		struct iwl_mvm_time_event_data *te_data =
			&mvmvif->time_event_data;

		if (!le32_to_cpu(notif->status)) {
			iwl_mvm_te_check_disconnect(mvm, vif,
						    "Session protection failure");
			spin_lock_bh(&mvm->time_event_lock);
			iwl_mvm_te_clear_data(mvm, te_data);
			spin_unlock_bh(&mvm->time_event_lock);
		}

		if (le32_to_cpu(notif->start)) {
			spin_lock_bh(&mvm->time_event_lock);
			te_data->running = le32_to_cpu(notif->start);
			te_data->end_jiffies =
				TU_TO_EXP_TIME(te_data->duration);
			spin_unlock_bh(&mvm->time_event_lock);
		} else {
			/*
			 * By now, we should have finished association
			 * and know the dtim period.
			 */
			iwl_mvm_te_check_disconnect(mvm, vif,
						    !vif->cfg.assoc ?
						    "Not associated and the session protection is over already..." :
						    "No beacon heard and the session protection is over already...");
			spin_lock_bh(&mvm->time_event_lock);
			iwl_mvm_te_clear_data(mvm, te_data);
			spin_unlock_bh(&mvm->time_event_lock);
		}

		goto out_unlock;
	}

	if (!le32_to_cpu(notif->status) || !le32_to_cpu(notif->start)) {
		/* End TE, notify mac80211 */
		mvmvif->time_event_data.id = SESSION_PROTECT_CONF_MAX_ID;
		mvmvif->time_event_data.link_id = -1;
		iwl_mvm_p2p_roc_finished(mvm);
		ieee80211_remain_on_channel_expired(mvm->hw);
	} else if (le32_to_cpu(notif->start)) {
		if (WARN_ON(mvmvif->time_event_data.id !=
				le32_to_cpu(notif->conf_id)))
			goto out_unlock;
		set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
		ieee80211_ready_on_channel(mvm->hw); /* Start TE */
	}

 out_unlock:
	rcu_read_unlock();
}
989
/*
 * Start a P2P-Device ROC using SESSION_PROTECTION_CMD (firmware with
 * IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD).
 *
 * Returns 0 on success or a negative errno.
 */
static int
iwl_mvm_start_p2p_roc_session_protection(struct iwl_mvm *mvm,
					 struct ieee80211_vif *vif,
					 int duration,
					 enum ieee80211_roc_type type)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_session_prot_cmd cmd = {
		/* P2P Device always uses link 0 here */
		.id_and_color =
			cpu_to_le32(iwl_mvm_get_session_prot_id(mvm, vif, 0)),
		.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
		.duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
	};

	lockdep_assert_held(&mvm->mutex);

	/* The time_event_data.id field is reused to save session
	 * protection's configuration.
	 */

	mvmvif->time_event_data.link_id = 0;

	switch (type) {
	case IEEE80211_ROC_TYPE_NORMAL:
		mvmvif->time_event_data.id =
			SESSION_PROTECT_CONF_P2P_DEVICE_DISCOV;
		break;
	case IEEE80211_ROC_TYPE_MGMT_TX:
		mvmvif->time_event_data.id =
			SESSION_PROTECT_CONF_P2P_GO_NEGOTIATION;
		break;
	default:
		WARN_ONCE(1, "Got an invalid ROC type\n");
		return -EINVAL;
	}

	cmd.conf_id = cpu_to_le32(mvmvif->time_event_data.id);
	return iwl_mvm_send_cmd_pdu(mvm,
				    WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
				    0, sizeof(cmd), &cmd);
}
1031
/*
 * Start a P2P remain-on-channel period of @duration milliseconds.
 *
 * If the firmware advertises SESSION_PROT_CMD support, the request is
 * delegated to iwl_mvm_start_p2p_roc_session_protection(); otherwise a
 * legacy time event is scheduled via TIME_EVENT_CMD.
 *
 * Returns 0 on success, -EBUSY if a ROC is already running on this vif,
 * or another negative error code.
 */
int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			  int duration, enum ieee80211_roc_type type)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
	struct iwl_time_event_cmd time_cmd = {};

	lockdep_assert_held(&mvm->mutex);
	if (te_data->running) {
		IWL_WARN(mvm, "P2P_DEVICE remain on channel already running\n");
		return -EBUSY;
	}

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
		return iwl_mvm_start_p2p_roc_session_protection(mvm, vif,
								duration,
								type);

	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));

	switch (type) {
	case IEEE80211_ROC_TYPE_NORMAL:
		time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_NORMAL);
		break;
	case IEEE80211_ROC_TYPE_MGMT_TX:
		time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_MGMT_TX);
		break;
	default:
		WARN_ONCE(1, "Got an invalid ROC type\n");
		return -EINVAL;
	}

	/* start as soon as possible, one shot */
	time_cmd.apply_time = cpu_to_le32(0);
	time_cmd.interval = cpu_to_le32(1);

	/*
	 * The P2P Device TEs can have lower priority than other events
	 * that are being scheduled by the driver/fw, and thus it might not be
	 * scheduled. To improve the chances of it being scheduled, allow them
	 * to be fragmented, and in addition allow them to be delayed.
	 */
	time_cmd.max_frags = min(MSEC_TO_TU(duration)/50, TE_V2_FRAG_ENDLESS);
	time_cmd.max_delay = cpu_to_le32(MSEC_TO_TU(duration/2));
	time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration));
	time_cmd.repeat = 1;
	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
				      TE_V2_NOTIF_HOST_EVENT_END |
				      TE_V2_START_IMMEDIATELY);

	return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}
1086
/*
 * Find the time event data of the currently active ROC, if any: first a
 * P2P-Device time event, then a pending AUX ROC entry.  Returns NULL when
 * no ROC time event exists.
 */
static struct iwl_mvm_time_event_data *iwl_mvm_get_roc_te(struct iwl_mvm *mvm)
{
	struct iwl_mvm_time_event_data *te_data;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->time_event_lock);

	/*
	 * Iterate over the list of time events and find the time event that is
	 * associated with a P2P_DEVICE interface.
	 * This assumes that a P2P_DEVICE interface can have only a single time
	 * event at any given time and this time event corresponds to a ROC
	 * request
	 */
	list_for_each_entry(te_data, &mvm->time_event_list, list) {
		if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE)
			goto out;
	}

	/* There can only be at most one AUX ROC time event, we just use the
	 * list to simplify/unify code. Remove it if it exists.
	 */
	te_data = list_first_entry_or_null(&mvm->aux_roc_te_list,
					   struct iwl_mvm_time_event_data,
					   list);
out:
	spin_unlock_bh(&mvm->time_event_lock);
	return te_data;
}
1117
1118void iwl_mvm_cleanup_roc_te(struct iwl_mvm *mvm)
1119{
1120 struct iwl_mvm_time_event_data *te_data;
1121 u32 uid;
1122
1123 te_data = iwl_mvm_get_roc_te(mvm);
1124 if (te_data)
1125 __iwl_mvm_remove_time_event(mvm, te_data, &uid);
1126}
1127
1128static void iwl_mvm_roc_rm_cmd(struct iwl_mvm *mvm, u32 activity)
1129{
1130 int ret;
1131 struct iwl_roc_req roc_cmd = {
1132 .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
1133 .activity = cpu_to_le32(activity),
1134 };
1135
1136 lockdep_assert_held(&mvm->mutex);
1137 ret = iwl_mvm_send_cmd_pdu(mvm,
1138 WIDE_ID(MAC_CONF_GROUP, ROC_CMD),
1139 0, sizeof(roc_cmd), &roc_cmd);
1140 WARN_ON(ret);
1141}
1142
1143static void iwl_mvm_roc_station_remove(struct iwl_mvm *mvm,
1144 struct iwl_mvm_vif *mvmvif)
1145{
1146 u32 cmd_id = WIDE_ID(MAC_CONF_GROUP, ROC_CMD);
1147 u8 fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
1148 IWL_FW_CMD_VER_UNKNOWN);
1149
1150 if (fw_ver == IWL_FW_CMD_VER_UNKNOWN)
1151 iwl_mvm_remove_aux_roc_te(mvm, mvmvif,
1152 &mvmvif->hs_time_event_data);
1153 else if (fw_ver == 3)
1154 iwl_mvm_roc_rm_cmd(mvm, ROC_ACTIVITY_HOTSPOT);
1155 else
1156 IWL_ERR(mvm, "ROC command version %d mismatch!\n", fw_ver);
1157}
1158
/*
 * Stop an active remain-on-channel period.
 *
 * The removal flow depends on firmware capability (session protection
 * vs. legacy time events) and on the vif type (P2P-Device vs. station
 * hot-spot / AUX ROC).  In every case the matching *_roc_finished()
 * helper is called to schedule the cleanup work.
 */
void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif;
	struct iwl_mvm_time_event_data *te_data;

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
		/* session-protection based firmware API */
		mvmvif = iwl_mvm_vif_from_mac80211(vif);

		if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
			iwl_mvm_cancel_session_protection(mvm, vif,
							  mvmvif->time_event_data.id,
							  mvmvif->time_event_data.link_id);
			iwl_mvm_p2p_roc_finished(mvm);
		} else {
			iwl_mvm_roc_station_remove(mvm, mvmvif);
			iwl_mvm_roc_finished(mvm);
		}

		return;
	}

	/* legacy time-event based flow */
	te_data = iwl_mvm_get_roc_te(mvm);
	if (!te_data) {
		IWL_WARN(mvm, "No remain on channel event\n");
		return;
	}

	mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);

	if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
		iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
		iwl_mvm_p2p_roc_finished(mvm);
	} else {
		iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data);
		iwl_mvm_roc_finished(mvm);
	}
}
1197
1198void iwl_mvm_remove_csa_period(struct iwl_mvm *mvm,
1199 struct ieee80211_vif *vif)
1200{
1201 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1202 struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
1203 u32 id;
1204
1205 lockdep_assert_held(&mvm->mutex);
1206
1207 spin_lock_bh(&mvm->time_event_lock);
1208 id = te_data->id;
1209 spin_unlock_bh(&mvm->time_event_lock);
1210
1211 if (id != TE_CHANNEL_SWITCH_PERIOD)
1212 return;
1213
1214 iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
1215}
1216
/*
 * Schedule a channel-switch (absence) time event.
 *
 * @duration: TE duration in TU.
 * @apply_time: GP2 time at which the TE should start; 0 means start
 *	immediately (TE_V2_START_IMMEDIATELY is then set).
 *
 * Returns 0 on success, -EBUSY if a CS period is already scheduled, or
 * a negative error from the time event add flow.
 */
int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
				struct ieee80211_vif *vif,
				u32 duration, u32 apply_time)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
	struct iwl_time_event_cmd time_cmd = {};

	lockdep_assert_held(&mvm->mutex);

	if (te_data->running) {
		u32 id;

		/* read the id under the lock; notif path can clear it */
		spin_lock_bh(&mvm->time_event_lock);
		id = te_data->id;
		spin_unlock_bh(&mvm->time_event_lock);

		if (id == TE_CHANNEL_SWITCH_PERIOD) {
			IWL_DEBUG_TE(mvm, "CS period is already scheduled\n");
			return -EBUSY;
		}

		/*
		 * Remove the session protection time event to allow the
		 * channel switch. If we got here, we just heard a beacon so
		 * the session protection is not needed anymore anyway.
		 */
		iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
	}

	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
	time_cmd.id = cpu_to_le32(TE_CHANNEL_SWITCH_PERIOD);
	time_cmd.apply_time = cpu_to_le32(apply_time);
	time_cmd.max_frags = TE_V2_FRAG_NONE;
	time_cmd.duration = cpu_to_le32(duration);
	time_cmd.repeat = 1;
	time_cmd.interval = cpu_to_le32(1);
	/* TE_V2_ABSENCE: we'll be off-channel during the switch */
	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
				      TE_V2_ABSENCE);
	if (!apply_time)
		time_cmd.policy |= cpu_to_le16(TE_V2_START_IMMEDIATELY);

	return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}
1263
1264static bool iwl_mvm_session_prot_notif(struct iwl_notif_wait_data *notif_wait,
1265 struct iwl_rx_packet *pkt, void *data)
1266{
1267 struct iwl_mvm *mvm =
1268 container_of(notif_wait, struct iwl_mvm, notif_wait);
1269 struct iwl_mvm_session_prot_notif *resp;
1270 int resp_len = iwl_rx_packet_payload_len(pkt);
1271
1272 if (WARN_ON(pkt->hdr.cmd != SESSION_PROTECTION_NOTIF ||
1273 pkt->hdr.group_id != MAC_CONF_GROUP))
1274 return true;
1275
1276 if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
1277 IWL_ERR(mvm, "Invalid SESSION_PROTECTION_NOTIF response\n");
1278 return true;
1279 }
1280
1281 resp = (void *)pkt->data;
1282
1283 if (!resp->status)
1284 IWL_ERR(mvm,
1285 "TIME_EVENT_NOTIFICATION received but not executed\n");
1286
1287 return true;
1288}
1289
/*
 * Schedule session protection (SESSION_PROTECTION_CMD) on the given link.
 *
 * If a protection is already running on this link and has at least
 * @min_duration TU left, nothing is done.  Otherwise a new protection
 * of @duration ms is requested.  When @wait_for_notif is set, the
 * function blocks (up to 100 TU) for SESSION_PROTECTION_NOTIF to confirm
 * the firmware actually started it.
 *
 * On any send failure the te_data state is rolled back.
 */
void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
					 struct ieee80211_vif *vif,
					 u32 duration, u32 min_duration,
					 bool wait_for_notif,
					 unsigned int link_id)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
	const u16 notif[] = { WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF) };
	struct iwl_notification_wait wait_notif;
	int mac_link_id = iwl_mvm_get_session_prot_id(mvm, vif, link_id);
	struct iwl_mvm_session_prot_cmd cmd = {
		.id_and_color = cpu_to_le32(mac_link_id),
		.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
		.conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC),
		.duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
	};

	/* a negative id means no valid session protection id for this link */
	if (mac_link_id < 0)
		return;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->time_event_lock);
	if (te_data->running && te_data->link_id == link_id &&
	    time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) {
		IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
			     jiffies_to_msecs(te_data->end_jiffies - jiffies));
		spin_unlock_bh(&mvm->time_event_lock);

		return;
	}

	iwl_mvm_te_clear_data(mvm, te_data);
	/*
	 * The time_event_data.id field is reused to save session
	 * protection's configuration.
	 */
	te_data->id = le32_to_cpu(cmd.conf_id);
	te_data->duration = le32_to_cpu(cmd.duration_tu);
	te_data->vif = vif;
	te_data->link_id = link_id;
	spin_unlock_bh(&mvm->time_event_lock);

	IWL_DEBUG_TE(mvm, "Add new session protection, duration %d TU\n",
		     le32_to_cpu(cmd.duration_tu));

	if (!wait_for_notif) {
		/* fire and forget: send the command and return */
		if (iwl_mvm_send_cmd_pdu(mvm,
					 WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
					 0, sizeof(cmd), &cmd)) {
			goto send_cmd_err;
		}

		return;
	}

	/* register the waiter before sending so the notif can't be missed */
	iwl_init_notification_wait(&mvm->notif_wait, &wait_notif,
				   notif, ARRAY_SIZE(notif),
				   iwl_mvm_session_prot_notif, NULL);

	if (iwl_mvm_send_cmd_pdu(mvm,
				 WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
				 0, sizeof(cmd), &cmd)) {
		iwl_remove_notification(&mvm->notif_wait, &wait_notif);
		goto send_cmd_err;
	} else if (iwl_wait_notification(&mvm->notif_wait, &wait_notif,
					 TU_TO_JIFFIES(100))) {
		IWL_ERR(mvm,
			"Failed to protect session until session protection\n");
	}
	return;

send_cmd_err:
	IWL_ERR(mvm,
		"Couldn't send the SESSION_PROTECTION_CMD\n");
	/* roll back the te_data state set above */
	spin_lock_bh(&mvm->time_event_lock);
	iwl_mvm_te_clear_data(mvm, te_data);
	spin_unlock_bh(&mvm->time_event_lock);
}
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2017 Intel Deutschland GmbH
11 * Copyright(c) 2018 - 2019 Intel Corporation
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of version 2 of the GNU General Public License as
15 * published by the Free Software Foundation.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * The full GNU General Public License is included in this distribution
23 * in the file called COPYING.
24 *
25 * Contact Information:
26 * Intel Linux Wireless <linuxwifi@intel.com>
27 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
28 *
29 * BSD LICENSE
30 *
31 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
32 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
33 * Copyright(c) 2017 Intel Deutschland GmbH
34 * Copyright(c) 2018 - 2019 Intel Corporation
35 * All rights reserved.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 *
41 * * Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * * Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in
45 * the documentation and/or other materials provided with the
46 * distribution.
47 * * Neither the name Intel Corporation nor the names of its
48 * contributors may be used to endorse or promote products derived
49 * from this software without specific prior written permission.
50 *
51 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
52 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
53 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
54 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
55 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
56 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
57 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
58 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
59 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
60 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
61 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
62 *
63 *****************************************************************************/
64
65#include <linux/jiffies.h>
66#include <net/mac80211.h>
67
68#include "fw/notif-wait.h"
69#include "iwl-trans.h"
70#include "fw-api.h"
71#include "time-event.h"
72#include "mvm.h"
73#include "iwl-io.h"
74#include "iwl-prph.h"
75
76/*
77 * For the high priority TE use a time event type that has similar priority to
78 * the FW's action scan priority.
79 */
80#define IWL_MVM_ROC_TE_TYPE_NORMAL TE_P2P_DEVICE_DISCOVERABLE
81#define IWL_MVM_ROC_TE_TYPE_MGMT_TX TE_P2P_CLIENT_ASSOC
82
83void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
84 struct iwl_mvm_time_event_data *te_data)
85{
86 lockdep_assert_held(&mvm->time_event_lock);
87
88 if (!te_data || !te_data->vif)
89 return;
90
91 list_del(&te_data->list);
92 te_data->running = false;
93 te_data->uid = 0;
94 te_data->id = TE_MAX;
95 te_data->vif = NULL;
96}
97
/*
 * Deferred cleanup after a ROC ended: clear the running bits, sync the
 * TX path, and flush the AUX / P2P-Device queues so leftover
 * offchannel frames don't leak into the next time event.
 */
void iwl_mvm_roc_done_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);

	/*
	 * Clear the ROC_RUNNING /ROC_AUX_RUNNING status bit.
	 * This will cause the TX path to drop offchannel transmissions.
	 * That would also be done by mac80211, but it is racy, in particular
	 * in the case that the time event actually completed in the firmware
	 * (which is handled in iwl_mvm_te_handle_notif).
	 */
	clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
	clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);

	/* make sure in-flight TX sees the cleared bits before flushing */
	synchronize_net();

	/*
	 * Flush the offchannel queue -- this is called when the time
	 * event finishes or is canceled, so that frames queued for it
	 * won't get stuck on the queue and be transmitted in the next
	 * time event.
	 * We have to send the command asynchronously since this cannot
	 * be under the mutex for locking reasons, but that's not an
	 * issue as it will have to complete before the next command is
	 * executed, and a new time event means a new command.
	 */
	iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true, CMD_ASYNC);

	/* Do the same for the P2P device queue (STA) */
	if (test_and_clear_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status)) {
		struct iwl_mvm_vif *mvmvif;

		/*
		 * NB: access to this pointer would be racy, but the flush bit
		 * can only be set when we had a P2P-Device VIF, and we have a
		 * flush of this work in iwl_mvm_prepare_mac_removal() so it's
		 * not really racy.
		 */

		if (!WARN_ON(!mvm->p2p_device_vif)) {
			mvmvif = iwl_mvm_vif_from_mac80211(mvm->p2p_device_vif);
			iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true,
					  CMD_ASYNC);
		}
	}
}
144
/* A ROC period ended (or was aborted): kick off the deferred cleanup. */
static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
{
	/*
	 * Of course, our status bit is just as racy as mac80211, so in
	 * addition, fire off the work struct which will drop all frames
	 * from the hardware queues that made it through the race. First
	 * it will of course synchronize the TX path to make sure that
	 * any *new* TX will be rejected.
	 */
	schedule_work(&mvm->roc_done_wk);
}
156
/*
 * Called when the CSA (channel switch announcement) NoA time event
 * starts on a GO/AP: if the CSA countdown is complete, finish the
 * switch in mac80211 and drop our csa_vif reference.
 */
static void iwl_mvm_csa_noa_start(struct iwl_mvm *mvm)
{
	struct ieee80211_vif *csa_vif;

	rcu_read_lock();

	csa_vif = rcu_dereference(mvm->csa_vif);
	if (!csa_vif || !csa_vif->csa_active)
		goto out_unlock;

	IWL_DEBUG_TE(mvm, "CSA NOA started\n");

	/*
	 * CSA NoA is started but we still have beacons to
	 * transmit on the current channel.
	 * So we just do nothing here and the switch
	 * will be performed on the last TBTT.
	 */
	if (!ieee80211_csa_is_complete(csa_vif)) {
		IWL_WARN(mvm, "CSA NOA started too early\n");
		goto out_unlock;
	}

	ieee80211_csa_finish(csa_vif);

	rcu_read_unlock();

	/* drop the reference; no further NoA handling for this CSA */
	RCU_INIT_POINTER(mvm->csa_vif, NULL);

	return;

out_unlock:
	rcu_read_unlock();
}
191
192static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
193 struct ieee80211_vif *vif,
194 const char *errmsg)
195{
196 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
197
198 if (vif->type != NL80211_IFTYPE_STATION)
199 return false;
200
201 if (!mvmvif->csa_bcn_pending && vif->bss_conf.assoc &&
202 vif->bss_conf.dtim_period)
203 return false;
204 if (errmsg)
205 IWL_ERR(mvm, "%s\n", errmsg);
206
207 iwl_mvm_connection_loss(mvm, vif, errmsg);
208 return true;
209}
210
/*
 * Handle the start notification of a channel-switch time event: drive
 * the CSA flow for AP (NoA start) or station (client absence +
 * chswitch done), then clear the time event data.
 */
static void
iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm,
			     struct iwl_mvm_time_event_data *te_data,
			     struct iwl_time_event_notif *notif)
{
	struct ieee80211_vif *vif = te_data->vif;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (!notif->status)
		IWL_DEBUG_TE(mvm, "CSA time event failed to start\n");

	switch (te_data->vif->type) {
	case NL80211_IFTYPE_AP:
		if (!notif->status)
			mvmvif->csa_failed = true;
		iwl_mvm_csa_noa_start(mvm);
		break;
	case NL80211_IFTYPE_STATION:
		if (!notif->status) {
			/* the TE never started: treat it as a lost connection */
			iwl_mvm_connection_loss(mvm, vif,
						"CSA TE failed to start");
			break;
		}
		iwl_mvm_csa_client_absent(mvm, te_data->vif);
		cancel_delayed_work(&mvmvif->csa_work);
		ieee80211_chswitch_done(te_data->vif, true);
		break;
	default:
		/* should never happen */
		WARN_ON_ONCE(1);
		break;
	}

	/* we don't need it anymore */
	iwl_mvm_te_clear_data(mvm, te_data);
}
247
/*
 * If a FW debug trigger is configured for time events, and this
 * notification matches one of its (id, action, status) filters, collect
 * firmware debug data.
 */
static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm,
				     struct iwl_time_event_notif *notif,
				     struct iwl_mvm_time_event_data *te_data)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_time_event *te_trig;
	int i;

	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt,
				     ieee80211_vif_to_wdev(te_data->vif),
				     FW_DBG_TRIGGER_TIME_EVENT);
	if (!trig)
		return;

	te_trig = (void *)trig->data;

	for (i = 0; i < ARRAY_SIZE(te_trig->time_events); i++) {
		u32 trig_te_id = le32_to_cpu(te_trig->time_events[i].id);
		u32 trig_action_bitmap =
			le32_to_cpu(te_trig->time_events[i].action_bitmap);
		u32 trig_status_bitmap =
			le32_to_cpu(te_trig->time_events[i].status_bitmap);

		/* skip entries that don't match this notification */
		if (trig_te_id != te_data->id ||
		    !(trig_action_bitmap & le32_to_cpu(notif->action)) ||
		    !(trig_status_bitmap & BIT(le32_to_cpu(notif->status))))
			continue;

		iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
					"Time event %d Action 0x%x received status: %d",
					te_data->id,
					le32_to_cpu(notif->action),
					le32_to_cpu(notif->status));
		break;
	}
}
284
285/*
286 * Handles a FW notification for an event that is known to the driver.
287 *
288 * @mvm: the mvm component
289 * @te_data: the time event data
290 * @notif: the notification data corresponding the time event data.
291 */
292static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
293 struct iwl_mvm_time_event_data *te_data,
294 struct iwl_time_event_notif *notif)
295{
296 lockdep_assert_held(&mvm->time_event_lock);
297
298 IWL_DEBUG_TE(mvm, "Handle time event notif - UID = 0x%x action %d\n",
299 le32_to_cpu(notif->unique_id),
300 le32_to_cpu(notif->action));
301
302 iwl_mvm_te_check_trigger(mvm, notif, te_data);
303
304 /*
305 * The FW sends the start/end time event notifications even for events
306 * that it fails to schedule. This is indicated in the status field of
307 * the notification. This happens in cases that the scheduler cannot
308 * find a schedule that can handle the event (for example requesting a
309 * P2P Device discoveribility, while there are other higher priority
310 * events in the system).
311 */
312 if (!le32_to_cpu(notif->status)) {
313 const char *msg;
314
315 if (notif->action & cpu_to_le32(TE_V2_NOTIF_HOST_EVENT_START))
316 msg = "Time Event start notification failure";
317 else
318 msg = "Time Event end notification failure";
319
320 IWL_DEBUG_TE(mvm, "%s\n", msg);
321
322 if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, msg)) {
323 iwl_mvm_te_clear_data(mvm, te_data);
324 return;
325 }
326 }
327
328 if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_END) {
329 IWL_DEBUG_TE(mvm,
330 "TE ended - current time %lu, estimated end %lu\n",
331 jiffies, te_data->end_jiffies);
332
333 switch (te_data->vif->type) {
334 case NL80211_IFTYPE_P2P_DEVICE:
335 ieee80211_remain_on_channel_expired(mvm->hw);
336 set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
337 iwl_mvm_roc_finished(mvm);
338 break;
339 case NL80211_IFTYPE_STATION:
340 /*
341 * By now, we should have finished association
342 * and know the dtim period.
343 */
344 iwl_mvm_te_check_disconnect(mvm, te_data->vif,
345 "No beacon heard and the time event is over already...");
346 break;
347 default:
348 break;
349 }
350
351 iwl_mvm_te_clear_data(mvm, te_data);
352 } else if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_START) {
353 te_data->running = true;
354 te_data->end_jiffies = TU_TO_EXP_TIME(te_data->duration);
355
356 if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
357 set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
358 ieee80211_ready_on_channel(mvm->hw);
359 } else if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) {
360 iwl_mvm_te_handle_notify_csa(mvm, te_data, notif);
361 }
362 } else {
363 IWL_WARN(mvm, "Got TE with unknown action\n");
364 }
365}
366
367/*
368 * Handle A Aux ROC time event
369 */
370static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
371 struct iwl_time_event_notif *notif)
372{
373 struct iwl_mvm_time_event_data *te_data, *tmp;
374 bool aux_roc_te = false;
375
376 list_for_each_entry_safe(te_data, tmp, &mvm->aux_roc_te_list, list) {
377 if (le32_to_cpu(notif->unique_id) == te_data->uid) {
378 aux_roc_te = true;
379 break;
380 }
381 }
382 if (!aux_roc_te) /* Not a Aux ROC time event */
383 return -EINVAL;
384
385 iwl_mvm_te_check_trigger(mvm, notif, te_data);
386
387 IWL_DEBUG_TE(mvm,
388 "Aux ROC time event notification - UID = 0x%x action %d (error = %d)\n",
389 le32_to_cpu(notif->unique_id),
390 le32_to_cpu(notif->action), le32_to_cpu(notif->status));
391
392 if (!le32_to_cpu(notif->status) ||
393 le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_END) {
394 /* End TE, notify mac80211 */
395 ieee80211_remain_on_channel_expired(mvm->hw);
396 iwl_mvm_roc_finished(mvm); /* flush aux queue */
397 list_del(&te_data->list); /* remove from list */
398 te_data->running = false;
399 te_data->vif = NULL;
400 te_data->uid = 0;
401 te_data->id = TE_MAX;
402 } else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) {
403 set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
404 te_data->running = true;
405 ieee80211_ready_on_channel(mvm->hw); /* Start TE */
406 } else {
407 IWL_DEBUG_TE(mvm,
408 "ERROR: Unknown Aux ROC Time Event (action = %d)\n",
409 le32_to_cpu(notif->action));
410 return -EINVAL;
411 }
412
413 return 0;
414}
415
416/*
417 * The Rx handler for time event notifications
418 */
419void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
420 struct iwl_rx_cmd_buffer *rxb)
421{
422 struct iwl_rx_packet *pkt = rxb_addr(rxb);
423 struct iwl_time_event_notif *notif = (void *)pkt->data;
424 struct iwl_mvm_time_event_data *te_data, *tmp;
425
426 IWL_DEBUG_TE(mvm, "Time event notification - UID = 0x%x action %d\n",
427 le32_to_cpu(notif->unique_id),
428 le32_to_cpu(notif->action));
429
430 spin_lock_bh(&mvm->time_event_lock);
431 /* This time event is triggered for Aux ROC request */
432 if (!iwl_mvm_aux_roc_te_handle_notif(mvm, notif))
433 goto unlock;
434
435 list_for_each_entry_safe(te_data, tmp, &mvm->time_event_list, list) {
436 if (le32_to_cpu(notif->unique_id) == te_data->uid)
437 iwl_mvm_te_handle_notif(mvm, te_data, notif);
438 }
439unlock:
440 spin_unlock_bh(&mvm->time_event_lock);
441}
442
/*
 * notif_wait callback for TIME_EVENT_NOTIFICATION: returns true (stop
 * waiting) once a notification for the expected time event uid arrives,
 * false to keep waiting for notifications of other events.
 */
static bool iwl_mvm_te_notif(struct iwl_notif_wait_data *notif_wait,
			     struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_mvm *mvm =
		container_of(notif_wait, struct iwl_mvm, notif_wait);
	struct iwl_mvm_time_event_data *te_data = data;
	struct iwl_time_event_notif *resp;
	int resp_len = iwl_rx_packet_payload_len(pkt);

	if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_NOTIFICATION))
		return true;

	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
		IWL_ERR(mvm, "Invalid TIME_EVENT_NOTIFICATION response\n");
		return true;
	}

	resp = (void *)pkt->data;

	/* te_data->uid is already set in the TIME_EVENT_CMD response */
	if (le32_to_cpu(resp->unique_id) != te_data->uid)
		return false;

	IWL_DEBUG_TE(mvm, "TIME_EVENT_NOTIFICATION response - UID = 0x%x\n",
		     te_data->uid);
	if (!resp->status)
		IWL_ERR(mvm,
			"TIME_EVENT_NOTIFICATION received but not executed\n");

	return true;
}
474
/*
 * notif_wait callback for the TIME_EVENT_CMD response: captures the
 * firmware-assigned unique_id into te_data->uid.  Returns true to stop
 * waiting once the matching response was processed.
 */
static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait,
					struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_mvm *mvm =
		container_of(notif_wait, struct iwl_mvm, notif_wait);
	struct iwl_mvm_time_event_data *te_data = data;
	struct iwl_time_event_resp *resp;
	int resp_len = iwl_rx_packet_payload_len(pkt);

	if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_CMD))
		return true;

	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
		IWL_ERR(mvm, "Invalid TIME_EVENT_CMD response\n");
		return true;
	}

	resp = (void *)pkt->data;

	/* we should never get a response to another TIME_EVENT_CMD here */
	if (WARN_ON_ONCE(le32_to_cpu(resp->id) != te_data->id))
		return false;

	te_data->uid = le32_to_cpu(resp->unique_id);
	IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
		     te_data->uid);
	return true;
}
503
/*
 * Register @te_data in the time event list and send TIME_EVENT_CMD.
 *
 * A notification wait is used around the command so the response (which
 * carries the firmware-assigned uid) is processed synchronously in the
 * RX path before any start/end notification can race with us.
 *
 * Returns 0 on success; on failure the te_data state is rolled back and
 * a negative error code is returned.
 */
static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct iwl_mvm_time_event_data *te_data,
				       struct iwl_time_event_cmd *te_cmd)
{
	static const u16 time_event_response[] = { TIME_EVENT_CMD };
	struct iwl_notification_wait wait_time_event;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_TE(mvm, "Add new TE, duration %d TU\n",
		     le32_to_cpu(te_cmd->duration));

	spin_lock_bh(&mvm->time_event_lock);
	if (WARN_ON(te_data->id != TE_MAX)) {
		/* te_data is already in use for another time event */
		spin_unlock_bh(&mvm->time_event_lock);
		return -EIO;
	}
	te_data->vif = vif;
	te_data->duration = le32_to_cpu(te_cmd->duration);
	te_data->id = le32_to_cpu(te_cmd->id);
	list_add_tail(&te_data->list, &mvm->time_event_list);
	spin_unlock_bh(&mvm->time_event_lock);

	/*
	 * Use a notification wait, which really just processes the
	 * command response and doesn't wait for anything, in order
	 * to be able to process the response and get the UID inside
	 * the RX path. Using CMD_WANT_SKB doesn't work because it
	 * stores the buffer and then wakes up this thread, by which
	 * time another notification (that the time event started)
	 * might already be processed unsuccessfully.
	 */
	iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
				   time_event_response,
				   ARRAY_SIZE(time_event_response),
				   iwl_mvm_time_event_response, te_data);

	ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
				   sizeof(*te_cmd), te_cmd);
	if (ret) {
		IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret);
		iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
		goto out_clear_te;
	}

	/* No need to wait for anything, so just pass 1 (0 isn't valid) */
	ret = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
	/* should never fail */
	WARN_ON_ONCE(ret);

	/*
	 * NB: out_clear_te lives inside this if so the same rollback runs
	 * both for a failed send (goto above) and a failed wait.
	 */
	if (ret) {
 out_clear_te:
		spin_lock_bh(&mvm->time_event_lock);
		iwl_mvm_te_clear_data(mvm, te_data);
		spin_unlock_bh(&mvm->time_event_lock);
	}
	return ret;
}
564
/*
 * Make sure the session (association/DTIM) is protected for at least
 * @min_duration TU; if the current time event is too short it is
 * replaced with a new one of @duration TU.
 *
 * @max_delay: maximum firmware scheduling delay, in TU.
 * @wait_for_notif: when true, block (up to @max_delay TU) for the
 *	TIME_EVENT_NOTIFICATION confirming the event started.
 */
void iwl_mvm_protect_session(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     u32 duration, u32 min_duration,
			     u32 max_delay, bool wait_for_notif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
	const u16 te_notif_response[] = { TIME_EVENT_NOTIFICATION };
	struct iwl_notification_wait wait_te_notif;
	struct iwl_time_event_cmd time_cmd = {};

	lockdep_assert_held(&mvm->mutex);

	if (te_data->running &&
	    time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) {
		IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
			     jiffies_to_msecs(te_data->end_jiffies - jiffies));
		return;
	}

	if (te_data->running) {
		IWL_DEBUG_TE(mvm, "extend 0x%x: only %u ms left\n",
			     te_data->uid,
			     jiffies_to_msecs(te_data->end_jiffies - jiffies));
		/*
		 * we don't have enough time
		 * cancel the current TE and issue a new one
		 * Of course it would be better to remove the old one only
		 * when the new one is added, but we don't care if we are off
		 * channel for a bit. All we need to do, is not to return
		 * before we actually begin to be on the channel.
		 */
		iwl_mvm_stop_session_protection(mvm, vif);
	}

	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
	time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC);

	time_cmd.apply_time = cpu_to_le32(0);

	time_cmd.max_frags = TE_V2_FRAG_NONE;
	time_cmd.max_delay = cpu_to_le32(max_delay);
	/* TODO: why do we need to interval = bi if it is not periodic? */
	time_cmd.interval = cpu_to_le32(1);
	time_cmd.duration = cpu_to_le32(duration);
	time_cmd.repeat = 1;
	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
				      TE_V2_NOTIF_HOST_EVENT_END |
				      TE_V2_START_IMMEDIATELY);

	if (!wait_for_notif) {
		iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
		return;
	}

	/*
	 * Create notification_wait for the TIME_EVENT_NOTIFICATION to use
	 * right after we send the time event
	 */
	iwl_init_notification_wait(&mvm->notif_wait, &wait_te_notif,
				   te_notif_response,
				   ARRAY_SIZE(te_notif_response),
				   iwl_mvm_te_notif, te_data);

	/* If TE was sent OK - wait for the notification that started */
	if (iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd)) {
		IWL_ERR(mvm, "Failed to add TE to protect session\n");
		iwl_remove_notification(&mvm->notif_wait, &wait_te_notif);
	} else if (iwl_wait_notification(&mvm->notif_wait, &wait_te_notif,
					 TU_TO_JIFFIES(max_delay))) {
		IWL_ERR(mvm, "Failed to protect session until TE\n");
	}
}
640
641static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
642 struct iwl_mvm_time_event_data *te_data,
643 u32 *uid)
644{
645 u32 id;
646
647 /*
648 * It is possible that by the time we got to this point the time
649 * event was already removed.
650 */
651 spin_lock_bh(&mvm->time_event_lock);
652
653 /* Save time event uid before clearing its data */
654 *uid = te_data->uid;
655 id = te_data->id;
656
657 /*
658 * The clear_data function handles time events that were already removed
659 */
660 iwl_mvm_te_clear_data(mvm, te_data);
661 spin_unlock_bh(&mvm->time_event_lock);
662
663 /*
664 * It is possible that by the time we try to remove it, the time event
665 * has already ended and removed. In such a case there is no need to
666 * send a removal command.
667 */
668 if (id == TE_MAX) {
669 IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", *uid);
670 return false;
671 }
672
673 return true;
674}
675
676/*
677 * Explicit request to remove a aux roc time event. The removal of a time
678 * event needs to be synchronized with the flow of a time event's end
679 * notification, which also removes the time event from the op mode
680 * data structures.
681 */
682static void iwl_mvm_remove_aux_roc_te(struct iwl_mvm *mvm,
683 struct iwl_mvm_vif *mvmvif,
684 struct iwl_mvm_time_event_data *te_data)
685{
686 struct iwl_hs20_roc_req aux_cmd = {};
687 u16 len = sizeof(aux_cmd) - iwl_mvm_chan_info_padding(mvm);
688
689 u32 uid;
690 int ret;
691
692 if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
693 return;
694
695 aux_cmd.event_unique_id = cpu_to_le32(uid);
696 aux_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
697 aux_cmd.id_and_color =
698 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
699 IWL_DEBUG_TE(mvm, "Removing BSS AUX ROC TE 0x%x\n",
700 le32_to_cpu(aux_cmd.event_unique_id));
701 ret = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0,
702 len, &aux_cmd);
703
704 if (WARN_ON(ret))
705 return;
706}
707
708/*
709 * Explicit request to remove a time event. The removal of a time event needs to
710 * be synchronized with the flow of a time event's end notification, which also
711 * removes the time event from the op mode data structures.
712 */
713void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
714 struct iwl_mvm_vif *mvmvif,
715 struct iwl_mvm_time_event_data *te_data)
716{
717 struct iwl_time_event_cmd time_cmd = {};
718 u32 uid;
719 int ret;
720
721 if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
722 return;
723
724 /* When we remove a TE, the UID is to be set in the id field */
725 time_cmd.id = cpu_to_le32(uid);
726 time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
727 time_cmd.id_and_color =
728 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
729
730 IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id));
731 ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
732 sizeof(time_cmd), &time_cmd);
733 if (WARN_ON(ret))
734 return;
735}
736
737/*
738 * When the firmware supports the session protection API,
739 * this is not needed since it'll automatically remove the
740 * session protection after association + beacon reception.
741 */
742void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
743 struct ieee80211_vif *vif)
744{
745 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
746 struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
747 u32 id;
748
749 lockdep_assert_held(&mvm->mutex);
750
751 spin_lock_bh(&mvm->time_event_lock);
752 id = te_data->id;
753 spin_unlock_bh(&mvm->time_event_lock);
754
755 if (id != TE_BSS_STA_AGGRESSIVE_ASSOC) {
756 IWL_DEBUG_TE(mvm,
757 "don't remove TE with id=%u (not session protection)\n",
758 id);
759 return;
760 }
761
762 iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
763}
764
/*
 * Handle the SESSION_PROTECTION_NOTIF from the firmware: update the
 * time event state for regular interfaces, or forward ROC start/end
 * events to mac80211 for P2P_DEVICE interfaces.
 */
void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
				      struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_session_prot_notif *notif = (void *)pkt->data;
	struct ieee80211_vif *vif;

	/* the vif lookup is RCU-protected; hold the lock while using it */
	rcu_read_lock();
	vif = iwl_mvm_rcu_dereference_vif_id(mvm, le32_to_cpu(notif->mac_id),
					     true);

	/* the interface may already have been removed */
	if (!vif)
		goto out_unlock;

	/* The vif is not a P2P_DEVICE, maintain its time_event_data */
	if (vif->type != NL80211_IFTYPE_P2P_DEVICE) {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
		struct iwl_mvm_time_event_data *te_data =
			&mvmvif->time_event_data;

		/* fw could not schedule the protection: drop our state */
		if (!le32_to_cpu(notif->status)) {
			iwl_mvm_te_check_disconnect(mvm, vif,
						    "Session protection failure");
			spin_lock_bh(&mvm->time_event_lock);
			iwl_mvm_te_clear_data(mvm, te_data);
			spin_unlock_bh(&mvm->time_event_lock);
		}

		if (le32_to_cpu(notif->start)) {
			/* protection started: track when it will expire */
			spin_lock_bh(&mvm->time_event_lock);
			te_data->running = le32_to_cpu(notif->start);
			te_data->end_jiffies =
				TU_TO_EXP_TIME(te_data->duration);
			spin_unlock_bh(&mvm->time_event_lock);
		} else {
			/*
			 * By now, we should have finished association
			 * and know the dtim period.
			 */
			iwl_mvm_te_check_disconnect(mvm, vif,
						    "No beacon heard and the session protection is over already...");
			spin_lock_bh(&mvm->time_event_lock);
			iwl_mvm_te_clear_data(mvm, te_data);
			spin_unlock_bh(&mvm->time_event_lock);
		}

		goto out_unlock;
	}

	/* P2P_DEVICE: translate the notification into ROC events */
	if (!le32_to_cpu(notif->status) || !le32_to_cpu(notif->start)) {
		/* End TE, notify mac80211 */
		ieee80211_remain_on_channel_expired(mvm->hw);
		/* have the ROC done worker flush the off-channel queue */
		set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
		iwl_mvm_roc_finished(mvm);
	} else if (le32_to_cpu(notif->start)) {
		set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
		ieee80211_ready_on_channel(mvm->hw); /* Start TE */
	}

 out_unlock:
	rcu_read_unlock();
}
827
828static int
829iwl_mvm_start_p2p_roc_session_protection(struct iwl_mvm *mvm,
830 struct ieee80211_vif *vif,
831 int duration,
832 enum ieee80211_roc_type type)
833{
834 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
835 struct iwl_mvm_session_prot_cmd cmd = {
836 .id_and_color =
837 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
838 mvmvif->color)),
839 .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
840 .duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
841 };
842
843 lockdep_assert_held(&mvm->mutex);
844
845 switch (type) {
846 case IEEE80211_ROC_TYPE_NORMAL:
847 cmd.conf_id =
848 cpu_to_le32(SESSION_PROTECT_CONF_P2P_DEVICE_DISCOV);
849 break;
850 case IEEE80211_ROC_TYPE_MGMT_TX:
851 cmd.conf_id =
852 cpu_to_le32(SESSION_PROTECT_CONF_P2P_GO_NEGOTIATION);
853 break;
854 default:
855 WARN_ONCE(1, "Got an invalid ROC type\n");
856 return -EINVAL;
857 }
858
859 return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
860 MAC_CONF_GROUP, 0),
861 0, sizeof(cmd), &cmd);
862}
863
864int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
865 int duration, enum ieee80211_roc_type type)
866{
867 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
868 struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
869 struct iwl_time_event_cmd time_cmd = {};
870
871 lockdep_assert_held(&mvm->mutex);
872 if (te_data->running) {
873 IWL_WARN(mvm, "P2P_DEVICE remain on channel already running\n");
874 return -EBUSY;
875 }
876
877 if (fw_has_capa(&mvm->fw->ucode_capa,
878 IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
879 return iwl_mvm_start_p2p_roc_session_protection(mvm, vif,
880 duration,
881 type);
882
883 time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
884 time_cmd.id_and_color =
885 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
886
887 switch (type) {
888 case IEEE80211_ROC_TYPE_NORMAL:
889 time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_NORMAL);
890 break;
891 case IEEE80211_ROC_TYPE_MGMT_TX:
892 time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_MGMT_TX);
893 break;
894 default:
895 WARN_ONCE(1, "Got an invalid ROC type\n");
896 return -EINVAL;
897 }
898
899 time_cmd.apply_time = cpu_to_le32(0);
900 time_cmd.interval = cpu_to_le32(1);
901
902 /*
903 * The P2P Device TEs can have lower priority than other events
904 * that are being scheduled by the driver/fw, and thus it might not be
905 * scheduled. To improve the chances of it being scheduled, allow them
906 * to be fragmented, and in addition allow them to be delayed.
907 */
908 time_cmd.max_frags = min(MSEC_TO_TU(duration)/50, TE_V2_FRAG_ENDLESS);
909 time_cmd.max_delay = cpu_to_le32(MSEC_TO_TU(duration/2));
910 time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration));
911 time_cmd.repeat = 1;
912 time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
913 TE_V2_NOTIF_HOST_EVENT_END |
914 TE_V2_START_IMMEDIATELY);
915
916 return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
917}
918
919static struct iwl_mvm_time_event_data *iwl_mvm_get_roc_te(struct iwl_mvm *mvm)
920{
921 struct iwl_mvm_time_event_data *te_data;
922
923 lockdep_assert_held(&mvm->mutex);
924
925 spin_lock_bh(&mvm->time_event_lock);
926
927 /*
928 * Iterate over the list of time events and find the time event that is
929 * associated with a P2P_DEVICE interface.
930 * This assumes that a P2P_DEVICE interface can have only a single time
931 * event at any given time and this time event coresponds to a ROC
932 * request
933 */
934 list_for_each_entry(te_data, &mvm->time_event_list, list) {
935 if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE)
936 goto out;
937 }
938
939 /* There can only be at most one AUX ROC time event, we just use the
940 * list to simplify/unify code. Remove it if it exists.
941 */
942 te_data = list_first_entry_or_null(&mvm->aux_roc_te_list,
943 struct iwl_mvm_time_event_data,
944 list);
945out:
946 spin_unlock_bh(&mvm->time_event_lock);
947 return te_data;
948}
949
950void iwl_mvm_cleanup_roc_te(struct iwl_mvm *mvm)
951{
952 struct iwl_mvm_time_event_data *te_data;
953 u32 uid;
954
955 te_data = iwl_mvm_get_roc_te(mvm);
956 if (te_data)
957 __iwl_mvm_remove_time_event(mvm, te_data, &uid);
958}
959
960static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
961 struct iwl_mvm_vif *mvmvif)
962{
963 struct iwl_mvm_session_prot_cmd cmd = {
964 .id_and_color =
965 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
966 mvmvif->color)),
967 .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
968 };
969 int ret;
970
971 ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
972 MAC_CONF_GROUP, 0),
973 0, sizeof(cmd), &cmd);
974 if (ret)
975 IWL_ERR(mvm,
976 "Couldn't send the SESSION_PROTECTION_CMD: %d\n", ret);
977}
978
/*
 * Stop a remain-on-channel session, either through the fw session
 * protection API (newer fw) or by removing its time event (older fw).
 */
void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif;
	struct iwl_mvm_time_event_data *te_data;

	/* newer fw: cancel directly, no time event bookkeeping involved */
	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
		mvmvif = iwl_mvm_vif_from_mac80211(vif);

		iwl_mvm_cancel_session_protection(mvm, mvmvif);

		/* have the ROC done worker flush the off-channel queue */
		if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
			set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);

		iwl_mvm_roc_finished(mvm);

		return;
	}

	te_data = iwl_mvm_get_roc_te(mvm);
	if (!te_data) {
		IWL_WARN(mvm, "No remain on channel event\n");
		return;
	}

	mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);

	/* P2P device ROCs use regular TEs; other vifs use AUX ROC TEs */
	if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
		iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
		/* have the ROC done worker flush the off-channel queue */
		set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
	} else {
		iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data);
	}

	iwl_mvm_roc_finished(mvm);
}
1015
1016int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
1017 struct ieee80211_vif *vif,
1018 u32 duration, u32 apply_time)
1019{
1020 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1021 struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
1022 struct iwl_time_event_cmd time_cmd = {};
1023
1024 lockdep_assert_held(&mvm->mutex);
1025
1026 if (te_data->running) {
1027 u32 id;
1028
1029 spin_lock_bh(&mvm->time_event_lock);
1030 id = te_data->id;
1031 spin_unlock_bh(&mvm->time_event_lock);
1032
1033 if (id == TE_CHANNEL_SWITCH_PERIOD) {
1034 IWL_DEBUG_TE(mvm, "CS period is already scheduled\n");
1035 return -EBUSY;
1036 }
1037
1038 /*
1039 * Remove the session protection time event to allow the
1040 * channel switch. If we got here, we just heard a beacon so
1041 * the session protection is not needed anymore anyway.
1042 */
1043 iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
1044 }
1045
1046 time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
1047 time_cmd.id_and_color =
1048 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
1049 time_cmd.id = cpu_to_le32(TE_CHANNEL_SWITCH_PERIOD);
1050 time_cmd.apply_time = cpu_to_le32(apply_time);
1051 time_cmd.max_frags = TE_V2_FRAG_NONE;
1052 time_cmd.duration = cpu_to_le32(duration);
1053 time_cmd.repeat = 1;
1054 time_cmd.interval = cpu_to_le32(1);
1055 time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
1056 TE_V2_ABSENCE);
1057 if (!apply_time)
1058 time_cmd.policy |= cpu_to_le16(TE_V2_START_IMMEDIATELY);
1059
1060 return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
1061}
1062
1063static bool iwl_mvm_session_prot_notif(struct iwl_notif_wait_data *notif_wait,
1064 struct iwl_rx_packet *pkt, void *data)
1065{
1066 struct iwl_mvm *mvm =
1067 container_of(notif_wait, struct iwl_mvm, notif_wait);
1068 struct iwl_mvm_session_prot_notif *resp;
1069 int resp_len = iwl_rx_packet_payload_len(pkt);
1070
1071 if (WARN_ON(pkt->hdr.cmd != SESSION_PROTECTION_NOTIF ||
1072 pkt->hdr.group_id != MAC_CONF_GROUP))
1073 return true;
1074
1075 if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
1076 IWL_ERR(mvm, "Invalid SESSION_PROTECTION_NOTIF response\n");
1077 return true;
1078 }
1079
1080 resp = (void *)pkt->data;
1081
1082 if (!resp->status)
1083 IWL_ERR(mvm,
1084 "TIME_EVENT_NOTIFICATION received but not executed\n");
1085
1086 return true;
1087}
1088
/*
 * Schedule a session protection for the given interface via the fw
 * session protection API, optionally waiting for the notification that
 * confirms the protection actually started.
 */
void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
					 struct ieee80211_vif *vif,
					 u32 duration, u32 min_duration,
					 bool wait_for_notif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
	const u16 notif[] = { iwl_cmd_id(SESSION_PROTECTION_NOTIF,
					 MAC_CONF_GROUP, 0) };
	struct iwl_notification_wait wait_notif;
	struct iwl_mvm_session_prot_cmd cmd = {
		.id_and_color =
			cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
							mvmvif->color)),
		.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
		.conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC),
		.duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
	};

	lockdep_assert_held(&mvm->mutex);

	/*
	 * If a protection is already running and covers at least
	 * min_duration, keep it and don't issue a new one.
	 */
	spin_lock_bh(&mvm->time_event_lock);
	if (te_data->running &&
	    time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) {
		IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
			     jiffies_to_msecs(te_data->end_jiffies - jiffies));
		spin_unlock_bh(&mvm->time_event_lock);

		return;
	}

	/* reset the event data and record the requested duration in TU */
	iwl_mvm_te_clear_data(mvm, te_data);
	te_data->duration = le32_to_cpu(cmd.duration_tu);
	spin_unlock_bh(&mvm->time_event_lock);

	IWL_DEBUG_TE(mvm, "Add new session protection, duration %d TU\n",
		     le32_to_cpu(cmd.duration_tu));

	if (!wait_for_notif) {
		/* fire and forget; roll back the local state on failure */
		if (iwl_mvm_send_cmd_pdu(mvm,
					 iwl_cmd_id(SESSION_PROTECTION_CMD,
						    MAC_CONF_GROUP, 0),
					 0, sizeof(cmd), &cmd)) {
			IWL_ERR(mvm,
				"Couldn't send the SESSION_PROTECTION_CMD\n");
			spin_lock_bh(&mvm->time_event_lock);
			iwl_mvm_te_clear_data(mvm, te_data);
			spin_unlock_bh(&mvm->time_event_lock);
		}

		return;
	}

	/* register the wait entry before sending to avoid missing the notif */
	iwl_init_notification_wait(&mvm->notif_wait, &wait_notif,
				   notif, ARRAY_SIZE(notif),
				   iwl_mvm_session_prot_notif, NULL);

	if (iwl_mvm_send_cmd_pdu(mvm,
				 iwl_cmd_id(SESSION_PROTECTION_CMD,
					    MAC_CONF_GROUP, 0),
				 0, sizeof(cmd), &cmd)) {
		IWL_ERR(mvm,
			"Couldn't send the SESSION_PROTECTION_CMD\n");
		/* the command failed, so the notification will never come */
		iwl_remove_notification(&mvm->notif_wait, &wait_notif);
	} else if (iwl_wait_notification(&mvm->notif_wait, &wait_notif,
					 TU_TO_JIFFIES(100))) {
		IWL_ERR(mvm,
			"Failed to protect session until session protection\n");
	}
}