1// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2/*
3 * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
5 * Copyright (C) 2017 Intel Deutschland GmbH
6 */
7#include <linux/jiffies.h>
8#include <net/mac80211.h>
9
10#include "fw/notif-wait.h"
11#include "iwl-trans.h"
12#include "fw-api.h"
13#include "time-event.h"
14#include "mvm.h"
15#include "iwl-io.h"
16#include "iwl-prph.h"
17
18/*
19 * For the high priority TE use a time event type that has similar priority to
20 * the FW's action scan priority.
21 */
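/*
 * Note: these legacy TE types are only used with the TIME_EVENT_CMD flow;
 * iwl_mvm_start_p2p_roc() maps IEEE80211_ROC_TYPE_NORMAL to
 * IWL_MVM_ROC_TE_TYPE_NORMAL and IEEE80211_ROC_TYPE_MGMT_TX to
 * IWL_MVM_ROC_TE_TYPE_MGMT_TX when the FW does not advertise
 * IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD.
 */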
22#define IWL_MVM_ROC_TE_TYPE_NORMAL TE_P2P_DEVICE_DISCOVERABLE
23#define IWL_MVM_ROC_TE_TYPE_MGMT_TX TE_P2P_CLIENT_ASSOC
24
25void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
26 struct iwl_mvm_time_event_data *te_data)
27{
28 lockdep_assert_held(&mvm->time_event_lock);
29
30 if (!te_data || !te_data->vif)
31 return;
32
33 list_del(&te_data->list);
34
35 /*
36 * the list is only used for AUX ROC events so make sure it is always
37 * initialized
38 */
39 INIT_LIST_HEAD(&te_data->list);
40
41 te_data->running = false;
42 te_data->uid = 0;
43 te_data->id = TE_MAX;
44 te_data->vif = NULL;
45 te_data->link_id = -1;
46}
47
48void iwl_mvm_roc_done_wk(struct work_struct *wk)
49{
50 struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);
51
52 /*
53 * Clear the ROC_RUNNING status bit.
54 * This will cause the TX path to drop offchannel transmissions.
55 * That would also be done by mac80211, but it is racy, in particular
56 * in the case that the time event actually completed in the firmware
57 * (which is handled in iwl_mvm_te_handle_notif).
58 */
59 clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
60
61 synchronize_net();
62
63 /*
64 * Flush the offchannel queue -- this is called when the time
65 * event finishes or is canceled, so that frames queued for it
66 * won't get stuck on the queue and be transmitted in the next
67 * time event.
68 */
69
70 mutex_lock(&mvm->mutex);
71 if (test_and_clear_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status)) {
72 struct iwl_mvm_vif *mvmvif;
73
74 /*
75 * NB: access to this pointer would be racy, but the flush bit
76 * can only be set when we had a P2P-Device VIF, and we have a
77 * flush of this work in iwl_mvm_prepare_mac_removal() so it's
78 * not really racy.
79 */
80
81 if (!WARN_ON(!mvm->p2p_device_vif)) {
82 struct ieee80211_vif *vif = mvm->p2p_device_vif;
83
84 mvmvif = iwl_mvm_vif_from_mac80211(vif);
85 iwl_mvm_flush_sta(mvm, mvmvif->deflink.bcast_sta.sta_id,
86 mvmvif->deflink.bcast_sta.tfd_queue_msk);
87
88 if (mvm->mld_api_is_used) {
89 iwl_mvm_mld_rm_bcast_sta(mvm, vif,
90 &vif->bss_conf);
91
92 iwl_mvm_link_changed(mvm, vif, &vif->bss_conf,
93 LINK_CONTEXT_MODIFY_ACTIVE,
94 false);
95 } else {
96 iwl_mvm_rm_p2p_bcast_sta(mvm, vif);
97 iwl_mvm_binding_remove_vif(mvm, vif);
98 }
99
100 /* Do not remove the PHY context as removing and adding
101 * a PHY context has timing overheads. Leaving it
102 * configured in FW would be useful in case the next ROC
103 * is with the same channel.
104 */
105 }
106 }
107
108 /*
109 * Clear the ROC_AUX_RUNNING status bit.
110 * This will cause the TX path to drop offchannel transmissions.
111 * That would also be done by mac80211, but it is racy, in particular
112 * in the case that the time event actually completed in the firmware
113 * (which is handled in iwl_mvm_te_handle_notif).
114 */
115 if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) {
116 /* do the same in case of hot spot 2.0 */
117 iwl_mvm_flush_sta(mvm, mvm->aux_sta.sta_id,
118 mvm->aux_sta.tfd_queue_msk);
119
120 if (mvm->mld_api_is_used) {
121 iwl_mvm_mld_rm_aux_sta(mvm);
122 goto out_unlock;
123 }
124
125 /* In newer versions of this command an AUX station is added only
126 * when a dedicated TX queue is used, and it needs to be removed
127 * at the end of its use */
128 if (iwl_mvm_has_new_station_api(mvm->fw))
129 iwl_mvm_rm_aux_sta(mvm);
130 }
131
132out_unlock:
133 mutex_unlock(&mvm->mutex);
134}
135
136static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
137{
138 /*
139 * Of course, our status bit is just as racy as mac80211, so in
140 * addition, fire off the work struct which will drop all frames
141 * from the hardware queues that made it through the race. First
142 * it will of course synchronize the TX path to make sure that
143 * any *new* TX will be rejected.
144 */
145 schedule_work(&mvm->roc_done_wk);
146}
147
148static void iwl_mvm_csa_noa_start(struct iwl_mvm *mvm)
149{
150 struct ieee80211_vif *csa_vif;
151
152 rcu_read_lock();
153
154 csa_vif = rcu_dereference(mvm->csa_vif);
155 if (!csa_vif || !csa_vif->bss_conf.csa_active)
156 goto out_unlock;
157
158 IWL_DEBUG_TE(mvm, "CSA NOA started\n");
159
160 /*
161 * CSA NoA is started but we still have beacons to
162 * transmit on the current channel.
163 * So we just do nothing here and the switch
164 * will be performed on the last TBTT.
165 */
166 if (!ieee80211_beacon_cntdwn_is_complete(csa_vif)) {
167 IWL_WARN(mvm, "CSA NOA started too early\n");
168 goto out_unlock;
169 }
170
171 ieee80211_csa_finish(csa_vif);
172
173 rcu_read_unlock();
174
175 RCU_INIT_POINTER(mvm->csa_vif, NULL);
176
177 return;
178
179out_unlock:
180 rcu_read_unlock();
181}
182
183static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
184 struct ieee80211_vif *vif,
185 const char *errmsg)
186{
187 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
188
189 if (vif->type != NL80211_IFTYPE_STATION)
190 return false;
191
192 if (!mvmvif->csa_bcn_pending && vif->cfg.assoc &&
193 vif->bss_conf.dtim_period)
194 return false;
195 if (errmsg)
196 IWL_ERR(mvm, "%s\n", errmsg);
197
198 if (mvmvif->csa_bcn_pending) {
199 struct iwl_mvm_sta *mvmsta;
200
201 rcu_read_lock();
202 mvmsta = iwl_mvm_sta_from_staid_rcu(mvm,
203 mvmvif->deflink.ap_sta_id);
204 if (!WARN_ON(!mvmsta))
205 iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);
206 rcu_read_unlock();
207 }
208
209 if (vif->cfg.assoc) {
210 /*
211 * When not associated, this will be called from
212 * iwl_mvm_event_mlme_callback_ini()
213 */
214 iwl_dbg_tlv_time_point(&mvm->fwrt,
215 IWL_FW_INI_TIME_POINT_ASSOC_FAILED,
216 NULL);
217 }
218
219 iwl_mvm_connection_loss(mvm, vif, errmsg);
220 return true;
221}
222
223static void
224iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm,
225 struct iwl_mvm_time_event_data *te_data,
226 struct iwl_time_event_notif *notif)
227{
228 struct ieee80211_vif *vif = te_data->vif;
229 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
230
231 if (!notif->status)
232 IWL_DEBUG_TE(mvm, "CSA time event failed to start\n");
233
234 switch (te_data->vif->type) {
235 case NL80211_IFTYPE_AP:
236 if (!notif->status)
237 mvmvif->csa_failed = true;
238 iwl_mvm_csa_noa_start(mvm);
239 break;
240 case NL80211_IFTYPE_STATION:
241 if (!notif->status) {
242 iwl_mvm_connection_loss(mvm, vif,
243 "CSA TE failed to start");
244 break;
245 }
246 iwl_mvm_csa_client_absent(mvm, te_data->vif);
247 cancel_delayed_work(&mvmvif->csa_work);
248 ieee80211_chswitch_done(te_data->vif, true, 0);
249 break;
250 default:
251 /* should never happen */
252 WARN_ON_ONCE(1);
253 break;
254 }
255
256 /* we don't need it anymore */
257 iwl_mvm_te_clear_data(mvm, te_data);
258}
259
260static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm,
261 struct iwl_time_event_notif *notif,
262 struct iwl_mvm_time_event_data *te_data)
263{
264 struct iwl_fw_dbg_trigger_tlv *trig;
265 struct iwl_fw_dbg_trigger_time_event *te_trig;
266 int i;
267
268 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt,
269 ieee80211_vif_to_wdev(te_data->vif),
270 FW_DBG_TRIGGER_TIME_EVENT);
271 if (!trig)
272 return;
273
274 te_trig = (void *)trig->data;
275
276 for (i = 0; i < ARRAY_SIZE(te_trig->time_events); i++) {
277 u32 trig_te_id = le32_to_cpu(te_trig->time_events[i].id);
278 u32 trig_action_bitmap =
279 le32_to_cpu(te_trig->time_events[i].action_bitmap);
280 u32 trig_status_bitmap =
281 le32_to_cpu(te_trig->time_events[i].status_bitmap);
282
283 if (trig_te_id != te_data->id ||
284 !(trig_action_bitmap & le32_to_cpu(notif->action)) ||
285 !(trig_status_bitmap & BIT(le32_to_cpu(notif->status))))
286 continue;
287
288 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
289 "Time event %d Action 0x%x received status: %d",
290 te_data->id,
291 le32_to_cpu(notif->action),
292 le32_to_cpu(notif->status));
293 break;
294 }
295}
296
297static void iwl_mvm_p2p_roc_finished(struct iwl_mvm *mvm)
298{
299 /*
300 * If the IWL_MVM_STATUS_NEED_FLUSH_P2P is already set, then the
301 * roc_done_wk is already scheduled or running, so don't schedule it
302 * again to avoid a race where the roc_done_wk clears this bit after
303 * it is set here, affecting the next run of the roc_done_wk.
304 */
305 if (!test_and_set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status))
306 iwl_mvm_roc_finished(mvm);
307}
308
309/*
310 * Handles a FW notification for an event that is known to the driver.
311 *
312 * @mvm: the mvm component
313 * @te_data: the time event data
314 * @notif: the notification data corresponding to the time event data.
315 */
316static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
317 struct iwl_mvm_time_event_data *te_data,
318 struct iwl_time_event_notif *notif)
319{
320 lockdep_assert_held(&mvm->time_event_lock);
321
322 IWL_DEBUG_TE(mvm, "Handle time event notif - UID = 0x%x action %d\n",
323 le32_to_cpu(notif->unique_id),
324 le32_to_cpu(notif->action));
325
326 iwl_mvm_te_check_trigger(mvm, notif, te_data);
327
328 /*
329 * The FW sends the start/end time event notifications even for events
330 * that it fails to schedule. This is indicated in the status field of
331 * the notification. This happens when the scheduler cannot
332 * find a schedule that can handle the event (for example, requesting
333 * P2P Device discoverability while there are other higher priority
334 * events in the system).
335 */
336 if (!le32_to_cpu(notif->status)) {
337 const char *msg;
338
339 if (notif->action & cpu_to_le32(TE_V2_NOTIF_HOST_EVENT_START))
340 msg = "Time Event start notification failure";
341 else
342 msg = "Time Event end notification failure";
343
344 IWL_DEBUG_TE(mvm, "%s\n", msg);
345
346 if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, msg)) {
347 iwl_mvm_te_clear_data(mvm, te_data);
348 return;
349 }
350 }
351
352 if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_END) {
353 IWL_DEBUG_TE(mvm,
354 "TE ended - current time %lu, estimated end %lu\n",
355 jiffies, te_data->end_jiffies);
356
357 switch (te_data->vif->type) {
358 case NL80211_IFTYPE_P2P_DEVICE:
359 ieee80211_remain_on_channel_expired(mvm->hw);
360 iwl_mvm_p2p_roc_finished(mvm);
361 break;
362 case NL80211_IFTYPE_STATION:
363 /*
364 * If we are switching channel, don't disconnect
365 * if the time event is already done. Beacons can
366 * be delayed a bit after the switch.
367 */
368 if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) {
369 IWL_DEBUG_TE(mvm,
370 "No beacon heard and the CS time event is over, don't disconnect\n");
371 break;
372 }
373
374 /*
375 * By now, we should have finished association
376 * and know the dtim period.
377 */
378 iwl_mvm_te_check_disconnect(mvm, te_data->vif,
379 !te_data->vif->cfg.assoc ?
380 "Not associated and the time event is over already..." :
381 "No beacon heard and the time event is over already...");
382 break;
383 default:
384 break;
385 }
386
387 iwl_mvm_te_clear_data(mvm, te_data);
388 } else if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_START) {
389 te_data->running = true;
390 te_data->end_jiffies = TU_TO_EXP_TIME(te_data->duration);
391
392 if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
393 set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
394 ieee80211_ready_on_channel(mvm->hw);
395 } else if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) {
396 iwl_mvm_te_handle_notify_csa(mvm, te_data, notif);
397 }
398 } else {
399 IWL_WARN(mvm, "Got TE with unknown action\n");
400 }
401}
402
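/*
 * Handler for the ROC notification (struct iwl_roc_notif). On a
 * successful start of a hotspot activity, mark the AUX ROC as running
 * and tell mac80211 that the channel is ready; otherwise tear the ROC
 * down and report the remain-on-channel as expired.
 */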
403void iwl_mvm_rx_roc_notif(struct iwl_mvm *mvm,
404 struct iwl_rx_cmd_buffer *rxb)
405{
406 struct iwl_rx_packet *pkt = rxb_addr(rxb);
407 struct iwl_roc_notif *notif = (void *)pkt->data;
408
409 if (le32_to_cpu(notif->success) && le32_to_cpu(notif->started) &&
410 le32_to_cpu(notif->activity) == ROC_ACTIVITY_HOTSPOT) {
411 set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
412 ieee80211_ready_on_channel(mvm->hw);
413 } else {
414 iwl_mvm_roc_finished(mvm);
415 ieee80211_remain_on_channel_expired(mvm->hw);
416 }
417}
418
419/*
420 * Handle an AUX ROC time event
421 */
422static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
423 struct iwl_time_event_notif *notif)
424{
425 struct iwl_mvm_time_event_data *aux_roc_te = NULL, *te_data;
426
427 list_for_each_entry(te_data, &mvm->aux_roc_te_list, list) {
428 if (le32_to_cpu(notif->unique_id) == te_data->uid) {
429 aux_roc_te = te_data;
430 break;
431 }
432 }
433 if (!aux_roc_te) /* Not an AUX ROC time event */
434 return -EINVAL;
435
436 iwl_mvm_te_check_trigger(mvm, notif, te_data);
437
438 IWL_DEBUG_TE(mvm,
439 "Aux ROC time event notification - UID = 0x%x action %d (error = %d)\n",
440 le32_to_cpu(notif->unique_id),
441 le32_to_cpu(notif->action), le32_to_cpu(notif->status));
442
443 if (!le32_to_cpu(notif->status) ||
444 le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_END) {
445 /* End TE, notify mac80211 */
446 ieee80211_remain_on_channel_expired(mvm->hw);
447 iwl_mvm_roc_finished(mvm); /* flush aux queue */
448 list_del(&te_data->list); /* remove from list */
449 te_data->running = false;
450 te_data->vif = NULL;
451 te_data->uid = 0;
452 te_data->id = TE_MAX;
453 } else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) {
454 set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
455 te_data->running = true;
456 ieee80211_ready_on_channel(mvm->hw); /* Start TE */
457 } else {
458 IWL_DEBUG_TE(mvm,
459 "ERROR: Unknown Aux ROC Time Event (action = %d)\n",
460 le32_to_cpu(notif->action));
461 return -EINVAL;
462 }
463
464 return 0;
465}
466
467/*
468 * The Rx handler for time event notifications
469 */
470void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
471 struct iwl_rx_cmd_buffer *rxb)
472{
473 struct iwl_rx_packet *pkt = rxb_addr(rxb);
474 struct iwl_time_event_notif *notif = (void *)pkt->data;
475 struct iwl_mvm_time_event_data *te_data, *tmp;
476
477 IWL_DEBUG_TE(mvm, "Time event notification - UID = 0x%x action %d\n",
478 le32_to_cpu(notif->unique_id),
479 le32_to_cpu(notif->action));
480
481 spin_lock_bh(&mvm->time_event_lock);
482 /* This time event is triggered for Aux ROC request */
483 if (!iwl_mvm_aux_roc_te_handle_notif(mvm, notif))
484 goto unlock;
485
486 list_for_each_entry_safe(te_data, tmp, &mvm->time_event_list, list) {
487 if (le32_to_cpu(notif->unique_id) == te_data->uid)
488 iwl_mvm_te_handle_notif(mvm, te_data, notif);
489 }
490unlock:
491 spin_unlock_bh(&mvm->time_event_lock);
492}
493
494static bool iwl_mvm_te_notif(struct iwl_notif_wait_data *notif_wait,
495 struct iwl_rx_packet *pkt, void *data)
496{
497 struct iwl_mvm *mvm =
498 container_of(notif_wait, struct iwl_mvm, notif_wait);
499 struct iwl_mvm_time_event_data *te_data = data;
500 struct iwl_time_event_notif *resp;
501 int resp_len = iwl_rx_packet_payload_len(pkt);
502
503 if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_NOTIFICATION))
504 return true;
505
506 if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
507 IWL_ERR(mvm, "Invalid TIME_EVENT_NOTIFICATION response\n");
508 return true;
509 }
510
511 resp = (void *)pkt->data;
512
513 /* te_data->uid is already set in the TIME_EVENT_CMD response */
514 if (le32_to_cpu(resp->unique_id) != te_data->uid)
515 return false;
516
517 IWL_DEBUG_TE(mvm, "TIME_EVENT_NOTIFICATION response - UID = 0x%x\n",
518 te_data->uid);
519 if (!resp->status)
520 IWL_ERR(mvm,
521 "TIME_EVENT_NOTIFICATION received but not executed\n");
522
523 return true;
524}
525
526static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait,
527 struct iwl_rx_packet *pkt, void *data)
528{
529 struct iwl_mvm *mvm =
530 container_of(notif_wait, struct iwl_mvm, notif_wait);
531 struct iwl_mvm_time_event_data *te_data = data;
532 struct iwl_time_event_resp *resp;
533 int resp_len = iwl_rx_packet_payload_len(pkt);
534
535 if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_CMD))
536 return true;
537
538 if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
539 IWL_ERR(mvm, "Invalid TIME_EVENT_CMD response\n");
540 return true;
541 }
542
543 resp = (void *)pkt->data;
544
545 /* we should never get a response to another TIME_EVENT_CMD here */
546 if (WARN_ON_ONCE(le32_to_cpu(resp->id) != te_data->id))
547 return false;
548
549 te_data->uid = le32_to_cpu(resp->unique_id);
550 IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
551 te_data->uid);
552 return true;
553}
554
555static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
556 struct ieee80211_vif *vif,
557 struct iwl_mvm_time_event_data *te_data,
558 struct iwl_time_event_cmd *te_cmd)
559{
560 static const u16 time_event_response[] = { TIME_EVENT_CMD };
561 struct iwl_notification_wait wait_time_event;
562 int ret;
563
564 lockdep_assert_held(&mvm->mutex);
565
566 IWL_DEBUG_TE(mvm, "Add new TE, duration %d TU\n",
567 le32_to_cpu(te_cmd->duration));
568
569 spin_lock_bh(&mvm->time_event_lock);
570 if (WARN_ON(te_data->id != TE_MAX)) {
571 spin_unlock_bh(&mvm->time_event_lock);
572 return -EIO;
573 }
574 te_data->vif = vif;
575 te_data->duration = le32_to_cpu(te_cmd->duration);
576 te_data->id = le32_to_cpu(te_cmd->id);
577 list_add_tail(&te_data->list, &mvm->time_event_list);
578 spin_unlock_bh(&mvm->time_event_lock);
579
580 /*
581 * Use a notification wait, which really just processes the
582 * command response and doesn't wait for anything, in order
583 * to be able to process the response and get the UID inside
584 * the RX path. Using CMD_WANT_SKB doesn't work because it
585 * stores the buffer and then wakes up this thread, by which
586 * time another notification (that the time event started)
587 * might already be processed unsuccessfully.
588 */
589 iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
590 time_event_response,
591 ARRAY_SIZE(time_event_response),
592 iwl_mvm_time_event_response, te_data);
593
594 ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
595 sizeof(*te_cmd), te_cmd);
596 if (ret) {
597 IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret);
598 iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
599 goto out_clear_te;
600 }
601
602 /* No need to wait for anything, so just pass 1 (0 isn't valid) */
603 ret = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
604 /* should never fail */
605 WARN_ON_ONCE(ret);
606
607 if (ret) {
608 out_clear_te:
609 spin_lock_bh(&mvm->time_event_lock);
610 iwl_mvm_te_clear_data(mvm, te_data);
611 spin_unlock_bh(&mvm->time_event_lock);
612 }
613 return ret;
614}
615
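/*
 * Schedule a TE_BSS_STA_AGGRESSIVE_ASSOC time event to protect a
 * session (e.g. association) on the vif's channel.
 *
 * @duration: duration of the protection, in TU
 * @min_duration: if a time event is already running and has at least
 *	this much time left (in TU), keep it and do nothing
 * @max_delay: maximum delay (in TU) the FW may apply before starting
 * @wait_for_notif: if true, wait for the TIME_EVENT_NOTIFICATION that
 *	reports the event actually started (up to max_delay)
 *
 * Illustrative call (values are examples only, not taken from a real
 * caller): iwl_mvm_protect_session(mvm, vif, 500, 400, 500, false);
 */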
616void iwl_mvm_protect_session(struct iwl_mvm *mvm,
617 struct ieee80211_vif *vif,
618 u32 duration, u32 min_duration,
619 u32 max_delay, bool wait_for_notif)
620{
621 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
622 struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
623 const u16 te_notif_response[] = { TIME_EVENT_NOTIFICATION };
624 struct iwl_notification_wait wait_te_notif;
625 struct iwl_time_event_cmd time_cmd = {};
626
627 lockdep_assert_held(&mvm->mutex);
628
629 if (te_data->running &&
630 time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) {
631 IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
632 jiffies_to_msecs(te_data->end_jiffies - jiffies));
633 return;
634 }
635
636 if (te_data->running) {
637 IWL_DEBUG_TE(mvm, "extend 0x%x: only %u ms left\n",
638 te_data->uid,
639 jiffies_to_msecs(te_data->end_jiffies - jiffies));
640 /*
641 * We don't have enough time;
642 * cancel the current TE and issue a new one.
643 * Of course it would be better to remove the old one only
644 * when the new one is added, but we don't care if we are off
645 * channel for a bit. All we need to do is not to return
646 * before we actually begin to be on the channel.
647 */
648 iwl_mvm_stop_session_protection(mvm, vif);
649 }
650
651 time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
652 time_cmd.id_and_color =
653 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
654 time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC);
655
656 time_cmd.apply_time = cpu_to_le32(0);
657
658 time_cmd.max_frags = TE_V2_FRAG_NONE;
659 time_cmd.max_delay = cpu_to_le32(max_delay);
660 /* TODO: why do we need to set interval = bi (beacon interval) if it is not periodic? */
661 time_cmd.interval = cpu_to_le32(1);
662 time_cmd.duration = cpu_to_le32(duration);
663 time_cmd.repeat = 1;
664 time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
665 TE_V2_NOTIF_HOST_EVENT_END |
666 TE_V2_START_IMMEDIATELY);
667
668 if (!wait_for_notif) {
669 iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
670 return;
671 }
672
673 /*
674 * Create notification_wait for the TIME_EVENT_NOTIFICATION to use
675 * right after we send the time event
676 */
677 iwl_init_notification_wait(&mvm->notif_wait, &wait_te_notif,
678 te_notif_response,
679 ARRAY_SIZE(te_notif_response),
680 iwl_mvm_te_notif, te_data);
681
682 /* If TE was sent OK - wait for the notification that started */
683 if (iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd)) {
684 IWL_ERR(mvm, "Failed to add TE to protect session\n");
685 iwl_remove_notification(&mvm->notif_wait, &wait_te_notif);
686 } else if (iwl_wait_notification(&mvm->notif_wait, &wait_te_notif,
687 TU_TO_JIFFIES(max_delay))) {
688 IWL_ERR(mvm, "Failed to protect session until TE\n");
689 }
690}
691
692/* Determine whether mac or link id should be used, and validate the link id */
693static int iwl_mvm_get_session_prot_id(struct iwl_mvm *mvm,
694 struct ieee80211_vif *vif,
695 s32 link_id)
696{
697 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
698 int ver = iwl_fw_lookup_cmd_ver(mvm->fw,
699 WIDE_ID(MAC_CONF_GROUP,
700 SESSION_PROTECTION_CMD), 1);
701
702 if (ver < 2)
703 return mvmvif->id;
704
705 if (WARN(link_id < 0 || !mvmvif->link[link_id],
706 "Invalid link ID for session protection: %u\n", link_id))
707 return -EINVAL;
708
709 if (WARN(ieee80211_vif_is_mld(vif) &&
710 !(vif->active_links & BIT(link_id)),
711 "Session Protection on an inactive link: %u\n", link_id))
712 return -EINVAL;
713
714 return mvmvif->link[link_id]->fw_link_id;
715}
716
717static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
718 struct ieee80211_vif *vif,
719 u32 id, s32 link_id)
720{
721 int mac_link_id = iwl_mvm_get_session_prot_id(mvm, vif, link_id);
722 struct iwl_mvm_session_prot_cmd cmd = {
723 .id_and_color = cpu_to_le32(mac_link_id),
724 .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
725 .conf_id = cpu_to_le32(id),
726 };
727 int ret;
728
729 if (mac_link_id < 0)
730 return;
731
732 ret = iwl_mvm_send_cmd_pdu(mvm,
733 WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
734 0, sizeof(cmd), &cmd);
735 if (ret)
736 IWL_ERR(mvm,
737 "Couldn't send the SESSION_PROTECTION_CMD: %d\n", ret);
738}
739
740static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
741 struct iwl_mvm_time_event_data *te_data,
742 u32 *uid)
743{
744 u32 id;
745 struct ieee80211_vif *vif = te_data->vif;
746 struct iwl_mvm_vif *mvmvif;
747 enum nl80211_iftype iftype;
748 unsigned int link_id;
749
750 if (!vif)
751 return false;
752
753 mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
754 iftype = te_data->vif->type;
755
756 /*
757 * It is possible that by the time we got to this point the time
758 * event was already removed.
759 */
760 spin_lock_bh(&mvm->time_event_lock);
761
762 /* Save time event uid before clearing its data */
763 *uid = te_data->uid;
764 id = te_data->id;
765 link_id = te_data->link_id;
766
767 /*
768 * The clear_data function handles time events that were already removed
769 */
770 iwl_mvm_te_clear_data(mvm, te_data);
771 spin_unlock_bh(&mvm->time_event_lock);
772
773 /* When session protection is used, the te_data->id field
774 * is reused to save session protection's configuration.
775 * For AUX ROC, HOT_SPOT_CMD is used and the te_data->id field is set
776 * to HOT_SPOT_CMD.
777 */
778 if (fw_has_capa(&mvm->fw->ucode_capa,
779 IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD) &&
780 id != HOT_SPOT_CMD) {
781 if (mvmvif && id < SESSION_PROTECT_CONF_MAX_ID) {
782 /* Session protection is still ongoing. Cancel it */
783 iwl_mvm_cancel_session_protection(mvm, vif, id,
784 link_id);
785 if (iftype == NL80211_IFTYPE_P2P_DEVICE) {
786 iwl_mvm_p2p_roc_finished(mvm);
787 }
788 }
789 return false;
790 } else {
791 /* It is possible that by the time we try to remove it, the
792 * time event has already ended and removed. In such a case
793 * there is no need to send a removal command.
794 */
795 if (id == TE_MAX) {
796 IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", *uid);
797 return false;
798 }
799 }
800
801 return true;
802}
803
804/*
805 * Explicit request to remove an AUX ROC time event. The removal of a time
806 * event needs to be synchronized with the flow of a time event's end
807 * notification, which also removes the time event from the op mode
808 * data structures.
809 */
810static void iwl_mvm_remove_aux_roc_te(struct iwl_mvm *mvm,
811 struct iwl_mvm_vif *mvmvif,
812 struct iwl_mvm_time_event_data *te_data)
813{
814 struct iwl_hs20_roc_req aux_cmd = {};
815 u16 len = sizeof(aux_cmd) - iwl_mvm_chan_info_padding(mvm);
816
817 u32 uid;
818 int ret;
819
820 if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
821 return;
822
823 aux_cmd.event_unique_id = cpu_to_le32(uid);
824 aux_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
825 aux_cmd.id_and_color =
826 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
827 IWL_DEBUG_TE(mvm, "Removing BSS AUX ROC TE 0x%x\n",
828 le32_to_cpu(aux_cmd.event_unique_id));
829 ret = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0,
830 len, &aux_cmd);
831
832 if (WARN_ON(ret))
833 return;
834}
835
836/*
837 * Explicit request to remove a time event. The removal of a time event needs to
838 * be synchronized with the flow of a time event's end notification, which also
839 * removes the time event from the op mode data structures.
840 */
841void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
842 struct iwl_mvm_vif *mvmvif,
843 struct iwl_mvm_time_event_data *te_data)
844{
845 struct iwl_time_event_cmd time_cmd = {};
846 u32 uid;
847 int ret;
848
849 if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
850 return;
851
852 /* When we remove a TE, the UID is to be set in the id field */
853 time_cmd.id = cpu_to_le32(uid);
854 time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
855 time_cmd.id_and_color =
856 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
857
858 IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id));
859 ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
860 sizeof(time_cmd), &time_cmd);
861 if (ret)
862 IWL_ERR(mvm, "Couldn't remove the time event\n");
863}
864
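/*
 * Remove the session protection (or the legacy association time event)
 * for this vif, but only if the currently scheduled one is indeed the
 * association protection; any other configuration id is left alone.
 */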
865void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
866 struct ieee80211_vif *vif)
867{
868 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
869 struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
870 u32 id;
871
872 lockdep_assert_held(&mvm->mutex);
873
874 spin_lock_bh(&mvm->time_event_lock);
875 id = te_data->id;
876 spin_unlock_bh(&mvm->time_event_lock);
877
878 if (fw_has_capa(&mvm->fw->ucode_capa,
879 IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
880 if (id != SESSION_PROTECT_CONF_ASSOC) {
881 IWL_DEBUG_TE(mvm,
882 "don't remove session protection id=%u\n",
883 id);
884 return;
885 }
886 } else if (id != TE_BSS_STA_AGGRESSIVE_ASSOC) {
887 IWL_DEBUG_TE(mvm,
888 "don't remove TE with id=%u (not session protection)\n",
889 id);
890 return;
891 }
892
893 iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
894}
895
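/*
 * Handler for SESSION_PROTECTION_NOTIF. For notification versions <= 2
 * the mac_link_id field carries a MAC id; for newer versions it is a FW
 * link id that is resolved to the corresponding link conf. Non-P2P vifs
 * update their time_event_data here, while for P2P_DEVICE the start/end
 * of the ROC is reported to mac80211.
 */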
896void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
897 struct iwl_rx_cmd_buffer *rxb)
898{
899 struct iwl_rx_packet *pkt = rxb_addr(rxb);
900 struct iwl_mvm_session_prot_notif *notif = (void *)pkt->data;
901 unsigned int ver =
902 iwl_fw_lookup_cmd_ver(mvm->fw,
903 WIDE_ID(MAC_CONF_GROUP,
904 SESSION_PROTECTION_CMD), 2);
905 int id = le32_to_cpu(notif->mac_link_id);
906 struct ieee80211_vif *vif;
907 struct iwl_mvm_vif *mvmvif;
908 unsigned int notif_link_id;
909
910 rcu_read_lock();
911
912 if (ver <= 2) {
913 vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true);
914 } else {
915 struct ieee80211_bss_conf *link_conf =
916 iwl_mvm_rcu_fw_link_id_to_link_conf(mvm, id, true);
917
918 if (!link_conf)
919 goto out_unlock;
920
921 notif_link_id = link_conf->link_id;
922 vif = link_conf->vif;
923 }
924
925 if (!vif)
926 goto out_unlock;
927
928 mvmvif = iwl_mvm_vif_from_mac80211(vif);
929
930 if (WARN(ver > 2 && mvmvif->time_event_data.link_id >= 0 &&
931 mvmvif->time_event_data.link_id != notif_link_id,
932 "SESION_PROTECTION_NOTIF was received for link %u, while the current time event is on link %u\n",
933 notif_link_id, mvmvif->time_event_data.link_id))
934 goto out_unlock;
935
936 /* The vif is not a P2P_DEVICE, maintain its time_event_data */
937 if (vif->type != NL80211_IFTYPE_P2P_DEVICE) {
938 struct iwl_mvm_time_event_data *te_data =
939 &mvmvif->time_event_data;
940
941 if (!le32_to_cpu(notif->status)) {
942 iwl_mvm_te_check_disconnect(mvm, vif,
943 "Session protection failure");
944 spin_lock_bh(&mvm->time_event_lock);
945 iwl_mvm_te_clear_data(mvm, te_data);
946 spin_unlock_bh(&mvm->time_event_lock);
947 }
948
949 if (le32_to_cpu(notif->start)) {
950 spin_lock_bh(&mvm->time_event_lock);
951 te_data->running = le32_to_cpu(notif->start);
952 te_data->end_jiffies =
953 TU_TO_EXP_TIME(te_data->duration);
954 spin_unlock_bh(&mvm->time_event_lock);
955 } else {
956 /*
957 * By now, we should have finished association
958 * and know the dtim period.
959 */
960 iwl_mvm_te_check_disconnect(mvm, vif,
961 !vif->cfg.assoc ?
962 "Not associated and the session protection is over already..." :
963 "No beacon heard and the session protection is over already...");
964 spin_lock_bh(&mvm->time_event_lock);
965 iwl_mvm_te_clear_data(mvm, te_data);
966 spin_unlock_bh(&mvm->time_event_lock);
967 }
968
969 goto out_unlock;
970 }
971
972 if (!le32_to_cpu(notif->status) || !le32_to_cpu(notif->start)) {
973 /* End TE, notify mac80211 */
974 mvmvif->time_event_data.id = SESSION_PROTECT_CONF_MAX_ID;
975 mvmvif->time_event_data.link_id = -1;
976 iwl_mvm_p2p_roc_finished(mvm);
977 ieee80211_remain_on_channel_expired(mvm->hw);
978 } else if (le32_to_cpu(notif->start)) {
979 if (WARN_ON(mvmvif->time_event_data.id !=
980 le32_to_cpu(notif->conf_id)))
981 goto out_unlock;
982 set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
983 ieee80211_ready_on_channel(mvm->hw); /* Start TE */
984 }
985
986 out_unlock:
987 rcu_read_unlock();
988}
989
990static int
991iwl_mvm_start_p2p_roc_session_protection(struct iwl_mvm *mvm,
992 struct ieee80211_vif *vif,
993 int duration,
994 enum ieee80211_roc_type type)
995{
996 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
997 struct iwl_mvm_session_prot_cmd cmd = {
998 .id_and_color =
999 cpu_to_le32(iwl_mvm_get_session_prot_id(mvm, vif, 0)),
1000 .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
1001 .duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
1002 };
1003
1004 lockdep_assert_held(&mvm->mutex);
1005
1006 /* The time_event_data.id field is reused to save session
1007 * protection's configuration.
1008 */
1009
1010 mvmvif->time_event_data.link_id = 0;
1011
1012 switch (type) {
1013 case IEEE80211_ROC_TYPE_NORMAL:
1014 mvmvif->time_event_data.id =
1015 SESSION_PROTECT_CONF_P2P_DEVICE_DISCOV;
1016 break;
1017 case IEEE80211_ROC_TYPE_MGMT_TX:
1018 mvmvif->time_event_data.id =
1019 SESSION_PROTECT_CONF_P2P_GO_NEGOTIATION;
1020 break;
1021 default:
1022 WARN_ONCE(1, "Got an invalid ROC type\n");
1023 return -EINVAL;
1024 }
1025
1026 cmd.conf_id = cpu_to_le32(mvmvif->time_event_data.id);
1027 return iwl_mvm_send_cmd_pdu(mvm,
1028 WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
1029 0, sizeof(cmd), &cmd);
1030}
1031
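/*
 * Start a remain-on-channel on the P2P_DEVICE vif. @duration is in
 * msec. FW with IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD gets a
 * SESSION_PROTECTION_CMD; otherwise a legacy TIME_EVENT_CMD is sent,
 * allowing fragmentation and delay to improve the scheduling odds.
 *
 * Illustrative call (values are examples only):
 *	iwl_mvm_start_p2p_roc(mvm, vif, 200, IEEE80211_ROC_TYPE_NORMAL);
 */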
1032int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1033 int duration, enum ieee80211_roc_type type)
1034{
1035 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1036 struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
1037 struct iwl_time_event_cmd time_cmd = {};
1038
1039 lockdep_assert_held(&mvm->mutex);
1040 if (te_data->running) {
1041 IWL_WARN(mvm, "P2P_DEVICE remain on channel already running\n");
1042 return -EBUSY;
1043 }
1044
1045 if (fw_has_capa(&mvm->fw->ucode_capa,
1046 IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
1047 return iwl_mvm_start_p2p_roc_session_protection(mvm, vif,
1048 duration,
1049 type);
1050
1051 time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
1052 time_cmd.id_and_color =
1053 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
1054
1055 switch (type) {
1056 case IEEE80211_ROC_TYPE_NORMAL:
1057 time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_NORMAL);
1058 break;
1059 case IEEE80211_ROC_TYPE_MGMT_TX:
1060 time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_MGMT_TX);
1061 break;
1062 default:
1063 WARN_ONCE(1, "Got an invalid ROC type\n");
1064 return -EINVAL;
1065 }
1066
1067 time_cmd.apply_time = cpu_to_le32(0);
1068 time_cmd.interval = cpu_to_le32(1);
1069
1070 /*
1071 * The P2P Device TEs can have lower priority than other events
1072 * that are being scheduled by the driver/fw, and thus it might not be
1073 * scheduled. To improve the chances of it being scheduled, allow them
1074 * to be fragmented, and in addition allow them to be delayed.
1075 */
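 /*
 * For example (rough figures, 1 TU = 1024 usec): a 200 msec ROC gives
 * MSEC_TO_TU(200) / 50 ~= 3 fragments, capped at TE_V2_FRAG_ENDLESS,
 * and a max_delay of MSEC_TO_TU(100) TU.
 */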
1076 time_cmd.max_frags = min(MSEC_TO_TU(duration)/50, TE_V2_FRAG_ENDLESS);
1077 time_cmd.max_delay = cpu_to_le32(MSEC_TO_TU(duration/2));
1078 time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration));
1079 time_cmd.repeat = 1;
1080 time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
1081 TE_V2_NOTIF_HOST_EVENT_END |
1082 TE_V2_START_IMMEDIATELY);
1083
1084 return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
1085}
1086
1087static struct iwl_mvm_time_event_data *iwl_mvm_get_roc_te(struct iwl_mvm *mvm)
1088{
1089 struct iwl_mvm_time_event_data *te_data;
1090
1091 lockdep_assert_held(&mvm->mutex);
1092
1093 spin_lock_bh(&mvm->time_event_lock);
1094
1095 /*
1096 * Iterate over the list of time events and find the time event that is
1097 * associated with a P2P_DEVICE interface.
1098 * This assumes that a P2P_DEVICE interface can have only a single time
1099 * event at any given time and this time event corresponds to a ROC
1100 * request
1101 */
1102 list_for_each_entry(te_data, &mvm->time_event_list, list) {
1103 if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE)
1104 goto out;
1105 }
1106
1107 /* There can only be at most one AUX ROC time event, we just use the
1108 * list to simplify/unify code. Remove it if it exists.
1109 */
1110 te_data = list_first_entry_or_null(&mvm->aux_roc_te_list,
1111 struct iwl_mvm_time_event_data,
1112 list);
1113out:
1114 spin_unlock_bh(&mvm->time_event_lock);
1115 return te_data;
1116}
1117
1118void iwl_mvm_cleanup_roc_te(struct iwl_mvm *mvm)
1119{
1120 struct iwl_mvm_time_event_data *te_data;
1121 u32 uid;
1122
1123 te_data = iwl_mvm_get_roc_te(mvm);
1124 if (te_data)
1125 __iwl_mvm_remove_time_event(mvm, te_data, &uid);
1126}
1127
1128static void iwl_mvm_roc_rm_cmd(struct iwl_mvm *mvm, u32 activity)
1129{
1130 int ret;
1131 struct iwl_roc_req roc_cmd = {
1132 .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
1133 .activity = cpu_to_le32(activity),
1134 };
1135
1136 lockdep_assert_held(&mvm->mutex);
1137 ret = iwl_mvm_send_cmd_pdu(mvm,
1138 WIDE_ID(MAC_CONF_GROUP, ROC_CMD),
1139 0, sizeof(roc_cmd), &roc_cmd);
1140 WARN_ON(ret);
1141}
1142
1143static void iwl_mvm_roc_station_remove(struct iwl_mvm *mvm,
1144 struct iwl_mvm_vif *mvmvif)
1145{
1146 u32 cmd_id = WIDE_ID(MAC_CONF_GROUP, ROC_CMD);
1147 u8 fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
1148 IWL_FW_CMD_VER_UNKNOWN);
1149
1150 if (fw_ver == IWL_FW_CMD_VER_UNKNOWN)
1151 iwl_mvm_remove_aux_roc_te(mvm, mvmvif,
1152 &mvmvif->hs_time_event_data);
1153 else if (fw_ver == 3)
1154 iwl_mvm_roc_rm_cmd(mvm, ROC_ACTIVITY_HOTSPOT);
1155 else
1156 IWL_ERR(mvm, "ROC command version %d mismatch!\n", fw_ver);
1157}
1158
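/*
 * Cancel an ongoing ROC. With the session protection capability,
 * P2P_DEVICE ROCs are cancelled via SESSION_PROTECTION_CMD and station
 * (hotspot) ROCs via the version dependent removal flow in
 * iwl_mvm_roc_station_remove(); without it, the legacy time event or
 * AUX ROC time event is removed instead.
 */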
1159void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1160{
1161 struct iwl_mvm_vif *mvmvif;
1162 struct iwl_mvm_time_event_data *te_data;
1163
1164 if (fw_has_capa(&mvm->fw->ucode_capa,
1165 IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
1166 mvmvif = iwl_mvm_vif_from_mac80211(vif);
1167
1168 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1169 iwl_mvm_cancel_session_protection(mvm, vif,
1170 mvmvif->time_event_data.id,
1171 mvmvif->time_event_data.link_id);
1172 iwl_mvm_p2p_roc_finished(mvm);
1173 } else {
1174 iwl_mvm_roc_station_remove(mvm, mvmvif);
1175 iwl_mvm_roc_finished(mvm);
1176 }
1177
1178 return;
1179 }
1180
1181 te_data = iwl_mvm_get_roc_te(mvm);
1182 if (!te_data) {
1183 IWL_WARN(mvm, "No remain on channel event\n");
1184 return;
1185 }
1186
1187 mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
1188
1189 if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1190 iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
1191 iwl_mvm_p2p_roc_finished(mvm);
1192 } else {
1193 iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data);
1194 iwl_mvm_roc_finished(mvm);
1195 }
1196}
1197
1198void iwl_mvm_remove_csa_period(struct iwl_mvm *mvm,
1199 struct ieee80211_vif *vif)
1200{
1201 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1202 struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
1203 u32 id;
1204
1205 lockdep_assert_held(&mvm->mutex);
1206
1207 spin_lock_bh(&mvm->time_event_lock);
1208 id = te_data->id;
1209 spin_unlock_bh(&mvm->time_event_lock);
1210
1211 if (id != TE_CHANNEL_SWITCH_PERIOD)
1212 return;
1213
1214 iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
1215}
1216
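/*
 * Schedule a TE_CHANNEL_SWITCH_PERIOD absence time event for a channel
 * switch. @duration is passed to the FW as-is and @apply_time is the FW
 * time at which the event should start; apply_time == 0 means start
 * immediately (TE_V2_START_IMMEDIATELY). Any running session protection
 * is removed first to let the switch happen.
 */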
1217int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
1218 struct ieee80211_vif *vif,
1219 u32 duration, u32 apply_time)
1220{
1221 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1222 struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
1223 struct iwl_time_event_cmd time_cmd = {};
1224
1225 lockdep_assert_held(&mvm->mutex);
1226
1227 if (te_data->running) {
1228 u32 id;
1229
1230 spin_lock_bh(&mvm->time_event_lock);
1231 id = te_data->id;
1232 spin_unlock_bh(&mvm->time_event_lock);
1233
1234 if (id == TE_CHANNEL_SWITCH_PERIOD) {
1235 IWL_DEBUG_TE(mvm, "CS period is already scheduled\n");
1236 return -EBUSY;
1237 }
1238
1239 /*
1240 * Remove the session protection time event to allow the
1241 * channel switch. If we got here, we just heard a beacon so
1242 * the session protection is not needed anymore anyway.
1243 */
1244 iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
1245 }
1246
1247 time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
1248 time_cmd.id_and_color =
1249 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
1250 time_cmd.id = cpu_to_le32(TE_CHANNEL_SWITCH_PERIOD);
1251 time_cmd.apply_time = cpu_to_le32(apply_time);
1252 time_cmd.max_frags = TE_V2_FRAG_NONE;
1253 time_cmd.duration = cpu_to_le32(duration);
1254 time_cmd.repeat = 1;
1255 time_cmd.interval = cpu_to_le32(1);
1256 time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
1257 TE_V2_ABSENCE);
1258 if (!apply_time)
1259 time_cmd.policy |= cpu_to_le16(TE_V2_START_IMMEDIATELY);
1260
1261 return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
1262}
1263
1264static bool iwl_mvm_session_prot_notif(struct iwl_notif_wait_data *notif_wait,
1265 struct iwl_rx_packet *pkt, void *data)
1266{
1267 struct iwl_mvm *mvm =
1268 container_of(notif_wait, struct iwl_mvm, notif_wait);
1269 struct iwl_mvm_session_prot_notif *resp;
1270 int resp_len = iwl_rx_packet_payload_len(pkt);
1271
1272 if (WARN_ON(pkt->hdr.cmd != SESSION_PROTECTION_NOTIF ||
1273 pkt->hdr.group_id != MAC_CONF_GROUP))
1274 return true;
1275
1276 if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
1277 IWL_ERR(mvm, "Invalid SESSION_PROTECTION_NOTIF response\n");
1278 return true;
1279 }
1280
1281 resp = (void *)pkt->data;
1282
1283 if (!resp->status)
1284 IWL_ERR(mvm,
1285 "TIME_EVENT_NOTIFICATION received but not executed\n");
1286
1287 return true;
1288}
1289
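/*
 * Schedule a SESSION_PROTECT_CONF_ASSOC session protection on the given
 * link. Note the units: @duration is in msec and is converted to TU for
 * the FW, while @min_duration is treated as TU when checking whether an
 * already running protection on this link is long enough to be kept.
 * If @wait_for_notif is set, wait for SESSION_PROTECTION_NOTIF before
 * returning.
 *
 * Illustrative call (values are examples only):
 *	iwl_mvm_schedule_session_protection(mvm, vif, 900, 400, false, 0);
 */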
1290void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
1291 struct ieee80211_vif *vif,
1292 u32 duration, u32 min_duration,
1293 bool wait_for_notif,
1294 unsigned int link_id)
1295{
1296 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1297 struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
1298 const u16 notif[] = { WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF) };
1299 struct iwl_notification_wait wait_notif;
1300 int mac_link_id = iwl_mvm_get_session_prot_id(mvm, vif, link_id);
1301 struct iwl_mvm_session_prot_cmd cmd = {
1302 .id_and_color = cpu_to_le32(mac_link_id),
1303 .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
1304 .conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC),
1305 .duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
1306 };
1307
1308 if (mac_link_id < 0)
1309 return;
1310
1311 lockdep_assert_held(&mvm->mutex);
1312
1313 spin_lock_bh(&mvm->time_event_lock);
1314 if (te_data->running && te_data->link_id == link_id &&
1315 time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) {
1316 IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
1317 jiffies_to_msecs(te_data->end_jiffies - jiffies));
1318 spin_unlock_bh(&mvm->time_event_lock);
1319
1320 return;
1321 }
1322
1323 iwl_mvm_te_clear_data(mvm, te_data);
1324 /*
1325 * The time_event_data.id field is reused to save session
1326 * protection's configuration.
1327 */
1328 te_data->id = le32_to_cpu(cmd.conf_id);
1329 te_data->duration = le32_to_cpu(cmd.duration_tu);
1330 te_data->vif = vif;
1331 te_data->link_id = link_id;
1332 spin_unlock_bh(&mvm->time_event_lock);
1333
1334 IWL_DEBUG_TE(mvm, "Add new session protection, duration %d TU\n",
1335 le32_to_cpu(cmd.duration_tu));
1336
1337 if (!wait_for_notif) {
1338 if (iwl_mvm_send_cmd_pdu(mvm,
1339 WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
1340 0, sizeof(cmd), &cmd)) {
1341 goto send_cmd_err;
1342 }
1343
1344 return;
1345 }
1346
1347 iwl_init_notification_wait(&mvm->notif_wait, &wait_notif,
1348 notif, ARRAY_SIZE(notif),
1349 iwl_mvm_session_prot_notif, NULL);
1350
1351 if (iwl_mvm_send_cmd_pdu(mvm,
1352 WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
1353 0, sizeof(cmd), &cmd)) {
1354 iwl_remove_notification(&mvm->notif_wait, &wait_notif);
1355 goto send_cmd_err;
1356 } else if (iwl_wait_notification(&mvm->notif_wait, &wait_notif,
1357 TU_TO_JIFFIES(100))) {
1358 IWL_ERR(mvm,
1359 "Failed to protect session until session protection\n");
1360 }
1361 return;
1362
1363send_cmd_err:
1364 IWL_ERR(mvm,
1365 "Couldn't send the SESSION_PROTECTION_CMD\n");
1366 spin_lock_bh(&mvm->time_event_lock);
1367 iwl_mvm_te_clear_data(mvm, te_data);
1368 spin_unlock_bh(&mvm->time_event_lock);
1369}
1// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2/*
3 * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
5 * Copyright (C) 2017 Intel Deutschland GmbH
6 */
7#include <linux/jiffies.h>
8#include <net/mac80211.h>
9
10#include "fw/notif-wait.h"
11#include "iwl-trans.h"
12#include "fw-api.h"
13#include "time-event.h"
14#include "mvm.h"
15#include "iwl-io.h"
16#include "iwl-prph.h"
17
18/*
19 * For the high priority TE use a time event type that has similar priority to
20 * the FW's action scan priority.
21 */
22#define IWL_MVM_ROC_TE_TYPE_NORMAL TE_P2P_DEVICE_DISCOVERABLE
23#define IWL_MVM_ROC_TE_TYPE_MGMT_TX TE_P2P_CLIENT_ASSOC
24
25void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
26 struct iwl_mvm_time_event_data *te_data)
27{
28 lockdep_assert_held(&mvm->time_event_lock);
29
30 if (!te_data || !te_data->vif)
31 return;
32
33 list_del(&te_data->list);
34
35 /*
36 * the list is only used for AUX ROC events so make sure it is always
37 * initialized
38 */
39 INIT_LIST_HEAD(&te_data->list);
40
41 te_data->running = false;
42 te_data->uid = 0;
43 te_data->id = TE_MAX;
44 te_data->vif = NULL;
45 te_data->link_id = -1;
46}
47
48static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm)
49{
50 /*
51 * Clear the ROC_RUNNING status bit.
52 * This will cause the TX path to drop offchannel transmissions.
53 * That would also be done by mac80211, but it is racy, in particular
54 * in the case that the time event actually completed in the firmware.
55 *
56 * Also flush the offchannel queue -- this is called when the time
57 * event finishes or is canceled, so that frames queued for it
58 * won't get stuck on the queue and be transmitted in the next
59 * time event.
60 */
61 if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) {
62 struct iwl_mvm_vif *mvmvif;
63
64 synchronize_net();
65
66 /*
67 * NB: access to this pointer would be racy, but the flush bit
68 * can only be set when we had a P2P-Device VIF, and we have a
69 * flush of this work in iwl_mvm_prepare_mac_removal() so it's
70 * not really racy.
71 */
72
73 if (!WARN_ON(!mvm->p2p_device_vif)) {
74 struct ieee80211_vif *vif = mvm->p2p_device_vif;
75
76 mvmvif = iwl_mvm_vif_from_mac80211(vif);
77 iwl_mvm_flush_sta(mvm, mvmvif->deflink.bcast_sta.sta_id,
78 mvmvif->deflink.bcast_sta.tfd_queue_msk);
79
80 if (mvm->mld_api_is_used) {
81 iwl_mvm_mld_rm_bcast_sta(mvm, vif,
82 &vif->bss_conf);
83
84 iwl_mvm_link_changed(mvm, vif, &vif->bss_conf,
85 LINK_CONTEXT_MODIFY_ACTIVE,
86 false);
87 } else {
88 iwl_mvm_rm_p2p_bcast_sta(mvm, vif);
89 iwl_mvm_binding_remove_vif(mvm, vif);
90 }
91
92 /* Do not remove the PHY context as removing and adding
93 * a PHY context has timing overheads. Leaving it
94 * configured in FW would be useful in case the next ROC
95 * is with the same channel.
96 */
97 }
98 }
99
100 /* Do the same for AUX ROC */
101 if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) {
102 synchronize_net();
103
104 iwl_mvm_flush_sta(mvm, mvm->aux_sta.sta_id,
105 mvm->aux_sta.tfd_queue_msk);
106
107 if (mvm->mld_api_is_used) {
108 iwl_mvm_mld_rm_aux_sta(mvm);
109 return;
110 }
111
112 /* In newer version of this command an aux station is added only
113 * in cases of dedicated tx queue and need to be removed in end
114 * of use */
115 if (iwl_mvm_has_new_station_api(mvm->fw))
116 iwl_mvm_rm_aux_sta(mvm);
117 }
118}
119
120void iwl_mvm_roc_done_wk(struct work_struct *wk)
121{
122 struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);
123
124 mutex_lock(&mvm->mutex);
125 iwl_mvm_cleanup_roc(mvm);
126 mutex_unlock(&mvm->mutex);
127}
128
129static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
130{
131 /*
132 * Of course, our status bit is just as racy as mac80211, so in
133 * addition, fire off the work struct which will drop all frames
134 * from the hardware queues that made it through the race. First
135 * it will of course synchronize the TX path to make sure that
136 * any *new* TX will be rejected.
137 */
138 schedule_work(&mvm->roc_done_wk);
139}
140
141static void iwl_mvm_csa_noa_start(struct iwl_mvm *mvm)
142{
143 struct ieee80211_vif *csa_vif;
144
145 rcu_read_lock();
146
147 csa_vif = rcu_dereference(mvm->csa_vif);
148 if (!csa_vif || !csa_vif->bss_conf.csa_active)
149 goto out_unlock;
150
151 IWL_DEBUG_TE(mvm, "CSA NOA started\n");
152
153 /*
154 * CSA NoA is started but we still have beacons to
155 * transmit on the current channel.
156 * So we just do nothing here and the switch
157 * will be performed on the last TBTT.
158 */
159 if (!ieee80211_beacon_cntdwn_is_complete(csa_vif, 0)) {
160 IWL_WARN(mvm, "CSA NOA started too early\n");
161 goto out_unlock;
162 }
163
164 ieee80211_csa_finish(csa_vif, 0);
165
166 rcu_read_unlock();
167
168 RCU_INIT_POINTER(mvm->csa_vif, NULL);
169
170 return;
171
172out_unlock:
173 rcu_read_unlock();
174}
175
176static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
177 struct ieee80211_vif *vif,
178 const char *errmsg)
179{
180 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
181
182 if (vif->type != NL80211_IFTYPE_STATION)
183 return false;
184
185 if (!mvmvif->csa_bcn_pending && vif->cfg.assoc &&
186 vif->bss_conf.dtim_period)
187 return false;
188 if (errmsg)
189 IWL_ERR(mvm, "%s\n", errmsg);
190
191 if (mvmvif->csa_bcn_pending) {
192 struct iwl_mvm_sta *mvmsta;
193
194 rcu_read_lock();
195 mvmsta = iwl_mvm_sta_from_staid_rcu(mvm,
196 mvmvif->deflink.ap_sta_id);
197 if (!WARN_ON(!mvmsta))
198 iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);
199 rcu_read_unlock();
200 }
201
202 if (vif->cfg.assoc) {
203 /*
204 * When not associated, this will be called from
205 * iwl_mvm_event_mlme_callback_ini()
206 */
207 iwl_dbg_tlv_time_point(&mvm->fwrt,
208 IWL_FW_INI_TIME_POINT_ASSOC_FAILED,
209 NULL);
210 }
211
212 iwl_mvm_connection_loss(mvm, vif, errmsg);
213 return true;
214}
215
216static void
217iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm,
218 struct iwl_mvm_time_event_data *te_data,
219 struct iwl_time_event_notif *notif)
220{
221 struct ieee80211_vif *vif = te_data->vif;
222 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
223
224 if (!notif->status)
225 IWL_DEBUG_TE(mvm, "CSA time event failed to start\n");
226
227 switch (te_data->vif->type) {
228 case NL80211_IFTYPE_AP:
229 if (!notif->status)
230 mvmvif->csa_failed = true;
231 iwl_mvm_csa_noa_start(mvm);
232 break;
233 case NL80211_IFTYPE_STATION:
234 if (!notif->status) {
235 iwl_mvm_connection_loss(mvm, vif,
236 "CSA TE failed to start");
237 break;
238 }
239 iwl_mvm_csa_client_absent(mvm, te_data->vif);
240 cancel_delayed_work(&mvmvif->csa_work);
241 ieee80211_chswitch_done(te_data->vif, true, 0);
242 break;
243 default:
244 /* should never happen */
245 WARN_ON_ONCE(1);
246 break;
247 }
248
249 /* we don't need it anymore */
250 iwl_mvm_te_clear_data(mvm, te_data);
251}
252
253static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm,
254 struct iwl_time_event_notif *notif,
255 struct iwl_mvm_time_event_data *te_data)
256{
257 struct iwl_fw_dbg_trigger_tlv *trig;
258 struct iwl_fw_dbg_trigger_time_event *te_trig;
259 int i;
260
261 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt,
262 ieee80211_vif_to_wdev(te_data->vif),
263 FW_DBG_TRIGGER_TIME_EVENT);
264 if (!trig)
265 return;
266
267 te_trig = (void *)trig->data;
268
269 for (i = 0; i < ARRAY_SIZE(te_trig->time_events); i++) {
270 u32 trig_te_id = le32_to_cpu(te_trig->time_events[i].id);
271 u32 trig_action_bitmap =
272 le32_to_cpu(te_trig->time_events[i].action_bitmap);
273 u32 trig_status_bitmap =
274 le32_to_cpu(te_trig->time_events[i].status_bitmap);
275
276 if (trig_te_id != te_data->id ||
277 !(trig_action_bitmap & le32_to_cpu(notif->action)) ||
278 !(trig_status_bitmap & BIT(le32_to_cpu(notif->status))))
279 continue;
280
281 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
282 "Time event %d Action 0x%x received status: %d",
283 te_data->id,
284 le32_to_cpu(notif->action),
285 le32_to_cpu(notif->status));
286 break;
287 }
288}
289
290/*
291 * Handles a FW notification for an event that is known to the driver.
292 *
293 * @mvm: the mvm component
294 * @te_data: the time event data
295 * @notif: the notification data corresponding the time event data.
296 */
297static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
298 struct iwl_mvm_time_event_data *te_data,
299 struct iwl_time_event_notif *notif)
300{
301 lockdep_assert_held(&mvm->time_event_lock);
302
303 IWL_DEBUG_TE(mvm, "Handle time event notif - UID = 0x%x action %d\n",
304 le32_to_cpu(notif->unique_id),
305 le32_to_cpu(notif->action));
306
307 iwl_mvm_te_check_trigger(mvm, notif, te_data);
308
309 /*
310 * The FW sends the start/end time event notifications even for events
311 * that it fails to schedule. This is indicated in the status field of
312 * the notification. This happens in cases that the scheduler cannot
313 * find a schedule that can handle the event (for example requesting a
314 * P2P Device discoveribility, while there are other higher priority
315 * events in the system).
316 */
317 if (!le32_to_cpu(notif->status)) {
318 const char *msg;
319
320 if (notif->action & cpu_to_le32(TE_V2_NOTIF_HOST_EVENT_START))
321 msg = "Time Event start notification failure";
322 else
323 msg = "Time Event end notification failure";
324
325 IWL_DEBUG_TE(mvm, "%s\n", msg);
326
327 if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, msg)) {
328 iwl_mvm_te_clear_data(mvm, te_data);
329 return;
330 }
331 }
332
333 if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_END) {
334 IWL_DEBUG_TE(mvm,
335 "TE ended - current time %lu, estimated end %lu\n",
336 jiffies, te_data->end_jiffies);
337
338 switch (te_data->vif->type) {
339 case NL80211_IFTYPE_P2P_DEVICE:
340 ieee80211_remain_on_channel_expired(mvm->hw);
341 iwl_mvm_roc_finished(mvm);
342 break;
343 case NL80211_IFTYPE_STATION:
344 /*
345 * If we are switching channel, don't disconnect
346 * if the time event is already done. Beacons can
347 * be delayed a bit after the switch.
348 */
349 if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) {
350 IWL_DEBUG_TE(mvm,
351 "No beacon heard and the CS time event is over, don't disconnect\n");
352 break;
353 }
354
355 /*
356 * By now, we should have finished association
357 * and know the dtim period.
358 */
359 iwl_mvm_te_check_disconnect(mvm, te_data->vif,
360 !te_data->vif->cfg.assoc ?
361 "Not associated and the time event is over already..." :
362 "No beacon heard and the time event is over already...");
363 break;
364 default:
365 break;
366 }
367
368 iwl_mvm_te_clear_data(mvm, te_data);
369 } else if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_START) {
370 te_data->running = true;
371 te_data->end_jiffies = TU_TO_EXP_TIME(te_data->duration);
372
373 if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
374 set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
375 ieee80211_ready_on_channel(mvm->hw);
376 } else if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) {
377 iwl_mvm_te_handle_notify_csa(mvm, te_data, notif);
378 }
379 } else {
380 IWL_WARN(mvm, "Got TE with unknown action\n");
381 }
382}
383
384void iwl_mvm_rx_roc_notif(struct iwl_mvm *mvm,
385 struct iwl_rx_cmd_buffer *rxb)
386{
387 struct iwl_rx_packet *pkt = rxb_addr(rxb);
388 struct iwl_roc_notif *notif = (void *)pkt->data;
389
390 if (le32_to_cpu(notif->success) && le32_to_cpu(notif->started) &&
391 le32_to_cpu(notif->activity) == ROC_ACTIVITY_HOTSPOT) {
392 set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
393 ieee80211_ready_on_channel(mvm->hw);
394 } else {
395 iwl_mvm_roc_finished(mvm);
396 ieee80211_remain_on_channel_expired(mvm->hw);
397 }
398}
399
400/*
401 * Handle A Aux ROC time event
402 */
403static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
404 struct iwl_time_event_notif *notif)
405{
406 struct iwl_mvm_time_event_data *aux_roc_te = NULL, *te_data;
407
408 list_for_each_entry(te_data, &mvm->aux_roc_te_list, list) {
409 if (le32_to_cpu(notif->unique_id) == te_data->uid) {
410 aux_roc_te = te_data;
411 break;
412 }
413 }
414 if (!aux_roc_te) /* Not a Aux ROC time event */
415 return -EINVAL;
416
417 iwl_mvm_te_check_trigger(mvm, notif, te_data);
418
419 IWL_DEBUG_TE(mvm,
420 "Aux ROC time event notification - UID = 0x%x action %d (error = %d)\n",
421 le32_to_cpu(notif->unique_id),
422 le32_to_cpu(notif->action), le32_to_cpu(notif->status));
423
424 if (!le32_to_cpu(notif->status) ||
425 le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_END) {
426 /* End TE, notify mac80211 */
427 ieee80211_remain_on_channel_expired(mvm->hw);
428 iwl_mvm_roc_finished(mvm); /* flush aux queue */
429 list_del(&te_data->list); /* remove from list */
430 te_data->running = false;
431 te_data->vif = NULL;
432 te_data->uid = 0;
433 te_data->id = TE_MAX;
434 } else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) {
435 set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
436 te_data->running = true;
437 ieee80211_ready_on_channel(mvm->hw); /* Start TE */
438 } else {
439 IWL_DEBUG_TE(mvm,
440 "ERROR: Unknown Aux ROC Time Event (action = %d)\n",
441 le32_to_cpu(notif->action));
442 return -EINVAL;
443 }
444
445 return 0;
446}

/*
 * The Rx handler for time event notifications
 */
void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
                                 struct iwl_rx_cmd_buffer *rxb)
{
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_time_event_notif *notif = (void *)pkt->data;
        struct iwl_mvm_time_event_data *te_data, *tmp;

        IWL_DEBUG_TE(mvm, "Time event notification - UID = 0x%x action %d\n",
                     le32_to_cpu(notif->unique_id),
                     le32_to_cpu(notif->action));

        spin_lock_bh(&mvm->time_event_lock);
        /* This time event is triggered for Aux ROC request */
        if (!iwl_mvm_aux_roc_te_handle_notif(mvm, notif))
                goto unlock;

        list_for_each_entry_safe(te_data, tmp, &mvm->time_event_list, list) {
                if (le32_to_cpu(notif->unique_id) == te_data->uid)
                        iwl_mvm_te_handle_notif(mvm, te_data, notif);
        }
unlock:
        spin_unlock_bh(&mvm->time_event_lock);
}

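/*
 * notification-wait handler for TIME_EVENT_NOTIFICATION: returns true (done
 * waiting) once the notification for the time event we added (matched by UID)
 * has been processed.
 */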
static bool iwl_mvm_te_notif(struct iwl_notif_wait_data *notif_wait,
                             struct iwl_rx_packet *pkt, void *data)
{
        struct iwl_mvm *mvm =
                container_of(notif_wait, struct iwl_mvm, notif_wait);
        struct iwl_mvm_time_event_data *te_data = data;
        struct iwl_time_event_notif *resp;
        int resp_len = iwl_rx_packet_payload_len(pkt);

        if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_NOTIFICATION))
                return true;

        if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
                IWL_ERR(mvm, "Invalid TIME_EVENT_NOTIFICATION response\n");
                return true;
        }

        resp = (void *)pkt->data;

        /* te_data->uid is already set in the TIME_EVENT_CMD response */
        if (le32_to_cpu(resp->unique_id) != te_data->uid)
                return false;

        IWL_DEBUG_TE(mvm, "TIME_EVENT_NOTIFICATION response - UID = 0x%x\n",
                     te_data->uid);
        if (!resp->status)
                IWL_ERR(mvm,
                        "TIME_EVENT_NOTIFICATION received but not executed\n");

        return true;
}

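/*
 * notification-wait handler for the TIME_EVENT_CMD response: stores the
 * unique ID the firmware assigned to the time event in te_data->uid.
 */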
static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait,
                                        struct iwl_rx_packet *pkt, void *data)
{
        struct iwl_mvm *mvm =
                container_of(notif_wait, struct iwl_mvm, notif_wait);
        struct iwl_mvm_time_event_data *te_data = data;
        struct iwl_time_event_resp *resp;
        int resp_len = iwl_rx_packet_payload_len(pkt);

        if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_CMD))
                return true;

        if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
                IWL_ERR(mvm, "Invalid TIME_EVENT_CMD response\n");
                return true;
        }

        resp = (void *)pkt->data;

        /* we should never get a response to another TIME_EVENT_CMD here */
        if (WARN_ON_ONCE(le32_to_cpu(resp->id) != te_data->id))
                return false;

        te_data->uid = le32_to_cpu(resp->unique_id);
        IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
                     te_data->uid);
        return true;
}

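/*
 * Add a time event in the firmware: link te_data to the vif, queue it on
 * mvm->time_event_list and send TIME_EVENT_CMD, capturing the firmware
 * assigned UID from the command response.
 */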
static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
                                       struct ieee80211_vif *vif,
                                       struct iwl_mvm_time_event_data *te_data,
                                       struct iwl_time_event_cmd *te_cmd)
{
        static const u16 time_event_response[] = { TIME_EVENT_CMD };
        struct iwl_notification_wait wait_time_event;
        int ret;

        lockdep_assert_held(&mvm->mutex);

        IWL_DEBUG_TE(mvm, "Add new TE, duration %d TU\n",
                     le32_to_cpu(te_cmd->duration));

        spin_lock_bh(&mvm->time_event_lock);
        if (WARN_ON(te_data->id != TE_MAX)) {
                spin_unlock_bh(&mvm->time_event_lock);
                return -EIO;
        }
        te_data->vif = vif;
        te_data->duration = le32_to_cpu(te_cmd->duration);
        te_data->id = le32_to_cpu(te_cmd->id);
        list_add_tail(&te_data->list, &mvm->time_event_list);
        spin_unlock_bh(&mvm->time_event_lock);

        /*
         * Use a notification wait, which really just processes the
         * command response and doesn't wait for anything, in order
         * to be able to process the response and get the UID inside
         * the RX path. Using CMD_WANT_SKB doesn't work because it
         * stores the buffer and then wakes up this thread, by which
         * time another notification (that the time event started)
         * might already be processed unsuccessfully.
         */
        iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
                                   time_event_response,
                                   ARRAY_SIZE(time_event_response),
                                   iwl_mvm_time_event_response, te_data);

        ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
                                   sizeof(*te_cmd), te_cmd);
        if (ret) {
                IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret);
                iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
                goto out_clear_te;
        }

        /* No need to wait for anything, so just pass 1 (0 isn't valid) */
        ret = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
        /* should never fail */
        WARN_ON_ONCE(ret);

        if (ret) {
 out_clear_te:
                spin_lock_bh(&mvm->time_event_lock);
                iwl_mvm_te_clear_data(mvm, te_data);
                spin_unlock_bh(&mvm->time_event_lock);
        }
        return ret;
}

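/*
 * Schedule a TE_BSS_STA_AGGRESSIVE_ASSOC time event to keep the device
 * available to the AP (e.g. while association is in progress). If the current
 * time event is long enough, nothing is done; otherwise it is replaced.
 * When wait_for_notif is set, also wait for the notification that the new
 * time event actually started.
 */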
void iwl_mvm_protect_session(struct iwl_mvm *mvm,
                             struct ieee80211_vif *vif,
                             u32 duration, u32 min_duration,
                             u32 max_delay, bool wait_for_notif)
{
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
        const u16 te_notif_response[] = { TIME_EVENT_NOTIFICATION };
        struct iwl_notification_wait wait_te_notif;
        struct iwl_time_event_cmd time_cmd = {};

        lockdep_assert_held(&mvm->mutex);

        if (te_data->running &&
            time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) {
                IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
                             jiffies_to_msecs(te_data->end_jiffies - jiffies));
                return;
        }

        if (te_data->running) {
                IWL_DEBUG_TE(mvm, "extend 0x%x: only %u ms left\n",
                             te_data->uid,
                             jiffies_to_msecs(te_data->end_jiffies - jiffies));
                /*
                 * We don't have enough time, so cancel the current TE and
                 * issue a new one. It would be better to remove the old one
                 * only once the new one is added, but we don't care if we
                 * are off channel for a bit. All we need to do is not return
                 * before we actually begin to be on the channel.
                 */
                iwl_mvm_stop_session_protection(mvm, vif);
        }

        time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
        time_cmd.id_and_color =
                cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
        time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC);

        time_cmd.apply_time = cpu_to_le32(0);

        time_cmd.max_frags = TE_V2_FRAG_NONE;
        time_cmd.max_delay = cpu_to_le32(max_delay);
        /* TODO: why do we need to set interval = bi if it is not periodic? */
        time_cmd.interval = cpu_to_le32(1);
        time_cmd.duration = cpu_to_le32(duration);
        time_cmd.repeat = 1;
        time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
                                      TE_V2_NOTIF_HOST_EVENT_END |
                                      TE_V2_START_IMMEDIATELY);

        if (!wait_for_notif) {
                iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
                return;
        }

        /*
         * Create notification_wait for the TIME_EVENT_NOTIFICATION to use
         * right after we send the time event
         */
        iwl_init_notification_wait(&mvm->notif_wait, &wait_te_notif,
                                   te_notif_response,
                                   ARRAY_SIZE(te_notif_response),
                                   iwl_mvm_te_notif, te_data);

        /* If the TE was sent OK, wait for the notification that it started */
        if (iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd)) {
                IWL_ERR(mvm, "Failed to add TE to protect session\n");
                iwl_remove_notification(&mvm->notif_wait, &wait_te_notif);
        } else if (iwl_wait_notification(&mvm->notif_wait, &wait_te_notif,
                                         TU_TO_JIFFIES(max_delay))) {
                IWL_ERR(mvm, "Failed to protect session until TE\n");
        }
}

/* Determine whether mac or link id should be used, and validate the link id */
static int iwl_mvm_get_session_prot_id(struct iwl_mvm *mvm,
                                       struct ieee80211_vif *vif,
                                       s8 link_id)
{
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        int ver = iwl_fw_lookup_cmd_ver(mvm->fw,
                                        WIDE_ID(MAC_CONF_GROUP,
                                                SESSION_PROTECTION_CMD), 1);

        if (ver < 2)
                return mvmvif->id;

        if (WARN(link_id < 0 || !mvmvif->link[link_id],
                 "Invalid link ID for session protection: %d\n", link_id))
                return -EINVAL;

        if (WARN(!mvmvif->link[link_id]->active,
                 "Session Protection on an inactive link: %d\n", link_id))
                return -EINVAL;

        return mvmvif->link[link_id]->fw_link_id;
}

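/*
 * Ask the firmware to remove an ongoing session protection by sending
 * SESSION_PROTECTION_CMD with FW_CTXT_ACTION_REMOVE.
 */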
static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
                                              struct ieee80211_vif *vif,
                                              u32 id, s8 link_id)
{
        int mac_link_id = iwl_mvm_get_session_prot_id(mvm, vif, link_id);
        struct iwl_mvm_session_prot_cmd cmd = {
                .id_and_color = cpu_to_le32(mac_link_id),
                .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
                .conf_id = cpu_to_le32(id),
        };
        int ret;

        if (mac_link_id < 0)
                return;

        ret = iwl_mvm_send_cmd_pdu(mvm,
                                   WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
                                   0, sizeof(cmd), &cmd);
        if (ret)
                IWL_ERR(mvm,
                        "Couldn't send the SESSION_PROTECTION_CMD: %d\n", ret);
}

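/*
 * Clear the local time event data and, for the session protection API, cancel
 * the protection in the firmware. Returns true if the caller still needs to
 * send an explicit removal command for the time event.
 */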
static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
                                        struct iwl_mvm_time_event_data *te_data,
                                        u32 *uid)
{
        u32 id;
        struct ieee80211_vif *vif = te_data->vif;
        struct iwl_mvm_vif *mvmvif;
        enum nl80211_iftype iftype;
        s8 link_id;

        if (!vif)
                return false;

        mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
        iftype = te_data->vif->type;

        /*
         * It is possible that by the time we got to this point the time
         * event was already removed.
         */
        spin_lock_bh(&mvm->time_event_lock);

        /* Save time event uid before clearing its data */
        *uid = te_data->uid;
        id = te_data->id;
        link_id = te_data->link_id;

        /*
         * The clear_data function handles time events that were already removed
         */
        iwl_mvm_te_clear_data(mvm, te_data);
        spin_unlock_bh(&mvm->time_event_lock);

        /* When session protection is used, the te_data->id field
         * is reused to save session protection's configuration.
         * For AUX ROC, HOT_SPOT_CMD is used and the te_data->id field is set
         * to HOT_SPOT_CMD.
         */
        if (fw_has_capa(&mvm->fw->ucode_capa,
                        IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD) &&
            id != HOT_SPOT_CMD) {
                if (mvmvif && id < SESSION_PROTECT_CONF_MAX_ID) {
                        /* Session protection is still ongoing. Cancel it */
                        iwl_mvm_cancel_session_protection(mvm, vif, id,
                                                          link_id);
                        if (iftype == NL80211_IFTYPE_P2P_DEVICE)
                                iwl_mvm_roc_finished(mvm);
                }
                return false;
        } else {
                /* It is possible that by the time we try to remove it, the
                 * time event has already ended and removed. In such a case
                 * there is no need to send a removal command.
                 */
                if (id == TE_MAX) {
                        IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", *uid);
                        return false;
                }
        }

        return true;
}

/*
 * Explicit request to remove an aux roc time event. The removal of a time
 * event needs to be synchronized with the flow of a time event's end
 * notification, which also removes the time event from the op mode
 * data structures.
 */
static void iwl_mvm_remove_aux_roc_te(struct iwl_mvm *mvm,
                                      struct iwl_mvm_vif *mvmvif,
                                      struct iwl_mvm_time_event_data *te_data)
{
        struct iwl_hs20_roc_req aux_cmd = {};
        u16 len = sizeof(aux_cmd) - iwl_mvm_chan_info_padding(mvm);

        u32 uid;
        int ret;

        if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
                return;

        aux_cmd.event_unique_id = cpu_to_le32(uid);
        aux_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
        aux_cmd.id_and_color =
                cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
        IWL_DEBUG_TE(mvm, "Removing BSS AUX ROC TE 0x%x\n",
                     le32_to_cpu(aux_cmd.event_unique_id));
        ret = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0,
                                   len, &aux_cmd);

        if (WARN_ON(ret))
                return;
}

/*
 * Explicit request to remove a time event. The removal of a time event needs to
 * be synchronized with the flow of a time event's end notification, which also
 * removes the time event from the op mode data structures.
 */
void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
                               struct iwl_mvm_vif *mvmvif,
                               struct iwl_mvm_time_event_data *te_data)
{
        struct iwl_time_event_cmd time_cmd = {};
        u32 uid;
        int ret;

        if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
                return;

        /* When we remove a TE, the UID is to be set in the id field */
        time_cmd.id = cpu_to_le32(uid);
        time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
        time_cmd.id_and_color =
                cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));

        IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id));
        ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
                                   sizeof(time_cmd), &time_cmd);
        if (ret)
                IWL_ERR(mvm, "Couldn't remove the time event\n");
}

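/*
 * Remove the association/session protection time event of the given vif, but
 * only if the currently scheduled event is indeed session protection.
 */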
void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
                                     struct ieee80211_vif *vif)
{
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
        u32 id;

        lockdep_assert_held(&mvm->mutex);

        spin_lock_bh(&mvm->time_event_lock);
        id = te_data->id;
        spin_unlock_bh(&mvm->time_event_lock);

        if (fw_has_capa(&mvm->fw->ucode_capa,
                        IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
                if (id != SESSION_PROTECT_CONF_ASSOC) {
                        IWL_DEBUG_TE(mvm,
                                     "don't remove session protection id=%u\n",
                                     id);
                        return;
                }
        } else if (id != TE_BSS_STA_AGGRESSIVE_ASSOC) {
                IWL_DEBUG_TE(mvm,
                             "don't remove TE with id=%u (not session protection)\n",
                             id);
                return;
        }

        iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
}

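/*
 * Rx handler for SESSION_PROTECTION_NOTIF: for regular (non-P2P-Device) vifs,
 * update the time event state and check whether we need to disconnect; for
 * P2P-Device vifs, translate the start/end of the protection into the
 * mac80211 ready-on-channel / remain-on-channel-expired callbacks.
 */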
void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
                                      struct iwl_rx_cmd_buffer *rxb)
{
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_mvm_session_prot_notif *notif = (void *)pkt->data;
        unsigned int ver =
                iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
                                        SESSION_PROTECTION_NOTIF, 2);
        int id = le32_to_cpu(notif->mac_link_id);
        struct ieee80211_vif *vif;
        struct iwl_mvm_vif *mvmvif;
        unsigned int notif_link_id;

        rcu_read_lock();

        if (ver <= 2) {
                vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true);
        } else {
                struct ieee80211_bss_conf *link_conf =
                        iwl_mvm_rcu_fw_link_id_to_link_conf(mvm, id, true);

                if (!link_conf)
                        goto out_unlock;

                notif_link_id = link_conf->link_id;
                vif = link_conf->vif;
        }

        if (!vif)
                goto out_unlock;

        mvmvif = iwl_mvm_vif_from_mac80211(vif);

        if (WARN(ver > 2 && mvmvif->time_event_data.link_id >= 0 &&
                 mvmvif->time_event_data.link_id != notif_link_id,
                 "SESSION_PROTECTION_NOTIF was received for link %u, while the current time event is on link %u\n",
                 notif_link_id, mvmvif->time_event_data.link_id))
                goto out_unlock;

        /* The vif is not a P2P_DEVICE, maintain its time_event_data */
        if (vif->type != NL80211_IFTYPE_P2P_DEVICE) {
                struct iwl_mvm_time_event_data *te_data =
                        &mvmvif->time_event_data;

                if (!le32_to_cpu(notif->status)) {
                        iwl_mvm_te_check_disconnect(mvm, vif,
                                                    "Session protection failure");
                        spin_lock_bh(&mvm->time_event_lock);
                        iwl_mvm_te_clear_data(mvm, te_data);
                        spin_unlock_bh(&mvm->time_event_lock);
                }

                if (le32_to_cpu(notif->start)) {
                        spin_lock_bh(&mvm->time_event_lock);
                        te_data->running = le32_to_cpu(notif->start);
                        te_data->end_jiffies =
                                TU_TO_EXP_TIME(te_data->duration);
                        spin_unlock_bh(&mvm->time_event_lock);
                } else {
                        /*
                         * By now, we should have finished association
                         * and know the dtim period.
                         */
                        iwl_mvm_te_check_disconnect(mvm, vif,
                                                    !vif->cfg.assoc ?
                                                    "Not associated and the session protection is over already..." :
                                                    "No beacon heard and the session protection is over already...");
                        spin_lock_bh(&mvm->time_event_lock);
                        iwl_mvm_te_clear_data(mvm, te_data);
                        spin_unlock_bh(&mvm->time_event_lock);
                }

                goto out_unlock;
        }

        if (!le32_to_cpu(notif->status) || !le32_to_cpu(notif->start)) {
                /* End TE, notify mac80211 */
                mvmvif->time_event_data.id = SESSION_PROTECT_CONF_MAX_ID;
                mvmvif->time_event_data.link_id = -1;
                iwl_mvm_roc_finished(mvm);
                ieee80211_remain_on_channel_expired(mvm->hw);
        } else if (le32_to_cpu(notif->start)) {
                if (WARN_ON(mvmvif->time_event_data.id !=
                            le32_to_cpu(notif->conf_id)))
                        goto out_unlock;
                set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
                ieee80211_ready_on_channel(mvm->hw); /* Start TE */
        }

 out_unlock:
        rcu_read_unlock();
}

#define AUX_ROC_MIN_DURATION MSEC_TO_TU(100)
#define AUX_ROC_MIN_DELAY MSEC_TO_TU(200)
#define AUX_ROC_MAX_DELAY MSEC_TO_TU(600)
#define AUX_ROC_SAFETY_BUFFER MSEC_TO_TU(20)
#define AUX_ROC_MIN_SAFETY_BUFFER MSEC_TO_TU(10)

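/*
 * Compute the ROC duration (in TU) and the maximum start delay for a
 * remain-on-channel request. When associated, the delay is stretched to a few
 * DTIM intervals and the duration is capped below one DTIM interval so we
 * don't miss DTIMs while off channel.
 */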
void iwl_mvm_roc_duration_and_delay(struct ieee80211_vif *vif,
                                    u32 duration_ms,
                                    u32 *duration_tu,
                                    u32 *delay)
{
        u32 dtim_interval = vif->bss_conf.dtim_period *
                vif->bss_conf.beacon_int;

        *delay = AUX_ROC_MIN_DELAY;
        *duration_tu = MSEC_TO_TU(duration_ms);

        /*
         * If we are associated we want the delay time to be at least one
         * dtim interval so that the FW can wait until after the DTIM and
         * then start the time event; this will potentially allow us to
         * remain off-channel for the max duration.
         * Since we want to use almost a whole dtim interval we would also
         * like the delay to be for 2-3 dtim intervals, in case there are
         * other time events with higher priority.
         */
        if (vif->cfg.assoc) {
                *delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY);
                /* We cannot remain off-channel longer than the DTIM interval */
                if (dtim_interval <= *duration_tu) {
                        *duration_tu = dtim_interval - AUX_ROC_SAFETY_BUFFER;
                        if (*duration_tu <= AUX_ROC_MIN_DURATION)
                                *duration_tu = dtim_interval -
                                        AUX_ROC_MIN_SAFETY_BUFFER;
                }
        }
}

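/*
 * Build and send a ROC_CMD request to remain on the given channel for the
 * requested duration (newer firmware API, using the AUX station).
 */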
int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm,
                        struct ieee80211_channel *channel,
                        struct ieee80211_vif *vif,
                        int duration, u32 activity)
{
        int res;
        u32 duration_tu, delay;
        struct iwl_roc_req roc_req = {
                .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
                .activity = cpu_to_le32(activity),
                .sta_id = cpu_to_le32(mvm->aux_sta.sta_id),
        };

        lockdep_assert_held(&mvm->mutex);

        /* Set the channel info data */
        iwl_mvm_set_chan_info(mvm, &roc_req.channel_info,
                              channel->hw_value,
                              iwl_mvm_phy_band_from_nl80211(channel->band),
                              IWL_PHY_CHANNEL_MODE20, 0);

        iwl_mvm_roc_duration_and_delay(vif, duration, &duration_tu,
                                       &delay);
        roc_req.duration = cpu_to_le32(duration_tu);
        roc_req.max_delay = cpu_to_le32(delay);

        IWL_DEBUG_TE(mvm,
                     "\t(requested = %ums, max_delay = %ums)\n",
                     duration, delay);
        IWL_DEBUG_TE(mvm,
                     "Requesting to remain on channel %u for %utu\n",
                     channel->hw_value, duration_tu);

        /* Set the node address */
        memcpy(roc_req.node_addr, vif->addr, ETH_ALEN);

        res = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, ROC_CMD),
                                   0, sizeof(roc_req), &roc_req);

        return res;
}

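/*
 * Start a P2P Device ROC using the session protection API: pick the
 * configuration matching the ROC type and send SESSION_PROTECTION_CMD.
 */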
static int
iwl_mvm_start_p2p_roc_session_protection(struct iwl_mvm *mvm,
                                         struct ieee80211_vif *vif,
                                         int duration,
                                         enum ieee80211_roc_type type)
{
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_session_prot_cmd cmd = {
                .id_and_color =
                        cpu_to_le32(iwl_mvm_get_session_prot_id(mvm, vif, 0)),
                .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
                .duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
        };

        lockdep_assert_held(&mvm->mutex);

        /* The time_event_data.id field is reused to save session
         * protection's configuration.
         */

        mvmvif->time_event_data.link_id = 0;

        switch (type) {
        case IEEE80211_ROC_TYPE_NORMAL:
                mvmvif->time_event_data.id =
                        SESSION_PROTECT_CONF_P2P_DEVICE_DISCOV;
                break;
        case IEEE80211_ROC_TYPE_MGMT_TX:
                mvmvif->time_event_data.id =
                        SESSION_PROTECT_CONF_P2P_GO_NEGOTIATION;
                break;
        default:
                WARN_ONCE(1, "Got an invalid ROC type\n");
                return -EINVAL;
        }

        cmd.conf_id = cpu_to_le32(mvmvif->time_event_data.id);
        return iwl_mvm_send_cmd_pdu(mvm,
                                    WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
                                    0, sizeof(cmd), &cmd);
}

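/*
 * Start a remain-on-channel on the P2P Device vif, either through session
 * protection (if the firmware supports it) or through a P2P time event.
 */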
int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                          int duration, enum ieee80211_roc_type type)
{
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
        struct iwl_time_event_cmd time_cmd = {};

        lockdep_assert_held(&mvm->mutex);
        if (te_data->running) {
                IWL_WARN(mvm, "P2P_DEVICE remain on channel already running\n");
                return -EBUSY;
        }

        if (fw_has_capa(&mvm->fw->ucode_capa,
                        IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
                return iwl_mvm_start_p2p_roc_session_protection(mvm, vif,
                                                                duration,
                                                                type);

        time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
        time_cmd.id_and_color =
                cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));

        switch (type) {
        case IEEE80211_ROC_TYPE_NORMAL:
                time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_NORMAL);
                break;
        case IEEE80211_ROC_TYPE_MGMT_TX:
                time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_MGMT_TX);
                break;
        default:
                WARN_ONCE(1, "Got an invalid ROC type\n");
                return -EINVAL;
        }

        time_cmd.apply_time = cpu_to_le32(0);
        time_cmd.interval = cpu_to_le32(1);

        /*
         * The P2P Device TEs can have lower priority than other events
         * that are being scheduled by the driver/fw, and thus might not be
         * scheduled. To improve the chances of them being scheduled, allow
         * them to be fragmented, and in addition allow them to be delayed.
         */
        time_cmd.max_frags = min(MSEC_TO_TU(duration)/50, TE_V2_FRAG_ENDLESS);
        time_cmd.max_delay = cpu_to_le32(MSEC_TO_TU(duration/2));
        time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration));
        time_cmd.repeat = 1;
        time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
                                      TE_V2_NOTIF_HOST_EVENT_END |
                                      TE_V2_START_IMMEDIATELY);

        return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}

static struct iwl_mvm_time_event_data *iwl_mvm_get_roc_te(struct iwl_mvm *mvm)
{
        struct iwl_mvm_time_event_data *te_data;

        lockdep_assert_held(&mvm->mutex);

        spin_lock_bh(&mvm->time_event_lock);

        /*
         * Iterate over the list of time events and find the time event that is
         * associated with a P2P_DEVICE interface.
         * This assumes that a P2P_DEVICE interface can have only a single time
         * event at any given time and that this time event corresponds to a ROC
         * request.
         */
        list_for_each_entry(te_data, &mvm->time_event_list, list) {
                if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE)
                        goto out;
        }

        /* There can only be at most one AUX ROC time event; we just use the
         * list to simplify/unify code. Remove it if it exists.
         */
        te_data = list_first_entry_or_null(&mvm->aux_roc_te_list,
                                           struct iwl_mvm_time_event_data,
                                           list);
out:
        spin_unlock_bh(&mvm->time_event_lock);
        return te_data;
}

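/*
 * Clean up the ROC time event, if any: clear its data; for the session
 * protection API this also cancels the protection in the firmware.
 */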
void iwl_mvm_cleanup_roc_te(struct iwl_mvm *mvm)
{
        struct iwl_mvm_time_event_data *te_data;
        u32 uid;

        te_data = iwl_mvm_get_roc_te(mvm);
        if (te_data)
                __iwl_mvm_remove_time_event(mvm, te_data, &uid);
}

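/* Send ROC_CMD with FW_CTXT_ACTION_REMOVE to stop the given ROC activity */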
static void iwl_mvm_roc_rm_cmd(struct iwl_mvm *mvm, u32 activity)
{
        int ret;
        struct iwl_roc_req roc_cmd = {
                .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
                .activity = cpu_to_le32(activity),
        };

        lockdep_assert_held(&mvm->mutex);
        ret = iwl_mvm_send_cmd_pdu(mvm,
                                   WIDE_ID(MAC_CONF_GROUP, ROC_CMD),
                                   0, sizeof(roc_cmd), &roc_cmd);
        WARN_ON(ret);
}

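/*
 * Remove a station (hotspot) ROC, using either the legacy HOT_SPOT_CMD based
 * AUX ROC time event or ROC_CMD, depending on the firmware command version.
 */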
static void iwl_mvm_roc_station_remove(struct iwl_mvm *mvm,
                                       struct iwl_mvm_vif *mvmvif)
{
        u32 cmd_id = WIDE_ID(MAC_CONF_GROUP, ROC_CMD);
        u8 fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
                                          IWL_FW_CMD_VER_UNKNOWN);

        if (fw_ver == IWL_FW_CMD_VER_UNKNOWN)
                iwl_mvm_remove_aux_roc_te(mvm, mvmvif,
                                          &mvmvif->hs_time_event_data);
        else if (fw_ver == 3)
                iwl_mvm_roc_rm_cmd(mvm, ROC_ACTIVITY_HOTSPOT);
        else
                IWL_ERR(mvm, "ROC command version %d mismatch!\n", fw_ver);
}

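/*
 * Stop an ongoing remain-on-channel: remove the session protection or time
 * event in the firmware and trigger the common ROC cleanup.
 */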
void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
        struct iwl_mvm_vif *mvmvif;
        struct iwl_mvm_time_event_data *te_data;

        if (fw_has_capa(&mvm->fw->ucode_capa,
                        IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
                mvmvif = iwl_mvm_vif_from_mac80211(vif);
                te_data = &mvmvif->time_event_data;

                if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
                        if (te_data->id >= SESSION_PROTECT_CONF_MAX_ID) {
                                IWL_DEBUG_TE(mvm,
                                             "No remain on channel event\n");
                                return;
                        }

                        iwl_mvm_cancel_session_protection(mvm, vif,
                                                          te_data->id,
                                                          te_data->link_id);
                } else {
                        iwl_mvm_roc_station_remove(mvm, mvmvif);
                }
                goto cleanup_roc;
        }

        te_data = iwl_mvm_get_roc_te(mvm);
        if (!te_data) {
                IWL_WARN(mvm, "No remain on channel event\n");
                return;
        }

        mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);

        if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE)
                iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
        else
                iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data);

cleanup_roc:
        /*
         * If we get here before the ROC event has started (so the status bit
         * isn't set yet), set it here so that iwl_mvm_cleanup_roc() will
         * clean things up properly.
         */
        set_bit(vif->type == NL80211_IFTYPE_P2P_DEVICE ?
                IWL_MVM_STATUS_ROC_RUNNING : IWL_MVM_STATUS_ROC_AUX_RUNNING,
                &mvm->status);
        iwl_mvm_cleanup_roc(mvm);
}

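/* Remove the channel switch time event, if one is currently scheduled */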
void iwl_mvm_remove_csa_period(struct iwl_mvm *mvm,
                               struct ieee80211_vif *vif)
{
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
        u32 id;

        lockdep_assert_held(&mvm->mutex);

        spin_lock_bh(&mvm->time_event_lock);
        id = te_data->id;
        spin_unlock_bh(&mvm->time_event_lock);

        if (id != TE_CHANNEL_SWITCH_PERIOD)
                return;

        iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
}

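/*
 * Schedule a TE_CHANNEL_SWITCH_PERIOD time event (an absence period) starting
 * at apply_time, or immediately if apply_time is zero.
 */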
int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
                                struct ieee80211_vif *vif,
                                u32 duration, u32 apply_time)
{
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
        struct iwl_time_event_cmd time_cmd = {};

        lockdep_assert_held(&mvm->mutex);

        if (te_data->running) {
                u32 id;

                spin_lock_bh(&mvm->time_event_lock);
                id = te_data->id;
                spin_unlock_bh(&mvm->time_event_lock);

                if (id == TE_CHANNEL_SWITCH_PERIOD) {
                        IWL_DEBUG_TE(mvm, "CS period is already scheduled\n");
                        return -EBUSY;
                }

                /*
                 * Remove the session protection time event to allow the
                 * channel switch. If we got here, we just heard a beacon so
                 * the session protection is not needed anymore anyway.
                 */
                iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
        }

        time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
        time_cmd.id_and_color =
                cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
        time_cmd.id = cpu_to_le32(TE_CHANNEL_SWITCH_PERIOD);
        time_cmd.apply_time = cpu_to_le32(apply_time);
        time_cmd.max_frags = TE_V2_FRAG_NONE;
        time_cmd.duration = cpu_to_le32(duration);
        time_cmd.repeat = 1;
        time_cmd.interval = cpu_to_le32(1);
        time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
                                      TE_V2_ABSENCE);
        if (!apply_time)
                time_cmd.policy |= cpu_to_le16(TE_V2_START_IMMEDIATELY);

        return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}

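/*
 * notification-wait handler for SESSION_PROTECTION_NOTIF, used when the
 * caller of iwl_mvm_schedule_session_protection() asked to wait for it.
 */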
static bool iwl_mvm_session_prot_notif(struct iwl_notif_wait_data *notif_wait,
                                       struct iwl_rx_packet *pkt, void *data)
{
        struct iwl_mvm *mvm =
                container_of(notif_wait, struct iwl_mvm, notif_wait);
        struct iwl_mvm_session_prot_notif *resp;
        int resp_len = iwl_rx_packet_payload_len(pkt);

        if (WARN_ON(pkt->hdr.cmd != SESSION_PROTECTION_NOTIF ||
                    pkt->hdr.group_id != MAC_CONF_GROUP))
                return true;

        if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
                IWL_ERR(mvm, "Invalid SESSION_PROTECTION_NOTIF response\n");
                return true;
        }

        resp = (void *)pkt->data;

        if (!resp->status)
                IWL_ERR(mvm,
                        "SESSION_PROTECTION_NOTIF received but not executed\n");

        return true;
}

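/*
 * Schedule association session protection (SESSION_PROTECT_CONF_ASSOC) on the
 * given link, unless an already running protection leaves enough time. When
 * wait_for_notif is set, also wait for the notification that it started.
 */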
void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
                                         struct ieee80211_vif *vif,
                                         u32 duration, u32 min_duration,
                                         bool wait_for_notif,
                                         unsigned int link_id)
{
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
        const u16 notif[] = { WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF) };
        struct iwl_notification_wait wait_notif;
        int mac_link_id = iwl_mvm_get_session_prot_id(mvm, vif, (s8)link_id);
        struct iwl_mvm_session_prot_cmd cmd = {
                .id_and_color = cpu_to_le32(mac_link_id),
                .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
                .conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC),
                .duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
        };

        if (mac_link_id < 0)
                return;

        lockdep_assert_held(&mvm->mutex);

        spin_lock_bh(&mvm->time_event_lock);
        if (te_data->running && te_data->link_id == link_id &&
            time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) {
                IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
                             jiffies_to_msecs(te_data->end_jiffies - jiffies));
                spin_unlock_bh(&mvm->time_event_lock);

                return;
        }

        iwl_mvm_te_clear_data(mvm, te_data);
        /*
         * The time_event_data.id field is reused to save session
         * protection's configuration.
         */
        te_data->id = le32_to_cpu(cmd.conf_id);
        te_data->duration = le32_to_cpu(cmd.duration_tu);
        te_data->vif = vif;
        te_data->link_id = link_id;
        spin_unlock_bh(&mvm->time_event_lock);

        IWL_DEBUG_TE(mvm, "Add new session protection, duration %d TU\n",
                     le32_to_cpu(cmd.duration_tu));

        if (!wait_for_notif) {
                if (iwl_mvm_send_cmd_pdu(mvm,
                                         WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
                                         0, sizeof(cmd), &cmd)) {
                        goto send_cmd_err;
                }

                return;
        }

        iwl_init_notification_wait(&mvm->notif_wait, &wait_notif,
                                   notif, ARRAY_SIZE(notif),
                                   iwl_mvm_session_prot_notif, NULL);

        if (iwl_mvm_send_cmd_pdu(mvm,
                                 WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
                                 0, sizeof(cmd), &cmd)) {
                iwl_remove_notification(&mvm->notif_wait, &wait_notif);
                goto send_cmd_err;
        } else if (iwl_wait_notification(&mvm->notif_wait, &wait_notif,
                                         TU_TO_JIFFIES(100))) {
                IWL_ERR(mvm,
                        "Failed to protect session until session protection\n");
        }
        return;

send_cmd_err:
        IWL_ERR(mvm,
                "Couldn't send the SESSION_PROTECTION_CMD\n");
        spin_lock_bh(&mvm->time_event_lock);
        iwl_mvm_te_clear_data(mvm, te_data);
        spin_unlock_bh(&mvm->time_event_lock);
}
1440}