Loading...
1/*
2 * This file is part of wl1271
3 *
4 * Copyright (C) 2008-2009 Nokia Corporation
5 *
6 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 */
23
24#include "wlcore.h"
25#include "debug.h"
26#include "io.h"
27#include "event.h"
28#include "ps.h"
29#include "scan.h"
30#include "wl12xx_80211.h"
31#include "hw_ops.h"
32
33#define WL18XX_LOGGER_SDIO_BUFF_MAX (0x1020)
34#define WL18XX_DATA_RAM_BASE_ADDRESS (0x20000000)
35#define WL18XX_LOGGER_SDIO_BUFF_ADDR (0x40159c)
36#define WL18XX_LOGGER_BUFF_OFFSET (sizeof(struct fw_logger_information))
37#define WL18XX_LOGGER_READ_POINT_OFFSET (12)
38
39int wlcore_event_fw_logger(struct wl1271 *wl)
40{
41 int ret;
42 struct fw_logger_information fw_log;
43 u8 *buffer;
44 u32 internal_fw_addrbase = WL18XX_DATA_RAM_BASE_ADDRESS;
45 u32 addr = WL18XX_LOGGER_SDIO_BUFF_ADDR;
46 u32 end_buff_addr = WL18XX_LOGGER_SDIO_BUFF_ADDR +
47 WL18XX_LOGGER_BUFF_OFFSET;
48 u32 available_len;
49 u32 actual_len;
50 u32 clear_addr;
51 size_t len;
52 u32 start_loc;
53
54 buffer = kzalloc(WL18XX_LOGGER_SDIO_BUFF_MAX, GFP_KERNEL);
55 if (!buffer) {
56 wl1271_error("Fail to allocate fw logger memory");
57 fw_log.actual_buff_size = cpu_to_le32(0);
58 goto out;
59 }
60
61 ret = wlcore_read(wl, addr, buffer, WL18XX_LOGGER_SDIO_BUFF_MAX,
62 false);
63 if (ret < 0) {
64 wl1271_error("Fail to read logger buffer, error_id = %d",
65 ret);
66 fw_log.actual_buff_size = cpu_to_le32(0);
67 goto free_out;
68 }
69
70 memcpy(&fw_log, buffer, sizeof(fw_log));
71
72 if (le32_to_cpu(fw_log.actual_buff_size) == 0)
73 goto free_out;
74
75 actual_len = le32_to_cpu(fw_log.actual_buff_size);
76 start_loc = (le32_to_cpu(fw_log.buff_read_ptr) -
77 internal_fw_addrbase) - addr;
78 end_buff_addr += le32_to_cpu(fw_log.max_buff_size);
79 available_len = end_buff_addr -
80 (le32_to_cpu(fw_log.buff_read_ptr) -
81 internal_fw_addrbase);
82 actual_len = min(actual_len, available_len);
83 len = actual_len;
84
85 wl12xx_copy_fwlog(wl, &buffer[start_loc], len);
86 clear_addr = addr + start_loc + le32_to_cpu(fw_log.actual_buff_size) +
87 internal_fw_addrbase;
88
89 len = le32_to_cpu(fw_log.actual_buff_size) - len;
90 if (len) {
91 wl12xx_copy_fwlog(wl,
92 &buffer[WL18XX_LOGGER_BUFF_OFFSET],
93 len);
94 clear_addr = addr + WL18XX_LOGGER_BUFF_OFFSET + len +
95 internal_fw_addrbase;
96 }
97
98 /* double check that clear address and write pointer are the same */
99 if (clear_addr != le32_to_cpu(fw_log.buff_write_ptr)) {
100 wl1271_error("Calculate of clear addr Clear = %x, write = %x",
101 clear_addr, le32_to_cpu(fw_log.buff_write_ptr));
102 }
103
104 /* indicate FW about Clear buffer */
105 ret = wlcore_write32(wl, addr + WL18XX_LOGGER_READ_POINT_OFFSET,
106 fw_log.buff_write_ptr);
107free_out:
108 kfree(buffer);
109out:
110 return le32_to_cpu(fw_log.actual_buff_size);
111}
112EXPORT_SYMBOL_GPL(wlcore_event_fw_logger);
113
114void wlcore_event_rssi_trigger(struct wl1271 *wl, s8 *metric_arr)
115{
116 struct wl12xx_vif *wlvif;
117 struct ieee80211_vif *vif;
118 enum nl80211_cqm_rssi_threshold_event event;
119 s8 metric = metric_arr[0];
120
121 wl1271_debug(DEBUG_EVENT, "RSSI trigger metric: %d", metric);
122
123 /* TODO: check actual multi-role support */
124 wl12xx_for_each_wlvif_sta(wl, wlvif) {
125 if (metric <= wlvif->rssi_thold)
126 event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
127 else
128 event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
129
130 vif = wl12xx_wlvif_to_vif(wlvif);
131 if (event != wlvif->last_rssi_event)
132 ieee80211_cqm_rssi_notify(vif, event, GFP_KERNEL);
133 wlvif->last_rssi_event = event;
134 }
135}
136EXPORT_SYMBOL_GPL(wlcore_event_rssi_trigger);
137
/*
 * Stop every active RX block-ack session on the given vif; called when
 * the firmware signals that BA is no longer allowed for its role.
 */
static void wl1271_stop_ba_event(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);

	if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
		/* STA/IBSS case: a single link towards the peer (BSSID) */
		u8 hlid = wlvif->sta.hlid;
		if (!wl->links[hlid].ba_bitmap)
			return;
		ieee80211_stop_rx_ba_session(vif, wl->links[hlid].ba_bitmap,
					     vif->bss_conf.bssid);
	} else {
		/* AP case: walk every associated station link */
		u8 hlid;
		struct wl1271_link *lnk;
		for_each_set_bit(hlid, wlvif->ap.sta_hlid_map,
				 wl->num_links) {
			lnk = &wl->links[hlid];
			/* skip links with no active BA sessions */
			if (!lnk->ba_bitmap)
				continue;

			ieee80211_stop_rx_ba_session(vif,
						     lnk->ba_bitmap,
						     lnk->addr);
		}
	}
}
163
164void wlcore_event_soft_gemini_sense(struct wl1271 *wl, u8 enable)
165{
166 struct wl12xx_vif *wlvif;
167
168 if (enable) {
169 set_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags);
170 } else {
171 clear_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags);
172 wl12xx_for_each_wlvif_sta(wl, wlvif) {
173 wl1271_recalc_rx_streaming(wl, wlvif);
174 }
175 }
176}
177EXPORT_SYMBOL_GPL(wlcore_event_soft_gemini_sense);
178
179void wlcore_event_sched_scan_completed(struct wl1271 *wl,
180 u8 status)
181{
182 wl1271_debug(DEBUG_EVENT, "PERIODIC_SCAN_COMPLETE_EVENT (status 0x%0x)",
183 status);
184
185 if (wl->sched_vif) {
186 ieee80211_sched_scan_stopped(wl->hw);
187 wl->sched_vif = NULL;
188 }
189}
190EXPORT_SYMBOL_GPL(wlcore_event_sched_scan_completed);
191
192void wlcore_event_ba_rx_constraint(struct wl1271 *wl,
193 unsigned long roles_bitmap,
194 unsigned long allowed_bitmap)
195{
196 struct wl12xx_vif *wlvif;
197
198 wl1271_debug(DEBUG_EVENT, "%s: roles=0x%lx allowed=0x%lx",
199 __func__, roles_bitmap, allowed_bitmap);
200
201 wl12xx_for_each_wlvif(wl, wlvif) {
202 if (wlvif->role_id == WL12XX_INVALID_ROLE_ID ||
203 !test_bit(wlvif->role_id , &roles_bitmap))
204 continue;
205
206 wlvif->ba_allowed = !!test_bit(wlvif->role_id,
207 &allowed_bitmap);
208 if (!wlvif->ba_allowed)
209 wl1271_stop_ba_event(wl, wlvif);
210 }
211}
212EXPORT_SYMBOL_GPL(wlcore_event_ba_rx_constraint);
213
/*
 * Handle a channel-switch completion event for every role listed in
 * roles_bitmap. Only vifs that had a switch in progress
 * (WLVIF_FLAG_CS_PROGRESS) are notified.
 */
void wlcore_event_channel_switch(struct wl1271 *wl,
				 unsigned long roles_bitmap,
				 bool success)
{
	struct wl12xx_vif *wlvif;
	struct ieee80211_vif *vif;

	wl1271_debug(DEBUG_EVENT, "%s: roles=0x%lx success=%d",
		     __func__, roles_bitmap, success);

	wl12xx_for_each_wlvif(wl, wlvif) {
		if (wlvif->role_id == WL12XX_INVALID_ROLE_ID ||
		    !test_bit(wlvif->role_id , &roles_bitmap))
			continue;

		/* only act on vifs that actually had a CS pending */
		if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS,
					&wlvif->flags))
			continue;

		vif = wl12xx_wlvif_to_vif(wlvif);

		if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
			/* STA: report result and stop the local timeout work */
			ieee80211_chswitch_done(vif, success);
			cancel_delayed_work(&wlvif->channel_switch_work);
		} else {
			/* AP: beacons stay off until re-enabled after CSA */
			set_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags);
			ieee80211_csa_finish(vif);
		}
	}
}
EXPORT_SYMBOL_GPL(wlcore_event_channel_switch);
245
246void wlcore_event_dummy_packet(struct wl1271 *wl)
247{
248 if (wl->plt) {
249 wl1271_info("Got DUMMY_PACKET event in PLT mode. FW bug, ignoring.");
250 return;
251 }
252
253 wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID");
254 wl1271_tx_dummy_packet(wl);
255}
256EXPORT_SYMBOL_GPL(wlcore_event_dummy_packet);
257
258static void wlcore_disconnect_sta(struct wl1271 *wl, unsigned long sta_bitmap)
259{
260 u32 num_packets = wl->conf.tx.max_tx_retries;
261 struct wl12xx_vif *wlvif;
262 struct ieee80211_vif *vif;
263 struct ieee80211_sta *sta;
264 const u8 *addr;
265 int h;
266
267 for_each_set_bit(h, &sta_bitmap, wl->num_links) {
268 bool found = false;
269 /* find the ap vif connected to this sta */
270 wl12xx_for_each_wlvif_ap(wl, wlvif) {
271 if (!test_bit(h, wlvif->ap.sta_hlid_map))
272 continue;
273 found = true;
274 break;
275 }
276 if (!found)
277 continue;
278
279 vif = wl12xx_wlvif_to_vif(wlvif);
280 addr = wl->links[h].addr;
281
282 rcu_read_lock();
283 sta = ieee80211_find_sta(vif, addr);
284 if (sta) {
285 wl1271_debug(DEBUG_EVENT, "remove sta %d", h);
286 ieee80211_report_low_ack(sta, num_packets);
287 }
288 rcu_read_unlock();
289 }
290}
291
/* Firmware reported repeated TX failures for these stations: kick them. */
void wlcore_event_max_tx_failure(struct wl1271 *wl, unsigned long sta_bitmap)
{
	wl1271_debug(DEBUG_EVENT, "MAX_TX_FAILURE_EVENT_ID");
	wlcore_disconnect_sta(wl, sta_bitmap);
}
EXPORT_SYMBOL_GPL(wlcore_event_max_tx_failure);
298
/* Firmware reported these stations as inactive: kick them. */
void wlcore_event_inactive_sta(struct wl1271 *wl, unsigned long sta_bitmap)
{
	wl1271_debug(DEBUG_EVENT, "INACTIVE_STA_EVENT_ID");
	wlcore_disconnect_sta(wl, sta_bitmap);
}
EXPORT_SYMBOL_GPL(wlcore_event_inactive_sta);
305
/*
 * Remain-on-channel is now in effect; tell mac80211 the channel is
 * ready. Ignored if no vif currently owns an ROC request.
 */
void wlcore_event_roc_complete(struct wl1271 *wl)
{
	wl1271_debug(DEBUG_EVENT, "REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID");
	if (wl->roc_vif)
		ieee80211_ready_on_channel(wl->hw);
}
EXPORT_SYMBOL_GPL(wlcore_event_roc_complete);
313
/*
 * Handle a firmware beacon-loss event for every STA role listed in
 * roles_bitmap: notify mac80211 of CQM beacon loss and (re)queue the
 * delayed connection-loss work. P2P vifs report connection loss
 * immediately instead of attempting roaming.
 */
void wlcore_event_beacon_loss(struct wl1271 *wl, unsigned long roles_bitmap)
{
	/*
	 * We are HW_MONITOR device. On beacon loss - queue
	 * connection loss work. Cancel it on REGAINED event.
	 */
	struct wl12xx_vif *wlvif;
	struct ieee80211_vif *vif;
	/* grace period before declaring the connection lost (ms) */
	int delay = wl->conf.conn.synch_fail_thold *
				wl->conf.conn.bss_lose_timeout;

	wl1271_info("Beacon loss detected. roles:0x%lx", roles_bitmap);

	wl12xx_for_each_wlvif_sta(wl, wlvif) {
		if (wlvif->role_id == WL12XX_INVALID_ROLE_ID ||
		    !test_bit(wlvif->role_id , &roles_bitmap))
			continue;

		vif = wl12xx_wlvif_to_vif(wlvif);

		/* don't attempt roaming in case of p2p */
		if (wlvif->p2p) {
			ieee80211_connection_loss(vif);
			continue;
		}

		/*
		 * if the work is already queued, it should take place.
		 * We don't want to delay the connection loss
		 * indication any more.
		 */
		ieee80211_queue_delayed_work(wl->hw,
					     &wlvif->connection_loss_work,
					     msecs_to_jiffies(delay));

		ieee80211_cqm_beacon_loss_notify(vif, GFP_KERNEL);
	}
}
EXPORT_SYMBOL_GPL(wlcore_event_beacon_loss);
353
354int wl1271_event_unmask(struct wl1271 *wl)
355{
356 int ret;
357
358 wl1271_debug(DEBUG_EVENT, "unmasking event_mask 0x%x", wl->event_mask);
359 ret = wl1271_acx_event_mbox_mask(wl, ~(wl->event_mask));
360 if (ret < 0)
361 return ret;
362
363 return 0;
364}
365
/*
 * Fetch and process one event mailbox from the firmware, then ack it.
 * @mbox_num: mailbox index; only 0 and 1 exist.
 * Returns 0 on success or a negative error code.
 */
int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num)
{
	int ret;

	wl1271_debug(DEBUG_EVENT, "EVENT on mbox %d", mbox_num);

	if (mbox_num > 1)
		return -EINVAL;

	/* first we read the mbox descriptor */
	ret = wlcore_read(wl, wl->mbox_ptr[mbox_num], wl->mbox,
			  wl->mbox_size, false);
	if (ret < 0)
		return ret;

	/* process the descriptor */
	ret = wl->ops->process_mailbox_events(wl);
	if (ret < 0)
		return ret;

	/*
	 * TODO: we just need this because one bit is in a different
	 * place. Is there any better way?
	 */
	ret = wl->ops->ack_event(wl);

	return ret;
}
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * This file is part of wl1271
4 *
5 * Copyright (C) 2008-2009 Nokia Corporation
6 *
7 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
8 */
9
10#include "wlcore.h"
11#include "debug.h"
12#include "io.h"
13#include "event.h"
14#include "ps.h"
15#include "scan.h"
16#include "wl12xx_80211.h"
17#include "hw_ops.h"
18
19#define WL18XX_LOGGER_SDIO_BUFF_MAX (0x1020)
20#define WL18XX_DATA_RAM_BASE_ADDRESS (0x20000000)
21#define WL18XX_LOGGER_SDIO_BUFF_ADDR (0x40159c)
22#define WL18XX_LOGGER_BUFF_OFFSET (sizeof(struct fw_logger_information))
23#define WL18XX_LOGGER_READ_POINT_OFFSET (12)
24
/*
 * Drain the firmware logger ring buffer over the bus and forward its
 * contents to the host fwlog. The FW-supplied read pointer is validated
 * against the ring bounds before use, and the ring wrap (up to two
 * copy segments) is handled explicitly. The consumed position is
 * written back so the firmware can reuse the space.
 *
 * Returns the number of log bytes the firmware reported as available,
 * or 0 on failure.
 */
int wlcore_event_fw_logger(struct wl1271 *wl)
{
	int ret;
	struct fw_logger_information fw_log;
	u8 *buffer;
	u32 internal_fw_addrbase = WL18XX_DATA_RAM_BASE_ADDRESS;
	u32 addr = WL18XX_LOGGER_SDIO_BUFF_ADDR;
	u32 addr_ptr;
	u32 buff_start_ptr;
	u32 buff_read_ptr;
	u32 buff_end_ptr;
	u32 available_len;
	u32 actual_len;
	u32 clear_ptr;
	size_t len;
	u32 start_loc;

	buffer = kzalloc(WL18XX_LOGGER_SDIO_BUFF_MAX, GFP_KERNEL);
	if (!buffer) {
		wl1271_error("Fail to allocate fw logger memory");
		actual_len = 0;
		goto out;
	}

	/* pull header + log data in one bus transaction */
	ret = wlcore_read(wl, addr, buffer, WL18XX_LOGGER_SDIO_BUFF_MAX,
			  false);
	if (ret < 0) {
		wl1271_error("Fail to read logger buffer, error_id = %d",
			     ret);
		actual_len = 0;
		goto free_out;
	}

	memcpy(&fw_log, buffer, sizeof(fw_log));

	actual_len = le32_to_cpu(fw_log.actual_buff_size);
	if (actual_len == 0)
		goto free_out;

	/* Calculate the internal pointer to the fwlog structure */
	addr_ptr = internal_fw_addrbase + addr;

	/* Calculate the internal pointers to the start and end of log buffer */
	buff_start_ptr = addr_ptr + WL18XX_LOGGER_BUFF_OFFSET;
	buff_end_ptr = buff_start_ptr + le32_to_cpu(fw_log.max_buff_size);

	/* Read the read pointer and validate it */
	buff_read_ptr = le32_to_cpu(fw_log.buff_read_ptr);
	if (buff_read_ptr < buff_start_ptr ||
	    buff_read_ptr >= buff_end_ptr) {
		wl1271_error("buffer read pointer out of bounds: %x not in (%x-%x)\n",
			     buff_read_ptr, buff_start_ptr, buff_end_ptr);
		goto free_out;
	}

	/* host-buffer offset of the oldest unread byte */
	start_loc = buff_read_ptr - addr_ptr;
	available_len = buff_end_ptr - buff_read_ptr;

	/* Copy initial part up to the end of ring buffer */
	len = min(actual_len, available_len);
	wl12xx_copy_fwlog(wl, &buffer[start_loc], len);
	clear_ptr = addr_ptr + start_loc + len;
	if (clear_ptr == buff_end_ptr)
		clear_ptr = buff_start_ptr;

	/* Copy any remaining part from beginning of ring buffer */
	len = actual_len - len;
	if (len) {
		wl12xx_copy_fwlog(wl,
				  &buffer[WL18XX_LOGGER_BUFF_OFFSET],
				  len);
		clear_ptr = addr_ptr + WL18XX_LOGGER_BUFF_OFFSET + len;
	}

	/* Update the read pointer */
	ret = wlcore_write32(wl, addr + WL18XX_LOGGER_READ_POINT_OFFSET,
			     clear_ptr);
free_out:
	kfree(buffer);
out:
	return actual_len;
}
EXPORT_SYMBOL_GPL(wlcore_event_fw_logger);
108
109void wlcore_event_rssi_trigger(struct wl1271 *wl, s8 *metric_arr)
110{
111 struct wl12xx_vif *wlvif;
112 struct ieee80211_vif *vif;
113 enum nl80211_cqm_rssi_threshold_event event;
114 s8 metric = metric_arr[0];
115
116 wl1271_debug(DEBUG_EVENT, "RSSI trigger metric: %d", metric);
117
118 /* TODO: check actual multi-role support */
119 wl12xx_for_each_wlvif_sta(wl, wlvif) {
120 if (metric <= wlvif->rssi_thold)
121 event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
122 else
123 event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
124
125 vif = wl12xx_wlvif_to_vif(wlvif);
126 if (event != wlvif->last_rssi_event)
127 ieee80211_cqm_rssi_notify(vif, event, metric,
128 GFP_KERNEL);
129 wlvif->last_rssi_event = event;
130 }
131}
132EXPORT_SYMBOL_GPL(wlcore_event_rssi_trigger);
133
/*
 * Stop every active RX block-ack session on the given vif; called when
 * the firmware signals that BA is no longer allowed for its role.
 */
static void wl1271_stop_ba_event(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);

	if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
		/* STA/IBSS case: a single link towards the peer (BSSID) */
		u8 hlid = wlvif->sta.hlid;
		if (!wl->links[hlid].ba_bitmap)
			return;
		ieee80211_stop_rx_ba_session(vif, wl->links[hlid].ba_bitmap,
					     vif->bss_conf.bssid);
	} else {
		/* AP case: walk every associated station link */
		u8 hlid;
		struct wl1271_link *lnk;
		for_each_set_bit(hlid, wlvif->ap.sta_hlid_map,
				 wl->num_links) {
			lnk = &wl->links[hlid];
			/* skip links with no active BA sessions */
			if (!lnk->ba_bitmap)
				continue;

			ieee80211_stop_rx_ba_session(vif,
						     lnk->ba_bitmap,
						     lnk->addr);
		}
	}
}
159
160void wlcore_event_soft_gemini_sense(struct wl1271 *wl, u8 enable)
161{
162 struct wl12xx_vif *wlvif;
163
164 if (enable) {
165 set_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags);
166 } else {
167 clear_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags);
168 wl12xx_for_each_wlvif_sta(wl, wlvif) {
169 wl1271_recalc_rx_streaming(wl, wlvif);
170 }
171 }
172}
173EXPORT_SYMBOL_GPL(wlcore_event_soft_gemini_sense);
174
175void wlcore_event_sched_scan_completed(struct wl1271 *wl,
176 u8 status)
177{
178 wl1271_debug(DEBUG_EVENT, "PERIODIC_SCAN_COMPLETE_EVENT (status 0x%0x)",
179 status);
180
181 if (wl->sched_vif) {
182 ieee80211_sched_scan_stopped(wl->hw);
183 wl->sched_vif = NULL;
184 }
185}
186EXPORT_SYMBOL_GPL(wlcore_event_sched_scan_completed);
187
188void wlcore_event_ba_rx_constraint(struct wl1271 *wl,
189 unsigned long roles_bitmap,
190 unsigned long allowed_bitmap)
191{
192 struct wl12xx_vif *wlvif;
193
194 wl1271_debug(DEBUG_EVENT, "%s: roles=0x%lx allowed=0x%lx",
195 __func__, roles_bitmap, allowed_bitmap);
196
197 wl12xx_for_each_wlvif(wl, wlvif) {
198 if (wlvif->role_id == WL12XX_INVALID_ROLE_ID ||
199 !test_bit(wlvif->role_id , &roles_bitmap))
200 continue;
201
202 wlvif->ba_allowed = !!test_bit(wlvif->role_id,
203 &allowed_bitmap);
204 if (!wlvif->ba_allowed)
205 wl1271_stop_ba_event(wl, wlvif);
206 }
207}
208EXPORT_SYMBOL_GPL(wlcore_event_ba_rx_constraint);
209
/*
 * Handle a channel-switch completion event for every role listed in
 * roles_bitmap. Only vifs that had a switch in progress
 * (WLVIF_FLAG_CS_PROGRESS) are notified.
 */
void wlcore_event_channel_switch(struct wl1271 *wl,
				 unsigned long roles_bitmap,
				 bool success)
{
	struct wl12xx_vif *wlvif;
	struct ieee80211_vif *vif;

	wl1271_debug(DEBUG_EVENT, "%s: roles=0x%lx success=%d",
		     __func__, roles_bitmap, success);

	wl12xx_for_each_wlvif(wl, wlvif) {
		if (wlvif->role_id == WL12XX_INVALID_ROLE_ID ||
		    !test_bit(wlvif->role_id , &roles_bitmap))
			continue;

		/* only act on vifs that actually had a CS pending */
		if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS,
					&wlvif->flags))
			continue;

		vif = wl12xx_wlvif_to_vif(wlvif);

		if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
			/* STA: report result and stop the local timeout work */
			ieee80211_chswitch_done(vif, success, 0);
			cancel_delayed_work(&wlvif->channel_switch_work);
		} else {
			/* AP: beacons stay off until re-enabled after CSA */
			set_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags);
			ieee80211_csa_finish(vif, 0);
		}
	}
}
EXPORT_SYMBOL_GPL(wlcore_event_channel_switch);
241
242void wlcore_event_dummy_packet(struct wl1271 *wl)
243{
244 if (wl->plt) {
245 wl1271_info("Got DUMMY_PACKET event in PLT mode. FW bug, ignoring.");
246 return;
247 }
248
249 wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID");
250 wl1271_tx_dummy_packet(wl);
251}
252EXPORT_SYMBOL_GPL(wlcore_event_dummy_packet);
253
/*
 * For every station link set in sta_bitmap, locate the AP vif that owns
 * it and report a low-ACK condition to mac80211 (which triggers the
 * usual disconnect path). Stations with no owning AP vif are skipped.
 */
static void wlcore_disconnect_sta(struct wl1271 *wl, unsigned long sta_bitmap)
{
	/* packet count reported to mac80211 as failed */
	u32 num_packets = wl->conf.tx.max_tx_retries;
	struct wl12xx_vif *wlvif;
	struct ieee80211_vif *vif;
	struct ieee80211_sta *sta;
	const u8 *addr;
	int h;

	for_each_set_bit(h, &sta_bitmap, wl->num_links) {
		bool found = false;
		/* find the ap vif connected to this sta */
		wl12xx_for_each_wlvif_ap(wl, wlvif) {
			if (!test_bit(h, wlvif->ap.sta_hlid_map))
				continue;
			found = true;
			break;
		}
		if (!found)
			continue;

		vif = wl12xx_wlvif_to_vif(wlvif);
		addr = wl->links[h].addr;

		/* sta lookup must happen under RCU */
		rcu_read_lock();
		sta = ieee80211_find_sta(vif, addr);
		if (sta) {
			wl1271_debug(DEBUG_EVENT, "remove sta %d", h);
			ieee80211_report_low_ack(sta, num_packets);
		}
		rcu_read_unlock();
	}
}
287
/* Firmware reported repeated TX failures for these stations: kick them. */
void wlcore_event_max_tx_failure(struct wl1271 *wl, unsigned long sta_bitmap)
{
	wl1271_debug(DEBUG_EVENT, "MAX_TX_FAILURE_EVENT_ID");
	wlcore_disconnect_sta(wl, sta_bitmap);
}
EXPORT_SYMBOL_GPL(wlcore_event_max_tx_failure);
294
/* Firmware reported these stations as inactive: kick them. */
void wlcore_event_inactive_sta(struct wl1271 *wl, unsigned long sta_bitmap)
{
	wl1271_debug(DEBUG_EVENT, "INACTIVE_STA_EVENT_ID");
	wlcore_disconnect_sta(wl, sta_bitmap);
}
EXPORT_SYMBOL_GPL(wlcore_event_inactive_sta);
301
/*
 * Remain-on-channel is now in effect; tell mac80211 the channel is
 * ready. Ignored if no vif currently owns an ROC request.
 */
void wlcore_event_roc_complete(struct wl1271 *wl)
{
	wl1271_debug(DEBUG_EVENT, "REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID");
	if (wl->roc_vif)
		ieee80211_ready_on_channel(wl->hw);
}
EXPORT_SYMBOL_GPL(wlcore_event_roc_complete);
309
/*
 * Handle a firmware beacon-loss event for every STA role listed in
 * roles_bitmap: notify mac80211 of CQM beacon loss and (re)queue the
 * delayed connection-loss work. P2P vifs report connection loss
 * immediately instead of attempting roaming.
 */
void wlcore_event_beacon_loss(struct wl1271 *wl, unsigned long roles_bitmap)
{
	/*
	 * We are HW_MONITOR device. On beacon loss - queue
	 * connection loss work. Cancel it on REGAINED event.
	 */
	struct wl12xx_vif *wlvif;
	struct ieee80211_vif *vif;
	/* grace period before declaring the connection lost (ms) */
	int delay = wl->conf.conn.synch_fail_thold *
				wl->conf.conn.bss_lose_timeout;

	wl1271_info("Beacon loss detected. roles:0x%lx", roles_bitmap);

	wl12xx_for_each_wlvif_sta(wl, wlvif) {
		if (wlvif->role_id == WL12XX_INVALID_ROLE_ID ||
		    !test_bit(wlvif->role_id , &roles_bitmap))
			continue;

		vif = wl12xx_wlvif_to_vif(wlvif);

		/* don't attempt roaming in case of p2p */
		if (wlvif->p2p) {
			ieee80211_connection_loss(vif);
			continue;
		}

		/*
		 * if the work is already queued, it should take place.
		 * We don't want to delay the connection loss
		 * indication any more.
		 */
		ieee80211_queue_delayed_work(wl->hw,
					     &wlvif->connection_loss_work,
					     msecs_to_jiffies(delay));

		ieee80211_cqm_beacon_loss_notify(vif, GFP_KERNEL);
	}
}
EXPORT_SYMBOL_GPL(wlcore_event_beacon_loss);
349
350int wl1271_event_unmask(struct wl1271 *wl)
351{
352 int ret;
353
354 wl1271_debug(DEBUG_EVENT, "unmasking event_mask 0x%x", wl->event_mask);
355 ret = wl1271_acx_event_mbox_mask(wl, ~(wl->event_mask));
356 if (ret < 0)
357 return ret;
358
359 return 0;
360}
361
/*
 * Fetch and process one event mailbox from the firmware, then ack it.
 * @mbox_num: mailbox index; only 0 and 1 exist.
 * Returns 0 on success or a negative error code.
 */
int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num)
{
	int ret;

	wl1271_debug(DEBUG_EVENT, "EVENT on mbox %d", mbox_num);

	if (mbox_num > 1)
		return -EINVAL;

	/* first we read the mbox descriptor */
	ret = wlcore_read(wl, wl->mbox_ptr[mbox_num], wl->mbox,
			  wl->mbox_size, false);
	if (ret < 0)
		return ret;

	/* process the descriptor */
	ret = wl->ops->process_mailbox_events(wl);
	if (ret < 0)
		return ret;

	/*
	 * TODO: we just need this because one bit is in a different
	 * place. Is there any better way?
	 */
	ret = wl->ops->ack_event(wl);

	return ret;
}