/*
 * This file is part of wl1271
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */

#include "wlcore.h"
#include "debug.h"
#include "io.h"
#include "event.h"
#include "ps.h"
#include "scan.h"
#include "wl12xx_80211.h"
#include "hw_ops.h"

#define WL18XX_LOGGER_SDIO_BUFF_MAX	(0x1020)
#define WL18XX_DATA_RAM_BASE_ADDRESS	(0x20000000)
#define WL18XX_LOGGER_SDIO_BUFF_ADDR	(0x40159c)
#define WL18XX_LOGGER_BUFF_OFFSET	(sizeof(struct fw_logger_information))
#define WL18XX_LOGGER_READ_POINT_OFFSET		(12)

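/*
 * Read the firmware log from the FW logger buffer and push it to the host
 * fwlog.  The buffer starts with a struct fw_logger_information header
 * followed by a circular data area; the read/write pointers in the header
 * are FW-internal addresses, so they are translated to offsets within the
 * copied buffer before the data is handed to wl12xx_copy_fwlog().
 */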
int wlcore_event_fw_logger(struct wl1271 *wl)
{
	int ret;
	struct fw_logger_information fw_log;
	u8  *buffer;
	u32 internal_fw_addrbase = WL18XX_DATA_RAM_BASE_ADDRESS;
	u32 addr = WL18XX_LOGGER_SDIO_BUFF_ADDR;
	u32 end_buff_addr = WL18XX_LOGGER_SDIO_BUFF_ADDR +
				WL18XX_LOGGER_BUFF_OFFSET;
	u32 available_len;
	u32 actual_len;
	u32 clear_addr;
	size_t len;
	u32 start_loc;

	buffer = kzalloc(WL18XX_LOGGER_SDIO_BUFF_MAX, GFP_KERNEL);
	if (!buffer) {
		wl1271_error("Failed to allocate fw logger memory");
		fw_log.actual_buff_size = cpu_to_le32(0);
		goto out;
	}

	ret = wlcore_read(wl, addr, buffer, WL18XX_LOGGER_SDIO_BUFF_MAX,
			  false);
	if (ret < 0) {
		wl1271_error("Failed to read logger buffer, error %d",
			     ret);
		fw_log.actual_buff_size = cpu_to_le32(0);
		goto free_out;
	}

	memcpy(&fw_log, buffer, sizeof(fw_log));

	if (le32_to_cpu(fw_log.actual_buff_size) == 0)
		goto free_out;

	/* translate the FW-internal read pointer to an offset in the buffer */
	actual_len = le32_to_cpu(fw_log.actual_buff_size);
	start_loc = (le32_to_cpu(fw_log.buff_read_ptr) -
			internal_fw_addrbase) - addr;
	end_buff_addr += le32_to_cpu(fw_log.max_buff_size);
	available_len = end_buff_addr -
			(le32_to_cpu(fw_log.buff_read_ptr) -
				 internal_fw_addrbase);
	actual_len = min(actual_len, available_len);
	len = actual_len;

	/* copy up to the end of the circular data area */
	wl12xx_copy_fwlog(wl, &buffer[start_loc], len);
	clear_addr = addr + start_loc + le32_to_cpu(fw_log.actual_buff_size) +
			internal_fw_addrbase;

	/* if the log wrapped, copy the remainder from the start of the area */
	len = le32_to_cpu(fw_log.actual_buff_size) - len;
	if (len) {
		wl12xx_copy_fwlog(wl,
				  &buffer[WL18XX_LOGGER_BUFF_OFFSET],
				  len);
		clear_addr = addr + WL18XX_LOGGER_BUFF_OFFSET + len +
				internal_fw_addrbase;
	}

	/* double check that clear address and write pointer are the same */
	if (clear_addr != le32_to_cpu(fw_log.buff_write_ptr)) {
		wl1271_error("Clear addr calculation mismatch: clear = %x, write = %x",
			     clear_addr, le32_to_cpu(fw_log.buff_write_ptr));
	}

	/* notify the FW that the buffer was read by advancing the read pointer */
	ret = wlcore_write32(wl, addr + WL18XX_LOGGER_READ_POINT_OFFSET,
			     fw_log.buff_write_ptr);
free_out:
	kfree(buffer);
out:
	return le32_to_cpu(fw_log.actual_buff_size);
}
EXPORT_SYMBOL_GPL(wlcore_event_fw_logger);

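/*
 * RSSI trigger event: translate the reported metric into a mac80211 CQM
 * high/low threshold event for each station vif, notifying mac80211 only
 * when the event differs from the last one reported.
 */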
void wlcore_event_rssi_trigger(struct wl1271 *wl, s8 *metric_arr)
{
	struct wl12xx_vif *wlvif;
	struct ieee80211_vif *vif;
	enum nl80211_cqm_rssi_threshold_event event;
	s8 metric = metric_arr[0];

	wl1271_debug(DEBUG_EVENT, "RSSI trigger metric: %d", metric);

	/* TODO: check actual multi-role support */
	wl12xx_for_each_wlvif_sta(wl, wlvif) {
		if (metric <= wlvif->rssi_thold)
			event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
		else
			event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;

		vif = wl12xx_wlvif_to_vif(wlvif);
		if (event != wlvif->last_rssi_event)
			ieee80211_cqm_rssi_notify(vif, event, metric,
						  GFP_KERNEL);
		wlvif->last_rssi_event = event;
	}
}
EXPORT_SYMBOL_GPL(wlcore_event_rssi_trigger);

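/*
 * Tear down all active RX BA sessions for the given vif: the single STA
 * link in managed mode, or every connected station link in AP mode.
 */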
static void wl1271_stop_ba_event(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);

	if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
		u8 hlid = wlvif->sta.hlid;

		if (!wl->links[hlid].ba_bitmap)
			return;

		ieee80211_stop_rx_ba_session(vif, wl->links[hlid].ba_bitmap,
					     vif->bss_conf.bssid);
	} else {
		u8 hlid;
		struct wl1271_link *lnk;

		for_each_set_bit(hlid, wlvif->ap.sta_hlid_map,
				 wl->num_links) {
			lnk = &wl->links[hlid];
			if (!lnk->ba_bitmap)
				continue;

			ieee80211_stop_rx_ba_session(vif,
						     lnk->ba_bitmap,
						     lnk->addr);
		}
	}
}

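/*
 * Soft gemini (BT coexistence) sense event: track the SG state in the
 * driver flags and recalculate RX streaming for station vifs when it is
 * disabled.
 */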
void wlcore_event_soft_gemini_sense(struct wl1271 *wl, u8 enable)
{
	struct wl12xx_vif *wlvif;

	if (enable) {
		set_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags);
	} else {
		clear_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags);
		wl12xx_for_each_wlvif_sta(wl, wlvif) {
			wl1271_recalc_rx_streaming(wl, wlvif);
		}
	}
}
EXPORT_SYMBOL_GPL(wlcore_event_soft_gemini_sense);

void wlcore_event_sched_scan_completed(struct wl1271 *wl,
				       u8 status)
{
	wl1271_debug(DEBUG_EVENT, "PERIODIC_SCAN_COMPLETE_EVENT (status 0x%x)",
		     status);

	if (wl->sched_vif) {
		ieee80211_sched_scan_stopped(wl->hw);
		wl->sched_vif = NULL;
	}
}
EXPORT_SYMBOL_GPL(wlcore_event_sched_scan_completed);

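/*
 * BA RX constraint event: the FW reports, per role, whether RX block-ack
 * sessions are currently allowed.  Stop existing BA sessions for roles
 * where they are no longer permitted.
 */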
void wlcore_event_ba_rx_constraint(struct wl1271 *wl,
				   unsigned long roles_bitmap,
				   unsigned long allowed_bitmap)
{
	struct wl12xx_vif *wlvif;

	wl1271_debug(DEBUG_EVENT, "%s: roles=0x%lx allowed=0x%lx",
		     __func__, roles_bitmap, allowed_bitmap);

	wl12xx_for_each_wlvif(wl, wlvif) {
		if (wlvif->role_id == WL12XX_INVALID_ROLE_ID ||
		    !test_bit(wlvif->role_id, &roles_bitmap))
			continue;

		wlvif->ba_allowed = !!test_bit(wlvif->role_id,
					       &allowed_bitmap);
		if (!wlvif->ba_allowed)
			wl1271_stop_ba_event(wl, wlvif);
	}
}
EXPORT_SYMBOL_GPL(wlcore_event_ba_rx_constraint);

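/*
 * Channel switch completion: for station roles, report the result to
 * mac80211 and cancel the channel switch watchdog work; for AP roles,
 * mark the beacon as disabled (it will be reconfigured) and finalize
 * the CSA in mac80211.
 */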
void wlcore_event_channel_switch(struct wl1271 *wl,
				 unsigned long roles_bitmap,
				 bool success)
{
	struct wl12xx_vif *wlvif;
	struct ieee80211_vif *vif;

	wl1271_debug(DEBUG_EVENT, "%s: roles=0x%lx success=%d",
		     __func__, roles_bitmap, success);

	wl12xx_for_each_wlvif(wl, wlvif) {
		if (wlvif->role_id == WL12XX_INVALID_ROLE_ID ||
		    !test_bit(wlvif->role_id, &roles_bitmap))
			continue;

		if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS,
					&wlvif->flags))
			continue;

		vif = wl12xx_wlvif_to_vif(wlvif);

		if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
			ieee80211_chswitch_done(vif, success);
			cancel_delayed_work(&wlvif->channel_switch_work);
		} else {
			set_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags);
			ieee80211_csa_finish(vif);
		}
	}
}
EXPORT_SYMBOL_GPL(wlcore_event_channel_switch);

void wlcore_event_dummy_packet(struct wl1271 *wl)
{
	if (wl->plt) {
		wl1271_info("Got DUMMY_PACKET event in PLT mode.  FW bug, ignoring.");
		return;
	}

	wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID");
	wl1271_tx_dummy_packet(wl);
}
EXPORT_SYMBOL_GPL(wlcore_event_dummy_packet);

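/*
 * For each station in the bitmap, find the AP vif it is linked to and
 * report a low-ack condition to mac80211 so that the upper layers can
 * disconnect the station.
 */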
static void wlcore_disconnect_sta(struct wl1271 *wl, unsigned long sta_bitmap)
{
	u32 num_packets = wl->conf.tx.max_tx_retries;
	struct wl12xx_vif *wlvif;
	struct ieee80211_vif *vif;
	struct ieee80211_sta *sta;
	const u8 *addr;
	int h;

	for_each_set_bit(h, &sta_bitmap, wl->num_links) {
		bool found = false;

		/* find the ap vif connected to this sta */
		wl12xx_for_each_wlvif_ap(wl, wlvif) {
			if (!test_bit(h, wlvif->ap.sta_hlid_map))
				continue;
			found = true;
			break;
		}
		if (!found)
			continue;

		vif = wl12xx_wlvif_to_vif(wlvif);
		addr = wl->links[h].addr;

		rcu_read_lock();
		sta = ieee80211_find_sta(vif, addr);
		if (sta) {
			wl1271_debug(DEBUG_EVENT, "remove sta %d", h);
			ieee80211_report_low_ack(sta, num_packets);
		}
		rcu_read_unlock();
	}
}

void wlcore_event_max_tx_failure(struct wl1271 *wl, unsigned long sta_bitmap)
{
	wl1271_debug(DEBUG_EVENT, "MAX_TX_FAILURE_EVENT_ID");
	wlcore_disconnect_sta(wl, sta_bitmap);
}
EXPORT_SYMBOL_GPL(wlcore_event_max_tx_failure);

void wlcore_event_inactive_sta(struct wl1271 *wl, unsigned long sta_bitmap)
{
	wl1271_debug(DEBUG_EVENT, "INACTIVE_STA_EVENT_ID");
	wlcore_disconnect_sta(wl, sta_bitmap);
}
EXPORT_SYMBOL_GPL(wlcore_event_inactive_sta);

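/*
 * Remain-on-channel complete event: report to mac80211 that the device is
 * ready on the requested channel, if an ROC request is pending.
 */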
void wlcore_event_roc_complete(struct wl1271 *wl)
{
	wl1271_debug(DEBUG_EVENT, "REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID");
	if (wl->roc_vif)
		ieee80211_ready_on_channel(wl->hw);
}
EXPORT_SYMBOL_GPL(wlcore_event_roc_complete);

void wlcore_event_beacon_loss(struct wl1271 *wl, unsigned long roles_bitmap)
{
	/*
	 * We act as a HW connection-monitor device: on beacon loss, queue
	 * the connection loss work; it is cancelled on a REGAINED event.
	 */
	struct wl12xx_vif *wlvif;
	struct ieee80211_vif *vif;
	int delay = wl->conf.conn.synch_fail_thold *
				wl->conf.conn.bss_lose_timeout;

	wl1271_info("Beacon loss detected. roles:0x%lx", roles_bitmap);

	wl12xx_for_each_wlvif_sta(wl, wlvif) {
		if (wlvif->role_id == WL12XX_INVALID_ROLE_ID ||
		    !test_bit(wlvif->role_id, &roles_bitmap))
			continue;

		vif = wl12xx_wlvif_to_vif(wlvif);

		/* don't attempt roaming in case of p2p */
		if (wlvif->p2p) {
			ieee80211_connection_loss(vif);
			continue;
		}

		/*
		 * If the work is already queued, let it run as scheduled;
		 * we don't want to delay the connection loss indication
		 * any further.
		 */
		ieee80211_queue_delayed_work(wl->hw,
					     &wlvif->connection_loss_work,
					     msecs_to_jiffies(delay));

		ieee80211_cqm_beacon_loss_notify(vif, GFP_KERNEL);
	}
}
EXPORT_SYMBOL_GPL(wlcore_event_beacon_loss);

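/*
 * Configure the FW event mailbox mask so that only the events in
 * wl->event_mask are delivered to the host.
 */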
int wl1271_event_unmask(struct wl1271 *wl)
{
	int ret;

	wl1271_debug(DEBUG_EVENT, "unmasking event_mask 0x%x", wl->event_mask);
	ret = wl1271_acx_event_mbox_mask(wl, ~(wl->event_mask));
	if (ret < 0)
		return ret;

	return 0;
}

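/*
 * Handle an event mailbox interrupt: read the mailbox descriptor, let the
 * chip-specific code process the events it contains, then acknowledge the
 * event to the FW.
 */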
int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num)
{
	int ret;

	wl1271_debug(DEBUG_EVENT, "EVENT on mbox %d", mbox_num);

	if (mbox_num > 1)
		return -EINVAL;

	/* first we read the mbox descriptor */
	ret = wlcore_read(wl, wl->mbox_ptr[mbox_num], wl->mbox,
			  wl->mbox_size, false);
	if (ret < 0)
		return ret;

	/* process the descriptor */
	ret = wl->ops->process_mailbox_events(wl);
	if (ret < 0)
		return ret;

	/*
	 * TODO: we just need this because one bit is in a different
	 * place.  Is there any better way?
	 */
	ret = wl->ops->ack_event(wl);

	return ret;
}