/*
 * This file is part of wl1271
 *
 * Copyright (C) 2009 Nokia Corporation
 *
 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>

#include "wl12xx.h"
#include "io.h"
#include "reg.h"
#include "ps.h"
#include "tx.h"

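/*
 * Set the default WEP key index, using the AP or STA variant of the
 * command according to the current BSS type.
 */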
static int wl1271_set_default_wep_key(struct wl1271 *wl, u8 id)
{
	int ret;
	bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);

	if (is_ap)
		ret = wl1271_cmd_set_ap_default_wep_key(wl, id);
	else
		ret = wl1271_cmd_set_sta_default_wep_key(wl, id);

	if (ret < 0)
		return ret;

	wl1271_debug(DEBUG_CRYPT, "default wep key idx: %d", (int)id);
	return 0;
}

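/*
 * Reserve a free TX descriptor id for the skb and record the mapping,
 * or return -EBUSY when all descriptors are in use.
 */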
static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb)
{
	int id;

	id = find_first_zero_bit(wl->tx_frames_map, ACX_TX_DESCRIPTORS);
	if (id >= ACX_TX_DESCRIPTORS)
		return -EBUSY;

	__set_bit(id, wl->tx_frames_map);
	wl->tx_frames[id] = skb;
	wl->tx_frames_cnt++;
	return id;
}

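/*
 * Release a TX descriptor id. If the firmware was marked busy because
 * every descriptor was in use, clear the busy flag now that one is free.
 */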
static void wl1271_free_tx_id(struct wl1271 *wl, int id)
{
	if (__test_and_clear_bit(id, wl->tx_frames_map)) {
		if (unlikely(wl->tx_frames_cnt == ACX_TX_DESCRIPTORS))
			clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);

		wl->tx_frames[id] = NULL;
		wl->tx_frames_cnt--;
	}
}

static int wl1271_tx_update_filters(struct wl1271 *wl,
				    struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)(skb->data +
				       sizeof(struct wl1271_tx_hw_descr));

	/*
	 * Stop BSSID-based filtering before transmitting authentication
	 * requests. This way the HW will never drop authentication
	 * responses coming from BSSIDs it isn't familiar with (e.g. on
	 * roaming).
	 */
	if (!ieee80211_is_auth(hdr->frame_control))
		return 0;

	wl1271_configure_filters(wl, FIF_OTHER_BSS);

	return wl1271_acx_rx_config(wl, wl->rx_config, wl->rx_filter);
}

static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
						 struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	/*
	 * Add the station to the known list before transmitting the
	 * authentication response. This way it won't get de-authed by the
	 * FW when transmitting too soon.
	 */
	hdr = (struct ieee80211_hdr *)(skb->data +
				       sizeof(struct wl1271_tx_hw_descr));
	if (ieee80211_is_auth(hdr->frame_control))
		wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
}

static void wl1271_tx_regulate_link(struct wl1271 *wl, u8 hlid)
{
	bool fw_ps;
	u8 tx_blks;

	/* only regulate station links */
	if (hlid < WL1271_AP_STA_HLID_START)
		return;

	fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
	tx_blks = wl->links[hlid].allocated_blks;

	/*
	 * If the link is in FW PS and has enough data buffered in the FW,
	 * we can put it into high-level PS and clean out its TX queues.
	 */
	if (fw_ps && tx_blks >= WL1271_PS_STA_MAX_BLOCKS)
		wl1271_ps_link_start(wl, hlid, true);
}

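/*
 * Map an skb to a host link id (hlid): the per-station link for frames
 * with a known STA entry, otherwise the global link for management
 * frames and the broadcast link for everything else.
 */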
u8 wl1271_tx_get_hlid(struct sk_buff *skb)
{
	struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb);

	if (control->control.sta) {
		struct wl1271_station *wl_sta;

		wl_sta = (struct wl1271_station *)
				control->control.sta->drv_priv;
		return wl_sta->hlid;
	} else {
		struct ieee80211_hdr *hdr;

		hdr = (struct ieee80211_hdr *)skb->data;
		if (ieee80211_is_mgmt(hdr->frame_control))
			return WL1271_AP_GLOBAL_HLID;
		else
			return WL1271_AP_BROADCAST_HLID;
	}
}

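/*
 * Align the packet length for the transfer: chips with the blocksize
 * alignment quirk require alignment to the bus block size, the rest
 * only to the generic TX alignment.
 */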
static unsigned int wl12xx_calc_packet_alignment(struct wl1271 *wl,
						 unsigned int packet_length)
{
	if (wl->quirks & WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT)
		return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
	else
		return ALIGN(packet_length, WL1271_TX_ALIGN_TO);
}

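/*
 * Reserve firmware memory blocks and a descriptor id for the skb, and
 * fill in the per-chip memory fields of the HW descriptor. Returns
 * -EAGAIN when the aggregation buffer is full, and -EBUSY when no
 * descriptor id or not enough memory blocks are available.
 */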
static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
			      u32 buf_offset, u8 hlid)
{
	struct wl1271_tx_hw_descr *desc;
	u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
	u32 len;
	u32 total_blocks;
	int id, ret = -EBUSY, ac;
	u32 spare_blocks;

	if (unlikely(wl->quirks & WL12XX_QUIRK_USE_2_SPARE_BLOCKS))
		spare_blocks = 2;
	else
		spare_blocks = 1;

	if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE)
		return -EAGAIN;

	/* allocate free identifier for the packet */
	id = wl1271_alloc_tx_id(wl, skb);
	if (id < 0)
		return id;

	/*
	 * Approximate the number of blocks required for this packet
	 * in the firmware.
	 */
	len = wl12xx_calc_packet_alignment(wl, total_len);

	total_blocks = (len + TX_HW_BLOCK_SIZE - 1) / TX_HW_BLOCK_SIZE +
		spare_blocks;

	if (total_blocks <= wl->tx_blocks_available) {
		desc = (struct wl1271_tx_hw_descr *)skb_push(
			skb, total_len - skb->len);

		/* HW descriptor fields change between wl127x and wl128x */
		if (wl->chip.id == CHIP_ID_1283_PG20) {
			desc->wl128x_mem.total_mem_blocks = total_blocks;
		} else {
			desc->wl127x_mem.extra_blocks = spare_blocks;
			desc->wl127x_mem.total_mem_blocks = total_blocks;
		}

		desc->id = id;

		wl->tx_blocks_available -= total_blocks;

		ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
		wl->tx_allocated_blocks[ac] += total_blocks;

		if (wl->bss_type == BSS_TYPE_AP_BSS)
			wl->links[hlid].allocated_blks += total_blocks;

		ret = 0;

		wl1271_debug(DEBUG_TX,
			     "tx_allocate: size: %d, blocks: %d, id: %d",
			     total_len, total_blocks, id);
	} else {
		wl1271_free_tx_id(wl, id);
	}

	return ret;
}

static bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
{
	return wl->dummy_packet == skb;
}

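/*
 * Fill in the remaining HW descriptor fields: packet life time, TID,
 * session counter, rate policy and last-word padding. Also relocates
 * the 802.11 header when extra space was reserved for a security
 * header.
 */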
static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
			       u32 extra, struct ieee80211_tx_info *control,
			       u8 hlid)
{
	struct timespec ts;
	struct wl1271_tx_hw_descr *desc;
	int aligned_len, ac, rate_idx;
	s64 hosttime;
	u16 tx_attr;

	desc = (struct wl1271_tx_hw_descr *) skb->data;

	/* relocate space for security header */
	if (extra) {
		void *framestart = skb->data + sizeof(*desc);
		u16 fc = *(u16 *)(framestart + extra);
		int hdrlen = ieee80211_hdrlen(cpu_to_le16(fc));
		memmove(framestart, framestart + extra, hdrlen);
	}

	/* configure packet life time */
	getnstimeofday(&ts);
	hosttime = (timespec_to_ns(&ts) >> 10);
	desc->start_time = cpu_to_le32(hosttime - wl->time_offset);

	if (wl->bss_type != BSS_TYPE_AP_BSS)
		desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
	else
		desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU);

	/* queue */
	ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
	desc->tid = skb->priority;

	if (wl12xx_is_dummy_packet(wl, skb)) {
		/*
		 * FW expects the dummy packet to have an invalid session id -
		 * any session id that is different from the one set in the
		 * join.
		 */
		tx_attr = ((~wl->session_counter) <<
			   TX_HW_ATTR_OFST_SESSION_COUNTER) &
			   TX_HW_ATTR_SESSION_COUNTER;

		tx_attr |= TX_HW_ATTR_TX_DUMMY_REQ;
	} else {
		/* configure the tx attributes */
		tx_attr =
			wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER;
	}

	if (wl->bss_type != BSS_TYPE_AP_BSS) {
		desc->aid = hlid;

		/*
		 * If the packets are destined for an AP (i.e. have a STA
		 * entry), send them with AP rate policies; otherwise use
		 * the default basic rates.
		 */
		if (control->control.sta)
			rate_idx = ACX_TX_AP_FULL_RATE;
		else
			rate_idx = ACX_TX_BASIC_RATE;
	} else {
		desc->hlid = hlid;
		switch (hlid) {
		case WL1271_AP_GLOBAL_HLID:
			rate_idx = ACX_TX_AP_MODE_MGMT_RATE;
			break;
		case WL1271_AP_BROADCAST_HLID:
			rate_idx = ACX_TX_AP_MODE_BCST_RATE;
			break;
		default:
			rate_idx = ac;
			break;
		}
	}

	tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;
	desc->reserved = 0;

	aligned_len = wl12xx_calc_packet_alignment(wl, skb->len);

	if (wl->chip.id == CHIP_ID_1283_PG20) {
		desc->wl128x_mem.extra_bytes = aligned_len - skb->len;
		desc->length = cpu_to_le16(aligned_len >> 2);

		wl1271_debug(DEBUG_TX, "tx_fill_hdr: hlid: %d "
			     "tx_attr: 0x%x len: %d life: %d mem: %d",
			     desc->hlid, tx_attr,
			     le16_to_cpu(desc->length),
			     le16_to_cpu(desc->life_time),
			     desc->wl128x_mem.total_mem_blocks);
	} else {
		int pad;

		/* Store the aligned length in terms of words */
		desc->length = cpu_to_le16(aligned_len >> 2);

		/* calculate number of padding bytes */
		pad = aligned_len - skb->len;
		tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD;

		wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d hlid: %d "
			     "tx_attr: 0x%x len: %d life: %d mem: %d", pad,
			     desc->hlid, tx_attr,
			     le16_to_cpu(desc->length),
			     le16_to_cpu(desc->life_time),
			     desc->wl127x_mem.total_mem_blocks);
	}

	desc->tx_attr = cpu_to_le16(tx_attr);
}

/* caller must hold wl->mutex */
static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
				   u32 buf_offset)
{
	struct ieee80211_tx_info *info;
	u32 extra = 0;
	int ret = 0;
	u32 total_len;
	u8 hlid;

	if (!skb)
		return -EINVAL;

	info = IEEE80211_SKB_CB(skb);

	if (info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
		extra = WL1271_TKIP_IV_SPACE;

	if (info->control.hw_key) {
		bool is_wep;
		u8 idx = info->control.hw_key->hw_key_idx;
		u32 cipher = info->control.hw_key->cipher;

		is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
			 (cipher == WLAN_CIPHER_SUITE_WEP104);

		if (unlikely(is_wep && wl->default_key != idx)) {
			ret = wl1271_set_default_wep_key(wl, idx);
			if (ret < 0)
				return ret;
			wl->default_key = idx;
		}
	}

	if (wl->bss_type == BSS_TYPE_AP_BSS)
		hlid = wl1271_tx_get_hlid(skb);
	else
		hlid = TX_HW_DEFAULT_AID;

	ret = wl1271_tx_allocate(wl, skb, extra, buf_offset, hlid);
	if (ret < 0)
		return ret;

	wl1271_tx_fill_hdr(wl, skb, extra, info, hlid);

	if (wl->bss_type == BSS_TYPE_AP_BSS) {
		wl1271_tx_ap_update_inconnection_sta(wl, skb);
		wl1271_tx_regulate_link(wl, hlid);
	} else {
		wl1271_tx_update_filters(wl, skb);
	}

	/*
	 * The length of each packet is stored in terms of
	 * words. Thus, we must pad the skb data to make sure its
	 * length is aligned.  The number of padding bytes is computed
	 * and set in wl1271_tx_fill_hdr.
	 * In special cases, we want to align to a specific block size
	 * (e.g. for wl128x with SDIO we align to 256).
	 */
	total_len = wl12xx_calc_packet_alignment(wl, skb->len);

	memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len);
	memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);

	/* Revert side effects in the dummy packet skb, so it can be reused */
	if (wl12xx_is_dummy_packet(wl, skb))
		skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));

	return total_len;
}

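/*
 * Translate a rate set built on the band's bitrate indices into the
 * firmware's enabled-rates representation (hw_value bits), including
 * MCS rates when CONFIG_WL12XX_HT is set.
 */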
u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set)
{
	struct ieee80211_supported_band *band;
	u32 enabled_rates = 0;
	int bit;

	band = wl->hw->wiphy->bands[wl->band];
	for (bit = 0; bit < band->n_bitrates; bit++) {
		if (rate_set & 0x1)
			enabled_rates |= band->bitrates[bit].hw_value;
		rate_set >>= 1;
	}

#ifdef CONFIG_WL12XX_HT
	/* MCS rates are indicated on bits 16 - 23 */
	rate_set >>= HW_HT_RATES_OFFSET - band->n_bitrates;

	for (bit = 0; bit < 8; bit++) {
		if (rate_set & 0x1)
			enabled_rates |= (CONF_HW_BIT_RATE_MCS_0 << bit);
		rate_set >>= 1;
	}
#endif

	return enabled_rates;
}

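/*
 * Wake any mac80211 queue that was stopped once its driver queue has
 * drained below the low watermark.
 */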
void wl1271_handle_tx_low_watermark(struct wl1271 *wl)
{
	unsigned long flags;
	int i;

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		if (test_bit(i, &wl->stopped_queues_map) &&
		    wl->tx_queue_count[i] <= WL1271_TX_QUEUE_LOW_WATERMARK) {
			/* firmware buffer has space, restart queues */
			spin_lock_irqsave(&wl->wl_lock, flags);
			ieee80211_wake_queue(wl->hw,
					     wl1271_tx_get_mac80211_queue(i));
			clear_bit(i, &wl->stopped_queues_map);
			spin_unlock_irqrestore(&wl->wl_lock, flags);
		}
	}
}

static struct sk_buff_head *wl1271_select_queue(struct wl1271 *wl,
						struct sk_buff_head *queues)
{
	int i, q = -1;
	u32 min_blks = 0xffffffff;

	/*
	 * Find the non-empty AC with packets to transmit for which the
	 * FW has the fewest allocated blocks.
	 */
	for (i = 0; i < NUM_TX_QUEUES; i++)
		if (!skb_queue_empty(&queues[i]) &&
		    (wl->tx_allocated_blocks[i] < min_blks)) {
			q = i;
			min_blks = wl->tx_allocated_blocks[q];
		}

	if (q == -1)
		return NULL;

	return &queues[q];
}

static struct sk_buff *wl1271_sta_skb_dequeue(struct wl1271 *wl)
{
	struct sk_buff *skb = NULL;
	unsigned long flags;
	struct sk_buff_head *queue;

	queue = wl1271_select_queue(wl, wl->tx_queue);
	if (!queue)
		goto out;

	skb = skb_dequeue(queue);

out:
	if (skb) {
		int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
		spin_lock_irqsave(&wl->wl_lock, flags);
		wl->tx_queue_count[q]--;
		spin_unlock_irqrestore(&wl->wl_lock, flags);
	}

	return skb;
}

static struct sk_buff *wl1271_ap_skb_dequeue(struct wl1271 *wl)
{
	struct sk_buff *skb = NULL;
	unsigned long flags;
	int i, h, start_hlid;
	struct sk_buff_head *queue;

	/* start from the link after the last one */
	start_hlid = (wl->last_tx_hlid + 1) % AP_MAX_LINKS;

	/* dequeue according to AC, round robin on each link */
	for (i = 0; i < AP_MAX_LINKS; i++) {
		h = (start_hlid + i) % AP_MAX_LINKS;

		/* only consider connected stations */
		if (h >= WL1271_AP_STA_HLID_START &&
		    !test_bit(h - WL1271_AP_STA_HLID_START, wl->ap_hlid_map))
			continue;

		queue = wl1271_select_queue(wl, wl->links[h].tx_queue);
		if (!queue)
			continue;

		skb = skb_dequeue(queue);
		if (skb)
			break;
	}

	if (skb) {
		int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
		wl->last_tx_hlid = h;
		spin_lock_irqsave(&wl->wl_lock, flags);
		wl->tx_queue_count[q]--;
		spin_unlock_irqrestore(&wl->wl_lock, flags);
	} else {
		wl->last_tx_hlid = 0;
	}

	return skb;
}

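/*
 * Dequeue the next skb for transmission, AP or STA style as
 * appropriate. When nothing real is queued but a dummy packet is
 * pending, hand out the dummy packet instead.
 */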
static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
{
	unsigned long flags;
	struct sk_buff *skb = NULL;

	if (wl->bss_type == BSS_TYPE_AP_BSS)
		skb = wl1271_ap_skb_dequeue(wl);
	else
		skb = wl1271_sta_skb_dequeue(wl);

	if (!skb &&
	    test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
		int q;

		skb = wl->dummy_packet;
		q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
		spin_lock_irqsave(&wl->wl_lock, flags);
		wl->tx_queue_count[q]--;
		spin_unlock_irqrestore(&wl->wl_lock, flags);
	}

	return skb;
}

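/*
 * Return an skb to the head of its queue after it could not be handed
 * to the firmware, restoring the queue counter and, in AP mode, the
 * round-robin position so the same packet is dequeued next time.
 */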
static void wl1271_skb_queue_head(struct wl1271 *wl, struct sk_buff *skb)
{
	unsigned long flags;
	int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));

	if (wl12xx_is_dummy_packet(wl, skb)) {
		set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
	} else if (wl->bss_type == BSS_TYPE_AP_BSS) {
		u8 hlid = wl1271_tx_get_hlid(skb);
		skb_queue_head(&wl->links[hlid].tx_queue[q], skb);

		/* make sure we dequeue the same packet next time */
		wl->last_tx_hlid = (hlid + AP_MAX_LINKS - 1) % AP_MAX_LINKS;
	} else {
		skb_queue_head(&wl->tx_queue[q], skb);
	}

	spin_lock_irqsave(&wl->wl_lock, flags);
	wl->tx_queue_count[q]++;
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}

static bool wl1271_tx_is_data_present(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);

	return ieee80211_is_data_present(hdr->frame_control);
}

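/*
 * Main TX path: dequeue packets, aggregate them into the aggregation
 * buffer and flush the buffer to the device, stopping when either the
 * aggregation buffer or the firmware runs out of room.
 */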
void wl1271_tx_work_locked(struct wl1271 *wl)
{
	struct sk_buff *skb;
	u32 buf_offset = 0;
	bool sent_packets = false;
	bool had_data = false;
	bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
	int ret;

	if (unlikely(wl->state == WL1271_STATE_OFF))
		return;

	while ((skb = wl1271_skb_dequeue(wl))) {
		if (wl1271_tx_is_data_present(skb))
			had_data = true;

		ret = wl1271_prepare_tx_frame(wl, skb, buf_offset);
		if (ret == -EAGAIN) {
			/*
			 * Aggregation buffer is full.
			 * Flush buffer and try again.
			 */
			wl1271_skb_queue_head(wl, skb);
			wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
				     buf_offset, true);
			sent_packets = true;
			buf_offset = 0;
			continue;
		} else if (ret == -EBUSY) {
			/*
			 * Firmware buffer is full.
			 * Queue back last skb, and stop aggregating.
			 */
			wl1271_skb_queue_head(wl, skb);
			/* No work left, avoid scheduling redundant tx work */
			set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
			goto out_ack;
		} else if (ret < 0) {
			dev_kfree_skb(skb);
			goto out_ack;
		}
		buf_offset += ret;
		wl->tx_packets_count++;
	}

out_ack:
	if (buf_offset) {
		wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
			     buf_offset, true);
		sent_packets = true;
	}
	if (sent_packets) {
		/*
		 * Interrupt the firmware with the new packets. This is only
		 * required for older hardware revisions.
		 */
		if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION)
			wl1271_write32(wl, WL1271_HOST_WR_ACCESS,
				       wl->tx_packets_count);

		wl1271_handle_tx_low_watermark(wl);
	}
	if (!is_ap && wl->conf.rx_streaming.interval && had_data &&
	    (wl->conf.rx_streaming.always ||
	     test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) {
		u32 timeout = wl->conf.rx_streaming.duration;

		/* enable rx streaming */
		if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags))
			ieee80211_queue_work(wl->hw,
					     &wl->rx_streaming_enable_work);

		mod_timer(&wl->rx_streaming_timer,
			  jiffies + msecs_to_jiffies(timeout));
	}
}

void wl1271_tx_work(struct work_struct *work)
{
	struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
	int ret;

	mutex_lock(&wl->mutex);
	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wl1271_tx_work_locked(wl);

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}

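/*
 * Handle a single TX result: fill in the mac80211 status (ACK, rate,
 * retries), track the security sequence number, strip the private
 * headers and hand the skb back to the stack.
 */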
static void wl1271_tx_complete_packet(struct wl1271 *wl,
				      struct wl1271_tx_hw_res_descr *result)
{
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	int id = result->id;
	int rate = -1;
	u8 retries = 0;

	/* check for id legality */
	if (unlikely(id >= ACX_TX_DESCRIPTORS || wl->tx_frames[id] == NULL)) {
		wl1271_warning("TX result illegal id: %d", id);
		return;
	}

	skb = wl->tx_frames[id];
	info = IEEE80211_SKB_CB(skb);

	if (wl12xx_is_dummy_packet(wl, skb)) {
		wl1271_free_tx_id(wl, id);
		return;
	}

	/* update the TX status info */
	if (result->status == TX_SUCCESS) {
		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
			info->flags |= IEEE80211_TX_STAT_ACK;
		rate = wl1271_rate_to_idx(result->rate_class_index, wl->band);
		retries = result->ack_failures;
	} else if (result->status == TX_RETRY_EXCEEDED) {
		wl->stats.excessive_retries++;
		retries = result->ack_failures;
	}

	info->status.rates[0].idx = rate;
	info->status.rates[0].count = retries;
	info->status.rates[0].flags = 0;
	info->status.ack_signal = -1;

	wl->stats.retry_count += result->ack_failures;

	/*
	 * Update the sequence number only when relevant, i.e. only in
	 * sessions of TKIP, AES and GEM (not in open or WEP sessions).
	 */
	if (info->control.hw_key &&
	    (info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP ||
	     info->control.hw_key->cipher == WLAN_CIPHER_SUITE_CCMP ||
	     info->control.hw_key->cipher == WL1271_CIPHER_SUITE_GEM)) {
		u8 fw_lsb = result->tx_security_sequence_number_lsb;
		u8 cur_lsb = wl->tx_security_last_seq_lsb;

		/*
		 * Update the security sequence number, taking care of
		 * potential wrap-around.
		 */
		wl->tx_security_seq += (fw_lsb - cur_lsb + 256) % 256;
		wl->tx_security_last_seq_lsb = fw_lsb;
	}

	/* remove private header from packet */
	skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));

	/* remove TKIP header space if present */
	if (info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
		int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
		memmove(skb->data + WL1271_TKIP_IV_SPACE, skb->data, hdrlen);
		skb_pull(skb, WL1271_TKIP_IV_SPACE);
	}

	wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x"
		     " status 0x%x",
		     result->id, skb, result->ack_failures,
		     result->rate_class_index, result->status);

	/* return the packet to the stack */
	skb_queue_tail(&wl->deferred_tx_queue, skb);
	queue_work(wl->freezable_wq, &wl->netstack_work);
	wl1271_free_tx_id(wl, result->id);
}

/* Called upon reception of a TX complete interrupt */
void wl1271_tx_complete(struct wl1271 *wl)
{
	struct wl1271_acx_mem_map *memmap =
		(struct wl1271_acx_mem_map *)wl->target_mem_map;
	u32 count, fw_counter;
	u32 i;

	/* read the tx results from the chipset */
	wl1271_read(wl, le32_to_cpu(memmap->tx_result),
		    wl->tx_res_if, sizeof(*wl->tx_res_if), false);
	fw_counter = le32_to_cpu(wl->tx_res_if->tx_result_fw_counter);

	/* write host counter to chipset (to ack) */
	wl1271_write32(wl, le32_to_cpu(memmap->tx_result) +
		       offsetof(struct wl1271_tx_hw_res_if,
				tx_result_host_counter), fw_counter);

	count = fw_counter - wl->tx_results_count;
	wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);

	/* verify that the result buffer is not getting overrun */
	if (unlikely(count > TX_HW_RESULT_QUEUE_LEN))
		wl1271_warning("TX result overflow from chipset: %d", count);

	/* process the results */
	for (i = 0; i < count; i++) {
		struct wl1271_tx_hw_res_descr *result;
		u8 offset = wl->tx_results_count & TX_HW_RESULT_QUEUE_LEN_MASK;

		/* process the packet */
		result = &(wl->tx_res_if->tx_results_queue[offset]);
		wl1271_tx_complete_packet(wl, result);

		wl->tx_results_count++;
	}
}

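/*
 * Drop all skbs queued on one link, reporting each back to mac80211
 * with an invalid rate index, and fix up the global queue counters.
 */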
void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
{
	struct sk_buff *skb;
	int i;
	unsigned long flags;
	struct ieee80211_tx_info *info;
	int total[NUM_TX_QUEUES];

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		total[i] = 0;
		while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) {
			wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb);
			info = IEEE80211_SKB_CB(skb);
			info->status.rates[0].idx = -1;
			info->status.rates[0].count = 0;
			ieee80211_tx_status_ni(wl->hw, skb);
			total[i]++;
		}
	}

	spin_lock_irqsave(&wl->wl_lock, flags);
	for (i = 0; i < NUM_TX_QUEUES; i++)
		wl->tx_queue_count[i] -= total[i];
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	wl1271_handle_tx_low_watermark(wl);
}

/* caller must hold wl->mutex and TX must be stopped */
void wl1271_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
{
	int i;
	struct sk_buff *skb;
	struct ieee80211_tx_info *info;

	/* TX failure */
	if (wl->bss_type == BSS_TYPE_AP_BSS) {
		for (i = 0; i < AP_MAX_LINKS; i++) {
			wl1271_tx_reset_link_queues(wl, i);
			wl->links[i].allocated_blks = 0;
			wl->links[i].prev_freed_blks = 0;
		}

		wl->last_tx_hlid = 0;
	} else {
		for (i = 0; i < NUM_TX_QUEUES; i++) {
			while ((skb = skb_dequeue(&wl->tx_queue[i]))) {
				wl1271_debug(DEBUG_TX, "freeing skb 0x%p",
					     skb);

				if (!wl12xx_is_dummy_packet(wl, skb)) {
					info = IEEE80211_SKB_CB(skb);
					info->status.rates[0].idx = -1;
					info->status.rates[0].count = 0;
					ieee80211_tx_status_ni(wl->hw, skb);
				}
			}
			wl->tx_queue_count[i] = 0;
		}
	}

	wl->stopped_queues_map = 0;

	/*
	 * Make sure the driver is at a consistent state, in case this
	 * function is called from a context other than interface removal.
	 * This call will always wake the TX queues.
	 */
	if (reset_tx_queues)
		wl1271_handle_tx_low_watermark(wl);

	for (i = 0; i < ACX_TX_DESCRIPTORS; i++) {
		if (wl->tx_frames[i] == NULL)
			continue;

		skb = wl->tx_frames[i];
		wl1271_free_tx_id(wl, i);
		wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);

		if (!wl12xx_is_dummy_packet(wl, skb)) {
			/*
			 * Remove private headers before passing the skb to
			 * mac80211.
			 */
			info = IEEE80211_SKB_CB(skb);
			skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
			if (info->control.hw_key &&
			    info->control.hw_key->cipher ==
			    WLAN_CIPHER_SUITE_TKIP) {
				int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
				memmove(skb->data + WL1271_TKIP_IV_SPACE,
					skb->data, hdrlen);
				skb_pull(skb, WL1271_TKIP_IV_SPACE);
			}

			info->status.rates[0].idx = -1;
			info->status.rates[0].count = 0;

			ieee80211_tx_status_ni(wl->hw, skb);
		}
	}
}

#define WL1271_TX_FLUSH_TIMEOUT 500000

/* caller must *NOT* hold wl->mutex */
void wl1271_tx_flush(struct wl1271 *wl)
{
	unsigned long timeout;

	timeout = jiffies + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT);

	while (!time_after(jiffies, timeout)) {
		mutex_lock(&wl->mutex);
		wl1271_debug(DEBUG_TX, "flushing tx buffer: %d %d",
			     wl->tx_frames_cnt,
			     wl1271_tx_total_queue_count(wl));
		if ((wl->tx_frames_cnt == 0) &&
		    (wl1271_tx_total_queue_count(wl) == 0)) {
			mutex_unlock(&wl->mutex);
			return;
		}
		mutex_unlock(&wl->mutex);
		msleep(1);
	}

	wl1271_warning("Unable to flush all TX buffers, timed out.");
}

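/*
 * Return the lowest rate enabled in the basic rate set; if the set is
 * unexpectedly empty, warn and restore it from the configured basic
 * rate before searching.
 */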
u32 wl1271_tx_min_rate_get(struct wl1271 *wl)
{
	int i;
	u32 rate = 0;

	if (!wl->basic_rate_set) {
		WARN_ON(1);
		wl->basic_rate_set = wl->conf.tx.basic_rate;
	}

	for (i = 0; !rate; i++) {
		if ((wl->basic_rate_set >> i) & 0x1)
			rate = 1 << i;
	}

	return rate;
}