/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define SKB_CB_ATHBUF(__skb)	(*((struct ath_buf **)__skb->cb))
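
/*
 * SKB_CB_ATHBUF() stashes the ath_buf that backs an RX skb in the
 * skb's control buffer (skb->cb), so the buffer can be recovered
 * again after the skb has been queued through the EDMA RX FIFOs.
 */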

static inline bool ath_is_alt_ant_ratio_better(int alt_ratio, int maxdelta,
					       int mindelta, int main_rssi_avg,
					       int alt_rssi_avg, int pkt_count)
{
	return (((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
		 (alt_rssi_avg > main_rssi_avg + maxdelta)) ||
		(alt_rssi_avg > main_rssi_avg + mindelta)) && (pkt_count > 50);
}

static inline bool ath_ant_div_comb_alt_check(u8 div_group, int alt_ratio,
					      int curr_main_set, int curr_alt_set,
					      int alt_rssi_avg, int main_rssi_avg)
{
	bool result = false;

	switch (div_group) {
	case 0:
		if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
			result = true;
		break;
	case 1:
	case 2:
		if ((((curr_main_set == ATH_ANT_DIV_COMB_LNA2) &&
		      (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) &&
		      (alt_rssi_avg >= (main_rssi_avg - 5))) ||
		     ((curr_main_set == ATH_ANT_DIV_COMB_LNA1) &&
		      (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) &&
		      (alt_rssi_avg >= (main_rssi_avg - 2)))) &&
		    (alt_rssi_avg >= 4))
			result = true;
		else
			result = false;
		break;
	}

	return result;
}

static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
{
	return sc->ps_enabled &&
	       (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
}

/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self-link the last descriptor.
 * The MAC acknowledges BA status as long as it copies frames to the
 * host buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if the last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct sk_buff *skb;

	ATH_RXBUF_RESET(bf);

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	BUG_ON(skb == NULL);
	ds->ds_vdata = skb->data;

	/*
	 * setup rx descriptors. The rx_bufsize here tells the hardware
	 * how much data it can DMA to us and that we are prepared
	 * to process
	 */
	ath9k_hw_setuprxdesc(ah, ds,
			     common->rx_bufsize,
			     0);

	if (sc->rx.rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->rx.rxlink = bf->bf_daddr;

	sc->rx.rxlink = &ds->ds_link;
}
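
/*
 * Note: ath_rx_buf_link() keeps sc->rx.rxlink pointing at the ds_link
 * field of the most recently queued descriptor, so each new buffer is
 * appended in O(1): the first buffer primes the hardware RX pointer,
 * later ones simply patch the previous descriptor's link field.
 */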

static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(sc->sc_ah, antenna);
	sc->rx.defant = antenna;
	sc->rx.rxotherant = 0;
}

static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	ath_hw_setbssidmask(common);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}

static bool ath_rx_edma_buf_link(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;
	struct ath_buf *bf;

	rx_edma = &sc->rx.rx_edma[qtype];
	if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
		return false;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	list_del_init(&bf->list);

	skb = bf->bf_mpdu;

	ATH_RXBUF_RESET(bf);
	memset(skb->data, 0, ah->caps.rx_status_len);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   ah->caps.rx_status_len, DMA_TO_DEVICE);

	SKB_CB_ATHBUF(skb) = bf;
	ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
	skb_queue_tail(&rx_edma->rx_fifo, skb);

	return true;
}
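
/*
 * On EDMA hardware (the AR9003 family) the RX status is written by the
 * MAC into the first rx_status_len bytes of the buffer itself rather
 * than into a separate descriptor; that is why the status area is
 * zeroed and synced back to the device before the buffer is handed to
 * the hardware RX FIFO above.
 */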

static void ath_rx_addbuffer_edma(struct ath_softc *sc,
				  enum ath9k_rx_qtype qtype, int size)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u32 nbuf = 0;

	if (list_empty(&sc->rx.rxbuf)) {
		ath_dbg(common, ATH_DBG_QUEUE, "No free rx buf available\n");
		return;
	}

	while (!list_empty(&sc->rx.rxbuf)) {
		nbuf++;

		if (!ath_rx_edma_buf_link(sc, qtype))
			break;

		if (nbuf >= size)
			break;
	}
}

static void ath_rx_remove_buffer(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_buf *bf;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;

	rx_edma = &sc->rx.rx_edma[qtype];

	while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}
}

static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf;

	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		if (bf->bf_mpdu) {
			dma_unmap_single(sc->dev, bf->bf_buf_addr,
					 common->rx_bufsize,
					 DMA_BIDIRECTIONAL);
			dev_kfree_skb_any(bf->bf_mpdu);
			bf->bf_buf_addr = 0;
			bf->bf_mpdu = NULL;
		}
	}

	INIT_LIST_HEAD(&sc->rx.rxbuf);

	kfree(sc->rx.rx_bufptr);
	sc->rx.rx_bufptr = NULL;
}

static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
{
	skb_queue_head_init(&rx_edma->rx_fifo);
	skb_queue_head_init(&rx_edma->rx_buffers);
	rx_edma->rx_fifo_hwsize = size;
}

static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0, i;
	u32 size;

	ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
				ah->caps.rx_status_len);

	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
			       ah->caps.rx_lp_qdepth);
	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
			       ah->caps.rx_hp_qdepth);

	size = sizeof(struct ath_buf) * nbufs;
	bf = kzalloc(size, GFP_KERNEL);
	if (!bf)
		return -ENOMEM;

	INIT_LIST_HEAD(&sc->rx.rxbuf);
	sc->rx.rx_bufptr = bf;

	for (i = 0; i < nbufs; i++, bf++) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
		if (!skb) {
			error = -ENOMEM;
			goto rx_init_fail;
		}

		memset(skb->data, 0, common->rx_bufsize);
		bf->bf_mpdu = skb;

		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common,
				"dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto rx_init_fail;
		}

		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}

	return 0;

rx_init_fail:
	ath_rx_edma_cleanup(sc);
	return error;
}

static void ath_edma_start_recv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxbuflock);

	ath9k_hw_rxena(sc->sc_ah);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);

	ath_opmode_init(sc);

	ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_OFFCHANNEL));

	spin_unlock_bh(&sc->rx.rxbuflock);
}

static void ath_edma_stop_recv(struct ath_softc *sc)
{
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
}

int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	spin_lock_init(&sc->sc_pcu_lock);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_lock_init(&sc->rx.rxbuflock);

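	/*
	 * Each RX buffer is sized to half the maximum MPDU length plus
	 * room for the RX status that EDMA hardware prepends to the
	 * frame (rx_status_len is 0 on pre-AR9003 chips).
	 */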
	common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
			     sc->sc_ah->caps.rx_status_len;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		return ath_rx_edma_init(sc, nbufs);
	} else {
		ath_dbg(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
			common->cachelsz, common->rx_bufsize);

		/* Initialize rx descriptors */

		error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
					  "rx", nbufs, 1, 0);
		if (error != 0) {
			ath_err(common,
				"failed to allocate rx descriptors: %d\n",
				error);
			goto err;
		}

		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = ath_rxbuf_alloc(common, common->rx_bufsize,
					      GFP_KERNEL);
			if (skb == NULL) {
				error = -ENOMEM;
				goto err;
			}

			bf->bf_mpdu = skb;
			bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
							 common->rx_bufsize,
							 DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(sc->dev,
						       bf->bf_buf_addr))) {
				dev_kfree_skb_any(skb);
				bf->bf_mpdu = NULL;
				bf->bf_buf_addr = 0;
				ath_err(common,
					"dma_mapping_error() on RX init\n");
				error = -ENOMEM;
				goto err;
			}
		}
		sc->rx.rxlink = NULL;
	}

err:
	if (error)
		ath_rx_cleanup(sc);

	return error;
}

void ath_rx_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_rx_edma_cleanup(sc);
		return;
	} else {
		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = bf->bf_mpdu;
			if (skb) {
				dma_unmap_single(sc->dev, bf->bf_buf_addr,
						 common->rx_bufsize,
						 DMA_FROM_DEVICE);
				dev_kfree_skb(skb);
				bf->bf_buf_addr = 0;
				bf->bf_mpdu = NULL;
			}
		}

		if (sc->rx.rxdma.dd_desc_len != 0)
			ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
	}
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 *   - when scanning
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
#define	RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)

	u32 rfilt;

	rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
		| ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	if (sc->rx.rxfilter & FIF_PROBE_REQ)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/*
	 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for a
	 * station mode interface or when in monitor mode. AP mode does not
	 * need this since it receives all in-BSS frames anyway.
	 */
	if (sc->sc_ah->is_monitoring)
		rfilt |= ATH9K_RX_FILTER_PROM;

	if (sc->rx.rxfilter & FIF_CONTROL)
		rfilt |= ATH9K_RX_FILTER_CONTROL;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
	    (sc->nvifs <= 1) &&
	    !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
		rfilt |= ATH9K_RX_FILTER_MYBEACON;
	else
		rfilt |= ATH9K_RX_FILTER_BEACON;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
	    (sc->rx.rxfilter & FIF_PSPOLL))
		rfilt |= ATH9K_RX_FILTER_PSPOLL;

	if (conf_is_ht(&sc->hw->conf))
		rfilt |= ATH9K_RX_FILTER_COMP_BAR;

	if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
		/* The following may also be needed for other older chips */
		if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
			rfilt |= ATH9K_RX_FILTER_PROM;
		rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
	}

	return rfilt;

#undef RX_FILTER_PRESERVE
}

int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_edma_start_recv(sc);
		return 0;
	}

	spin_lock_bh(&sc->rx.rxbuflock);
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	sc->rx.rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);

start_recv:
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_OFFCHANNEL));

	spin_unlock_bh(&sc->rx.rxbuflock);

	return 0;
}

bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	bool stopped, reset = false;

	spin_lock_bh(&sc->rx.rxbuflock);
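	/*
	 * Abort PCU reception and clear the RX filter first so no new
	 * frames get queued, then ask the hardware to stop RX DMA; the
	 * return value tells the caller whether RX can be restarted
	 * without a full reset.
	 */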
	ath9k_hw_abortpcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	stopped = ath9k_hw_stopdmarecv(ah, &reset);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_edma_stop_recv(sc);
	else
		sc->rx.rxlink = NULL;
	spin_unlock_bh(&sc->rx.rxbuflock);

	if (!(ah->ah_flags & AH_UNPLUGGED) &&
	    unlikely(!stopped)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Could not stop RX, we could be "
			"confusing the DMA engine when we start RX up\n");
		ATH_DBG_WARN_ON_ONCE(!stopped);
	}
	return stopped && !reset;
}

void ath_flushrecv(struct ath_softc *sc)
{
	sc->sc_flags |= SC_OP_RXFLUSH;
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_rx_tasklet(sc, 1, true);
	ath_rx_tasklet(sc, 1, false);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
}

static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
	/* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
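	/*
	 * TIM element layout: dtim_count, dtim_period, bitmap_ctrl and
	 * the partial virtual bitmap. Bit 0 of bitmap_ctrl indicates
	 * buffered broadcast/multicast traffic and is only meaningful
	 * in a DTIM beacon (dtim_count == 0).
	 */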
	struct ieee80211_mgmt *mgmt;
	u8 *pos, *end, id, elen;
	struct ieee80211_tim_ie *tim;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	pos = mgmt->u.beacon.variable;
	end = skb->data + skb->len;

	while (pos + 2 < end) {
		id = *pos++;
		elen = *pos++;
		if (pos + elen > end)
			break;

		if (id == WLAN_EID_TIM) {
			if (elen < sizeof(*tim))
				break;
			tim = (struct ieee80211_tim_ie *) pos;
			if (tim->dtim_count != 0)
				break;
			return tim->bitmap_ctrl & 0x01;
		}

		pos += elen;
	}

	return false;
}

static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_mgmt *mgmt;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	if (skb->len < 24 + 8 + 2 + 2)
		return;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0) {
		/* TODO: This doesn't work well if you have stations
		 * associated to two different APs because curbssid
		 * is just the last AP that any of the stations associated
		 * with.
		 */
		return; /* not from our current AP */
	}

	sc->ps_flags &= ~PS_WAIT_FOR_BEACON;

	if (sc->ps_flags & PS_BEACON_SYNC) {
		sc->ps_flags &= ~PS_BEACON_SYNC;
		ath_dbg(common, ATH_DBG_PS,
			"Reconfigure Beacon timers based on timestamp from the AP\n");
		ath_set_beacon(sc);
		sc->ps_flags &= ~PS_TSFOOR_SYNC;
	}

	if (ath_beacon_dtim_pending_cab(skb)) {
		/*
		 * Remain awake waiting for buffered broadcast/multicast
		 * frames. If the last broadcast/multicast frame is not
		 * received properly, the next beacon frame will work as
		 * a backup trigger for returning into NETWORK SLEEP state,
		 * so we are waiting for it as well.
		 */
		ath_dbg(common, ATH_DBG_PS,
			"Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n");
		sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
		return;
	}

	if (sc->ps_flags & PS_WAIT_FOR_CAB) {
		/*
		 * This can happen if a broadcast frame is dropped or the AP
		 * fails to send a frame indicating that all CAB frames have
		 * been delivered.
		 */
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
		ath_dbg(common, ATH_DBG_PS,
			"PS wait for CAB frames timed out\n");
	}
}

static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Process Beacon and CAB receive in PS state */
	if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
	    && ieee80211_is_beacon(hdr->frame_control))
		ath_rx_ps_beacon(sc, skb);
	else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
		 (ieee80211_is_data(hdr->frame_control) ||
		  ieee80211_is_action(hdr->frame_control)) &&
		 is_multicast_ether_addr(hdr->addr1) &&
		 !ieee80211_has_moredata(hdr->frame_control)) {
		/*
		 * No more broadcast/multicast frames to be received at this
		 * point.
		 */
		sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
		ath_dbg(common, ATH_DBG_PS,
			"All PS CAB frames received, back to sleep\n");
	} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
		   !is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_morefrags(hdr->frame_control)) {
		sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
		ath_dbg(common, ATH_DBG_PS,
			"Going back to sleep after having received PS-Poll data (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}
}

static bool ath_edma_get_buffers(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int ret;

	skb = skb_peek(&rx_edma->rx_fifo);
	if (!skb)
		return false;

	bf = SKB_CB_ATHBUF(skb);
	BUG_ON(!bf);

	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize, DMA_FROM_DEVICE);

	ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data);
	if (ret == -EINPROGRESS) {
		/* let the device gain the buffer again */
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
					   common->rx_bufsize, DMA_FROM_DEVICE);
		return false;
	}

	__skb_unlink(skb, &rx_edma->rx_fifo);
	if (ret == -EINVAL) {
		/* corrupt descriptor, skip this one and the following one */
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);
		skb = skb_peek(&rx_edma->rx_fifo);
		if (!skb)
			return true;

		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);

		__skb_unlink(skb, &rx_edma->rx_fifo);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);
		return true;
	}
	skb_queue_tail(&rx_edma->rx_buffers, skb);

	return true;
}

static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
						struct ath_rx_status *rs,
						enum ath9k_rx_qtype qtype)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct sk_buff *skb;
	struct ath_buf *bf;

	while (ath_edma_get_buffers(sc, qtype))
		;
	skb = __skb_dequeue(&rx_edma->rx_buffers);
	if (!skb)
		return NULL;

	bf = SKB_CB_ATHBUF(skb);
	ath9k_hw_process_rxdesc_edma(sc->sc_ah, rs, skb->data);
	return bf;
}
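
/*
 * ath_edma_get_next_rx_buf() first drains every completed buffer from
 * the hardware FIFO into rx_buffers, then hands back one completed
 * frame at a time; the second process_rxdesc call fills in the
 * caller's ath_rx_status from the buffer's embedded status area.
 */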

static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
					   struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct ath_buf *bf;
	int ret;

	if (list_empty(&sc->rx.rxbuf)) {
		sc->rx.rxlink = NULL;
		return NULL;
	}

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ds = bf->bf_desc;

	/*
	 * Must provide the virtual address of the current
	 * descriptor, the physical address, and the virtual
	 * address of the next descriptor in the h/w chain.
	 * This allows the HAL to look ahead to see if the
	 * hardware is done with a descriptor by checking the
	 * done bit in the following descriptor and the address
	 * of the current descriptor the DMA engine is working
	 * on.  All this is necessary because of our use of
	 * a self-linked list to avoid rx overruns.
	 */
	ret = ath9k_hw_rxprocdesc(ah, ds, rs, 0);
	if (ret == -EINPROGRESS) {
		struct ath_rx_status trs;
		struct ath_buf *tbf;
		struct ath_desc *tds;

		memset(&trs, 0, sizeof(trs));
		if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
			sc->rx.rxlink = NULL;
			return NULL;
		}

		tbf = list_entry(bf->list.next, struct ath_buf, list);

		/*
		 * On some hardware the descriptor status words could
		 * get corrupted, including the done bit. Because of
		 * this, check if the next descriptor's done bit is
		 * set or not.
		 *
		 * If the next descriptor's done bit is set, the current
		 * descriptor has been corrupted. Force s/w to discard
		 * this descriptor and continue...
		 */

		tds = tbf->bf_desc;
		ret = ath9k_hw_rxprocdesc(ah, tds, &trs, 0);
		if (ret == -EINPROGRESS)
			return NULL;
	}

	if (!bf->bf_mpdu)
		return bf;

	/*
	 * Synchronize the DMA transfer with CPU before
	 * 1. accessing the frame
	 * 2. requeueing the same buffer to h/w
	 */
	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize,
				DMA_FROM_DEVICE);

	return bf;
}

/* Assumes you've already done the endian to CPU conversion */
static bool ath9k_rx_accept(struct ath_common *common,
			    struct ieee80211_hdr *hdr,
			    struct ieee80211_rx_status *rxs,
			    struct ath_rx_status *rx_stats,
			    bool *decrypt_error)
{
	bool is_mc, is_valid_tkip, strip_mic, mic_error;
	struct ath_hw *ah = common->ah;
	__le16 fc;
	u8 rx_status_len = ah->caps.rx_status_len;

	fc = hdr->frame_control;

	is_mc = !!is_multicast_ether_addr(hdr->addr1);
	is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID &&
		test_bit(rx_stats->rs_keyix, common->tkip_keymap);
	strip_mic = is_valid_tkip && !(rx_stats->rs_status &
		(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC));

	if (!rx_stats->rs_datalen)
		return false;
	/*
	 * rs_status follows rs_datalen so if rs_datalen is too large
	 * we can take a hint that hardware corrupted it, so ignore
	 * those frames.
	 */
	if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len))
		return false;

	/* Only use error bits from the last fragment */
	if (rx_stats->rs_more)
		return true;

	mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) &&
		!ieee80211_has_morefrags(fc) &&
		!(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
		(rx_stats->rs_status & ATH9K_RXERR_MIC);

	/*
	 * The rx_stats->rs_status will not be set until the end of the
	 * chained descriptors so it can be ignored if rs_more is set. The
	 * rs_more will be false at the last element of the chained
	 * descriptors.
	 */
	if (rx_stats->rs_status != 0) {
		if (rx_stats->rs_status & ATH9K_RXERR_CRC) {
			rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
			mic_error = false;
		}
		if (rx_stats->rs_status & ATH9K_RXERR_PHY)
			return false;

		if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) {
			*decrypt_error = true;
			mic_error = false;
		}

		/*
		 * Reject error frames with the exception of
		 * decryption and MIC failures. For monitor mode,
		 * we also ignore the CRC error.
		 */
		if (ah->is_monitoring) {
			if (rx_stats->rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
			      ATH9K_RXERR_CRC))
				return false;
		} else {
			if (rx_stats->rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
				return false;
			}
		}
	}

	/*
	 * For unicast frames the MIC error bit can have false positives,
	 * so all MIC error reports need to be validated in software.
	 * False negatives are not common, so skip software verification
	 * if the hardware considers the MIC valid.
	 */
	if (strip_mic)
		rxs->flag |= RX_FLAG_MMIC_STRIPPED;
	else if (is_mc && mic_error)
		rxs->flag |= RX_FLAG_MMIC_ERROR;

	return true;
}

static int ath9k_process_rate(struct ath_common *common,
			      struct ieee80211_hw *hw,
			      struct ath_rx_status *rx_stats,
			      struct ieee80211_rx_status *rxs)
{
	struct ieee80211_supported_band *sband;
	enum ieee80211_band band;
	unsigned int i = 0;

	band = hw->conf.channel->band;
	sband = hw->wiphy->bands[band];

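	/*
	 * The hardware reports HT (MCS) rates with the high bit of
	 * rs_rate set; the low seven bits are the MCS index. Legacy
	 * rates are matched against the band's bitrate table below.
	 */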
	if (rx_stats->rs_rate & 0x80) {
		/* HT rate */
		rxs->flag |= RX_FLAG_HT;
		if (rx_stats->rs_flags & ATH9K_RX_2040)
			rxs->flag |= RX_FLAG_40MHZ;
		if (rx_stats->rs_flags & ATH9K_RX_GI)
			rxs->flag |= RX_FLAG_SHORT_GI;
		rxs->rate_idx = rx_stats->rs_rate & 0x7f;
		return 0;
	}

	for (i = 0; i < sband->n_bitrates; i++) {
		if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
			rxs->rate_idx = i;
			return 0;
		}
		if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
			rxs->flag |= RX_FLAG_SHORTPRE;
			rxs->rate_idx = i;
			return 0;
		}
	}

	/*
	 * No valid hardware bitrate found -- we should not get here
	 * because hardware has already validated this frame as OK.
	 */
	ath_dbg(common, ATH_DBG_XMIT,
		"unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
		rx_stats->rs_rate);

	return -EINVAL;
}

static void ath9k_process_rssi(struct ath_common *common,
			       struct ieee80211_hw *hw,
			       struct ieee80211_hdr *hdr,
			       struct ath_rx_status *rx_stats)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = common->ah;
	int last_rssi;
	__le16 fc;

	if ((ah->opmode != NL80211_IFTYPE_STATION) &&
	    (ah->opmode != NL80211_IFTYPE_ADHOC))
		return;

	fc = hdr->frame_control;
	if (!ieee80211_is_beacon(fc) ||
	    compare_ether_addr(hdr->addr3, common->curbssid)) {
		/* TODO: This doesn't work well if you have stations
		 * associated to two different APs because curbssid
		 * is just the last AP that any of the stations associated
		 * with.
		 */
		return;
	}

	if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr)
		ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);

	last_rssi = sc->last_rssi;
	if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
		rx_stats->rs_rssi = ATH_EP_RND(last_rssi,
					       ATH_RSSI_EP_MULTIPLIER);
	if (rx_stats->rs_rssi < 0)
		rx_stats->rs_rssi = 0;

	/* Update Beacon RSSI, this is used by ANI. */
	ah->stats.avgbrssi = rx_stats->rs_rssi;
}

/*
 * For decrypt or demic errors, we only mark the packet status here and
 * always push the frame up to let mac80211 handle the actual error
 * case, be it no decryption key or a real decryption error. This lets
 * us keep statistics there.
 */
static int ath9k_rx_skb_preprocess(struct ath_common *common,
				   struct ieee80211_hw *hw,
				   struct ieee80211_hdr *hdr,
				   struct ath_rx_status *rx_stats,
				   struct ieee80211_rx_status *rx_status,
				   bool *decrypt_error)
{
	memset(rx_status, 0, sizeof(struct ieee80211_rx_status));

	/*
	 * everything but the rate is checked here, the rate check is done
	 * separately to avoid doing two lookups for a rate for each frame.
	 */
	if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
		return -EINVAL;

	/* Only use status info from the last fragment */
	if (rx_stats->rs_more)
		return 0;

	ath9k_process_rssi(common, hw, hdr, rx_stats);

	if (ath9k_process_rate(common, hw, rx_stats, rx_status))
		return -EINVAL;

	rx_status->band = hw->conf.channel->band;
	rx_status->freq = hw->conf.channel->center_freq;
	rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi;
	rx_status->antenna = rx_stats->rs_antenna;
	rx_status->flag |= RX_FLAG_MACTIME_MPDU;

	return 0;
}

static void ath9k_rx_skb_postprocess(struct ath_common *common,
				     struct sk_buff *skb,
				     struct ath_rx_status *rx_stats,
				     struct ieee80211_rx_status *rxs,
				     bool decrypt_error)
{
	struct ath_hw *ah = common->ah;
	struct ieee80211_hdr *hdr;
	int hdrlen, padpos, padsize;
	u8 keyix;
	__le16 fc;

	/* see if any padding is done by the hw and remove it */
	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;
	padpos = ath9k_cmn_padpos(hdr->frame_control);

	/* The MAC header is padded to have 32-bit boundary if the
	 * packet payload is non-zero. The general calculation for
	 * padsize would take into account odd header lengths:
	 * padsize = (4 - padpos % 4) % 4; However, since only
	 * even-length headers are used, padding can only be 0 or 2
	 * bytes and we can optimize this a bit. In addition, we must
	 * not try to remove padding from short control frames that do
	 * not have payload. */
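	/*
	 * Worked example: a QoS data header is 26 bytes, so padpos = 26
	 * and padsize = 26 & 3 = 2; the two hardware pad bytes are
	 * removed below by shifting the header up over them.
	 */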
	padsize = padpos & 3;
	if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	keyix = rx_stats->rs_keyix;

	if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
	    ieee80211_has_protected(fc)) {
		rxs->flag |= RX_FLAG_DECRYPTED;
	} else if (ieee80211_has_protected(fc)
		   && !decrypt_error && skb->len >= hdrlen + 4) {
		keyix = skb->data[hdrlen + 3] >> 6;

		if (test_bit(keyix, common->keymap))
			rxs->flag |= RX_FLAG_DECRYPTED;
	}
	if (ah->sw_mgmt_crypto &&
	    (rxs->flag & RX_FLAG_DECRYPTED) &&
	    ieee80211_is_mgmt(fc))
		/* Use software decrypt for management frames. */
		rxs->flag &= ~RX_FLAG_DECRYPTED;
}

static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb,
				      struct ath_hw_antcomb_conf ant_conf,
				      int main_rssi_avg)
{
	antcomb->quick_scan_cnt = 0;

	if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
		antcomb->rssi_lna2 = main_rssi_avg;
	else if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA1)
		antcomb->rssi_lna1 = main_rssi_avg;

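	/*
	 * The case labels encode (main_lna_conf << 4) | alt_lna_conf;
	 * judging by the case comments, 0 is A-B (LNA1 minus LNA2),
	 * 1 is LNA2, 2 is LNA1 and 3 is A+B (LNA1 plus LNA2).
	 */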
	switch ((ant_conf.main_lna_conf << 4) | ant_conf.alt_lna_conf) {
	case 0x10: /* LNA2 A-B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
		break;
	case 0x20: /* LNA1 A-B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
		break;
	case 0x21: /* LNA1 LNA2 */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		break;
	case 0x12: /* LNA2 LNA1 */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		break;
	case 0x13: /* LNA2 A+B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
		break;
	case 0x23: /* LNA1 A+B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
		break;
	default:
		break;
	}
}

static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
					       struct ath_hw_antcomb_conf *div_ant_conf,
					       int main_rssi_avg, int alt_rssi_avg,
					       int alt_ratio)
{
	/* alt_good */
	switch (antcomb->quick_scan_cnt) {
	case 0:
		/* set main to the stored main conf, and alt to first conf */
		div_ant_conf->main_lna_conf = antcomb->main_conf;
		div_ant_conf->alt_lna_conf = antcomb->first_quick_scan_conf;
		break;
	case 1:
		/* set main to the stored main conf, and alt to second conf */
		div_ant_conf->main_lna_conf = antcomb->main_conf;
		div_ant_conf->alt_lna_conf = antcomb->second_quick_scan_conf;
		antcomb->rssi_first = main_rssi_avg;
		antcomb->rssi_second = alt_rssi_avg;

		if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
			/* main is LNA1 */
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		} else {
			if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
			      (alt_rssi_avg > main_rssi_avg +
			       ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
			     (alt_rssi_avg > main_rssi_avg)) &&
			    (antcomb->total_pkt_count > 50))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		}
		break;
	case 2:
		antcomb->alt_good = false;
		antcomb->scan_not_start = false;
		antcomb->scan = false;
		antcomb->rssi_first = main_rssi_avg;
		antcomb->rssi_third = alt_rssi_avg;

		if (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1)
			antcomb->rssi_lna1 = alt_rssi_avg;
		else if (antcomb->second_quick_scan_conf ==
			 ATH_ANT_DIV_COMB_LNA2)
			antcomb->rssi_lna2 = alt_rssi_avg;
		else if (antcomb->second_quick_scan_conf ==
			 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2) {
			if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)
				antcomb->rssi_lna2 = main_rssi_avg;
			else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1)
				antcomb->rssi_lna1 = main_rssi_avg;
		}

		if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
		    ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)
			div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
		else
			div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;

		if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		} else {
			if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
			      (alt_rssi_avg > main_rssi_avg +
			       ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
			     (alt_rssi_avg > main_rssi_avg)) &&
			    (antcomb->total_pkt_count > 50))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		}

		/* set alt to the conf with maximum ratio */
		if (antcomb->first_ratio && antcomb->second_ratio) {
			if (antcomb->rssi_second > antcomb->rssi_third) {
				/* first alt */
				if ((antcomb->first_quick_scan_conf ==
				     ATH_ANT_DIV_COMB_LNA1) ||
				    (antcomb->first_quick_scan_conf ==
				     ATH_ANT_DIV_COMB_LNA2))
					/* Set alt LNA1 or LNA2 */
					if (div_ant_conf->main_lna_conf ==
					    ATH_ANT_DIV_COMB_LNA2)
						div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA1;
					else
						div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA2;
				else
					/* Set alt to A+B or A-B */
					div_ant_conf->alt_lna_conf =
						antcomb->first_quick_scan_conf;
			} else if ((antcomb->second_quick_scan_conf ==
				    ATH_ANT_DIV_COMB_LNA1) ||
				   (antcomb->second_quick_scan_conf ==
				    ATH_ANT_DIV_COMB_LNA2)) {
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			} else {
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->second_quick_scan_conf;
			}
		} else if (antcomb->first_ratio) {
			/* first alt */
			if ((antcomb->first_quick_scan_conf ==
			     ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->first_quick_scan_conf ==
			     ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->first_quick_scan_conf;
		} else if (antcomb->second_ratio) {
			/* second alt */
			if ((antcomb->second_quick_scan_conf ==
			     ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->second_quick_scan_conf ==
			     ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->second_quick_scan_conf;
		} else {
			/* main is largest */
			if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf = antcomb->main_conf;
		}
		break;
	default:
		break;
	}
}

static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
					  struct ath_ant_comb *antcomb,
					  int alt_ratio)
{
	if (ant_conf->div_group == 0) {
		/* Adjust the fast_div_bias based on main and alt lna conf */
		switch ((ant_conf->main_lna_conf << 4) |
			ant_conf->alt_lna_conf) {
		case 0x01: /* A-B LNA2 */
			ant_conf->fast_div_bias = 0x3b;
			break;
		case 0x02: /* A-B LNA1 */
			ant_conf->fast_div_bias = 0x3d;
			break;
		case 0x03: /* A-B A+B */
			ant_conf->fast_div_bias = 0x1;
			break;
		case 0x10: /* LNA2 A-B */
			ant_conf->fast_div_bias = 0x7;
			break;
		case 0x12: /* LNA2 LNA1 */
			ant_conf->fast_div_bias = 0x2;
			break;
		case 0x13: /* LNA2 A+B */
			ant_conf->fast_div_bias = 0x7;
			break;
		case 0x20: /* LNA1 A-B */
			ant_conf->fast_div_bias = 0x6;
			break;
		case 0x21: /* LNA1 LNA2 */
			ant_conf->fast_div_bias = 0x0;
			break;
		case 0x23: /* LNA1 A+B */
			ant_conf->fast_div_bias = 0x6;
			break;
		case 0x30: /* A+B A-B */
			ant_conf->fast_div_bias = 0x1;
			break;
		case 0x31: /* A+B LNA2 */
			ant_conf->fast_div_bias = 0x3b;
			break;
		case 0x32: /* A+B LNA1 */
			ant_conf->fast_div_bias = 0x3d;
			break;
		default:
			break;
		}
	} else if (ant_conf->div_group == 1) {
		/* Adjust the fast_div_bias based on main and alt_lna_conf */
		switch ((ant_conf->main_lna_conf << 4) |
			ant_conf->alt_lna_conf) {
		case 0x01: /* A-B LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x02: /* A-B LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x03: /* A-B A+B */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x10: /* LNA2 A-B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x3f;
			else
				ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x12: /* LNA2 LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x13: /* LNA2 A+B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x3f;
			else
				ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x20: /* LNA1 A-B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x3f;
			else
				ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x21: /* LNA1 LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x23: /* LNA1 A+B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x3f;
			else
				ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x30: /* A+B A-B */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x31: /* A+B LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x32: /* A+B LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		default:
			break;
		}
	} else if (ant_conf->div_group == 2) {
		/* Adjust the fast_div_bias based on main and alt_lna_conf */
		switch ((ant_conf->main_lna_conf << 4) |
			ant_conf->alt_lna_conf) {
		case 0x01: /* A-B LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x02: /* A-B LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x03: /* A-B A+B */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x10: /* LNA2 A-B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x1;
			else
				ant_conf->fast_div_bias = 0x2;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x12: /* LNA2 LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x13: /* LNA2 A+B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x1;
			else
				ant_conf->fast_div_bias = 0x2;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x20: /* LNA1 A-B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x1;
			else
				ant_conf->fast_div_bias = 0x2;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x21: /* LNA1 LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x23: /* LNA1 A+B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x1;
			else
				ant_conf->fast_div_bias = 0x2;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x30: /* A+B A-B */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x31: /* A+B LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x32: /* A+B LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		default:
			break;
		}
	}
}

/* Antenna diversity and combining */
static void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
{
	struct ath_hw_antcomb_conf div_ant_conf;
	struct ath_ant_comb *antcomb = &sc->ant_comb;
	int alt_ratio = 0, alt_rssi_avg = 0, main_rssi_avg = 0, curr_alt_set;
	int curr_main_set;
	int main_rssi = rs->rs_rssi_ctl0;
	int alt_rssi = rs->rs_rssi_ctl1;
	int rx_ant_conf, main_ant_conf;
	bool short_scan = false;

	rx_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_CURRENT_SHIFT) &
		      ATH_ANT_RX_MASK;
	main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) &
			ATH_ANT_RX_MASK;

	/* Record the packet only when both main_rssi and alt_rssi are positive */
	if (main_rssi > 0 && alt_rssi > 0) {
		antcomb->total_pkt_count++;
		antcomb->main_total_rssi += main_rssi;
		antcomb->alt_total_rssi += alt_rssi;
		if (main_ant_conf == rx_ant_conf)
			antcomb->main_recv_cnt++;
		else
			antcomb->alt_recv_cnt++;
	}

	/* Short scan check */
	if (antcomb->scan && antcomb->alt_good) {
		if (time_after(jiffies, antcomb->scan_start_time +
			       msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR)))
			short_scan = true;
		else
			if (antcomb->total_pkt_count ==
			    ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) {
				alt_ratio = ((antcomb->alt_recv_cnt * 100) /
					     antcomb->total_pkt_count);
				if (alt_ratio < ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
					short_scan = true;
			}
	}

	if (((antcomb->total_pkt_count < ATH_ANT_DIV_COMB_MAX_PKTCOUNT) ||
	     rs->rs_moreaggr) && !short_scan)
		return;

	if (antcomb->total_pkt_count) {
		alt_ratio = ((antcomb->alt_recv_cnt * 100) /
			     antcomb->total_pkt_count);
		main_rssi_avg = (antcomb->main_total_rssi /
				 antcomb->total_pkt_count);
		alt_rssi_avg = (antcomb->alt_total_rssi /
				antcomb->total_pkt_count);
	}

	ath9k_hw_antdiv_comb_conf_get(sc->sc_ah, &div_ant_conf);
	curr_alt_set = div_ant_conf.alt_lna_conf;
	curr_main_set = div_ant_conf.main_lna_conf;

	antcomb->count++;

	if (antcomb->count == ATH_ANT_DIV_COMB_MAX_COUNT) {
		if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
			ath_lnaconf_alt_good_scan(antcomb, div_ant_conf,
						  main_rssi_avg);
			antcomb->alt_good = true;
		} else {
			antcomb->alt_good = false;
		}

		antcomb->count = 0;
		antcomb->scan = true;
		antcomb->scan_not_start = true;
	}

	if (!antcomb->scan) {
		if (ath_ant_div_comb_alt_check(div_ant_conf.div_group,
					       alt_ratio, curr_main_set,
					       curr_alt_set, alt_rssi_avg,
					       main_rssi_avg)) {
			if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) {
				/* Switch main and alt LNA */
				div_ant_conf.main_lna_conf =
					ATH_ANT_DIV_COMB_LNA2;
				div_ant_conf.alt_lna_conf =
					ATH_ANT_DIV_COMB_LNA1;
			} else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) {
				div_ant_conf.main_lna_conf =
					ATH_ANT_DIV_COMB_LNA1;
				div_ant_conf.alt_lna_conf =
					ATH_ANT_DIV_COMB_LNA2;
			}

			goto div_comb_done;
		} else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) &&
			   (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) {
			/* Set alt to another LNA */
			if (curr_main_set == ATH_ANT_DIV_COMB_LNA2)
				div_ant_conf.alt_lna_conf =
					ATH_ANT_DIV_COMB_LNA1;
			else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1)
				div_ant_conf.alt_lna_conf =
					ATH_ANT_DIV_COMB_LNA2;

			goto div_comb_done;
		}

		if ((alt_rssi_avg < (main_rssi_avg +
				     div_ant_conf.lna1_lna2_delta)))
			goto div_comb_done;
	}

	if (!antcomb->scan_not_start) {
		switch (curr_alt_set) {
		case ATH_ANT_DIV_COMB_LNA2:
			antcomb->rssi_lna2 = alt_rssi_avg;
			antcomb->rssi_lna1 = main_rssi_avg;
			antcomb->scan = true;
			/* set to A+B */
			div_ant_conf.main_lna_conf =
				ATH_ANT_DIV_COMB_LNA1;
			div_ant_conf.alt_lna_conf =
				ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1:
			antcomb->rssi_lna1 = alt_rssi_avg;
			antcomb->rssi_lna2 = main_rssi_avg;
			antcomb->scan = true;
			/* set to A+B */
			div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
			div_ant_conf.alt_lna_conf =
				ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
			antcomb->rssi_add = alt_rssi_avg;
			antcomb->scan = true;
			/* set to A-B */
			div_ant_conf.alt_lna_conf =
				ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2:
			antcomb->rssi_sub = alt_rssi_avg;
			antcomb->scan = false;
			if (antcomb->rssi_lna2 >
			    (antcomb->rssi_lna1 +
			     ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
				/* use LNA2 as main LNA */
				if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
				    (antcomb->rssi_add > antcomb->rssi_sub)) {
					/* set to A+B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
				} else if (antcomb->rssi_sub >
					   antcomb->rssi_lna1) {
					/* set to A-B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
				} else {
					/* set to LNA1 */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				}
			} else {
				/* use LNA1 as main LNA */
				if ((antcomb->rssi_add > antcomb->rssi_lna2) &&
				    (antcomb->rssi_add > antcomb->rssi_sub)) {
					/* set to A+B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
				} else if (antcomb->rssi_sub >
					   antcomb->rssi_lna1) {
					/* set to A-B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
				} else {
					/* set to LNA2 */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				}
			}
			break;
		default:
			break;
		}
	} else {
		if (!antcomb->alt_good) {
			antcomb->scan_not_start = false;
			/* Set alt to another LNA */
			if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) {
				div_ant_conf.main_lna_conf =
					ATH_ANT_DIV_COMB_LNA2;
				div_ant_conf.alt_lna_conf =
					ATH_ANT_DIV_COMB_LNA1;
			} else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) {
				div_ant_conf.main_lna_conf =
					ATH_ANT_DIV_COMB_LNA1;
				div_ant_conf.alt_lna_conf =
					ATH_ANT_DIV_COMB_LNA2;
			}
			goto div_comb_done;
		}
	}

	ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf,
					   main_rssi_avg, alt_rssi_avg,
					   alt_ratio);

	antcomb->quick_scan_cnt++;

div_comb_done:
	ath_ant_div_conf_fast_divbias(&div_ant_conf, antcomb, alt_ratio);
	ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf);

	antcomb->scan_start_time = jiffies;
	antcomb->total_pkt_count = 0;
	antcomb->main_total_rssi = 0;
	antcomb->alt_total_rssi = 0;
	antcomb->main_recv_cnt = 0;
	antcomb->alt_recv_cnt = 0;
}

int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
	struct ath_buf *bf;
	struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
	struct ieee80211_rx_status *rxs;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	/*
	 * The hw can technically differ from common->hw when using ath9k
	 * virtual wiphy so to account for that we iterate over the active
	 * wiphys and find the appropriate wiphy and therefore hw.
	 */
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	int retval;
	bool decrypt_error = false;
	struct ath_rx_status rs;
	enum ath9k_rx_qtype qtype;
	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	int dma_type;
	u8 rx_status_len = ah->caps.rx_status_len;
	u64 tsf = 0;
	u32 tsf_lower = 0;
	unsigned long flags;

	if (edma)
		dma_type = DMA_BIDIRECTIONAL;
	else
		dma_type = DMA_FROM_DEVICE;

	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
	spin_lock_bh(&sc->rx.rxbuflock);

	tsf = ath9k_hw_gettsf64(ah);
	tsf_lower = tsf & 0xffffffff;

	do {
		/* If handling rx interrupt and flush is in progress => exit */
		if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
			break;

		memset(&rs, 0, sizeof(rs));
		if (edma)
			bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
		else
			bf = ath_get_next_rx_buf(sc, &rs);

		if (!bf)
			break;

		skb = bf->bf_mpdu;
		if (!skb)
			continue;

		/*
		 * Take frame header from the first fragment and RX status from
		 * the last one.
		 */
		if (sc->rx.frag)
			hdr_skb = sc->rx.frag;
		else
			hdr_skb = skb;

		hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
		rxs = IEEE80211_SKB_RXCB(hdr_skb);

		ath_debug_stat_rx(sc, &rs);

		/*
		 * If we're asked to flush receive queue, directly
		 * chain it back at the queue without processing it.
		 */
		if (flush)
			goto requeue_drop_frag;

		retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
						 rxs, &decrypt_error);
		if (retval)
			goto requeue_drop_frag;
1867 rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
1868 if (rs.rs_tstamp > tsf_lower &&
1869 unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
1870 rxs->mactime -= 0x100000000ULL;
1871
1872 if (rs.rs_tstamp < tsf_lower &&
1873 unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
1874 rxs->mactime += 0x100000000ULL;
1875
1876 /* Ensure we always have an skb to requeue once we are done
1877 * processing the current buffer's skb */
1878 requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);
1879
1880 /* If there is no memory we ignore the current RX'd frame,
1881 * tell hardware it can give us a new frame using the old
1882 * skb and put it at the tail of the sc->rx.rxbuf list for
1883 * processing. */
1884 if (!requeue_skb)
1885 goto requeue_drop_frag;
1886
1887 /* Unmap the frame */
1888 dma_unmap_single(sc->dev, bf->bf_buf_addr,
1889 common->rx_bufsize,
1890 dma_type);
1891
1892 skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
1893 if (ah->caps.rx_status_len)
1894 skb_pull(skb, ah->caps.rx_status_len);
1895
1896 if (!rs.rs_more)
1897 ath9k_rx_skb_postprocess(common, hdr_skb, &rs,
1898 rxs, decrypt_error);
1899
1900 /* We will now give hardware our shiny new allocated skb */
1901 bf->bf_mpdu = requeue_skb;
1902 bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
1903 common->rx_bufsize,
1904 dma_type);
1905 if (unlikely(dma_mapping_error(sc->dev,
1906 bf->bf_buf_addr))) {
1907 dev_kfree_skb_any(requeue_skb);
1908 bf->bf_mpdu = NULL;
1909 bf->bf_buf_addr = 0;
1910 ath_err(common, "dma_mapping_error() on RX\n");
1911 ieee80211_rx(hw, skb);
1912 break;
1913 }
1914
1915 if (rs.rs_more) {
1916 /*
1917 * rs_more indicates chained descriptors which can be
1918 * used to link buffers together for a sort of
1919 * scatter-gather operation.
1920 */
1921 if (sc->rx.frag) {
1922 /* too many fragments - cannot handle frame */
1923 dev_kfree_skb_any(sc->rx.frag);
1924 dev_kfree_skb_any(skb);
1925 skb = NULL;
1926 }
1927 sc->rx.frag = skb;
1928 goto requeue;
1929 }
1930
1931 if (sc->rx.frag) {
1932 int space = skb->len - skb_tailroom(hdr_skb);
1933
1934 sc->rx.frag = NULL;
1935
1936 if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
1937 dev_kfree_skb(skb);
1938 goto requeue_drop_frag;
1939 }
1940
1941 skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
1942 skb->len);
1943 dev_kfree_skb_any(skb);
1944 skb = hdr_skb;
1945 }
1946
1947 /*
1948 * change the default rx antenna if rx diversity chooses the
1949 * other antenna 3 times in a row.
1950 */
1951 if (sc->rx.defant != rs.rs_antenna) {
1952 if (++sc->rx.rxotherant >= 3)
1953 ath_setdefantenna(sc, rs.rs_antenna);
1954 } else {
1955 sc->rx.rxotherant = 0;
1956 }
1957
1958 if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
1959			skb_trim(skb, skb->len - 8); /* remove the 8-byte Michael MIC */
1960
1961 spin_lock_irqsave(&sc->sc_pm_lock, flags);
1962
1963 if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
1964 PS_WAIT_FOR_CAB |
1965 PS_WAIT_FOR_PSPOLL_DATA)) ||
1966 ath9k_check_auto_sleep(sc))
1967 ath_rx_ps(sc, skb);
1968 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
1969
1970 if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
1971 ath_ant_comb_scan(sc, &rs);
1972
1973 ieee80211_rx(hw, skb);
1974
1975requeue_drop_frag:
1976 if (sc->rx.frag) {
1977 dev_kfree_skb_any(sc->rx.frag);
1978 sc->rx.frag = NULL;
1979 }
1980requeue:
1981 if (edma) {
1982 list_add_tail(&bf->list, &sc->rx.rxbuf);
1983 ath_rx_edma_buf_link(sc, qtype);
1984 } else {
1985 list_move_tail(&bf->list, &sc->rx.rxbuf);
1986 ath_rx_buf_link(sc, bf);
1987 ath9k_hw_rxena(ah);
1988 }
1989 } while (1);
1990
1991 spin_unlock_bh(&sc->rx.rxbuflock);
1992
1993 return 0;
1994}
116
117static void ath_opmode_init(struct ath_softc *sc)
118{
119 struct ath_hw *ah = sc->sc_ah;
120 struct ath_common *common = ath9k_hw_common(ah);
121
122 u32 rfilt, mfilt[2];
123
124 /* configure rx filter */
125 rfilt = ath_calcrxfilter(sc);
126 ath9k_hw_setrxfilter(ah, rfilt);
127
128 /* configure bssid mask */
129 ath_hw_setbssidmask(common);
130
131 /* configure operational mode */
132 ath9k_hw_setopmode(ah);
133
134 /* calculate and install multicast filter */
135 mfilt[0] = mfilt[1] = ~0;
136 ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
137}
138
139static bool ath_rx_edma_buf_link(struct ath_softc *sc,
140 enum ath9k_rx_qtype qtype)
141{
142 struct ath_hw *ah = sc->sc_ah;
143 struct ath_rx_edma *rx_edma;
144 struct sk_buff *skb;
145 struct ath_buf *bf;
146
147 rx_edma = &sc->rx.rx_edma[qtype];
148 if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
149 return false;
150
151 bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
152 list_del_init(&bf->list);
153
154 skb = bf->bf_mpdu;
155
156 ATH_RXBUF_RESET(bf);
157 memset(skb->data, 0, ah->caps.rx_status_len);
158 dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
159 ah->caps.rx_status_len, DMA_TO_DEVICE);
160
161 SKB_CB_ATHBUF(skb) = bf;
162 ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
163 skb_queue_tail(&rx_edma->rx_fifo, skb);
164
165 return true;
166}
167
168static void ath_rx_addbuffer_edma(struct ath_softc *sc,
169 enum ath9k_rx_qtype qtype, int size)
170{
171 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
172 struct ath_buf *bf, *tbf;
173
174 if (list_empty(&sc->rx.rxbuf)) {
175 ath_dbg(common, QUEUE, "No free rx buf available\n");
176 return;
177 }
178
179 list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list)
180 if (!ath_rx_edma_buf_link(sc, qtype))
181 break;
182
183}
184
185static void ath_rx_remove_buffer(struct ath_softc *sc,
186 enum ath9k_rx_qtype qtype)
187{
188 struct ath_buf *bf;
189 struct ath_rx_edma *rx_edma;
190 struct sk_buff *skb;
191
192 rx_edma = &sc->rx.rx_edma[qtype];
193
194 while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
195 bf = SKB_CB_ATHBUF(skb);
196 BUG_ON(!bf);
197 list_add_tail(&bf->list, &sc->rx.rxbuf);
198 }
199}
200
201static void ath_rx_edma_cleanup(struct ath_softc *sc)
202{
203 struct ath_hw *ah = sc->sc_ah;
204 struct ath_common *common = ath9k_hw_common(ah);
205 struct ath_buf *bf;
206
207 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
208 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
209
210 list_for_each_entry(bf, &sc->rx.rxbuf, list) {
211 if (bf->bf_mpdu) {
212 dma_unmap_single(sc->dev, bf->bf_buf_addr,
213 common->rx_bufsize,
214 DMA_BIDIRECTIONAL);
215 dev_kfree_skb_any(bf->bf_mpdu);
216 bf->bf_buf_addr = 0;
217 bf->bf_mpdu = NULL;
218 }
219 }
220
221 INIT_LIST_HEAD(&sc->rx.rxbuf);
222
223 kfree(sc->rx.rx_bufptr);
224 sc->rx.rx_bufptr = NULL;
225}
226
227static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
228{
229 skb_queue_head_init(&rx_edma->rx_fifo);
230 rx_edma->rx_fifo_hwsize = size;
231}
232
233static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
234{
235 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
236 struct ath_hw *ah = sc->sc_ah;
237 struct sk_buff *skb;
238 struct ath_buf *bf;
239 int error = 0, i;
240 u32 size;
241
242 ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
243 ah->caps.rx_status_len);
244
245 ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
246 ah->caps.rx_lp_qdepth);
247 ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
248 ah->caps.rx_hp_qdepth);
249
250 size = sizeof(struct ath_buf) * nbufs;
251 bf = kzalloc(size, GFP_KERNEL);
252 if (!bf)
253 return -ENOMEM;
254
255 INIT_LIST_HEAD(&sc->rx.rxbuf);
256 sc->rx.rx_bufptr = bf;
257
258 for (i = 0; i < nbufs; i++, bf++) {
259 skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
260 if (!skb) {
261 error = -ENOMEM;
262 goto rx_init_fail;
263 }
264
265 memset(skb->data, 0, common->rx_bufsize);
266 bf->bf_mpdu = skb;
267
268 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
269 common->rx_bufsize,
270 DMA_BIDIRECTIONAL);
271 if (unlikely(dma_mapping_error(sc->dev,
272 bf->bf_buf_addr))) {
273 dev_kfree_skb_any(skb);
274 bf->bf_mpdu = NULL;
275 bf->bf_buf_addr = 0;
276 ath_err(common,
277 "dma_mapping_error() on RX init\n");
278 error = -ENOMEM;
279 goto rx_init_fail;
280 }
281
282 list_add_tail(&bf->list, &sc->rx.rxbuf);
283 }
284
285 return 0;
286
287rx_init_fail:
288 ath_rx_edma_cleanup(sc);
289 return error;
290}
291
292static void ath_edma_start_recv(struct ath_softc *sc)
293{
294 spin_lock_bh(&sc->rx.rxbuflock);
295
296 ath9k_hw_rxena(sc->sc_ah);
297
298 ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
299 sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);
300
301 ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
302 sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);
303
304 ath_opmode_init(sc);
305
306 ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_OFFCHANNEL));
307
308 spin_unlock_bh(&sc->rx.rxbuflock);
309}
310
311static void ath_edma_stop_recv(struct ath_softc *sc)
312{
313 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
314 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
315}
316
317int ath_rx_init(struct ath_softc *sc, int nbufs)
318{
319 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
320 struct sk_buff *skb;
321 struct ath_buf *bf;
322 int error = 0;
323
324 spin_lock_init(&sc->sc_pcu_lock);
325 sc->sc_flags &= ~SC_OP_RXFLUSH;
326 spin_lock_init(&sc->rx.rxbuflock);
327
328 common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
329 sc->sc_ah->caps.rx_status_len;
330
331 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
332 return ath_rx_edma_init(sc, nbufs);
333 } else {
334 ath_dbg(common, CONFIG, "cachelsz %u rxbufsize %u\n",
335 common->cachelsz, common->rx_bufsize);
336
337 /* Initialize rx descriptors */
338
339 error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
340 "rx", nbufs, 1, 0);
341 if (error != 0) {
342 ath_err(common,
343 "failed to allocate rx descriptors: %d\n",
344 error);
345 goto err;
346 }
347
348 list_for_each_entry(bf, &sc->rx.rxbuf, list) {
349 skb = ath_rxbuf_alloc(common, common->rx_bufsize,
350 GFP_KERNEL);
351 if (skb == NULL) {
352 error = -ENOMEM;
353 goto err;
354 }
355
356 bf->bf_mpdu = skb;
357 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
358 common->rx_bufsize,
359 DMA_FROM_DEVICE);
360 if (unlikely(dma_mapping_error(sc->dev,
361 bf->bf_buf_addr))) {
362 dev_kfree_skb_any(skb);
363 bf->bf_mpdu = NULL;
364 bf->bf_buf_addr = 0;
365 ath_err(common,
366 "dma_mapping_error() on RX init\n");
367 error = -ENOMEM;
368 goto err;
369 }
370 }
371 sc->rx.rxlink = NULL;
372 }
373
374err:
375 if (error)
376 ath_rx_cleanup(sc);
377
378 return error;
379}
380
381void ath_rx_cleanup(struct ath_softc *sc)
382{
383 struct ath_hw *ah = sc->sc_ah;
384 struct ath_common *common = ath9k_hw_common(ah);
385 struct sk_buff *skb;
386 struct ath_buf *bf;
387
388 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
389 ath_rx_edma_cleanup(sc);
390 return;
391 } else {
392 list_for_each_entry(bf, &sc->rx.rxbuf, list) {
393 skb = bf->bf_mpdu;
394 if (skb) {
395 dma_unmap_single(sc->dev, bf->bf_buf_addr,
396 common->rx_bufsize,
397 DMA_FROM_DEVICE);
398 dev_kfree_skb(skb);
399 bf->bf_buf_addr = 0;
400 bf->bf_mpdu = NULL;
401 }
402 }
403
404 if (sc->rx.rxdma.dd_desc_len != 0)
405 ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
406 }
407}
408
409/*
410 * Calculate the receive filter according to the
411 * operating mode and state:
412 *
413 * o always accept unicast, broadcast, and multicast traffic
414 * o maintain current state of phy error reception (the hal
415 * may enable phy error frames for noise immunity work)
416 * o probe request frames are accepted only when operating in
417 * hostap, adhoc, or monitor modes
418 * o enable promiscuous mode according to the interface state
419 * o accept beacons:
420 * - when operating in adhoc mode so the 802.11 layer creates
421 * node table entries for peers,
422 * - when operating in station mode for collecting rssi data when
423 * the station is otherwise quiet, or
424 * - when operating as a repeater so we see repeater-sta beacons
425 * - when scanning
426 */
427
428u32 ath_calcrxfilter(struct ath_softc *sc)
429{
430 u32 rfilt;
431
432 rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
433 | ATH9K_RX_FILTER_MCAST;
434
435 if (sc->rx.rxfilter & FIF_PROBE_REQ)
436 rfilt |= ATH9K_RX_FILTER_PROBEREQ;
437
438 /*
439 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
440 * mode interface or when in monitor mode. AP mode does not need this
441 * since it receives all in-BSS frames anyway.
442 */
443 if (sc->sc_ah->is_monitoring)
444 rfilt |= ATH9K_RX_FILTER_PROM;
445
446 if (sc->rx.rxfilter & FIF_CONTROL)
447 rfilt |= ATH9K_RX_FILTER_CONTROL;
448
449 if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
450 (sc->nvifs <= 1) &&
451 !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
452 rfilt |= ATH9K_RX_FILTER_MYBEACON;
453 else
454 rfilt |= ATH9K_RX_FILTER_BEACON;
455
456 if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
457 (sc->rx.rxfilter & FIF_PSPOLL))
458 rfilt |= ATH9K_RX_FILTER_PSPOLL;
459
460 if (conf_is_ht(&sc->hw->conf))
461 rfilt |= ATH9K_RX_FILTER_COMP_BAR;
462
463 if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
464 /* The following may also be needed for other older chips */
465 if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
466 rfilt |= ATH9K_RX_FILTER_PROM;
467 rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
468 }
469
470 return rfilt;
471
472}
473
474int ath_startrecv(struct ath_softc *sc)
475{
476 struct ath_hw *ah = sc->sc_ah;
477 struct ath_buf *bf, *tbf;
478
479 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
480 ath_edma_start_recv(sc);
481 return 0;
482 }
483
484 spin_lock_bh(&sc->rx.rxbuflock);
485 if (list_empty(&sc->rx.rxbuf))
486 goto start_recv;
487
488 sc->rx.rxlink = NULL;
489 list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
490 ath_rx_buf_link(sc, bf);
491 }
492
493 /* We could have deleted elements so the list may be empty now */
494 if (list_empty(&sc->rx.rxbuf))
495 goto start_recv;
496
497 bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
498 ath9k_hw_putrxbuf(ah, bf->bf_daddr);
499 ath9k_hw_rxena(ah);
500
501start_recv:
502 ath_opmode_init(sc);
503 ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_OFFCHANNEL));
504
505 spin_unlock_bh(&sc->rx.rxbuflock);
506
507 return 0;
508}
509
510bool ath_stoprecv(struct ath_softc *sc)
511{
512 struct ath_hw *ah = sc->sc_ah;
513 bool stopped, reset = false;
514
515 spin_lock_bh(&sc->rx.rxbuflock);
516 ath9k_hw_abortpcurecv(ah);
517 ath9k_hw_setrxfilter(ah, 0);
518 stopped = ath9k_hw_stopdmarecv(ah, &reset);
519
520 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
521 ath_edma_stop_recv(sc);
522 else
523 sc->rx.rxlink = NULL;
524 spin_unlock_bh(&sc->rx.rxbuflock);
525
526 if (!(ah->ah_flags & AH_UNPLUGGED) &&
527 unlikely(!stopped)) {
528 ath_err(ath9k_hw_common(sc->sc_ah),
529 "Could not stop RX, we could be "
530 "confusing the DMA engine when we start RX up\n");
531 ATH_DBG_WARN_ON_ONCE(!stopped);
532 }
533 return stopped && !reset;
534}
535
536void ath_flushrecv(struct ath_softc *sc)
537{
538 sc->sc_flags |= SC_OP_RXFLUSH;
539 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
540 ath_rx_tasklet(sc, 1, true);
541 ath_rx_tasklet(sc, 1, false);
542 sc->sc_flags &= ~SC_OP_RXFLUSH;
543}
544
545static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
546{
547 /* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
548 struct ieee80211_mgmt *mgmt;
549 u8 *pos, *end, id, elen;
550 struct ieee80211_tim_ie *tim;
551
552 mgmt = (struct ieee80211_mgmt *)skb->data;
553 pos = mgmt->u.beacon.variable;
554 end = skb->data + skb->len;
555
556 while (pos + 2 < end) {
557 id = *pos++;
558 elen = *pos++;
559 if (pos + elen > end)
560 break;
561
562 if (id == WLAN_EID_TIM) {
563 if (elen < sizeof(*tim))
564 break;
565 tim = (struct ieee80211_tim_ie *) pos;
566 if (tim->dtim_count != 0)
567 break;
568 return tim->bitmap_ctrl & 0x01;
569 }
570
571 pos += elen;
572 }
573
574 return false;
575}
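/*
 * Example of what the parser above accepts: a beacon whose variable part
 * contains the bytes 05 04 00 01 01 00 carries a TIM element (id
 * WLAN_EID_TIM == 5, length 4) with dtim_count 0 (this beacon is a DTIM),
 * dtim_period 1, bitmap_ctrl 0x01 (multicast/broadcast traffic buffered)
 * and an empty partial virtual bitmap, so the function returns true.
 */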
576
577static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
578{
579 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
580
581	if (skb->len < 24 + 8 + 2 + 2) /* hdr + TSF + beacon interval + capability */
582 return;
583
584 sc->ps_flags &= ~PS_WAIT_FOR_BEACON;
585
586 if (sc->ps_flags & PS_BEACON_SYNC) {
587 sc->ps_flags &= ~PS_BEACON_SYNC;
588 ath_dbg(common, PS,
589 "Reconfigure Beacon timers based on timestamp from the AP\n");
590 ath_set_beacon(sc);
591 }
592
593 if (ath_beacon_dtim_pending_cab(skb)) {
594 /*
595 * Remain awake waiting for buffered broadcast/multicast
596 * frames. If the last broadcast/multicast frame is not
597 * received properly, the next beacon frame will work as
598 * a backup trigger for returning into NETWORK SLEEP state,
599 * so we are waiting for it as well.
600 */
601 ath_dbg(common, PS,
602 "Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n");
603 sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
604 return;
605 }
606
607 if (sc->ps_flags & PS_WAIT_FOR_CAB) {
608 /*
609 * This can happen if a broadcast frame is dropped or the AP
610 * fails to send a frame indicating that all CAB frames have
611 * been delivered.
612 */
613 sc->ps_flags &= ~PS_WAIT_FOR_CAB;
614 ath_dbg(common, PS, "PS wait for CAB frames timed out\n");
615 }
616}
617
618static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon)
619{
620 struct ieee80211_hdr *hdr;
621 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
622
623 hdr = (struct ieee80211_hdr *)skb->data;
624
625 /* Process Beacon and CAB receive in PS state */
626 if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
627 && mybeacon)
628 ath_rx_ps_beacon(sc, skb);
629 else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
630 (ieee80211_is_data(hdr->frame_control) ||
631 ieee80211_is_action(hdr->frame_control)) &&
632 is_multicast_ether_addr(hdr->addr1) &&
633 !ieee80211_has_moredata(hdr->frame_control)) {
634 /*
635 * No more broadcast/multicast frames to be received at this
636 * point.
637 */
638 sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
639 ath_dbg(common, PS,
640 "All PS CAB frames received, back to sleep\n");
641 } else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
642 !is_multicast_ether_addr(hdr->addr1) &&
643 !ieee80211_has_morefrags(hdr->frame_control)) {
644 sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
645 ath_dbg(common, PS,
646 "Going back to sleep after having received PS-Poll data (0x%lx)\n",
647 sc->ps_flags & (PS_WAIT_FOR_BEACON |
648 PS_WAIT_FOR_CAB |
649 PS_WAIT_FOR_PSPOLL_DATA |
650 PS_WAIT_FOR_TX_ACK));
651 }
652}
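/*
 * Taken together, the transitions above give the usual CAB sequence: a
 * DTIM beacon with the multicast bit set makes ath_rx_ps_beacon() set
 * PS_WAIT_FOR_CAB, the buffered group-addressed frames then arrive, and
 * the first one without the more-data bit clears PS_WAIT_FOR_CAB so the
 * chip is allowed to doze again.
 */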
653
654static bool ath_edma_get_buffers(struct ath_softc *sc,
655 enum ath9k_rx_qtype qtype,
656 struct ath_rx_status *rs,
657 struct ath_buf **dest)
658{
659 struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
660 struct ath_hw *ah = sc->sc_ah;
661 struct ath_common *common = ath9k_hw_common(ah);
662 struct sk_buff *skb;
663 struct ath_buf *bf;
664 int ret;
665
666 skb = skb_peek(&rx_edma->rx_fifo);
667 if (!skb)
668 return false;
669
670 bf = SKB_CB_ATHBUF(skb);
671 BUG_ON(!bf);
672
673 dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
674 common->rx_bufsize, DMA_FROM_DEVICE);
675
676 ret = ath9k_hw_process_rxdesc_edma(ah, rs, skb->data);
677 if (ret == -EINPROGRESS) {
678		/* let the device regain ownership of the buffer */
679 dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
680 common->rx_bufsize, DMA_FROM_DEVICE);
681 return false;
682 }
683
684 __skb_unlink(skb, &rx_edma->rx_fifo);
685 if (ret == -EINVAL) {
686 /* corrupt descriptor, skip this one and the following one */
687 list_add_tail(&bf->list, &sc->rx.rxbuf);
688 ath_rx_edma_buf_link(sc, qtype);
689
690 skb = skb_peek(&rx_edma->rx_fifo);
691 if (skb) {
692 bf = SKB_CB_ATHBUF(skb);
693 BUG_ON(!bf);
694
695 __skb_unlink(skb, &rx_edma->rx_fifo);
696 list_add_tail(&bf->list, &sc->rx.rxbuf);
697 ath_rx_edma_buf_link(sc, qtype);
698 }
699
700 bf = NULL;
701 }
702
703 *dest = bf;
704 return true;
705}
706
707static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
708 struct ath_rx_status *rs,
709 enum ath9k_rx_qtype qtype)
710{
711 struct ath_buf *bf = NULL;
712
713 while (ath_edma_get_buffers(sc, qtype, rs, &bf)) {
714 if (!bf)
715 continue;
716
717 return bf;
718 }
719 return NULL;
720}
721
722static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
723 struct ath_rx_status *rs)
724{
725 struct ath_hw *ah = sc->sc_ah;
726 struct ath_common *common = ath9k_hw_common(ah);
727 struct ath_desc *ds;
728 struct ath_buf *bf;
729 int ret;
730
731 if (list_empty(&sc->rx.rxbuf)) {
732 sc->rx.rxlink = NULL;
733 return NULL;
734 }
735
736 bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
737 ds = bf->bf_desc;
738
739 /*
740 * Must provide the virtual address of the current
741 * descriptor, the physical address, and the virtual
742 * address of the next descriptor in the h/w chain.
743 * This allows the HAL to look ahead to see if the
744 * hardware is done with a descriptor by checking the
745 * done bit in the following descriptor and the address
746 * of the current descriptor the DMA engine is working
747 * on. All this is necessary because of our use of
748 * a self-linked list to avoid rx overruns.
749 */
750 ret = ath9k_hw_rxprocdesc(ah, ds, rs);
751 if (ret == -EINPROGRESS) {
752 struct ath_rx_status trs;
753 struct ath_buf *tbf;
754 struct ath_desc *tds;
755
756 memset(&trs, 0, sizeof(trs));
757 if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
758 sc->rx.rxlink = NULL;
759 return NULL;
760 }
761
762 tbf = list_entry(bf->list.next, struct ath_buf, list);
763
764 /*
765 * On some hardware the descriptor status words could
766 * get corrupted, including the done bit. Because of
767 * this, check if the next descriptor's done bit is
768 * set or not.
769 *
770 * If the next descriptor's done bit is set, the current
771 * descriptor has been corrupted. Force s/w to discard
772 * this descriptor and continue...
773 */
774
775 tds = tbf->bf_desc;
776 ret = ath9k_hw_rxprocdesc(ah, tds, &trs);
777 if (ret == -EINPROGRESS)
778 return NULL;
779 }
780
781 if (!bf->bf_mpdu)
782 return bf;
783
784 /*
785 * Synchronize the DMA transfer with CPU before
786 * 1. accessing the frame
787 * 2. requeueing the same buffer to h/w
788 */
789 dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
790 common->rx_bufsize,
791 DMA_FROM_DEVICE);
792
793 return bf;
794}
795
796/* Assumes you've already done the endian to CPU conversion */
797static bool ath9k_rx_accept(struct ath_common *common,
798 struct ieee80211_hdr *hdr,
799 struct ieee80211_rx_status *rxs,
800 struct ath_rx_status *rx_stats,
801 bool *decrypt_error)
802{
803 struct ath_softc *sc = (struct ath_softc *) common->priv;
804 bool is_mc, is_valid_tkip, strip_mic, mic_error;
805 struct ath_hw *ah = common->ah;
806 __le16 fc;
807 u8 rx_status_len = ah->caps.rx_status_len;
808
809 fc = hdr->frame_control;
810
811 is_mc = !!is_multicast_ether_addr(hdr->addr1);
812 is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID &&
813 test_bit(rx_stats->rs_keyix, common->tkip_keymap);
814 strip_mic = is_valid_tkip && ieee80211_is_data(fc) &&
815 ieee80211_has_protected(fc) &&
816 !(rx_stats->rs_status &
817 (ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC |
818 ATH9K_RXERR_KEYMISS));
819
820 /*
821 * Key miss events are only relevant for pairwise keys where the
822 * descriptor does contain a valid key index. This has been observed
823 * mostly with CCMP encryption.
824 */
825 if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID ||
826 !test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
827 rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
828
829 if (!rx_stats->rs_datalen) {
830 RX_STAT_INC(rx_len_err);
831 return false;
832 }
833
834 /*
835 * rs_status follows rs_datalen so if rs_datalen is too large
836 * we can take a hint that hardware corrupted it, so ignore
837 * those frames.
838 */
839 if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len)) {
840 RX_STAT_INC(rx_len_err);
841 return false;
842 }
843
844 /* Only use error bits from the last fragment */
845 if (rx_stats->rs_more)
846 return true;
847
848 mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) &&
849 !ieee80211_has_morefrags(fc) &&
850 !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
851 (rx_stats->rs_status & ATH9K_RXERR_MIC);
852
853 /*
854 * The rx_stats->rs_status will not be set until the end of the
855 * chained descriptors so it can be ignored if rs_more is set. The
856 * rs_more will be false at the last element of the chained
857 * descriptors.
858 */
859 if (rx_stats->rs_status != 0) {
860 u8 status_mask;
861
862 if (rx_stats->rs_status & ATH9K_RXERR_CRC) {
863 rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
864 mic_error = false;
865 }
866 if (rx_stats->rs_status & ATH9K_RXERR_PHY)
867 return false;
868
869 if ((rx_stats->rs_status & ATH9K_RXERR_DECRYPT) ||
870 (!is_mc && (rx_stats->rs_status & ATH9K_RXERR_KEYMISS))) {
871 *decrypt_error = true;
872 mic_error = false;
873 }
874
875 /*
876 * Reject error frames with the exception of
877 * decryption and MIC failures. For monitor mode,
878 * we also ignore the CRC error.
879 */
880 status_mask = ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
881 ATH9K_RXERR_KEYMISS;
882
883 if (ah->is_monitoring && (sc->rx.rxfilter & FIF_FCSFAIL))
884 status_mask |= ATH9K_RXERR_CRC;
885
886 if (rx_stats->rs_status & ~status_mask)
887 return false;
888 }
889
890 /*
891 * For unicast frames the MIC error bit can have false positives,
892 * so all MIC error reports need to be validated in software.
893 * False negatives are not common, so skip software verification
894 * if the hardware considers the MIC valid.
895 */
896 if (strip_mic)
897 rxs->flag |= RX_FLAG_MMIC_STRIPPED;
898 else if (is_mc && mic_error)
899 rxs->flag |= RX_FLAG_MMIC_ERROR;
900
901 return true;
902}
903
904static int ath9k_process_rate(struct ath_common *common,
905 struct ieee80211_hw *hw,
906 struct ath_rx_status *rx_stats,
907 struct ieee80211_rx_status *rxs)
908{
909 struct ieee80211_supported_band *sband;
910 enum ieee80211_band band;
911 unsigned int i = 0;
912 struct ath_softc __maybe_unused *sc = common->priv;
913
914 band = hw->conf.channel->band;
915 sband = hw->wiphy->bands[band];
916
917 if (rx_stats->rs_rate & 0x80) {
918 /* HT rate */
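		/*
		 * Bit 7 of the hardware rate code flags an HT (MCS) rate
		 * and the low bits carry the MCS index, so e.g. rs_rate
		 * 0x87 decodes below to MCS 7 (0x87 & 0x7f).
		 */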
919 rxs->flag |= RX_FLAG_HT;
920 if (rx_stats->rs_flags & ATH9K_RX_2040)
921 rxs->flag |= RX_FLAG_40MHZ;
922 if (rx_stats->rs_flags & ATH9K_RX_GI)
923 rxs->flag |= RX_FLAG_SHORT_GI;
924 rxs->rate_idx = rx_stats->rs_rate & 0x7f;
925 return 0;
926 }
927
928 for (i = 0; i < sband->n_bitrates; i++) {
929 if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
930 rxs->rate_idx = i;
931 return 0;
932 }
933 if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
934 rxs->flag |= RX_FLAG_SHORTPRE;
935 rxs->rate_idx = i;
936 return 0;
937 }
938 }
939
940 /*
941 * No valid hardware bitrate found -- we should not get here
942 * because hardware has already validated this frame as OK.
943 */
944 ath_dbg(common, ANY,
945 "unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
946 rx_stats->rs_rate);
947 RX_STAT_INC(rx_rate_err);
948 return -EINVAL;
949}
950
951static void ath9k_process_rssi(struct ath_common *common,
952 struct ieee80211_hw *hw,
953 struct ieee80211_hdr *hdr,
954 struct ath_rx_status *rx_stats)
955{
956 struct ath_softc *sc = hw->priv;
957 struct ath_hw *ah = common->ah;
958 int last_rssi;
959 int rssi = rx_stats->rs_rssi;
960
961 if (!rx_stats->is_mybeacon ||
962 ((ah->opmode != NL80211_IFTYPE_STATION) &&
963 (ah->opmode != NL80211_IFTYPE_ADHOC)))
964 return;
965
966 if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr)
967 ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);
968
969 last_rssi = sc->last_rssi;
970 if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
971 rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
972 if (rssi < 0)
973 rssi = 0;
974
975 /* Update Beacon RSSI, this is used by ANI. */
976 ah->stats.avgbrssi = rssi;
977}
978
979/*
980 * For Decrypt or Demic errors, we only mark the packet status here and always
981 * push the frame up to let mac80211 handle the actual error case, be it a
982 * missing decryption key or a real decryption error. This lets us keep statistics there.
983 */
984static int ath9k_rx_skb_preprocess(struct ath_common *common,
985 struct ieee80211_hw *hw,
986 struct ieee80211_hdr *hdr,
987 struct ath_rx_status *rx_stats,
988 struct ieee80211_rx_status *rx_status,
989 bool *decrypt_error)
990{
991 struct ath_hw *ah = common->ah;
992
993 /*
994 * everything but the rate is checked here, the rate check is done
995 * separately to avoid doing two lookups for a rate for each frame.
996 */
997 if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
998 return -EINVAL;
999
1000 /* Only use status info from the last fragment */
1001 if (rx_stats->rs_more)
1002 return 0;
1003
1004 ath9k_process_rssi(common, hw, hdr, rx_stats);
1005
1006 if (ath9k_process_rate(common, hw, rx_stats, rx_status))
1007 return -EINVAL;
1008
1009 rx_status->band = hw->conf.channel->band;
1010 rx_status->freq = hw->conf.channel->center_freq;
1011 rx_status->signal = ah->noise + rx_stats->rs_rssi;
1012 rx_status->antenna = rx_stats->rs_antenna;
1013 rx_status->flag |= RX_FLAG_MACTIME_MPDU;
1014 if (rx_stats->rs_moreaggr)
1015 rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
1016
1017 return 0;
1018}
1019
1020static void ath9k_rx_skb_postprocess(struct ath_common *common,
1021 struct sk_buff *skb,
1022 struct ath_rx_status *rx_stats,
1023 struct ieee80211_rx_status *rxs,
1024 bool decrypt_error)
1025{
1026 struct ath_hw *ah = common->ah;
1027 struct ieee80211_hdr *hdr;
1028 int hdrlen, padpos, padsize;
1029 u8 keyix;
1030 __le16 fc;
1031
1032 /* see if any padding is done by the hw and remove it */
1033 hdr = (struct ieee80211_hdr *) skb->data;
1034 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1035 fc = hdr->frame_control;
1036 padpos = ath9k_cmn_padpos(hdr->frame_control);
1037
1038 /* The MAC header is padded to have 32-bit boundary if the
1039 * packet payload is non-zero. The general calculation for
1040 * padsize would take into account odd header lengths:
1041 * padsize = (4 - padpos % 4) % 4; However, since only
1042 * even-length headers are used, padding can only be 0 or 2
1043 * bytes and we can optimize this a bit. In addition, we must
1044 * not try to remove padding from short control frames that do
1045 * not have payload. */
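	/* For example, a QoS data header (padpos 26) yields padsize 2 and a
	 * plain data header (padpos 24) yields padsize 0; for these even
	 * values padpos & 3 agrees with the general (4 - padpos % 4) % 4. */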
1046 padsize = padpos & 3;
1047	if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
1048 memmove(skb->data + padsize, skb->data, padpos);
1049 skb_pull(skb, padsize);
1050 }
1051
1052 keyix = rx_stats->rs_keyix;
1053
1054	if (keyix != ATH9K_RXKEYIX_INVALID && !decrypt_error &&
1055 ieee80211_has_protected(fc)) {
1056 rxs->flag |= RX_FLAG_DECRYPTED;
1057 } else if (ieee80211_has_protected(fc)
1058 && !decrypt_error && skb->len >= hdrlen + 4) {
1059 keyix = skb->data[hdrlen + 3] >> 6;
1060
1061 if (test_bit(keyix, common->keymap))
1062 rxs->flag |= RX_FLAG_DECRYPTED;
1063 }
1064 if (ah->sw_mgmt_crypto &&
1065 (rxs->flag & RX_FLAG_DECRYPTED) &&
1066 ieee80211_is_mgmt(fc))
1067 /* Use software decrypt for management frames. */
1068 rxs->flag &= ~RX_FLAG_DECRYPTED;
1069}
1070
1071static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb,
1072 struct ath_hw_antcomb_conf ant_conf,
1073 int main_rssi_avg)
1074{
1075 antcomb->quick_scan_cnt = 0;
1076
1077 if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
1078 antcomb->rssi_lna2 = main_rssi_avg;
1079 else if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA1)
1080 antcomb->rssi_lna1 = main_rssi_avg;
1081
1082 switch ((ant_conf.main_lna_conf << 4) | ant_conf.alt_lna_conf) {
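	/*
	 * The switch key packs both LNA configurations into one byte:
	 * the main conf in the high nibble and the alt conf in the low
	 * nibble, so e.g. case 0x21 below is main LNA1 / alt LNA2, as
	 * its label spells out.
	 */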
1083 case 0x10: /* LNA2 A-B */
1084 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1085 antcomb->first_quick_scan_conf =
1086 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1087 antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
1088 break;
1089 case 0x20: /* LNA1 A-B */
1090 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1091 antcomb->first_quick_scan_conf =
1092 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1093 antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
1094 break;
1095 case 0x21: /* LNA1 LNA2 */
1096 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA2;
1097 antcomb->first_quick_scan_conf =
1098 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1099 antcomb->second_quick_scan_conf =
1100 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1101 break;
1102 case 0x12: /* LNA2 LNA1 */
1103 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1;
1104 antcomb->first_quick_scan_conf =
1105 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1106 antcomb->second_quick_scan_conf =
1107 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1108 break;
1109 case 0x13: /* LNA2 A+B */
1110 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1111 antcomb->first_quick_scan_conf =
1112 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1113 antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
1114 break;
1115 case 0x23: /* LNA1 A+B */
1116 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1117 antcomb->first_quick_scan_conf =
1118 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1119 antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
1120 break;
1121 default:
1122 break;
1123 }
1124}
1125
1126static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
1127 struct ath_hw_antcomb_conf *div_ant_conf,
1128 int main_rssi_avg, int alt_rssi_avg,
1129 int alt_ratio)
1130{
1131 /* alt_good */
1132 switch (antcomb->quick_scan_cnt) {
1133 case 0:
1134		/* keep main on the current conf, set alt to the first scan conf */
1135 div_ant_conf->main_lna_conf = antcomb->main_conf;
1136 div_ant_conf->alt_lna_conf = antcomb->first_quick_scan_conf;
1137 break;
1138 case 1:
1139		/* keep main on the current conf, set alt to the second scan conf */
1140 div_ant_conf->main_lna_conf = antcomb->main_conf;
1141 div_ant_conf->alt_lna_conf = antcomb->second_quick_scan_conf;
1142 antcomb->rssi_first = main_rssi_avg;
1143 antcomb->rssi_second = alt_rssi_avg;
1144
1145 if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
1146 /* main is LNA1 */
1147 if (ath_is_alt_ant_ratio_better(alt_ratio,
1148 ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
1149 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
1150 main_rssi_avg, alt_rssi_avg,
1151 antcomb->total_pkt_count))
1152 antcomb->first_ratio = true;
1153 else
1154 antcomb->first_ratio = false;
1155 } else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
1156 if (ath_is_alt_ant_ratio_better(alt_ratio,
1157 ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
1158 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
1159 main_rssi_avg, alt_rssi_avg,
1160 antcomb->total_pkt_count))
1161 antcomb->first_ratio = true;
1162 else
1163 antcomb->first_ratio = false;
1164 } else {
1165 if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
1166 (alt_rssi_avg > main_rssi_avg +
1167 ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
1168 (alt_rssi_avg > main_rssi_avg)) &&
1169 (antcomb->total_pkt_count > 50))
1170 antcomb->first_ratio = true;
1171 else
1172 antcomb->first_ratio = false;
1173 }
1174 break;
1175 case 2:
1176 antcomb->alt_good = false;
1177 antcomb->scan_not_start = false;
1178 antcomb->scan = false;
1179 antcomb->rssi_first = main_rssi_avg;
1180 antcomb->rssi_third = alt_rssi_avg;
1181
1182 if (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1)
1183 antcomb->rssi_lna1 = alt_rssi_avg;
1184 else if (antcomb->second_quick_scan_conf ==
1185 ATH_ANT_DIV_COMB_LNA2)
1186 antcomb->rssi_lna2 = alt_rssi_avg;
1187 else if (antcomb->second_quick_scan_conf ==
1188 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2) {
1189 if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)
1190 antcomb->rssi_lna2 = main_rssi_avg;
1191 else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1)
1192 antcomb->rssi_lna1 = main_rssi_avg;
1193 }
1194
1195 if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
1196 ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)
1197 div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
1198 else
1199 div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
1200
1201 if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
1202 if (ath_is_alt_ant_ratio_better(alt_ratio,
1203 ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
1204 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
1205 main_rssi_avg, alt_rssi_avg,
1206 antcomb->total_pkt_count))
1207 antcomb->second_ratio = true;
1208 else
1209 antcomb->second_ratio = false;
1210 } else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
1211 if (ath_is_alt_ant_ratio_better(alt_ratio,
1212 ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
1213 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
1214 main_rssi_avg, alt_rssi_avg,
1215 antcomb->total_pkt_count))
1216 antcomb->second_ratio = true;
1217 else
1218 antcomb->second_ratio = false;
1219 } else {
1220 if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
1221 (alt_rssi_avg > main_rssi_avg +
1222 ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
1223 (alt_rssi_avg > main_rssi_avg)) &&
1224 (antcomb->total_pkt_count > 50))
1225 antcomb->second_ratio = true;
1226 else
1227 antcomb->second_ratio = false;
1228 }
1229
1230		/* set alt to the conf with the maximum ratio */
1231 if (antcomb->first_ratio && antcomb->second_ratio) {
1232 if (antcomb->rssi_second > antcomb->rssi_third) {
1233 /* first alt*/
1234 if ((antcomb->first_quick_scan_conf ==
1235 ATH_ANT_DIV_COMB_LNA1) ||
1236 (antcomb->first_quick_scan_conf ==
1237 ATH_ANT_DIV_COMB_LNA2))
1238 /* Set alt LNA1 or LNA2*/
1239 if (div_ant_conf->main_lna_conf ==
1240 ATH_ANT_DIV_COMB_LNA2)
1241 div_ant_conf->alt_lna_conf =
1242 ATH_ANT_DIV_COMB_LNA1;
1243 else
1244 div_ant_conf->alt_lna_conf =
1245 ATH_ANT_DIV_COMB_LNA2;
1246 else
1247 /* Set alt to A+B or A-B */
1248 div_ant_conf->alt_lna_conf =
1249 antcomb->first_quick_scan_conf;
1250 } else if ((antcomb->second_quick_scan_conf ==
1251 ATH_ANT_DIV_COMB_LNA1) ||
1252 (antcomb->second_quick_scan_conf ==
1253 ATH_ANT_DIV_COMB_LNA2)) {
1254 /* Set alt LNA1 or LNA2 */
1255 if (div_ant_conf->main_lna_conf ==
1256 ATH_ANT_DIV_COMB_LNA2)
1257 div_ant_conf->alt_lna_conf =
1258 ATH_ANT_DIV_COMB_LNA1;
1259 else
1260 div_ant_conf->alt_lna_conf =
1261 ATH_ANT_DIV_COMB_LNA2;
1262 } else {
1263 /* Set alt to A+B or A-B */
1264 div_ant_conf->alt_lna_conf =
1265 antcomb->second_quick_scan_conf;
1266 }
1267 } else if (antcomb->first_ratio) {
1268 /* first alt */
1269 if ((antcomb->first_quick_scan_conf ==
1270 ATH_ANT_DIV_COMB_LNA1) ||
1271 (antcomb->first_quick_scan_conf ==
1272 ATH_ANT_DIV_COMB_LNA2))
1273 /* Set alt LNA1 or LNA2 */
1274 if (div_ant_conf->main_lna_conf ==
1275 ATH_ANT_DIV_COMB_LNA2)
1276 div_ant_conf->alt_lna_conf =
1277 ATH_ANT_DIV_COMB_LNA1;
1278 else
1279 div_ant_conf->alt_lna_conf =
1280 ATH_ANT_DIV_COMB_LNA2;
1281 else
1282 /* Set alt to A+B or A-B */
1283 div_ant_conf->alt_lna_conf =
1284 antcomb->first_quick_scan_conf;
1285 } else if (antcomb->second_ratio) {
1286 /* second alt */
1287 if ((antcomb->second_quick_scan_conf ==
1288 ATH_ANT_DIV_COMB_LNA1) ||
1289 (antcomb->second_quick_scan_conf ==
1290 ATH_ANT_DIV_COMB_LNA2))
1291 /* Set alt LNA1 or LNA2 */
1292 if (div_ant_conf->main_lna_conf ==
1293 ATH_ANT_DIV_COMB_LNA2)
1294 div_ant_conf->alt_lna_conf =
1295 ATH_ANT_DIV_COMB_LNA1;
1296 else
1297 div_ant_conf->alt_lna_conf =
1298 ATH_ANT_DIV_COMB_LNA2;
1299 else
1300 /* Set alt to A+B or A-B */
1301 div_ant_conf->alt_lna_conf =
1302 antcomb->second_quick_scan_conf;
1303 } else {
1304 /* main is largest */
1305 if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) ||
1306 (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2))
1307 /* Set alt LNA1 or LNA2 */
1308 if (div_ant_conf->main_lna_conf ==
1309 ATH_ANT_DIV_COMB_LNA2)
1310 div_ant_conf->alt_lna_conf =
1311 ATH_ANT_DIV_COMB_LNA1;
1312 else
1313 div_ant_conf->alt_lna_conf =
1314 ATH_ANT_DIV_COMB_LNA2;
1315 else
1316 /* Set alt to A+B or A-B */
1317 div_ant_conf->alt_lna_conf = antcomb->main_conf;
1318 }
1319 break;
1320 default:
1321 break;
1322 }
1323}
1324
1325static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
1326 struct ath_ant_comb *antcomb, int alt_ratio)
1327{
1328 if (ant_conf->div_group == 0) {
1329 /* Adjust the fast_div_bias based on main and alt lna conf */
1330 switch ((ant_conf->main_lna_conf << 4) |
1331 ant_conf->alt_lna_conf) {
1332 case 0x01: /* A-B LNA2 */
1333 ant_conf->fast_div_bias = 0x3b;
1334 break;
1335 case 0x02: /* A-B LNA1 */
1336 ant_conf->fast_div_bias = 0x3d;
1337 break;
1338 case 0x03: /* A-B A+B */
1339 ant_conf->fast_div_bias = 0x1;
1340 break;
1341 case 0x10: /* LNA2 A-B */
1342 ant_conf->fast_div_bias = 0x7;
1343 break;
1344 case 0x12: /* LNA2 LNA1 */
1345 ant_conf->fast_div_bias = 0x2;
1346 break;
1347 case 0x13: /* LNA2 A+B */
1348 ant_conf->fast_div_bias = 0x7;
1349 break;
1350 case 0x20: /* LNA1 A-B */
1351 ant_conf->fast_div_bias = 0x6;
1352 break;
1353 case 0x21: /* LNA1 LNA2 */
1354 ant_conf->fast_div_bias = 0x0;
1355 break;
1356 case 0x23: /* LNA1 A+B */
1357 ant_conf->fast_div_bias = 0x6;
1358 break;
1359 case 0x30: /* A+B A-B */
1360 ant_conf->fast_div_bias = 0x1;
1361 break;
1362 case 0x31: /* A+B LNA2 */
1363 ant_conf->fast_div_bias = 0x3b;
1364 break;
1365 case 0x32: /* A+B LNA1 */
1366 ant_conf->fast_div_bias = 0x3d;
1367 break;
1368 default:
1369 break;
1370 }
1371 } else if (ant_conf->div_group == 1) {
1372 /* Adjust the fast_div_bias based on main and alt_lna_conf */
1373 switch ((ant_conf->main_lna_conf << 4) |
1374 ant_conf->alt_lna_conf) {
1375 case 0x01: /* A-B LNA2 */
1376 ant_conf->fast_div_bias = 0x1;
1377 ant_conf->main_gaintb = 0;
1378 ant_conf->alt_gaintb = 0;
1379 break;
1380 case 0x02: /* A-B LNA1 */
1381 ant_conf->fast_div_bias = 0x1;
1382 ant_conf->main_gaintb = 0;
1383 ant_conf->alt_gaintb = 0;
1384 break;
1385 case 0x03: /* A-B A+B */
1386 ant_conf->fast_div_bias = 0x1;
1387 ant_conf->main_gaintb = 0;
1388 ant_conf->alt_gaintb = 0;
1389 break;
1390 case 0x10: /* LNA2 A-B */
1391 if (!(antcomb->scan) &&
1392 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
1393 ant_conf->fast_div_bias = 0x3f;
1394 else
1395 ant_conf->fast_div_bias = 0x1;
1396 ant_conf->main_gaintb = 0;
1397 ant_conf->alt_gaintb = 0;
1398 break;
1399 case 0x12: /* LNA2 LNA1 */
1400 ant_conf->fast_div_bias = 0x1;
1401 ant_conf->main_gaintb = 0;
1402 ant_conf->alt_gaintb = 0;
1403 break;
1404 case 0x13: /* LNA2 A+B */
1405 if (!(antcomb->scan) &&
1406 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
1407 ant_conf->fast_div_bias = 0x3f;
1408 else
1409 ant_conf->fast_div_bias = 0x1;
1410 ant_conf->main_gaintb = 0;
1411 ant_conf->alt_gaintb = 0;
1412 break;
1413 case 0x20: /* LNA1 A-B */
1414 if (!(antcomb->scan) &&
1415 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
1416 ant_conf->fast_div_bias = 0x3f;
1417 else
1418 ant_conf->fast_div_bias = 0x1;
1419 ant_conf->main_gaintb = 0;
1420 ant_conf->alt_gaintb = 0;
1421 break;
1422 case 0x21: /* LNA1 LNA2 */
1423 ant_conf->fast_div_bias = 0x1;
1424 ant_conf->main_gaintb = 0;
1425 ant_conf->alt_gaintb = 0;
1426 break;
1427 case 0x23: /* LNA1 A+B */
1428 if (!(antcomb->scan) &&
1429 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
1430 ant_conf->fast_div_bias = 0x3f;
1431 else
1432 ant_conf->fast_div_bias = 0x1;
1433 ant_conf->main_gaintb = 0;
1434 ant_conf->alt_gaintb = 0;
1435 break;
1436 case 0x30: /* A+B A-B */
1437 ant_conf->fast_div_bias = 0x1;
1438 ant_conf->main_gaintb = 0;
1439 ant_conf->alt_gaintb = 0;
1440 break;
1441 case 0x31: /* A+B LNA2 */
1442 ant_conf->fast_div_bias = 0x1;
1443 ant_conf->main_gaintb = 0;
1444 ant_conf->alt_gaintb = 0;
1445 break;
1446 case 0x32: /* A+B LNA1 */
1447 ant_conf->fast_div_bias = 0x1;
1448 ant_conf->main_gaintb = 0;
1449 ant_conf->alt_gaintb = 0;
1450 break;
1451 default:
1452 break;
1453 }
1454 } else if (ant_conf->div_group == 2) {
1455 /* Adjust the fast_div_bias based on main and alt_lna_conf */
1456 switch ((ant_conf->main_lna_conf << 4) |
1457 ant_conf->alt_lna_conf) {
1458 case 0x01: /* A-B LNA2 */
1459 ant_conf->fast_div_bias = 0x1;
1460 ant_conf->main_gaintb = 0;
1461 ant_conf->alt_gaintb = 0;
1462 break;
1463 case 0x02: /* A-B LNA1 */
1464 ant_conf->fast_div_bias = 0x1;
1465 ant_conf->main_gaintb = 0;
1466 ant_conf->alt_gaintb = 0;
1467 break;
1468 case 0x03: /* A-B A+B */
1469 ant_conf->fast_div_bias = 0x1;
1470 ant_conf->main_gaintb = 0;
1471 ant_conf->alt_gaintb = 0;
1472 break;
1473 case 0x10: /* LNA2 A-B */
1474 if (!(antcomb->scan) &&
1475 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
1476 ant_conf->fast_div_bias = 0x1;
1477 else
1478 ant_conf->fast_div_bias = 0x2;
1479 ant_conf->main_gaintb = 0;
1480 ant_conf->alt_gaintb = 0;
1481 break;
1482 case 0x12: /* LNA2 LNA1 */
1483 ant_conf->fast_div_bias = 0x1;
1484 ant_conf->main_gaintb = 0;
1485 ant_conf->alt_gaintb = 0;
1486 break;
1487 case 0x13: /* LNA2 A+B */
1488 if (!(antcomb->scan) &&
1489 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
1490 ant_conf->fast_div_bias = 0x1;
1491 else
1492 ant_conf->fast_div_bias = 0x2;
1493 ant_conf->main_gaintb = 0;
1494 ant_conf->alt_gaintb = 0;
1495 break;
1496 case 0x20: /* LNA1 A-B */
1497 if (!(antcomb->scan) &&
1498 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
1499 ant_conf->fast_div_bias = 0x1;
1500 else
1501 ant_conf->fast_div_bias = 0x2;
1502 ant_conf->main_gaintb = 0;
1503 ant_conf->alt_gaintb = 0;
1504 break;
1505 case 0x21: /* LNA1 LNA2 */
1506 ant_conf->fast_div_bias = 0x1;
1507 ant_conf->main_gaintb = 0;
1508 ant_conf->alt_gaintb = 0;
1509 break;
1510 case 0x23: /* LNA1 A+B */
1511 if (!(antcomb->scan) &&
1512 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
1513 ant_conf->fast_div_bias = 0x1;
1514 else
1515 ant_conf->fast_div_bias = 0x2;
1516 ant_conf->main_gaintb = 0;
1517 ant_conf->alt_gaintb = 0;
1518 break;
1519 case 0x30: /* A+B A-B */
1520 ant_conf->fast_div_bias = 0x1;
1521 ant_conf->main_gaintb = 0;
1522 ant_conf->alt_gaintb = 0;
1523 break;
1524 case 0x31: /* A+B LNA2 */
1525 ant_conf->fast_div_bias = 0x1;
1526 ant_conf->main_gaintb = 0;
1527 ant_conf->alt_gaintb = 0;
1528 break;
1529 case 0x32: /* A+B LNA1 */
1530 ant_conf->fast_div_bias = 0x1;
1531 ant_conf->main_gaintb = 0;
1532 ant_conf->alt_gaintb = 0;
1533 break;
1534 default:
1535 break;
1536 }
1537 }
1538}
1539
1540/* Antenna diversity and combining */
1541static void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
1542{
1543 struct ath_hw_antcomb_conf div_ant_conf;
1544 struct ath_ant_comb *antcomb = &sc->ant_comb;
1545 int alt_ratio = 0, alt_rssi_avg = 0, main_rssi_avg = 0, curr_alt_set;
1546 int curr_main_set;
1547 int main_rssi = rs->rs_rssi_ctl0;
1548 int alt_rssi = rs->rs_rssi_ctl1;
1549 int rx_ant_conf, main_ant_conf;
1550 bool short_scan = false;
1551
1552 rx_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_CURRENT_SHIFT) &
1553 ATH_ANT_RX_MASK;
1554 main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) &
1555 ATH_ANT_RX_MASK;
1556
1557	/* Record the packet only when both main_rssi and alt_rssi are positive */
1558 if (main_rssi > 0 && alt_rssi > 0) {
1559 antcomb->total_pkt_count++;
1560 antcomb->main_total_rssi += main_rssi;
1561 antcomb->alt_total_rssi += alt_rssi;
1562 if (main_ant_conf == rx_ant_conf)
1563 antcomb->main_recv_cnt++;
1564 else
1565 antcomb->alt_recv_cnt++;
1566 }
1567
1568 /* Short scan check */
1569 if (antcomb->scan && antcomb->alt_good) {
1570 if (time_after(jiffies, antcomb->scan_start_time +
1571 msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR)))
1572 short_scan = true;
1573 else
1574 if (antcomb->total_pkt_count ==
1575 ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) {
1576 alt_ratio = ((antcomb->alt_recv_cnt * 100) /
1577 antcomb->total_pkt_count);
1578 if (alt_ratio < ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
1579 short_scan = true;
1580 }
1581 }
1582
1583 if (((antcomb->total_pkt_count < ATH_ANT_DIV_COMB_MAX_PKTCOUNT) ||
1584 rs->rs_moreaggr) && !short_scan)
1585 return;
1586
1587 if (antcomb->total_pkt_count) {
1588 alt_ratio = ((antcomb->alt_recv_cnt * 100) /
1589 antcomb->total_pkt_count);
1590 main_rssi_avg = (antcomb->main_total_rssi /
1591 antcomb->total_pkt_count);
1592 alt_rssi_avg = (antcomb->alt_total_rssi /
1593 antcomb->total_pkt_count);
1594 }
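	/*
	 * alt_ratio is just the percentage of recorded packets received
	 * on the alt antenna: e.g. 30 alt receptions out of 100 packets
	 * gives 30, which is compared against
	 * ATH_ANT_DIV_COMB_ALT_ANT_RATIO below.
	 */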
1595
1596
1597 ath9k_hw_antdiv_comb_conf_get(sc->sc_ah, &div_ant_conf);
1598 curr_alt_set = div_ant_conf.alt_lna_conf;
1599 curr_main_set = div_ant_conf.main_lna_conf;
1600
1601 antcomb->count++;
1602
1603 if (antcomb->count == ATH_ANT_DIV_COMB_MAX_COUNT) {
1604 if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
1605 ath_lnaconf_alt_good_scan(antcomb, div_ant_conf,
1606 main_rssi_avg);
1607 antcomb->alt_good = true;
1608 } else {
1609 antcomb->alt_good = false;
1610 }
1611
1612 antcomb->count = 0;
1613 antcomb->scan = true;
1614 antcomb->scan_not_start = true;
1615 }
1616
1617 if (!antcomb->scan) {
1618 if (ath_ant_div_comb_alt_check(div_ant_conf.div_group,
1619 alt_ratio, curr_main_set, curr_alt_set,
1620 alt_rssi_avg, main_rssi_avg)) {
1621 if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) {
1622 /* Switch main and alt LNA */
1623 div_ant_conf.main_lna_conf =
1624 ATH_ANT_DIV_COMB_LNA2;
1625 div_ant_conf.alt_lna_conf =
1626 ATH_ANT_DIV_COMB_LNA1;
1627 } else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) {
1628 div_ant_conf.main_lna_conf =
1629 ATH_ANT_DIV_COMB_LNA1;
1630 div_ant_conf.alt_lna_conf =
1631 ATH_ANT_DIV_COMB_LNA2;
1632 }
1633
1634 goto div_comb_done;
1635 } else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) &&
1636 (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) {
1637 /* Set alt to another LNA */
1638 if (curr_main_set == ATH_ANT_DIV_COMB_LNA2)
1639 div_ant_conf.alt_lna_conf =
1640 ATH_ANT_DIV_COMB_LNA1;
1641 else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1)
1642 div_ant_conf.alt_lna_conf =
1643 ATH_ANT_DIV_COMB_LNA2;
1644
1645 goto div_comb_done;
1646 }
1647
1648 if ((alt_rssi_avg < (main_rssi_avg +
1649 div_ant_conf.lna1_lna2_delta)))
1650 goto div_comb_done;
1651 }
1652
1653 if (!antcomb->scan_not_start) {
1654 switch (curr_alt_set) {
1655 case ATH_ANT_DIV_COMB_LNA2:
1656 antcomb->rssi_lna2 = alt_rssi_avg;
1657 antcomb->rssi_lna1 = main_rssi_avg;
1658 antcomb->scan = true;
1659 /* set to A+B */
1660 div_ant_conf.main_lna_conf =
1661 ATH_ANT_DIV_COMB_LNA1;
1662 div_ant_conf.alt_lna_conf =
1663 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1664 break;
1665 case ATH_ANT_DIV_COMB_LNA1:
1666 antcomb->rssi_lna1 = alt_rssi_avg;
1667 antcomb->rssi_lna2 = main_rssi_avg;
1668 antcomb->scan = true;
1669 /* set to A+B */
1670 div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
1671 div_ant_conf.alt_lna_conf =
1672 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1673 break;
1674 case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
1675 antcomb->rssi_add = alt_rssi_avg;
1676 antcomb->scan = true;
1677 /* set to A-B */
1678 div_ant_conf.alt_lna_conf =
1679 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1680 break;
1681 case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2:
1682 antcomb->rssi_sub = alt_rssi_avg;
1683 antcomb->scan = false;
1684 if (antcomb->rssi_lna2 >
1685 (antcomb->rssi_lna1 +
1686 ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
1687 /* use LNA2 as main LNA */
1688 if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
1689 (antcomb->rssi_add > antcomb->rssi_sub)) {
1690 /* set to A+B */
1691 div_ant_conf.main_lna_conf =
1692 ATH_ANT_DIV_COMB_LNA2;
1693 div_ant_conf.alt_lna_conf =
1694 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1695 } else if (antcomb->rssi_sub >
1696 antcomb->rssi_lna1) {
1697 /* set to A-B */
1698 div_ant_conf.main_lna_conf =
1699 ATH_ANT_DIV_COMB_LNA2;
1700 div_ant_conf.alt_lna_conf =
1701 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1702 } else {
1703 /* set to LNA1 */
1704 div_ant_conf.main_lna_conf =
1705 ATH_ANT_DIV_COMB_LNA2;
1706 div_ant_conf.alt_lna_conf =
1707 ATH_ANT_DIV_COMB_LNA1;
1708 }
1709 } else {
1710 /* use LNA1 as main LNA */
1711 if ((antcomb->rssi_add > antcomb->rssi_lna2) &&
1712 (antcomb->rssi_add > antcomb->rssi_sub)) {
1713 /* set to A+B */
1714 div_ant_conf.main_lna_conf =
1715 ATH_ANT_DIV_COMB_LNA1;
1716 div_ant_conf.alt_lna_conf =
1717 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1718 } else if (antcomb->rssi_sub >
1719 antcomb->rssi_lna1) {
1720 /* set to A-B */
1721 div_ant_conf.main_lna_conf =
1722 ATH_ANT_DIV_COMB_LNA1;
1723 div_ant_conf.alt_lna_conf =
1724 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1725 } else {
1726 /* set to LNA2 */
1727 div_ant_conf.main_lna_conf =
1728 ATH_ANT_DIV_COMB_LNA1;
1729 div_ant_conf.alt_lna_conf =
1730 ATH_ANT_DIV_COMB_LNA2;
1731 }
1732 }
1733 break;
1734 default:
1735 break;
1736 }
1737 } else {
1738 if (!antcomb->alt_good) {
1739 antcomb->scan_not_start = false;
1740 /* Set alt to another LNA */
1741 if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) {
1742 div_ant_conf.main_lna_conf =
1743 ATH_ANT_DIV_COMB_LNA2;
1744 div_ant_conf.alt_lna_conf =
1745 ATH_ANT_DIV_COMB_LNA1;
1746 } else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) {
1747 div_ant_conf.main_lna_conf =
1748 ATH_ANT_DIV_COMB_LNA1;
1749 div_ant_conf.alt_lna_conf =
1750 ATH_ANT_DIV_COMB_LNA2;
1751 }
1752 goto div_comb_done;
1753 }
1754 }
1755
1756 ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf,
1757 main_rssi_avg, alt_rssi_avg,
1758 alt_ratio);
1759
1760 antcomb->quick_scan_cnt++;
1761
1762div_comb_done:
1763 ath_ant_div_conf_fast_divbias(&div_ant_conf, antcomb, alt_ratio);
1764 ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf);
1765
1766 antcomb->scan_start_time = jiffies;
1767 antcomb->total_pkt_count = 0;
1768 antcomb->main_total_rssi = 0;
1769 antcomb->alt_total_rssi = 0;
1770 antcomb->main_recv_cnt = 0;
1771 antcomb->alt_recv_cnt = 0;
1772}
1773
1774int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1775{
1776 struct ath_buf *bf;
1777 struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
1778 struct ieee80211_rx_status *rxs;
1779 struct ath_hw *ah = sc->sc_ah;
1780 struct ath_common *common = ath9k_hw_common(ah);
1781 struct ieee80211_hw *hw = sc->hw;
1782 struct ieee80211_hdr *hdr;
1783 int retval;
1784 struct ath_rx_status rs;
1785 enum ath9k_rx_qtype qtype;
1786 bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
1787 int dma_type;
1788 u8 rx_status_len = ah->caps.rx_status_len;
1789 u64 tsf = 0;
1790 u32 tsf_lower = 0;
1791 unsigned long flags;
1792
1793 if (edma)
1794 dma_type = DMA_BIDIRECTIONAL;
1795 else
1796 dma_type = DMA_FROM_DEVICE;
1797
1798 qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
1799 spin_lock_bh(&sc->rx.rxbuflock);
1800
1801 tsf = ath9k_hw_gettsf64(ah);
1802 tsf_lower = tsf & 0xffffffff;
1803
1804 do {
1805 bool decrypt_error = false;
1806 /* If handling rx interrupt and flush is in progress => exit */
1807 if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
1808 break;
1809
1810 memset(&rs, 0, sizeof(rs));
1811 if (edma)
1812 bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
1813 else
1814 bf = ath_get_next_rx_buf(sc, &rs);
1815
1816 if (!bf)
1817 break;
1818
1819 skb = bf->bf_mpdu;
1820 if (!skb)
1821 continue;
1822
1823 /*
1824 * Take frame header from the first fragment and RX status from
1825 * the last one.
1826 */
1827 if (sc->rx.frag)
1828 hdr_skb = sc->rx.frag;
1829 else
1830 hdr_skb = skb;
1831
1832 hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
1833 rxs = IEEE80211_SKB_RXCB(hdr_skb);
1834 if (ieee80211_is_beacon(hdr->frame_control)) {
1835 RX_STAT_INC(rx_beacons);
1836 if (!is_zero_ether_addr(common->curbssid) &&
1837 ether_addr_equal(hdr->addr3, common->curbssid))
1838 rs.is_mybeacon = true;
1839 else
1840 rs.is_mybeacon = false;
1841		} else {
1842			rs.is_mybeacon = false;
1843		}
1844
1845 ath_debug_stat_rx(sc, &rs);
1846
1847 /*
1848 * If we're asked to flush receive queue, directly
1849 * chain it back at the queue without processing it.
1850 */
1851 if (sc->sc_flags & SC_OP_RXFLUSH) {
1852 RX_STAT_INC(rx_drop_rxflush);
1853 goto requeue_drop_frag;
1854 }
1855
1856 memset(rxs, 0, sizeof(struct ieee80211_rx_status));
1857
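		/*
		 * The hardware timestamp is only 32 bits; splice it into the
		 * upper bits of the TSF sampled above, then correct for a
		 * 32-bit rollover between frame reception and the TSF read.
		 */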
		rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
		if (rs.rs_tstamp > tsf_lower &&
		    unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
			rxs->mactime -= 0x100000000ULL;

		if (rs.rs_tstamp < tsf_lower &&
		    unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
			rxs->mactime += 0x100000000ULL;

		retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
						 rxs, &decrypt_error);
		if (retval)
			goto requeue_drop_frag;

		if (rs.is_mybeacon) {
			sc->hw_busy_count = 0;
			ath_start_rx_poll(sc, 3);
		}

		/* Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb */
		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize,
					      GFP_ATOMIC);

		/* If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
		 * skb and put it at the tail of the sc->rx.rxbuf list for
		 * processing. */
		if (!requeue_skb) {
			RX_STAT_INC(rx_oom_err);
			goto requeue_drop_frag;
		}

		/* Unmap the frame */
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 common->rx_bufsize,
				 dma_type);

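		/*
		 * Frame length is the payload plus any in-band RX status the
		 * hardware prepended (EDMA only); strip that status header so
		 * skb->data points at the 802.11 frame.
		 */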
		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
		if (ah->caps.rx_status_len)
			skb_pull(skb, ah->caps.rx_status_len);

		if (!rs.rs_more)
			ath9k_rx_skb_postprocess(common, hdr_skb, &rs,
						 rxs, decrypt_error);

		/* We will now give hardware our shiny new allocated skb */
		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
						 common->rx_bufsize,
						 dma_type);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(requeue_skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common, "dma_mapping_error() on RX\n");
			ieee80211_rx(hw, skb);
			break;
		}

		if (rs.rs_more) {
			RX_STAT_INC(rx_frags);
			/*
			 * rs_more indicates chained descriptors which can be
			 * used to link buffers together for a sort of
			 * scatter-gather operation.
			 */
			if (sc->rx.frag) {
				/* too many fragments - cannot handle frame */
				dev_kfree_skb_any(sc->rx.frag);
				dev_kfree_skb_any(skb);
				RX_STAT_INC(rx_too_many_frags_err);
				skb = NULL;
			}
			sc->rx.frag = skb;
			goto requeue;
		}

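		/*
		 * Last buffer of a chained frame: make room in the first
		 * fragment's tail and append this buffer's payload to it.
		 */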
		if (sc->rx.frag) {
			int space = skb->len - skb_tailroom(hdr_skb);

			if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
				dev_kfree_skb(skb);
				RX_STAT_INC(rx_oom_err);
				goto requeue_drop_frag;
			}

			sc->rx.frag = NULL;

			skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
						  skb->len);
			dev_kfree_skb_any(skb);
			skb = hdr_skb;
		}

		if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
			/*
			 * change the default rx antenna if rx diversity
			 * chooses the other antenna 3 times in a row.
			 */
			if (sc->rx.defant != rs.rs_antenna) {
				if (++sc->rx.rxotherant >= 3)
					ath_setdefantenna(sc, rs.rs_antenna);
			} else {
				sc->rx.rxotherant = 0;
			}
		}

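		/* The frame was flagged as MMIC-stripped, but the 8-byte
		 * Michael MIC is still at the tail of the buffer; trim it. */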
		if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
			skb_trim(skb, skb->len - 8);

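		/*
		 * Feed the powersave logic while we are waiting for a beacon,
		 * CAB or PS-Poll data, or when hardware auto-sleep is in use.
		 */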
		spin_lock_irqsave(&sc->sc_pm_lock, flags);

		if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
				     PS_WAIT_FOR_CAB |
				     PS_WAIT_FOR_PSPOLL_DATA)) ||
		    ath9k_check_auto_sleep(sc))
			ath_rx_ps(sc, skb, rs.is_mybeacon);
		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

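		/* Run the diversity combining scan only while diversity is
		 * active (ant_rx == 3). */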
		if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) &&
		    sc->ant_rx == 3)
			ath_ant_comb_scan(sc, &rs);

		ieee80211_rx(hw, skb);

requeue_drop_frag:
		if (sc->rx.frag) {
			dev_kfree_skb_any(sc->rx.frag);
			sc->rx.frag = NULL;
		}
requeue:
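		/*
		 * Return the buffer to the hardware: EDMA buffers go back
		 * into the RX FIFO, legacy buffers are re-linked into the
		 * descriptor chain.
		 */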
		if (edma) {
			list_add_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_edma_buf_link(sc, qtype);
		} else {
			list_move_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_buf_link(sc, bf);
			if (!flush)
				ath9k_hw_rxena(ah);
		}
	} while (1);

	spin_unlock_bh(&sc->rx.rxbuflock);

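	/*
	 * RXEOL/RXORN are masked in the ISR when the RX queue runs out of
	 * buffers, to avoid an interrupt storm; re-enable them now that the
	 * queue has been replenished.
	 */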
	if (!(ah->imask & ATH9K_INT_RXEOL)) {
		ah->imask |= (ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
		ath9k_hw_set_interrupts(ah);
	}

	return 0;
}