Loading...
1/*
2 * Atheros CARL9170 driver
3 *
4 * mac80211 interaction code
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING. If not, see
21 * http://www.gnu.org/licenses/.
22 *
23 * This file incorporates work covered by the following copyright and
24 * permission notice:
25 * Copyright (c) 2007-2008 Atheros Communications, Inc.
26 *
27 * Permission to use, copy, modify, and/or distribute this software for any
28 * purpose with or without fee is hereby granted, provided that the above
29 * copyright notice and this permission notice appear in all copies.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
38 */
39
40#include <linux/slab.h>
41#include <linux/module.h>
42#include <linux/etherdevice.h>
43#include <linux/random.h>
44#include <net/mac80211.h>
45#include <net/cfg80211.h>
46#include "hw.h"
47#include "carl9170.h"
48#include "cmd.h"
49
50static bool modparam_nohwcrypt;
51module_param_named(nohwcrypt, modparam_nohwcrypt, bool, 0444);
52MODULE_PARM_DESC(nohwcrypt, "Disable hardware crypto offload.");
53
54int modparam_noht;
55module_param_named(noht, modparam_noht, int, 0444);
56MODULE_PARM_DESC(noht, "Disable MPDU aggregation.");
57
58#define RATE(_bitrate, _hw_rate, _txpidx, _flags) { \
59 .bitrate = (_bitrate), \
60 .flags = (_flags), \
61 .hw_value = (_hw_rate) | (_txpidx) << 4, \
62}
63
64struct ieee80211_rate __carl9170_ratetable[] = {
65 RATE(10, 0, 0, 0),
66 RATE(20, 1, 1, IEEE80211_RATE_SHORT_PREAMBLE),
67 RATE(55, 2, 2, IEEE80211_RATE_SHORT_PREAMBLE),
68 RATE(110, 3, 3, IEEE80211_RATE_SHORT_PREAMBLE),
69 RATE(60, 0xb, 0, 0),
70 RATE(90, 0xf, 0, 0),
71 RATE(120, 0xa, 0, 0),
72 RATE(180, 0xe, 0, 0),
73 RATE(240, 0x9, 0, 0),
74 RATE(360, 0xd, 1, 0),
75 RATE(480, 0x8, 2, 0),
76 RATE(540, 0xc, 3, 0),
77};
78#undef RATE
79
80#define carl9170_g_ratetable (__carl9170_ratetable + 0)
81#define carl9170_g_ratetable_size 12
82#define carl9170_a_ratetable (__carl9170_ratetable + 4)
83#define carl9170_a_ratetable_size 8
84
85/*
86 * NB: The hw_value is used as an index into the carl9170_phy_freq_params
87 * array in phy.c so that we don't have to do frequency lookups!
88 */
89#define CHAN(_freq, _idx) { \
90 .center_freq = (_freq), \
91 .hw_value = (_idx), \
92 .max_power = 18, /* XXX */ \
93}
94
95static struct ieee80211_channel carl9170_2ghz_chantable[] = {
96 CHAN(2412, 0),
97 CHAN(2417, 1),
98 CHAN(2422, 2),
99 CHAN(2427, 3),
100 CHAN(2432, 4),
101 CHAN(2437, 5),
102 CHAN(2442, 6),
103 CHAN(2447, 7),
104 CHAN(2452, 8),
105 CHAN(2457, 9),
106 CHAN(2462, 10),
107 CHAN(2467, 11),
108 CHAN(2472, 12),
109 CHAN(2484, 13),
110};
111
112static struct ieee80211_channel carl9170_5ghz_chantable[] = {
113 CHAN(4920, 14),
114 CHAN(4940, 15),
115 CHAN(4960, 16),
116 CHAN(4980, 17),
117 CHAN(5040, 18),
118 CHAN(5060, 19),
119 CHAN(5080, 20),
120 CHAN(5180, 21),
121 CHAN(5200, 22),
122 CHAN(5220, 23),
123 CHAN(5240, 24),
124 CHAN(5260, 25),
125 CHAN(5280, 26),
126 CHAN(5300, 27),
127 CHAN(5320, 28),
128 CHAN(5500, 29),
129 CHAN(5520, 30),
130 CHAN(5540, 31),
131 CHAN(5560, 32),
132 CHAN(5580, 33),
133 CHAN(5600, 34),
134 CHAN(5620, 35),
135 CHAN(5640, 36),
136 CHAN(5660, 37),
137 CHAN(5680, 38),
138 CHAN(5700, 39),
139 CHAN(5745, 40),
140 CHAN(5765, 41),
141 CHAN(5785, 42),
142 CHAN(5805, 43),
143 CHAN(5825, 44),
144 CHAN(5170, 45),
145 CHAN(5190, 46),
146 CHAN(5210, 47),
147 CHAN(5230, 48),
148};
149#undef CHAN
150
151#define CARL9170_HT_CAP \
152{ \
153 .ht_supported = true, \
154 .cap = IEEE80211_HT_CAP_MAX_AMSDU | \
155 IEEE80211_HT_CAP_SUP_WIDTH_20_40 | \
156 IEEE80211_HT_CAP_SGI_40 | \
157 IEEE80211_HT_CAP_DSSSCCK40 | \
158 IEEE80211_HT_CAP_SM_PS, \
159 .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K, \
160 .ampdu_density = IEEE80211_HT_MPDU_DENSITY_8, \
161 .mcs = { \
162 .rx_mask = { 0xff, 0xff, 0, 0, 0x1, 0, 0, 0, 0, 0, }, \
163 .rx_highest = cpu_to_le16(300), \
164 .tx_params = IEEE80211_HT_MCS_TX_DEFINED, \
165 }, \
166}
167
168static struct ieee80211_supported_band carl9170_band_2GHz = {
169 .channels = carl9170_2ghz_chantable,
170 .n_channels = ARRAY_SIZE(carl9170_2ghz_chantable),
171 .bitrates = carl9170_g_ratetable,
172 .n_bitrates = carl9170_g_ratetable_size,
173 .ht_cap = CARL9170_HT_CAP,
174};
175
176static struct ieee80211_supported_band carl9170_band_5GHz = {
177 .channels = carl9170_5ghz_chantable,
178 .n_channels = ARRAY_SIZE(carl9170_5ghz_chantable),
179 .bitrates = carl9170_a_ratetable,
180 .n_bitrates = carl9170_a_ratetable_size,
181 .ht_cap = CARL9170_HT_CAP,
182};
183
184static void carl9170_ampdu_gc(struct ar9170 *ar)
185{
186 struct carl9170_sta_tid *tid_info;
187 LIST_HEAD(tid_gc);
188
189 rcu_read_lock();
190 list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
191 spin_lock_bh(&ar->tx_ampdu_list_lock);
192 if (tid_info->state == CARL9170_TID_STATE_SHUTDOWN) {
193 tid_info->state = CARL9170_TID_STATE_KILLED;
194 list_del_rcu(&tid_info->list);
195 ar->tx_ampdu_list_len--;
196 list_add_tail(&tid_info->tmp_list, &tid_gc);
197 }
198 spin_unlock_bh(&ar->tx_ampdu_list_lock);
199
200 }
201 rcu_assign_pointer(ar->tx_ampdu_iter, tid_info);
202 rcu_read_unlock();
203
204 synchronize_rcu();
205
206 while (!list_empty(&tid_gc)) {
207 struct sk_buff *skb;
208 tid_info = list_first_entry(&tid_gc, struct carl9170_sta_tid,
209 tmp_list);
210
211 while ((skb = __skb_dequeue(&tid_info->queue)))
212 carl9170_tx_status(ar, skb, false);
213
214 list_del_init(&tid_info->tmp_list);
215 kfree(tid_info);
216 }
217}
218
219static void carl9170_flush(struct ar9170 *ar, bool drop_queued)
220{
221 if (drop_queued) {
222 int i;
223
224 /*
225 * We can only drop frames which have not been uploaded
226 * to the device yet.
227 */
228
229 for (i = 0; i < ar->hw->queues; i++) {
230 struct sk_buff *skb;
231
232 while ((skb = skb_dequeue(&ar->tx_pending[i]))) {
233 struct ieee80211_tx_info *info;
234
235 info = IEEE80211_SKB_CB(skb);
236 if (info->flags & IEEE80211_TX_CTL_AMPDU)
237 atomic_dec(&ar->tx_ampdu_upload);
238
239 carl9170_tx_status(ar, skb, false);
240 }
241 }
242 }
243
244 /* Wait for all other outstanding frames to timeout. */
245 if (atomic_read(&ar->tx_total_queued))
246 WARN_ON(wait_for_completion_timeout(&ar->tx_flush, HZ) == 0);
247}
248
249static void carl9170_flush_ba(struct ar9170 *ar)
250{
251 struct sk_buff_head free;
252 struct carl9170_sta_tid *tid_info;
253 struct sk_buff *skb;
254
255 __skb_queue_head_init(&free);
256
257 rcu_read_lock();
258 spin_lock_bh(&ar->tx_ampdu_list_lock);
259 list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
260 if (tid_info->state > CARL9170_TID_STATE_SUSPEND) {
261 tid_info->state = CARL9170_TID_STATE_SUSPEND;
262
263 spin_lock(&tid_info->lock);
264 while ((skb = __skb_dequeue(&tid_info->queue)))
265 __skb_queue_tail(&free, skb);
266 spin_unlock(&tid_info->lock);
267 }
268 }
269 spin_unlock_bh(&ar->tx_ampdu_list_lock);
270 rcu_read_unlock();
271
272 while ((skb = __skb_dequeue(&free)))
273 carl9170_tx_status(ar, skb, false);
274}
275
276static void carl9170_zap_queues(struct ar9170 *ar)
277{
278 struct carl9170_vif_info *cvif;
279 unsigned int i;
280
281 carl9170_ampdu_gc(ar);
282
283 carl9170_flush_ba(ar);
284 carl9170_flush(ar, true);
285
286 for (i = 0; i < ar->hw->queues; i++) {
287 spin_lock_bh(&ar->tx_status[i].lock);
288 while (!skb_queue_empty(&ar->tx_status[i])) {
289 struct sk_buff *skb;
290
291 skb = skb_peek(&ar->tx_status[i]);
292 carl9170_tx_get_skb(skb);
293 spin_unlock_bh(&ar->tx_status[i].lock);
294 carl9170_tx_drop(ar, skb);
295 spin_lock_bh(&ar->tx_status[i].lock);
296 carl9170_tx_put_skb(skb);
297 }
298 spin_unlock_bh(&ar->tx_status[i].lock);
299 }
300
301 BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_SOFT < 1);
302 BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD < CARL9170_NUM_TX_LIMIT_SOFT);
303 BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD >= CARL9170_BAW_BITS);
304
305 /* reinitialize queues statistics */
306 memset(&ar->tx_stats, 0, sizeof(ar->tx_stats));
307 for (i = 0; i < ar->hw->queues; i++)
308 ar->tx_stats[i].limit = CARL9170_NUM_TX_LIMIT_HARD;
309
310 for (i = 0; i < DIV_ROUND_UP(ar->fw.mem_blocks, BITS_PER_LONG); i++)
311 ar->mem_bitmap[i] = 0;
312
313 rcu_read_lock();
314 list_for_each_entry_rcu(cvif, &ar->vif_list, list) {
315 spin_lock_bh(&ar->beacon_lock);
316 dev_kfree_skb_any(cvif->beacon);
317 cvif->beacon = NULL;
318 spin_unlock_bh(&ar->beacon_lock);
319 }
320 rcu_read_unlock();
321
322 atomic_set(&ar->tx_ampdu_upload, 0);
323 atomic_set(&ar->tx_ampdu_scheduler, 0);
324 atomic_set(&ar->tx_total_pending, 0);
325 atomic_set(&ar->tx_total_queued, 0);
326 atomic_set(&ar->mem_free_blocks, ar->fw.mem_blocks);
327}
328
329#define CARL9170_FILL_QUEUE(queue, ai_fs, cwmin, cwmax, _txop) \
330do { \
331 queue.aifs = ai_fs; \
332 queue.cw_min = cwmin; \
333 queue.cw_max = cwmax; \
334 queue.txop = _txop; \
335} while (0)
336
337static int carl9170_op_start(struct ieee80211_hw *hw)
338{
339 struct ar9170 *ar = hw->priv;
340 int err, i;
341
342 mutex_lock(&ar->mutex);
343
344 carl9170_zap_queues(ar);
345
346 /* reset QoS defaults */
347 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VO], 2, 3, 7, 47);
348 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VI], 2, 7, 15, 94);
349 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BE], 3, 15, 1023, 0);
350 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BK], 7, 15, 1023, 0);
351 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_SPECIAL], 2, 3, 7, 0);
352
353 ar->current_factor = ar->current_density = -1;
354 /* "The first key is unique." */
355 ar->usedkeys = 1;
356 ar->filter_state = 0;
357 ar->ps.last_action = jiffies;
358 ar->ps.last_slept = jiffies;
359 ar->erp_mode = CARL9170_ERP_AUTO;
360
361 /* Set "disable hw crypto offload" whenever the module parameter
362 * nohwcrypt is true or if the firmware does not support it.
363 */
364 ar->disable_offload = modparam_nohwcrypt |
365 ar->fw.disable_offload_fw;
366 ar->rx_software_decryption = ar->disable_offload;
367
368 for (i = 0; i < ar->hw->queues; i++) {
369 ar->queue_stop_timeout[i] = jiffies;
370 ar->max_queue_stop_timeout[i] = 0;
371 }
372
373 atomic_set(&ar->mem_allocs, 0);
374
375 err = carl9170_usb_open(ar);
376 if (err)
377 goto out;
378
379 err = carl9170_init_mac(ar);
380 if (err)
381 goto out;
382
383 err = carl9170_set_qos(ar);
384 if (err)
385 goto out;
386
387 if (ar->fw.rx_filter) {
388 err = carl9170_rx_filter(ar, CARL9170_RX_FILTER_OTHER_RA |
389 CARL9170_RX_FILTER_CTL_OTHER | CARL9170_RX_FILTER_BAD);
390 if (err)
391 goto out;
392 }
393
394 err = carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER,
395 AR9170_DMA_TRIGGER_RXQ);
396 if (err)
397 goto out;
398
399 /* Clear key-cache */
400 for (i = 0; i < AR9170_CAM_MAX_USER + 4; i++) {
401 err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
402 0, NULL, 0);
403 if (err)
404 goto out;
405
406 err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
407 1, NULL, 0);
408 if (err)
409 goto out;
410
411 if (i < AR9170_CAM_MAX_USER) {
412 err = carl9170_disable_key(ar, i);
413 if (err)
414 goto out;
415 }
416 }
417
418 carl9170_set_state_when(ar, CARL9170_IDLE, CARL9170_STARTED);
419
420 ieee80211_queue_delayed_work(ar->hw, &ar->stat_work,
421 round_jiffies(msecs_to_jiffies(CARL9170_STAT_WORK)));
422
423 ieee80211_wake_queues(ar->hw);
424 err = 0;
425
426out:
427 mutex_unlock(&ar->mutex);
428 return err;
429}
430
431static void carl9170_cancel_worker(struct ar9170 *ar)
432{
433 cancel_delayed_work_sync(&ar->stat_work);
434 cancel_delayed_work_sync(&ar->tx_janitor);
435#ifdef CONFIG_CARL9170_LEDS
436 cancel_delayed_work_sync(&ar->led_work);
437#endif /* CONFIG_CARL9170_LEDS */
438 cancel_work_sync(&ar->ps_work);
439 cancel_work_sync(&ar->ping_work);
440 cancel_work_sync(&ar->ampdu_work);
441}
442
443static void carl9170_op_stop(struct ieee80211_hw *hw)
444{
445 struct ar9170 *ar = hw->priv;
446
447 carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);
448
449 ieee80211_stop_queues(ar->hw);
450
451 mutex_lock(&ar->mutex);
452 if (IS_ACCEPTING_CMD(ar)) {
453 RCU_INIT_POINTER(ar->beacon_iter, NULL);
454
455 carl9170_led_set_state(ar, 0);
456
457 /* stop DMA */
458 carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER, 0);
459 carl9170_usb_stop(ar);
460 }
461
462 carl9170_zap_queues(ar);
463 mutex_unlock(&ar->mutex);
464
465 carl9170_cancel_worker(ar);
466}
467
468static void carl9170_restart_work(struct work_struct *work)
469{
470 struct ar9170 *ar = container_of(work, struct ar9170,
471 restart_work);
472 int err = -EIO;
473
474 ar->usedkeys = 0;
475 ar->filter_state = 0;
476 carl9170_cancel_worker(ar);
477
478 mutex_lock(&ar->mutex);
479 if (!ar->force_usb_reset) {
480 err = carl9170_usb_restart(ar);
481 if (net_ratelimit()) {
482 if (err)
483 dev_err(&ar->udev->dev, "Failed to restart device (%d).\n", err);
484 else
485 dev_info(&ar->udev->dev, "device restarted successfully.\n");
486 }
487 }
488 carl9170_zap_queues(ar);
489 mutex_unlock(&ar->mutex);
490
491 if (!err && !ar->force_usb_reset) {
492 ar->restart_counter++;
493 atomic_set(&ar->pending_restarts, 0);
494
495 ieee80211_restart_hw(ar->hw);
496 } else {
497 /*
498 * The reset was unsuccessful and the device seems to
499 * be dead. But there's still one option: a low-level
500 * usb subsystem reset...
501 */
502
503 carl9170_usb_reset(ar);
504 }
505}
506
507void carl9170_restart(struct ar9170 *ar, const enum carl9170_restart_reasons r)
508{
509 carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);
510
511 /*
512 * Sometimes, an error can trigger several different reset events.
513 * By ignoring these *surplus* reset events, the device won't be
514 * killed again, right after it has recovered.
515 */
516 if (atomic_inc_return(&ar->pending_restarts) > 1) {
517 dev_dbg(&ar->udev->dev, "ignoring restart (%d)\n", r);
518 return;
519 }
520
521 ieee80211_stop_queues(ar->hw);
522
523 dev_err(&ar->udev->dev, "restart device (%d)\n", r);
524
525 if (!WARN_ON(r == CARL9170_RR_NO_REASON) ||
526 !WARN_ON(r >= __CARL9170_RR_LAST))
527 ar->last_reason = r;
528
529 if (!ar->registered)
530 return;
531
532 if (!IS_ACCEPTING_CMD(ar) || ar->needs_full_reset)
533 ar->force_usb_reset = true;
534
535 ieee80211_queue_work(ar->hw, &ar->restart_work);
536
537 /*
538 * At this point, the device instance might have vanished/disabled.
539 * So, don't put any code which access the ar9170 struct
540 * without proper protection.
541 */
542}
543
544static void carl9170_ping_work(struct work_struct *work)
545{
546 struct ar9170 *ar = container_of(work, struct ar9170, ping_work);
547 int err;
548
549 if (!IS_STARTED(ar))
550 return;
551
552 mutex_lock(&ar->mutex);
553 err = carl9170_echo_test(ar, 0xdeadbeef);
554 if (err)
555 carl9170_restart(ar, CARL9170_RR_UNRESPONSIVE_DEVICE);
556 mutex_unlock(&ar->mutex);
557}
558
559static int carl9170_init_interface(struct ar9170 *ar,
560 struct ieee80211_vif *vif)
561{
562 struct ath_common *common = &ar->common;
563 int err;
564
565 if (!vif) {
566 WARN_ON_ONCE(IS_STARTED(ar));
567 return 0;
568 }
569
570 memcpy(common->macaddr, vif->addr, ETH_ALEN);
571
572 /* We have to fall back to software crypto, whenever
573 * the user choose to participates in an IBSS. HW
574 * offload for IBSS RSN is not supported by this driver.
575 *
576 * NOTE: If the previous main interface has already
577 * disabled hw crypto offload, we have to keep this
578 * previous disable_offload setting as it was.
579 * Altough ideally, we should notify mac80211 and tell
580 * it to forget about any HW crypto offload for now.
581 */
582 ar->disable_offload |= ((vif->type != NL80211_IFTYPE_STATION) &&
583 (vif->type != NL80211_IFTYPE_AP));
584
585 /* The driver used to have P2P GO+CLIENT support,
586 * but since this was dropped and we don't know if
587 * there are any gremlins lurking in the shadows,
588 * so best we keep HW offload disabled for P2P.
589 */
590 ar->disable_offload |= vif->p2p;
591
592 ar->rx_software_decryption = ar->disable_offload;
593
594 err = carl9170_set_operating_mode(ar);
595 return err;
596}
597
598static int carl9170_op_add_interface(struct ieee80211_hw *hw,
599 struct ieee80211_vif *vif)
600{
601 struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
602 struct ieee80211_vif *main_vif, *old_main = NULL;
603 struct ar9170 *ar = hw->priv;
604 int vif_id = -1, err = 0;
605
606 mutex_lock(&ar->mutex);
607 rcu_read_lock();
608 if (vif_priv->active) {
609 /*
610 * Skip the interface structure initialization,
611 * if the vif survived the _restart call.
612 */
613 vif_id = vif_priv->id;
614 vif_priv->enable_beacon = false;
615
616 spin_lock_bh(&ar->beacon_lock);
617 dev_kfree_skb_any(vif_priv->beacon);
618 vif_priv->beacon = NULL;
619 spin_unlock_bh(&ar->beacon_lock);
620
621 goto init;
622 }
623
624 /* Because the AR9170 HW's MAC doesn't provide full support for
625 * multiple, independent interfaces [of different operation modes].
626 * We have to select ONE main interface [main mode of HW], but we
627 * can have multiple slaves [AKA: entry in the ACK-table].
628 *
629 * The first (from HEAD/TOP) interface in the ar->vif_list is
630 * always the main intf. All following intfs in this list
631 * are considered to be slave intfs.
632 */
633 main_vif = carl9170_get_main_vif(ar);
634
635 if (main_vif) {
636 switch (main_vif->type) {
637 case NL80211_IFTYPE_STATION:
638 if (vif->type == NL80211_IFTYPE_STATION)
639 break;
640
641 err = -EBUSY;
642 rcu_read_unlock();
643
644 goto unlock;
645
646 case NL80211_IFTYPE_MESH_POINT:
647 case NL80211_IFTYPE_AP:
648 if ((vif->type == NL80211_IFTYPE_STATION) ||
649 (vif->type == NL80211_IFTYPE_WDS) ||
650 (vif->type == NL80211_IFTYPE_AP) ||
651 (vif->type == NL80211_IFTYPE_MESH_POINT))
652 break;
653
654 err = -EBUSY;
655 rcu_read_unlock();
656 goto unlock;
657
658 default:
659 rcu_read_unlock();
660 goto unlock;
661 }
662 }
663
664 vif_id = bitmap_find_free_region(&ar->vif_bitmap, ar->fw.vif_num, 0);
665
666 if (vif_id < 0) {
667 rcu_read_unlock();
668
669 err = -ENOSPC;
670 goto unlock;
671 }
672
673 BUG_ON(ar->vif_priv[vif_id].id != vif_id);
674
675 vif_priv->active = true;
676 vif_priv->id = vif_id;
677 vif_priv->enable_beacon = false;
678 ar->vifs++;
679 if (old_main) {
680 /* We end up in here, if the main interface is being replaced.
681 * Put the new main interface at the HEAD of the list and the
682 * previous inteface will automatically become second in line.
683 */
684 list_add_rcu(&vif_priv->list, &ar->vif_list);
685 } else {
686 /* Add new inteface. If the list is empty, it will become the
687 * main inteface, otherwise it will be slave.
688 */
689 list_add_tail_rcu(&vif_priv->list, &ar->vif_list);
690 }
691 rcu_assign_pointer(ar->vif_priv[vif_id].vif, vif);
692
693init:
694 main_vif = carl9170_get_main_vif(ar);
695
696 if (main_vif == vif) {
697 rcu_assign_pointer(ar->beacon_iter, vif_priv);
698 rcu_read_unlock();
699
700 if (old_main) {
701 struct carl9170_vif_info *old_main_priv =
702 (void *) old_main->drv_priv;
703 /* downgrade old main intf to slave intf.
704 * NOTE: We are no longer under rcu_read_lock.
705 * But we are still holding ar->mutex, so the
706 * vif data [id, addr] is safe.
707 */
708 err = carl9170_mod_virtual_mac(ar, old_main_priv->id,
709 old_main->addr);
710 if (err)
711 goto unlock;
712 }
713
714 err = carl9170_init_interface(ar, vif);
715 if (err)
716 goto unlock;
717 } else {
718 rcu_read_unlock();
719 err = carl9170_mod_virtual_mac(ar, vif_id, vif->addr);
720
721 if (err)
722 goto unlock;
723 }
724
725 if (ar->fw.tx_seq_table) {
726 err = carl9170_write_reg(ar, ar->fw.tx_seq_table + vif_id * 4,
727 0);
728 if (err)
729 goto unlock;
730 }
731
732unlock:
733 if (err && (vif_id >= 0)) {
734 vif_priv->active = false;
735 bitmap_release_region(&ar->vif_bitmap, vif_id, 0);
736 ar->vifs--;
737 RCU_INIT_POINTER(ar->vif_priv[vif_id].vif, NULL);
738 list_del_rcu(&vif_priv->list);
739 mutex_unlock(&ar->mutex);
740 synchronize_rcu();
741 } else {
742 if (ar->vifs > 1)
743 ar->ps.off_override |= PS_OFF_VIF;
744
745 mutex_unlock(&ar->mutex);
746 }
747
748 return err;
749}
750
751static void carl9170_op_remove_interface(struct ieee80211_hw *hw,
752 struct ieee80211_vif *vif)
753{
754 struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
755 struct ieee80211_vif *main_vif;
756 struct ar9170 *ar = hw->priv;
757 unsigned int id;
758
759 mutex_lock(&ar->mutex);
760
761 if (WARN_ON_ONCE(!vif_priv->active))
762 goto unlock;
763
764 ar->vifs--;
765
766 rcu_read_lock();
767 main_vif = carl9170_get_main_vif(ar);
768
769 id = vif_priv->id;
770
771 vif_priv->active = false;
772 WARN_ON(vif_priv->enable_beacon);
773 vif_priv->enable_beacon = false;
774 list_del_rcu(&vif_priv->list);
775 RCU_INIT_POINTER(ar->vif_priv[id].vif, NULL);
776
777 if (vif == main_vif) {
778 rcu_read_unlock();
779
780 if (ar->vifs) {
781 WARN_ON(carl9170_init_interface(ar,
782 carl9170_get_main_vif(ar)));
783 } else {
784 carl9170_set_operating_mode(ar);
785 }
786 } else {
787 rcu_read_unlock();
788
789 WARN_ON(carl9170_mod_virtual_mac(ar, id, NULL));
790 }
791
792 carl9170_update_beacon(ar, false);
793 carl9170_flush_cab(ar, id);
794
795 spin_lock_bh(&ar->beacon_lock);
796 dev_kfree_skb_any(vif_priv->beacon);
797 vif_priv->beacon = NULL;
798 spin_unlock_bh(&ar->beacon_lock);
799
800 bitmap_release_region(&ar->vif_bitmap, id, 0);
801
802 carl9170_set_beacon_timers(ar);
803
804 if (ar->vifs == 1)
805 ar->ps.off_override &= ~PS_OFF_VIF;
806
807unlock:
808 mutex_unlock(&ar->mutex);
809
810 synchronize_rcu();
811}
812
813void carl9170_ps_check(struct ar9170 *ar)
814{
815 ieee80211_queue_work(ar->hw, &ar->ps_work);
816}
817
818/* caller must hold ar->mutex */
819static int carl9170_ps_update(struct ar9170 *ar)
820{
821 bool ps = false;
822 int err = 0;
823
824 if (!ar->ps.off_override)
825 ps = (ar->hw->conf.flags & IEEE80211_CONF_PS);
826
827 if (ps != ar->ps.state) {
828 err = carl9170_powersave(ar, ps);
829 if (err)
830 return err;
831
832 if (ar->ps.state && !ps) {
833 ar->ps.sleep_ms = jiffies_to_msecs(jiffies -
834 ar->ps.last_action);
835 }
836
837 if (ps)
838 ar->ps.last_slept = jiffies;
839
840 ar->ps.last_action = jiffies;
841 ar->ps.state = ps;
842 }
843
844 return 0;
845}
846
847static void carl9170_ps_work(struct work_struct *work)
848{
849 struct ar9170 *ar = container_of(work, struct ar9170,
850 ps_work);
851 mutex_lock(&ar->mutex);
852 if (IS_STARTED(ar))
853 WARN_ON_ONCE(carl9170_ps_update(ar) != 0);
854 mutex_unlock(&ar->mutex);
855}
856
857static int carl9170_update_survey(struct ar9170 *ar, bool flush, bool noise)
858{
859 int err;
860
861 if (noise) {
862 err = carl9170_get_noisefloor(ar);
863 if (err)
864 return err;
865 }
866
867 if (ar->fw.hw_counters) {
868 err = carl9170_collect_tally(ar);
869 if (err)
870 return err;
871 }
872
873 if (flush)
874 memset(&ar->tally, 0, sizeof(ar->tally));
875
876 return 0;
877}
878
879static void carl9170_stat_work(struct work_struct *work)
880{
881 struct ar9170 *ar = container_of(work, struct ar9170, stat_work.work);
882 int err;
883
884 mutex_lock(&ar->mutex);
885 err = carl9170_update_survey(ar, false, true);
886 mutex_unlock(&ar->mutex);
887
888 if (err)
889 return;
890
891 ieee80211_queue_delayed_work(ar->hw, &ar->stat_work,
892 round_jiffies(msecs_to_jiffies(CARL9170_STAT_WORK)));
893}
894
895static int carl9170_op_config(struct ieee80211_hw *hw, u32 changed)
896{
897 struct ar9170 *ar = hw->priv;
898 int err = 0;
899
900 mutex_lock(&ar->mutex);
901 if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
902 /* TODO */
903 err = 0;
904 }
905
906 if (changed & IEEE80211_CONF_CHANGE_PS) {
907 err = carl9170_ps_update(ar);
908 if (err)
909 goto out;
910 }
911
912 if (changed & IEEE80211_CONF_CHANGE_SMPS) {
913 /* TODO */
914 err = 0;
915 }
916
917 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
918 enum nl80211_channel_type channel_type =
919 cfg80211_get_chandef_type(&hw->conf.chandef);
920
921 /* adjust slot time for 5 GHz */
922 err = carl9170_set_slot_time(ar);
923 if (err)
924 goto out;
925
926 err = carl9170_update_survey(ar, true, false);
927 if (err)
928 goto out;
929
930 err = carl9170_set_channel(ar, hw->conf.chandef.chan,
931 channel_type);
932 if (err)
933 goto out;
934
935 err = carl9170_update_survey(ar, false, true);
936 if (err)
937 goto out;
938
939 err = carl9170_set_dyn_sifs_ack(ar);
940 if (err)
941 goto out;
942
943 err = carl9170_set_rts_cts_rate(ar);
944 if (err)
945 goto out;
946 }
947
948 if (changed & IEEE80211_CONF_CHANGE_POWER) {
949 err = carl9170_set_mac_tpc(ar, ar->hw->conf.chandef.chan);
950 if (err)
951 goto out;
952 }
953
954out:
955 mutex_unlock(&ar->mutex);
956 return err;
957}
958
959static u64 carl9170_op_prepare_multicast(struct ieee80211_hw *hw,
960 struct netdev_hw_addr_list *mc_list)
961{
962 struct netdev_hw_addr *ha;
963 u64 mchash;
964
965 /* always get broadcast frames */
966 mchash = 1ULL << (0xff >> 2);
967
968 netdev_hw_addr_list_for_each(ha, mc_list)
969 mchash |= 1ULL << (ha->addr[5] >> 2);
970
971 return mchash;
972}
973
974static void carl9170_op_configure_filter(struct ieee80211_hw *hw,
975 unsigned int changed_flags,
976 unsigned int *new_flags,
977 u64 multicast)
978{
979 struct ar9170 *ar = hw->priv;
980
981 /* mask supported flags */
982 *new_flags &= FIF_ALLMULTI | ar->rx_filter_caps;
983
984 if (!IS_ACCEPTING_CMD(ar))
985 return;
986
987 mutex_lock(&ar->mutex);
988
989 ar->filter_state = *new_flags;
990 /*
991 * We can support more by setting the sniffer bit and
992 * then checking the error flags, later.
993 */
994
995 if (*new_flags & FIF_ALLMULTI)
996 multicast = ~0ULL;
997
998 if (multicast != ar->cur_mc_hash)
999 WARN_ON(carl9170_update_multicast(ar, multicast));
1000
1001 if (changed_flags & FIF_OTHER_BSS) {
1002 ar->sniffer_enabled = !!(*new_flags & FIF_OTHER_BSS);
1003
1004 WARN_ON(carl9170_set_operating_mode(ar));
1005 }
1006
1007 if (ar->fw.rx_filter && changed_flags & ar->rx_filter_caps) {
1008 u32 rx_filter = 0;
1009
1010 if (!ar->fw.ba_filter)
1011 rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;
1012
1013 if (!(*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL)))
1014 rx_filter |= CARL9170_RX_FILTER_BAD;
1015
1016 if (!(*new_flags & FIF_CONTROL))
1017 rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;
1018
1019 if (!(*new_flags & FIF_PSPOLL))
1020 rx_filter |= CARL9170_RX_FILTER_CTL_PSPOLL;
1021
1022 if (!(*new_flags & FIF_OTHER_BSS)) {
1023 rx_filter |= CARL9170_RX_FILTER_OTHER_RA;
1024 rx_filter |= CARL9170_RX_FILTER_DECRY_FAIL;
1025 }
1026
1027 WARN_ON(carl9170_rx_filter(ar, rx_filter));
1028 }
1029
1030 mutex_unlock(&ar->mutex);
1031}
1032
1033
1034static void carl9170_op_bss_info_changed(struct ieee80211_hw *hw,
1035 struct ieee80211_vif *vif,
1036 struct ieee80211_bss_conf *bss_conf,
1037 u32 changed)
1038{
1039 struct ar9170 *ar = hw->priv;
1040 struct ath_common *common = &ar->common;
1041 int err = 0;
1042 struct carl9170_vif_info *vif_priv;
1043 struct ieee80211_vif *main_vif;
1044
1045 mutex_lock(&ar->mutex);
1046 vif_priv = (void *) vif->drv_priv;
1047 main_vif = carl9170_get_main_vif(ar);
1048 if (WARN_ON(!main_vif))
1049 goto out;
1050
1051 if (changed & BSS_CHANGED_BEACON_ENABLED) {
1052 struct carl9170_vif_info *iter;
1053 int i = 0;
1054
1055 vif_priv->enable_beacon = bss_conf->enable_beacon;
1056 rcu_read_lock();
1057 list_for_each_entry_rcu(iter, &ar->vif_list, list) {
1058 if (iter->active && iter->enable_beacon)
1059 i++;
1060
1061 }
1062 rcu_read_unlock();
1063
1064 ar->beacon_enabled = i;
1065 }
1066
1067 if (changed & BSS_CHANGED_BEACON) {
1068 err = carl9170_update_beacon(ar, false);
1069 if (err)
1070 goto out;
1071 }
1072
1073 if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON |
1074 BSS_CHANGED_BEACON_INT)) {
1075
1076 if (main_vif != vif) {
1077 bss_conf->beacon_int = main_vif->bss_conf.beacon_int;
1078 bss_conf->dtim_period = main_vif->bss_conf.dtim_period;
1079 }
1080
1081 /*
1082 * Therefore a hard limit for the broadcast traffic should
1083 * prevent false alarms.
1084 */
1085 if (vif->type != NL80211_IFTYPE_STATION &&
1086 (bss_conf->beacon_int * bss_conf->dtim_period >=
1087 (CARL9170_QUEUE_STUCK_TIMEOUT / 2))) {
1088 err = -EINVAL;
1089 goto out;
1090 }
1091
1092 err = carl9170_set_beacon_timers(ar);
1093 if (err)
1094 goto out;
1095 }
1096
1097 if (changed & BSS_CHANGED_HT) {
1098 /* TODO */
1099 err = 0;
1100 if (err)
1101 goto out;
1102 }
1103
1104 if (main_vif != vif)
1105 goto out;
1106
1107 /*
1108 * The following settings can only be changed by the
1109 * master interface.
1110 */
1111
1112 if (changed & BSS_CHANGED_BSSID) {
1113 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
1114 err = carl9170_set_operating_mode(ar);
1115 if (err)
1116 goto out;
1117 }
1118
1119 if (changed & BSS_CHANGED_ASSOC) {
1120 ar->common.curaid = bss_conf->aid;
1121 err = carl9170_set_beacon_timers(ar);
1122 if (err)
1123 goto out;
1124 }
1125
1126 if (changed & BSS_CHANGED_ERP_SLOT) {
1127 err = carl9170_set_slot_time(ar);
1128 if (err)
1129 goto out;
1130 }
1131
1132 if (changed & BSS_CHANGED_BASIC_RATES) {
1133 err = carl9170_set_mac_rates(ar);
1134 if (err)
1135 goto out;
1136 }
1137
1138out:
1139 WARN_ON_ONCE(err && IS_STARTED(ar));
1140 mutex_unlock(&ar->mutex);
1141}
1142
1143static u64 carl9170_op_get_tsf(struct ieee80211_hw *hw,
1144 struct ieee80211_vif *vif)
1145{
1146 struct ar9170 *ar = hw->priv;
1147 struct carl9170_tsf_rsp tsf;
1148 int err;
1149
1150 mutex_lock(&ar->mutex);
1151 err = carl9170_exec_cmd(ar, CARL9170_CMD_READ_TSF,
1152 0, NULL, sizeof(tsf), &tsf);
1153 mutex_unlock(&ar->mutex);
1154 if (WARN_ON(err))
1155 return 0;
1156
1157 return le64_to_cpu(tsf.tsf_64);
1158}
1159
1160static int carl9170_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1161 struct ieee80211_vif *vif,
1162 struct ieee80211_sta *sta,
1163 struct ieee80211_key_conf *key)
1164{
1165 struct ar9170 *ar = hw->priv;
1166 int err = 0, i;
1167 u8 ktype;
1168
1169 if (ar->disable_offload || !vif)
1170 return -EOPNOTSUPP;
1171
1172 /* Fall back to software encryption whenever the driver is connected
1173 * to more than one network.
1174 *
1175 * This is very unfortunate, because some machines cannot handle
1176 * the high througput speed in 802.11n networks.
1177 */
1178
1179 if (!is_main_vif(ar, vif)) {
1180 mutex_lock(&ar->mutex);
1181 goto err_softw;
1182 }
1183
1184 /*
1185 * While the hardware supports *catch-all* key, for offloading
1186 * group-key en-/de-cryption. The way of how the hardware
1187 * decides which keyId maps to which key, remains a mystery...
1188 */
1189 if ((vif->type != NL80211_IFTYPE_STATION &&
1190 vif->type != NL80211_IFTYPE_ADHOC) &&
1191 !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
1192 return -EOPNOTSUPP;
1193
1194 switch (key->cipher) {
1195 case WLAN_CIPHER_SUITE_WEP40:
1196 ktype = AR9170_ENC_ALG_WEP64;
1197 break;
1198 case WLAN_CIPHER_SUITE_WEP104:
1199 ktype = AR9170_ENC_ALG_WEP128;
1200 break;
1201 case WLAN_CIPHER_SUITE_TKIP:
1202 ktype = AR9170_ENC_ALG_TKIP;
1203 break;
1204 case WLAN_CIPHER_SUITE_CCMP:
1205 ktype = AR9170_ENC_ALG_AESCCMP;
1206 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
1207 break;
1208 default:
1209 return -EOPNOTSUPP;
1210 }
1211
1212 mutex_lock(&ar->mutex);
1213 if (cmd == SET_KEY) {
1214 if (!IS_STARTED(ar)) {
1215 err = -EOPNOTSUPP;
1216 goto out;
1217 }
1218
1219 if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
1220 sta = NULL;
1221
1222 i = 64 + key->keyidx;
1223 } else {
1224 for (i = 0; i < 64; i++)
1225 if (!(ar->usedkeys & BIT(i)))
1226 break;
1227 if (i == 64)
1228 goto err_softw;
1229 }
1230
1231 key->hw_key_idx = i;
1232
1233 err = carl9170_upload_key(ar, i, sta ? sta->addr : NULL,
1234 ktype, 0, key->key,
1235 min_t(u8, 16, key->keylen));
1236 if (err)
1237 goto out;
1238
1239 if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
1240 err = carl9170_upload_key(ar, i, sta ? sta->addr :
1241 NULL, ktype, 1,
1242 key->key + 16, 16);
1243 if (err)
1244 goto out;
1245
1246 /*
1247 * hardware is not capable generating MMIC
1248 * of fragmented frames!
1249 */
1250 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
1251 }
1252
1253 if (i < 64)
1254 ar->usedkeys |= BIT(i);
1255
1256 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
1257 } else {
1258 if (!IS_STARTED(ar)) {
1259 /* The device is gone... together with the key ;-) */
1260 err = 0;
1261 goto out;
1262 }
1263
1264 if (key->hw_key_idx < 64) {
1265 ar->usedkeys &= ~BIT(key->hw_key_idx);
1266 } else {
1267 err = carl9170_upload_key(ar, key->hw_key_idx, NULL,
1268 AR9170_ENC_ALG_NONE, 0,
1269 NULL, 0);
1270 if (err)
1271 goto out;
1272
1273 if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
1274 err = carl9170_upload_key(ar, key->hw_key_idx,
1275 NULL,
1276 AR9170_ENC_ALG_NONE,
1277 1, NULL, 0);
1278 if (err)
1279 goto out;
1280 }
1281
1282 }
1283
1284 err = carl9170_disable_key(ar, key->hw_key_idx);
1285 if (err)
1286 goto out;
1287 }
1288
1289out:
1290 mutex_unlock(&ar->mutex);
1291 return err;
1292
1293err_softw:
1294 if (!ar->rx_software_decryption) {
1295 ar->rx_software_decryption = true;
1296 carl9170_set_operating_mode(ar);
1297 }
1298 mutex_unlock(&ar->mutex);
1299 return -ENOSPC;
1300}
1301
1302static int carl9170_op_sta_add(struct ieee80211_hw *hw,
1303 struct ieee80211_vif *vif,
1304 struct ieee80211_sta *sta)
1305{
1306 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1307 unsigned int i;
1308
1309 atomic_set(&sta_info->pending_frames, 0);
1310
1311 if (sta->ht_cap.ht_supported) {
1312 if (sta->ht_cap.ampdu_density > 6) {
1313 /*
1314 * HW does support 16us AMPDU density.
1315 * No HT-Xmit for station.
1316 */
1317
1318 return 0;
1319 }
1320
1321 for (i = 0; i < ARRAY_SIZE(sta_info->agg); i++)
1322 RCU_INIT_POINTER(sta_info->agg[i], NULL);
1323
1324 sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
1325 sta_info->ht_sta = true;
1326 }
1327
1328 return 0;
1329}
1330
1331static int carl9170_op_sta_remove(struct ieee80211_hw *hw,
1332 struct ieee80211_vif *vif,
1333 struct ieee80211_sta *sta)
1334{
1335 struct ar9170 *ar = hw->priv;
1336 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1337 unsigned int i;
1338 bool cleanup = false;
1339
1340 if (sta->ht_cap.ht_supported) {
1341
1342 sta_info->ht_sta = false;
1343
1344 rcu_read_lock();
1345 for (i = 0; i < ARRAY_SIZE(sta_info->agg); i++) {
1346 struct carl9170_sta_tid *tid_info;
1347
1348 tid_info = rcu_dereference(sta_info->agg[i]);
1349 RCU_INIT_POINTER(sta_info->agg[i], NULL);
1350
1351 if (!tid_info)
1352 continue;
1353
1354 spin_lock_bh(&ar->tx_ampdu_list_lock);
1355 if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
1356 tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
1357 spin_unlock_bh(&ar->tx_ampdu_list_lock);
1358 cleanup = true;
1359 }
1360 rcu_read_unlock();
1361
1362 if (cleanup)
1363 carl9170_ampdu_gc(ar);
1364 }
1365
1366 return 0;
1367}
1368
1369static int carl9170_op_conf_tx(struct ieee80211_hw *hw,
1370 struct ieee80211_vif *vif, u16 queue,
1371 const struct ieee80211_tx_queue_params *param)
1372{
1373 struct ar9170 *ar = hw->priv;
1374 int ret;
1375
1376 mutex_lock(&ar->mutex);
1377 memcpy(&ar->edcf[ar9170_qmap[queue]], param, sizeof(*param));
1378 ret = carl9170_set_qos(ar);
1379 mutex_unlock(&ar->mutex);
1380 return ret;
1381}
1382
1383static void carl9170_ampdu_work(struct work_struct *work)
1384{
1385 struct ar9170 *ar = container_of(work, struct ar9170,
1386 ampdu_work);
1387
1388 if (!IS_STARTED(ar))
1389 return;
1390
1391 mutex_lock(&ar->mutex);
1392 carl9170_ampdu_gc(ar);
1393 mutex_unlock(&ar->mutex);
1394}
1395
1396static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
1397 struct ieee80211_vif *vif,
1398 struct ieee80211_ampdu_params *params)
1399{
1400 struct ieee80211_sta *sta = params->sta;
1401 enum ieee80211_ampdu_mlme_action action = params->action;
1402 u16 tid = params->tid;
1403 u16 *ssn = ¶ms->ssn;
1404 struct ar9170 *ar = hw->priv;
1405 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1406 struct carl9170_sta_tid *tid_info;
1407
1408 if (modparam_noht)
1409 return -EOPNOTSUPP;
1410
1411 switch (action) {
1412 case IEEE80211_AMPDU_TX_START:
1413 if (!sta_info->ht_sta)
1414 return -EOPNOTSUPP;
1415
1416 tid_info = kzalloc(sizeof(struct carl9170_sta_tid),
1417 GFP_ATOMIC);
1418 if (!tid_info)
1419 return -ENOMEM;
1420
1421 tid_info->hsn = tid_info->bsn = tid_info->snx = (*ssn);
1422 tid_info->state = CARL9170_TID_STATE_PROGRESS;
1423 tid_info->tid = tid;
1424 tid_info->max = sta_info->ampdu_max_len;
1425 tid_info->sta = sta;
1426 tid_info->vif = vif;
1427
1428 INIT_LIST_HEAD(&tid_info->list);
1429 INIT_LIST_HEAD(&tid_info->tmp_list);
1430 skb_queue_head_init(&tid_info->queue);
1431 spin_lock_init(&tid_info->lock);
1432
1433 spin_lock_bh(&ar->tx_ampdu_list_lock);
1434 ar->tx_ampdu_list_len++;
1435 list_add_tail_rcu(&tid_info->list, &ar->tx_ampdu_list);
1436 rcu_assign_pointer(sta_info->agg[tid], tid_info);
1437 spin_unlock_bh(&ar->tx_ampdu_list_lock);
1438
1439 return IEEE80211_AMPDU_TX_START_IMMEDIATE;
1440
1441 case IEEE80211_AMPDU_TX_STOP_CONT:
1442 case IEEE80211_AMPDU_TX_STOP_FLUSH:
1443 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
1444 rcu_read_lock();
1445 tid_info = rcu_dereference(sta_info->agg[tid]);
1446 if (tid_info) {
1447 spin_lock_bh(&ar->tx_ampdu_list_lock);
1448 if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
1449 tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
1450 spin_unlock_bh(&ar->tx_ampdu_list_lock);
1451 }
1452
1453 RCU_INIT_POINTER(sta_info->agg[tid], NULL);
1454 rcu_read_unlock();
1455
1456 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1457 ieee80211_queue_work(ar->hw, &ar->ampdu_work);
1458 break;
1459
1460 case IEEE80211_AMPDU_TX_OPERATIONAL:
1461 rcu_read_lock();
1462 tid_info = rcu_dereference(sta_info->agg[tid]);
1463
1464 sta_info->stats[tid].clear = true;
1465 sta_info->stats[tid].req = false;
1466
1467 if (tid_info) {
1468 bitmap_zero(tid_info->bitmap, CARL9170_BAW_SIZE);
1469 tid_info->state = CARL9170_TID_STATE_IDLE;
1470 }
1471 rcu_read_unlock();
1472
1473 if (WARN_ON_ONCE(!tid_info))
1474 return -EFAULT;
1475
1476 break;
1477
1478 case IEEE80211_AMPDU_RX_START:
1479 case IEEE80211_AMPDU_RX_STOP:
1480 /* Handled by hardware */
1481 break;
1482
1483 default:
1484 return -EOPNOTSUPP;
1485 }
1486
1487 return 0;
1488}
1489
1490#ifdef CONFIG_CARL9170_WPC
1491static int carl9170_register_wps_button(struct ar9170 *ar)
1492{
1493 struct input_dev *input;
1494 int err;
1495
1496 if (!(ar->features & CARL9170_WPS_BUTTON))
1497 return 0;
1498
1499 input = input_allocate_device();
1500 if (!input)
1501 return -ENOMEM;
1502
1503 snprintf(ar->wps.name, sizeof(ar->wps.name), "%s WPS Button",
1504 wiphy_name(ar->hw->wiphy));
1505
1506 snprintf(ar->wps.phys, sizeof(ar->wps.phys),
1507 "ieee80211/%s/input0", wiphy_name(ar->hw->wiphy));
1508
1509 input->name = ar->wps.name;
1510 input->phys = ar->wps.phys;
1511 input->id.bustype = BUS_USB;
1512 input->dev.parent = &ar->hw->wiphy->dev;
1513
1514 input_set_capability(input, EV_KEY, KEY_WPS_BUTTON);
1515
1516 err = input_register_device(input);
1517 if (err) {
1518 input_free_device(input);
1519 return err;
1520 }
1521
1522 ar->wps.pbc = input;
1523 return 0;
1524}
1525#endif /* CONFIG_CARL9170_WPC */
1526
1527#ifdef CONFIG_CARL9170_HWRNG
1528static int carl9170_rng_get(struct ar9170 *ar)
1529{
1530
1531#define RW (CARL9170_MAX_CMD_PAYLOAD_LEN / sizeof(u32))
1532#define RB (CARL9170_MAX_CMD_PAYLOAD_LEN)
1533
1534 static const __le32 rng_load[RW] = {
1535 [0 ... (RW - 1)] = cpu_to_le32(AR9170_RAND_REG_NUM)};
1536
1537 u32 buf[RW];
1538
1539 unsigned int i, off = 0, transfer, count;
1540 int err;
1541
1542 BUILD_BUG_ON(RB > CARL9170_MAX_CMD_PAYLOAD_LEN);
1543
1544 if (!IS_ACCEPTING_CMD(ar) || !ar->rng.initialized)
1545 return -EAGAIN;
1546
1547 count = ARRAY_SIZE(ar->rng.cache);
1548 while (count) {
1549 err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
1550 RB, (u8 *) rng_load,
1551 RB, (u8 *) buf);
1552 if (err)
1553 return err;
1554
1555 transfer = min_t(unsigned int, count, RW);
1556 for (i = 0; i < transfer; i++)
1557 ar->rng.cache[off + i] = buf[i];
1558
1559 off += transfer;
1560 count -= transfer;
1561 }
1562
1563 ar->rng.cache_idx = 0;
1564
1565#undef RW
1566#undef RB
1567 return 0;
1568}
1569
1570static int carl9170_rng_read(struct hwrng *rng, u32 *data)
1571{
1572 struct ar9170 *ar = (struct ar9170 *)rng->priv;
1573 int ret = -EIO;
1574
1575 mutex_lock(&ar->mutex);
1576 if (ar->rng.cache_idx >= ARRAY_SIZE(ar->rng.cache)) {
1577 ret = carl9170_rng_get(ar);
1578 if (ret) {
1579 mutex_unlock(&ar->mutex);
1580 return ret;
1581 }
1582 }
1583
1584 *data = ar->rng.cache[ar->rng.cache_idx++];
1585 mutex_unlock(&ar->mutex);
1586
1587 return sizeof(u16);
1588}
1589
1590static void carl9170_unregister_hwrng(struct ar9170 *ar)
1591{
1592 if (ar->rng.initialized) {
1593 hwrng_unregister(&ar->rng.rng);
1594 ar->rng.initialized = false;
1595 }
1596}
1597
1598static int carl9170_register_hwrng(struct ar9170 *ar)
1599{
1600 int err;
1601
1602 snprintf(ar->rng.name, ARRAY_SIZE(ar->rng.name),
1603 "%s_%s", KBUILD_MODNAME, wiphy_name(ar->hw->wiphy));
1604 ar->rng.rng.name = ar->rng.name;
1605 ar->rng.rng.data_read = carl9170_rng_read;
1606 ar->rng.rng.priv = (unsigned long)ar;
1607
1608 if (WARN_ON(ar->rng.initialized))
1609 return -EALREADY;
1610
1611 err = hwrng_register(&ar->rng.rng);
1612 if (err) {
1613 dev_err(&ar->udev->dev, "Failed to register the random "
1614 "number generator (%d)\n", err);
1615 return err;
1616 }
1617
1618 ar->rng.initialized = true;
1619
1620 err = carl9170_rng_get(ar);
1621 if (err) {
1622 carl9170_unregister_hwrng(ar);
1623 return err;
1624 }
1625
1626 return 0;
1627}
1628#endif /* CONFIG_CARL9170_HWRNG */
1629
1630static int carl9170_op_get_survey(struct ieee80211_hw *hw, int idx,
1631 struct survey_info *survey)
1632{
1633 struct ar9170 *ar = hw->priv;
1634 struct ieee80211_channel *chan;
1635 struct ieee80211_supported_band *band;
1636 int err, b, i;
1637
1638 chan = ar->channel;
1639 if (!chan)
1640 return -ENODEV;
1641
1642 if (idx == chan->hw_value) {
1643 mutex_lock(&ar->mutex);
1644 err = carl9170_update_survey(ar, false, true);
1645 mutex_unlock(&ar->mutex);
1646 if (err)
1647 return err;
1648 }
1649
1650 for (b = 0; b < NUM_NL80211_BANDS; b++) {
1651 band = ar->hw->wiphy->bands[b];
1652
1653 if (!band)
1654 continue;
1655
1656 for (i = 0; i < band->n_channels; i++) {
1657 if (band->channels[i].hw_value == idx) {
1658 chan = &band->channels[i];
1659 goto found;
1660 }
1661 }
1662 }
1663 return -ENOENT;
1664
1665found:
1666 memcpy(survey, &ar->survey[idx], sizeof(*survey));
1667
1668 survey->channel = chan;
1669 survey->filled = SURVEY_INFO_NOISE_DBM;
1670
1671 if (ar->channel == chan)
1672 survey->filled |= SURVEY_INFO_IN_USE;
1673
1674 if (ar->fw.hw_counters) {
1675 survey->filled |= SURVEY_INFO_TIME |
1676 SURVEY_INFO_TIME_BUSY |
1677 SURVEY_INFO_TIME_TX;
1678 }
1679
1680 return 0;
1681}
1682
1683static void carl9170_op_flush(struct ieee80211_hw *hw,
1684 struct ieee80211_vif *vif,
1685 u32 queues, bool drop)
1686{
1687 struct ar9170 *ar = hw->priv;
1688 unsigned int vid;
1689
1690 mutex_lock(&ar->mutex);
1691 for_each_set_bit(vid, &ar->vif_bitmap, ar->fw.vif_num)
1692 carl9170_flush_cab(ar, vid);
1693
1694 carl9170_flush(ar, drop);
1695 mutex_unlock(&ar->mutex);
1696}
1697
1698static int carl9170_op_get_stats(struct ieee80211_hw *hw,
1699 struct ieee80211_low_level_stats *stats)
1700{
1701 struct ar9170 *ar = hw->priv;
1702
1703 memset(stats, 0, sizeof(*stats));
1704 stats->dot11ACKFailureCount = ar->tx_ack_failures;
1705 stats->dot11FCSErrorCount = ar->tx_fcs_errors;
1706 return 0;
1707}
1708
1709static void carl9170_op_sta_notify(struct ieee80211_hw *hw,
1710 struct ieee80211_vif *vif,
1711 enum sta_notify_cmd cmd,
1712 struct ieee80211_sta *sta)
1713{
1714 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1715
1716 switch (cmd) {
1717 case STA_NOTIFY_SLEEP:
1718 sta_info->sleeping = true;
1719 if (atomic_read(&sta_info->pending_frames))
1720 ieee80211_sta_block_awake(hw, sta, true);
1721 break;
1722
1723 case STA_NOTIFY_AWAKE:
1724 sta_info->sleeping = false;
1725 break;
1726 }
1727}
1728
1729static bool carl9170_tx_frames_pending(struct ieee80211_hw *hw)
1730{
1731 struct ar9170 *ar = hw->priv;
1732
1733 return !!atomic_read(&ar->tx_total_queued);
1734}
1735
1736static const struct ieee80211_ops carl9170_ops = {
1737 .start = carl9170_op_start,
1738 .stop = carl9170_op_stop,
1739 .tx = carl9170_op_tx,
1740 .flush = carl9170_op_flush,
1741 .add_interface = carl9170_op_add_interface,
1742 .remove_interface = carl9170_op_remove_interface,
1743 .config = carl9170_op_config,
1744 .prepare_multicast = carl9170_op_prepare_multicast,
1745 .configure_filter = carl9170_op_configure_filter,
1746 .conf_tx = carl9170_op_conf_tx,
1747 .bss_info_changed = carl9170_op_bss_info_changed,
1748 .get_tsf = carl9170_op_get_tsf,
1749 .set_key = carl9170_op_set_key,
1750 .sta_add = carl9170_op_sta_add,
1751 .sta_remove = carl9170_op_sta_remove,
1752 .sta_notify = carl9170_op_sta_notify,
1753 .get_survey = carl9170_op_get_survey,
1754 .get_stats = carl9170_op_get_stats,
1755 .ampdu_action = carl9170_op_ampdu_action,
1756 .tx_frames_pending = carl9170_tx_frames_pending,
1757};
1758
1759void *carl9170_alloc(size_t priv_size)
1760{
1761 struct ieee80211_hw *hw;
1762 struct ar9170 *ar;
1763 struct sk_buff *skb;
1764 int i;
1765
1766 /*
1767 * this buffer is used for rx stream reconstruction.
1768 * Under heavy load this device (or the transport layer?)
1769 * tends to split the streams into separate rx descriptors.
1770 */
1771
1772 skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL);
1773 if (!skb)
1774 goto err_nomem;
1775
1776 hw = ieee80211_alloc_hw(priv_size, &carl9170_ops);
1777 if (!hw)
1778 goto err_nomem;
1779
1780 ar = hw->priv;
1781 ar->hw = hw;
1782 ar->rx_failover = skb;
1783
1784 memset(&ar->rx_plcp, 0, sizeof(struct ar9170_rx_head));
1785 ar->rx_has_plcp = false;
1786
1787 /*
1788 * Here's a hidden pitfall!
1789 *
1790 * All 4 AC queues work perfectly well under _legacy_ operation.
1791 * However as soon as aggregation is enabled, the traffic flow
1792 * gets very bumpy. Therefore we have to _switch_ to a
1793 * software AC with a single HW queue.
1794 */
1795 hw->queues = __AR9170_NUM_TXQ;
1796
1797 mutex_init(&ar->mutex);
1798 spin_lock_init(&ar->beacon_lock);
1799 spin_lock_init(&ar->cmd_lock);
1800 spin_lock_init(&ar->tx_stats_lock);
1801 spin_lock_init(&ar->tx_ampdu_list_lock);
1802 spin_lock_init(&ar->mem_lock);
1803 spin_lock_init(&ar->state_lock);
1804 atomic_set(&ar->pending_restarts, 0);
1805 ar->vifs = 0;
1806 for (i = 0; i < ar->hw->queues; i++) {
1807 skb_queue_head_init(&ar->tx_status[i]);
1808 skb_queue_head_init(&ar->tx_pending[i]);
1809
1810 INIT_LIST_HEAD(&ar->bar_list[i]);
1811 spin_lock_init(&ar->bar_list_lock[i]);
1812 }
1813 INIT_WORK(&ar->ps_work, carl9170_ps_work);
1814 INIT_WORK(&ar->ping_work, carl9170_ping_work);
1815 INIT_WORK(&ar->restart_work, carl9170_restart_work);
1816 INIT_WORK(&ar->ampdu_work, carl9170_ampdu_work);
1817 INIT_DELAYED_WORK(&ar->stat_work, carl9170_stat_work);
1818 INIT_DELAYED_WORK(&ar->tx_janitor, carl9170_tx_janitor);
1819 INIT_LIST_HEAD(&ar->tx_ampdu_list);
1820 rcu_assign_pointer(ar->tx_ampdu_iter,
1821 (struct carl9170_sta_tid *) &ar->tx_ampdu_list);
1822
1823 bitmap_zero(&ar->vif_bitmap, ar->fw.vif_num);
1824 INIT_LIST_HEAD(&ar->vif_list);
1825 init_completion(&ar->tx_flush);
1826
1827 /* firmware decides which modes we support */
1828 hw->wiphy->interface_modes = 0;
1829
1830 ieee80211_hw_set(hw, RX_INCLUDES_FCS);
1831 ieee80211_hw_set(hw, MFP_CAPABLE);
1832 ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
1833 ieee80211_hw_set(hw, SUPPORTS_PS);
1834 ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
1835 ieee80211_hw_set(hw, NEED_DTIM_BEFORE_ASSOC);
1836 ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
1837 ieee80211_hw_set(hw, SIGNAL_DBM);
1838 ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
1839
1840 if (!modparam_noht) {
1841 /*
1842 * see the comment above, why we allow the user
1843 * to disable HT by a module parameter.
1844 */
1845 ieee80211_hw_set(hw, AMPDU_AGGREGATION);
1846 }
1847
1848 hw->extra_tx_headroom = sizeof(struct _carl9170_tx_superframe);
1849 hw->sta_data_size = sizeof(struct carl9170_sta_info);
1850 hw->vif_data_size = sizeof(struct carl9170_vif_info);
1851
1852 hw->max_rates = CARL9170_TX_MAX_RATES;
1853 hw->max_rate_tries = CARL9170_TX_USER_RATE_TRIES;
1854
1855 for (i = 0; i < ARRAY_SIZE(ar->noise); i++)
1856 ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */
1857
1858 wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
1859
1860 return ar;
1861
1862err_nomem:
1863 kfree_skb(skb);
1864 return ERR_PTR(-ENOMEM);
1865}
1866
1867static int carl9170_read_eeprom(struct ar9170 *ar)
1868{
1869#define RW 8 /* number of words to read at once */
1870#define RB (sizeof(u32) * RW)
1871 u8 *eeprom = (void *)&ar->eeprom;
1872 __le32 offsets[RW];
1873 int i, j, err;
1874
1875 BUILD_BUG_ON(sizeof(ar->eeprom) & 3);
1876
1877 BUILD_BUG_ON(RB > CARL9170_MAX_CMD_LEN - 4);
1878#ifndef __CHECKER__
1879 /* don't want to handle trailing remains */
1880 BUILD_BUG_ON(sizeof(ar->eeprom) % RB);
1881#endif
1882
1883 for (i = 0; i < sizeof(ar->eeprom) / RB; i++) {
1884 for (j = 0; j < RW; j++)
1885 offsets[j] = cpu_to_le32(AR9170_EEPROM_START +
1886 RB * i + 4 * j);
1887
1888 err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
1889 RB, (u8 *) &offsets,
1890 RB, eeprom + RB * i);
1891 if (err)
1892 return err;
1893 }
1894
1895#undef RW
1896#undef RB
1897 return 0;
1898}
1899
1900static int carl9170_parse_eeprom(struct ar9170 *ar)
1901{
1902 struct ath_regulatory *regulatory = &ar->common.regulatory;
1903 unsigned int rx_streams, tx_streams, tx_params = 0;
1904 int bands = 0;
1905 int chans = 0;
1906
1907 if (ar->eeprom.length == cpu_to_le16(0xffff))
1908 return -ENODATA;
1909
1910 rx_streams = hweight8(ar->eeprom.rx_mask);
1911 tx_streams = hweight8(ar->eeprom.tx_mask);
1912
1913 if (rx_streams != tx_streams) {
1914 tx_params = IEEE80211_HT_MCS_TX_RX_DIFF;
1915
1916 WARN_ON(!(tx_streams >= 1 && tx_streams <=
1917 IEEE80211_HT_MCS_TX_MAX_STREAMS));
1918
1919 tx_params = (tx_streams - 1) <<
1920 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
1921
1922 carl9170_band_2GHz.ht_cap.mcs.tx_params |= tx_params;
1923 carl9170_band_5GHz.ht_cap.mcs.tx_params |= tx_params;
1924 }
1925
1926 if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) {
1927 ar->hw->wiphy->bands[NL80211_BAND_2GHZ] =
1928 &carl9170_band_2GHz;
1929 chans += carl9170_band_2GHz.n_channels;
1930 bands++;
1931 }
1932 if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) {
1933 ar->hw->wiphy->bands[NL80211_BAND_5GHZ] =
1934 &carl9170_band_5GHz;
1935 chans += carl9170_band_5GHz.n_channels;
1936 bands++;
1937 }
1938
1939 if (!bands)
1940 return -EINVAL;
1941
1942 ar->survey = kcalloc(chans, sizeof(struct survey_info), GFP_KERNEL);
1943 if (!ar->survey)
1944 return -ENOMEM;
1945 ar->num_channels = chans;
1946
1947 regulatory->current_rd = le16_to_cpu(ar->eeprom.reg_domain[0]);
1948
1949 /* second part of wiphy init */
1950 SET_IEEE80211_PERM_ADDR(ar->hw, ar->eeprom.mac_address);
1951
1952 return 0;
1953}
1954
1955static void carl9170_reg_notifier(struct wiphy *wiphy,
1956 struct regulatory_request *request)
1957{
1958 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
1959 struct ar9170 *ar = hw->priv;
1960
1961 ath_reg_notifier_apply(wiphy, request, &ar->common.regulatory);
1962}
1963
1964int carl9170_register(struct ar9170 *ar)
1965{
1966 struct ath_regulatory *regulatory = &ar->common.regulatory;
1967 int err = 0, i;
1968
1969 if (WARN_ON(ar->mem_bitmap))
1970 return -EINVAL;
1971
1972 ar->mem_bitmap = kcalloc(roundup(ar->fw.mem_blocks, BITS_PER_LONG),
1973 sizeof(unsigned long),
1974 GFP_KERNEL);
1975
1976 if (!ar->mem_bitmap)
1977 return -ENOMEM;
1978
1979 /* try to read EEPROM, init MAC addr */
1980 err = carl9170_read_eeprom(ar);
1981 if (err)
1982 return err;
1983
1984 err = carl9170_parse_eeprom(ar);
1985 if (err)
1986 return err;
1987
1988 err = ath_regd_init(regulatory, ar->hw->wiphy,
1989 carl9170_reg_notifier);
1990 if (err)
1991 return err;
1992
1993 if (modparam_noht) {
1994 carl9170_band_2GHz.ht_cap.ht_supported = false;
1995 carl9170_band_5GHz.ht_cap.ht_supported = false;
1996 }
1997
1998 for (i = 0; i < ar->fw.vif_num; i++) {
1999 ar->vif_priv[i].id = i;
2000 ar->vif_priv[i].vif = NULL;
2001 }
2002
2003 err = ieee80211_register_hw(ar->hw);
2004 if (err)
2005 return err;
2006
2007 /* mac80211 interface is now registered */
2008 ar->registered = true;
2009
2010 if (!ath_is_world_regd(regulatory))
2011 regulatory_hint(ar->hw->wiphy, regulatory->alpha2);
2012
2013#ifdef CONFIG_CARL9170_DEBUGFS
2014 carl9170_debugfs_register(ar);
2015#endif /* CONFIG_CARL9170_DEBUGFS */
2016
2017 err = carl9170_led_init(ar);
2018 if (err)
2019 goto err_unreg;
2020
2021#ifdef CONFIG_CARL9170_LEDS
2022 err = carl9170_led_register(ar);
2023 if (err)
2024 goto err_unreg;
2025#endif /* CONFIG_CARL9170_LEDS */
2026
2027#ifdef CONFIG_CARL9170_WPC
2028 err = carl9170_register_wps_button(ar);
2029 if (err)
2030 goto err_unreg;
2031#endif /* CONFIG_CARL9170_WPC */
2032
2033#ifdef CONFIG_CARL9170_HWRNG
2034 err = carl9170_register_hwrng(ar);
2035 if (err)
2036 goto err_unreg;
2037#endif /* CONFIG_CARL9170_HWRNG */
2038
2039 dev_info(&ar->udev->dev, "Atheros AR9170 is registered as '%s'\n",
2040 wiphy_name(ar->hw->wiphy));
2041
2042 return 0;
2043
2044err_unreg:
2045 carl9170_unregister(ar);
2046 return err;
2047}
2048
2049void carl9170_unregister(struct ar9170 *ar)
2050{
2051 if (!ar->registered)
2052 return;
2053
2054 ar->registered = false;
2055
2056#ifdef CONFIG_CARL9170_LEDS
2057 carl9170_led_unregister(ar);
2058#endif /* CONFIG_CARL9170_LEDS */
2059
2060#ifdef CONFIG_CARL9170_DEBUGFS
2061 carl9170_debugfs_unregister(ar);
2062#endif /* CONFIG_CARL9170_DEBUGFS */
2063
2064#ifdef CONFIG_CARL9170_WPC
2065 if (ar->wps.pbc) {
2066 input_unregister_device(ar->wps.pbc);
2067 ar->wps.pbc = NULL;
2068 }
2069#endif /* CONFIG_CARL9170_WPC */
2070
2071#ifdef CONFIG_CARL9170_HWRNG
2072 carl9170_unregister_hwrng(ar);
2073#endif /* CONFIG_CARL9170_HWRNG */
2074
2075 carl9170_cancel_worker(ar);
2076 cancel_work_sync(&ar->restart_work);
2077
2078 ieee80211_unregister_hw(ar->hw);
2079}
2080
2081void carl9170_free(struct ar9170 *ar)
2082{
2083 WARN_ON(ar->registered);
2084 WARN_ON(IS_INITIALIZED(ar));
2085
2086 kfree_skb(ar->rx_failover);
2087 ar->rx_failover = NULL;
2088
2089 kfree(ar->mem_bitmap);
2090 ar->mem_bitmap = NULL;
2091
2092 kfree(ar->survey);
2093 ar->survey = NULL;
2094
2095 mutex_destroy(&ar->mutex);
2096
2097 ieee80211_free_hw(ar->hw);
2098}
1/*
2 * Atheros CARL9170 driver
3 *
4 * mac80211 interaction code
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING. If not, see
21 * http://www.gnu.org/licenses/.
22 *
23 * This file incorporates work covered by the following copyright and
24 * permission notice:
25 * Copyright (c) 2007-2008 Atheros Communications, Inc.
26 *
27 * Permission to use, copy, modify, and/or distribute this software for any
28 * purpose with or without fee is hereby granted, provided that the above
29 * copyright notice and this permission notice appear in all copies.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
38 */
39
40#include <linux/slab.h>
41#include <linux/module.h>
42#include <linux/etherdevice.h>
43#include <linux/random.h>
44#include <net/mac80211.h>
45#include <net/cfg80211.h>
46#include "hw.h"
47#include "carl9170.h"
48#include "cmd.h"
49
50static bool modparam_nohwcrypt;
51module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
52MODULE_PARM_DESC(nohwcrypt, "Disable hardware crypto offload.");
53
54int modparam_noht;
55module_param_named(noht, modparam_noht, int, S_IRUGO);
56MODULE_PARM_DESC(noht, "Disable MPDU aggregation.");
57
58#define RATE(_bitrate, _hw_rate, _txpidx, _flags) { \
59 .bitrate = (_bitrate), \
60 .flags = (_flags), \
61 .hw_value = (_hw_rate) | (_txpidx) << 4, \
62}
63
64struct ieee80211_rate __carl9170_ratetable[] = {
65 RATE(10, 0, 0, 0),
66 RATE(20, 1, 1, IEEE80211_RATE_SHORT_PREAMBLE),
67 RATE(55, 2, 2, IEEE80211_RATE_SHORT_PREAMBLE),
68 RATE(110, 3, 3, IEEE80211_RATE_SHORT_PREAMBLE),
69 RATE(60, 0xb, 0, 0),
70 RATE(90, 0xf, 0, 0),
71 RATE(120, 0xa, 0, 0),
72 RATE(180, 0xe, 0, 0),
73 RATE(240, 0x9, 0, 0),
74 RATE(360, 0xd, 1, 0),
75 RATE(480, 0x8, 2, 0),
76 RATE(540, 0xc, 3, 0),
77};
78#undef RATE
79
80#define carl9170_g_ratetable (__carl9170_ratetable + 0)
81#define carl9170_g_ratetable_size 12
82#define carl9170_a_ratetable (__carl9170_ratetable + 4)
83#define carl9170_a_ratetable_size 8
84
85/*
86 * NB: The hw_value is used as an index into the carl9170_phy_freq_params
87 * array in phy.c so that we don't have to do frequency lookups!
88 */
89#define CHAN(_freq, _idx) { \
90 .center_freq = (_freq), \
91 .hw_value = (_idx), \
92 .max_power = 18, /* XXX */ \
93}
94
95static struct ieee80211_channel carl9170_2ghz_chantable[] = {
96 CHAN(2412, 0),
97 CHAN(2417, 1),
98 CHAN(2422, 2),
99 CHAN(2427, 3),
100 CHAN(2432, 4),
101 CHAN(2437, 5),
102 CHAN(2442, 6),
103 CHAN(2447, 7),
104 CHAN(2452, 8),
105 CHAN(2457, 9),
106 CHAN(2462, 10),
107 CHAN(2467, 11),
108 CHAN(2472, 12),
109 CHAN(2484, 13),
110};
111
112static struct ieee80211_channel carl9170_5ghz_chantable[] = {
113 CHAN(4920, 14),
114 CHAN(4940, 15),
115 CHAN(4960, 16),
116 CHAN(4980, 17),
117 CHAN(5040, 18),
118 CHAN(5060, 19),
119 CHAN(5080, 20),
120 CHAN(5180, 21),
121 CHAN(5200, 22),
122 CHAN(5220, 23),
123 CHAN(5240, 24),
124 CHAN(5260, 25),
125 CHAN(5280, 26),
126 CHAN(5300, 27),
127 CHAN(5320, 28),
128 CHAN(5500, 29),
129 CHAN(5520, 30),
130 CHAN(5540, 31),
131 CHAN(5560, 32),
132 CHAN(5580, 33),
133 CHAN(5600, 34),
134 CHAN(5620, 35),
135 CHAN(5640, 36),
136 CHAN(5660, 37),
137 CHAN(5680, 38),
138 CHAN(5700, 39),
139 CHAN(5745, 40),
140 CHAN(5765, 41),
141 CHAN(5785, 42),
142 CHAN(5805, 43),
143 CHAN(5825, 44),
144 CHAN(5170, 45),
145 CHAN(5190, 46),
146 CHAN(5210, 47),
147 CHAN(5230, 48),
148};
149#undef CHAN
150
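/*
 * Shared HT capabilities for both bands: the rx_mask below advertises
 * two spatial streams (MCS 0-15) plus MCS 32, and rx_highest matches
 * 300 Mbit/s (two streams, 40 MHz, short GI).
 */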
151#define CARL9170_HT_CAP \
152{ \
153 .ht_supported = true, \
154 .cap = IEEE80211_HT_CAP_MAX_AMSDU | \
155 IEEE80211_HT_CAP_SUP_WIDTH_20_40 | \
156 IEEE80211_HT_CAP_SGI_40 | \
157 IEEE80211_HT_CAP_DSSSCCK40 | \
158 IEEE80211_HT_CAP_SM_PS, \
159 .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K, \
160 .ampdu_density = IEEE80211_HT_MPDU_DENSITY_8, \
161 .mcs = { \
162 .rx_mask = { 0xff, 0xff, 0, 0, 0x1, 0, 0, 0, 0, 0, }, \
163 .rx_highest = cpu_to_le16(300), \
164 .tx_params = IEEE80211_HT_MCS_TX_DEFINED, \
165 }, \
166}
167
168static struct ieee80211_supported_band carl9170_band_2GHz = {
169 .channels = carl9170_2ghz_chantable,
170 .n_channels = ARRAY_SIZE(carl9170_2ghz_chantable),
171 .bitrates = carl9170_g_ratetable,
172 .n_bitrates = carl9170_g_ratetable_size,
173 .ht_cap = CARL9170_HT_CAP,
174};
175
176static struct ieee80211_supported_band carl9170_band_5GHz = {
177 .channels = carl9170_5ghz_chantable,
178 .n_channels = ARRAY_SIZE(carl9170_5ghz_chantable),
179 .bitrates = carl9170_a_ratetable,
180 .n_bitrates = carl9170_a_ratetable_size,
181 .ht_cap = CARL9170_HT_CAP,
182};
183
184static void carl9170_ampdu_gc(struct ar9170 *ar)
185{
186 struct carl9170_sta_tid *tid_info;
187 LIST_HEAD(tid_gc);
188
189 rcu_read_lock();
190 list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
191 spin_lock_bh(&ar->tx_ampdu_list_lock);
192 if (tid_info->state == CARL9170_TID_STATE_SHUTDOWN) {
193 tid_info->state = CARL9170_TID_STATE_KILLED;
194 list_del_rcu(&tid_info->list);
195 ar->tx_ampdu_list_len--;
196 list_add_tail(&tid_info->tmp_list, &tid_gc);
197 }
198 spin_unlock_bh(&ar->tx_ampdu_list_lock);
199
200 }
201 rcu_assign_pointer(ar->tx_ampdu_iter, tid_info);
202 rcu_read_unlock();
203
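	/*
	 * Wait for all current RCU readers to finish before the tid_info
	 * entries collected on the gc list are freed below.
	 */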
204 synchronize_rcu();
205
206 while (!list_empty(&tid_gc)) {
207 struct sk_buff *skb;
208 tid_info = list_first_entry(&tid_gc, struct carl9170_sta_tid,
209 tmp_list);
210
211 while ((skb = __skb_dequeue(&tid_info->queue)))
212 carl9170_tx_status(ar, skb, false);
213
214 list_del_init(&tid_info->tmp_list);
215 kfree(tid_info);
216 }
217}
218
219static void carl9170_flush(struct ar9170 *ar, bool drop_queued)
220{
221 if (drop_queued) {
222 int i;
223
224 /*
225 * We can only drop frames which have not been uploaded
226 * to the device yet.
227 */
228
229 for (i = 0; i < ar->hw->queues; i++) {
230 struct sk_buff *skb;
231
232 while ((skb = skb_dequeue(&ar->tx_pending[i]))) {
233 struct ieee80211_tx_info *info;
234
235 info = IEEE80211_SKB_CB(skb);
236 if (info->flags & IEEE80211_TX_CTL_AMPDU)
237 atomic_dec(&ar->tx_ampdu_upload);
238
239 carl9170_tx_status(ar, skb, false);
240 }
241 }
242 }
243
	/* Wait for all other outstanding frames to time out. */
245 if (atomic_read(&ar->tx_total_queued))
246 WARN_ON(wait_for_completion_timeout(&ar->tx_flush, HZ) == 0);
247}
248
249static void carl9170_flush_ba(struct ar9170 *ar)
250{
251 struct sk_buff_head free;
252 struct carl9170_sta_tid *tid_info;
253 struct sk_buff *skb;
254
255 __skb_queue_head_init(&free);
256
257 rcu_read_lock();
258 spin_lock_bh(&ar->tx_ampdu_list_lock);
259 list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
260 if (tid_info->state > CARL9170_TID_STATE_SUSPEND) {
261 tid_info->state = CARL9170_TID_STATE_SUSPEND;
262
263 spin_lock(&tid_info->lock);
264 while ((skb = __skb_dequeue(&tid_info->queue)))
265 __skb_queue_tail(&free, skb);
266 spin_unlock(&tid_info->lock);
267 }
268 }
269 spin_unlock_bh(&ar->tx_ampdu_list_lock);
270 rcu_read_unlock();
271
272 while ((skb = __skb_dequeue(&free)))
273 carl9170_tx_status(ar, skb, false);
274}
275
276static void carl9170_zap_queues(struct ar9170 *ar)
277{
278 struct carl9170_vif_info *cvif;
279 unsigned int i;
280
281 carl9170_ampdu_gc(ar);
282
283 carl9170_flush_ba(ar);
284 carl9170_flush(ar, true);
285
286 for (i = 0; i < ar->hw->queues; i++) {
287 spin_lock_bh(&ar->tx_status[i].lock);
288 while (!skb_queue_empty(&ar->tx_status[i])) {
289 struct sk_buff *skb;
290
291 skb = skb_peek(&ar->tx_status[i]);
292 carl9170_tx_get_skb(skb);
293 spin_unlock_bh(&ar->tx_status[i].lock);
294 carl9170_tx_drop(ar, skb);
295 spin_lock_bh(&ar->tx_status[i].lock);
296 carl9170_tx_put_skb(skb);
297 }
298 spin_unlock_bh(&ar->tx_status[i].lock);
299 }
300
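	/*
	 * Compile-time sanity checks for the queue limits: the soft limit
	 * must allow at least one frame, the hard limit may not be smaller
	 * than the soft limit, and the hard limit has to fit into the
	 * block-ack window.
	 */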
301 BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_SOFT < 1);
302 BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD < CARL9170_NUM_TX_LIMIT_SOFT);
303 BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD >= CARL9170_BAW_BITS);
304
	/* reinitialize queue statistics */
306 memset(&ar->tx_stats, 0, sizeof(ar->tx_stats));
307 for (i = 0; i < ar->hw->queues; i++)
308 ar->tx_stats[i].limit = CARL9170_NUM_TX_LIMIT_HARD;
309
310 for (i = 0; i < DIV_ROUND_UP(ar->fw.mem_blocks, BITS_PER_LONG); i++)
311 ar->mem_bitmap[i] = 0;
312
313 rcu_read_lock();
314 list_for_each_entry_rcu(cvif, &ar->vif_list, list) {
315 spin_lock_bh(&ar->beacon_lock);
316 dev_kfree_skb_any(cvif->beacon);
317 cvif->beacon = NULL;
318 spin_unlock_bh(&ar->beacon_lock);
319 }
320 rcu_read_unlock();
321
322 atomic_set(&ar->tx_ampdu_upload, 0);
323 atomic_set(&ar->tx_ampdu_scheduler, 0);
324 atomic_set(&ar->tx_total_pending, 0);
325 atomic_set(&ar->tx_total_queued, 0);
326 atomic_set(&ar->mem_free_blocks, ar->fw.mem_blocks);
327}
328
329#define CARL9170_FILL_QUEUE(queue, ai_fs, cwmin, cwmax, _txop) \
330do { \
331 queue.aifs = ai_fs; \
332 queue.cw_min = cwmin; \
333 queue.cw_max = cwmax; \
334 queue.txop = _txop; \
335} while (0)
336
337static int carl9170_op_start(struct ieee80211_hw *hw)
338{
339 struct ar9170 *ar = hw->priv;
340 int err, i;
341
342 mutex_lock(&ar->mutex);
343
344 carl9170_zap_queues(ar);
345
346 /* reset QoS defaults */
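	/*
	 * Per-AC AIFS, CWmin, CWmax and TXOP values, roughly matching the
	 * 802.11 default EDCA parameter set (TXOP presumably in the usual
	 * 32us units, i.e. ~1.5ms for VO and ~3ms for VI).
	 */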
347 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VO], 2, 3, 7, 47);
348 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VI], 2, 7, 15, 94);
349 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BE], 3, 15, 1023, 0);
350 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BK], 7, 15, 1023, 0);
351 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_SPECIAL], 2, 3, 7, 0);
352
353 ar->current_factor = ar->current_density = -1;
354 /* "The first key is unique." */
355 ar->usedkeys = 1;
356 ar->filter_state = 0;
357 ar->ps.last_action = jiffies;
358 ar->ps.last_slept = jiffies;
359 ar->erp_mode = CARL9170_ERP_AUTO;
360
	/* Disable hw crypto offload whenever the module parameter
	 * nohwcrypt is set or the firmware itself asks for offloading
	 * to be disabled.
	 */
364 ar->disable_offload = modparam_nohwcrypt |
365 ar->fw.disable_offload_fw;
366 ar->rx_software_decryption = ar->disable_offload;
367
368 for (i = 0; i < ar->hw->queues; i++) {
369 ar->queue_stop_timeout[i] = jiffies;
370 ar->max_queue_stop_timeout[i] = 0;
371 }
372
373 atomic_set(&ar->mem_allocs, 0);
374
375 err = carl9170_usb_open(ar);
376 if (err)
377 goto out;
378
379 err = carl9170_init_mac(ar);
380 if (err)
381 goto out;
382
383 err = carl9170_set_qos(ar);
384 if (err)
385 goto out;
386
387 if (ar->fw.rx_filter) {
388 err = carl9170_rx_filter(ar, CARL9170_RX_FILTER_OTHER_RA |
389 CARL9170_RX_FILTER_CTL_OTHER | CARL9170_RX_FILTER_BAD);
390 if (err)
391 goto out;
392 }
393
394 err = carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER,
395 AR9170_DMA_TRIGGER_RXQ);
396 if (err)
397 goto out;
398
399 /* Clear key-cache */
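	/*
	 * Each of the AR9170_CAM_MAX_USER + 4 entries has two key slots
	 * (the second one is used for the TKIP MIC key); the four extra
	 * entries past AR9170_CAM_MAX_USER appear to hold the default
	 * (group) keys.
	 */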
400 for (i = 0; i < AR9170_CAM_MAX_USER + 4; i++) {
401 err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
402 0, NULL, 0);
403 if (err)
404 goto out;
405
406 err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
407 1, NULL, 0);
408 if (err)
409 goto out;
410
411 if (i < AR9170_CAM_MAX_USER) {
412 err = carl9170_disable_key(ar, i);
413 if (err)
414 goto out;
415 }
416 }
417
418 carl9170_set_state_when(ar, CARL9170_IDLE, CARL9170_STARTED);
419
420 ieee80211_queue_delayed_work(ar->hw, &ar->stat_work,
421 round_jiffies(msecs_to_jiffies(CARL9170_STAT_WORK)));
422
423 ieee80211_wake_queues(ar->hw);
424 err = 0;
425
426out:
427 mutex_unlock(&ar->mutex);
428 return err;
429}
430
431static void carl9170_cancel_worker(struct ar9170 *ar)
432{
433 cancel_delayed_work_sync(&ar->stat_work);
434 cancel_delayed_work_sync(&ar->tx_janitor);
435#ifdef CONFIG_CARL9170_LEDS
436 cancel_delayed_work_sync(&ar->led_work);
437#endif /* CONFIG_CARL9170_LEDS */
438 cancel_work_sync(&ar->ps_work);
439 cancel_work_sync(&ar->ping_work);
440 cancel_work_sync(&ar->ampdu_work);
441}
442
443static void carl9170_op_stop(struct ieee80211_hw *hw)
444{
445 struct ar9170 *ar = hw->priv;
446
447 carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);
448
449 ieee80211_stop_queues(ar->hw);
450
451 mutex_lock(&ar->mutex);
452 if (IS_ACCEPTING_CMD(ar)) {
453 RCU_INIT_POINTER(ar->beacon_iter, NULL);
454
455 carl9170_led_set_state(ar, 0);
456
457 /* stop DMA */
458 carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER, 0);
459 carl9170_usb_stop(ar);
460 }
461
462 carl9170_zap_queues(ar);
463 mutex_unlock(&ar->mutex);
464
465 carl9170_cancel_worker(ar);
466}
467
468static void carl9170_restart_work(struct work_struct *work)
469{
470 struct ar9170 *ar = container_of(work, struct ar9170,
471 restart_work);
472 int err = -EIO;
473
474 ar->usedkeys = 0;
475 ar->filter_state = 0;
476 carl9170_cancel_worker(ar);
477
478 mutex_lock(&ar->mutex);
479 if (!ar->force_usb_reset) {
480 err = carl9170_usb_restart(ar);
481 if (net_ratelimit()) {
482 if (err)
483 dev_err(&ar->udev->dev, "Failed to restart device (%d).\n", err);
484 else
485 dev_info(&ar->udev->dev, "device restarted successfully.\n");
486 }
487 }
488 carl9170_zap_queues(ar);
489 mutex_unlock(&ar->mutex);
490
491 if (!err && !ar->force_usb_reset) {
492 ar->restart_counter++;
493 atomic_set(&ar->pending_restarts, 0);
494
495 ieee80211_restart_hw(ar->hw);
496 } else {
497 /*
498 * The reset was unsuccessful and the device seems to
499 * be dead. But there's still one option: a low-level
500 * usb subsystem reset...
501 */
502
503 carl9170_usb_reset(ar);
504 }
505}
506
507void carl9170_restart(struct ar9170 *ar, const enum carl9170_restart_reasons r)
508{
509 carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);
510
	/*
	 * Sometimes, an error can trigger several different reset events.
	 * Ignoring these *surplus* reset events prevents the device from
	 * being killed again right after it has recovered.
	 */
516 if (atomic_inc_return(&ar->pending_restarts) > 1) {
517 dev_dbg(&ar->udev->dev, "ignoring restart (%d)\n", r);
518 return;
519 }
520
521 ieee80211_stop_queues(ar->hw);
522
523 dev_err(&ar->udev->dev, "restart device (%d)\n", r);
524
525 if (!WARN_ON(r == CARL9170_RR_NO_REASON) ||
526 !WARN_ON(r >= __CARL9170_RR_LAST))
527 ar->last_reason = r;
528
529 if (!ar->registered)
530 return;
531
532 if (!IS_ACCEPTING_CMD(ar) || ar->needs_full_reset)
533 ar->force_usb_reset = true;
534
535 ieee80211_queue_work(ar->hw, &ar->restart_work);
536
	/*
	 * At this point, the device instance might have vanished or been
	 * disabled. So, don't put any code here that accesses the ar9170
	 * struct without proper protection.
	 */
542}
543
544static void carl9170_ping_work(struct work_struct *work)
545{
546 struct ar9170 *ar = container_of(work, struct ar9170, ping_work);
547 int err;
548
549 if (!IS_STARTED(ar))
550 return;
551
552 mutex_lock(&ar->mutex);
553 err = carl9170_echo_test(ar, 0xdeadbeef);
554 if (err)
555 carl9170_restart(ar, CARL9170_RR_UNRESPONSIVE_DEVICE);
556 mutex_unlock(&ar->mutex);
557}
558
559static int carl9170_init_interface(struct ar9170 *ar,
560 struct ieee80211_vif *vif)
561{
562 struct ath_common *common = &ar->common;
563 int err;
564
565 if (!vif) {
566 WARN_ON_ONCE(IS_STARTED(ar));
567 return 0;
568 }
569
570 memcpy(common->macaddr, vif->addr, ETH_ALEN);
571
	/* We have to fall back to software crypto whenever the user
	 * chooses to participate in an IBSS. HW offload for IBSS RSN
	 * is not supported by this driver.
	 *
	 * NOTE: If the previous main interface has already disabled
	 * hw crypto offload, we have to keep this previous
	 * disable_offload setting as it was. Although ideally, we
	 * should notify mac80211 and tell it to forget about any
	 * HW crypto offload for now.
	 */
582 ar->disable_offload |= ((vif->type != NL80211_IFTYPE_STATION) &&
583 (vif->type != NL80211_IFTYPE_AP));
584
585 /* While the driver supports HW offload in a single
586 * P2P client configuration, it doesn't support HW
	 * offload in the favoured, concurrent P2P GO+CLIENT
588 * configuration. Hence, HW offload will always be
589 * disabled for P2P.
590 */
591 ar->disable_offload |= vif->p2p;
592
593 ar->rx_software_decryption = ar->disable_offload;
594
595 err = carl9170_set_operating_mode(ar);
596 return err;
597}
598
599static int carl9170_op_add_interface(struct ieee80211_hw *hw,
600 struct ieee80211_vif *vif)
601{
602 struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
603 struct ieee80211_vif *main_vif, *old_main = NULL;
604 struct ar9170 *ar = hw->priv;
605 int vif_id = -1, err = 0;
606
607 mutex_lock(&ar->mutex);
608 rcu_read_lock();
609 if (vif_priv->active) {
610 /*
		 * Skip the interface structure initialization
		 * if the vif survived the _restart call.
613 */
614 vif_id = vif_priv->id;
615 vif_priv->enable_beacon = false;
616
617 spin_lock_bh(&ar->beacon_lock);
618 dev_kfree_skb_any(vif_priv->beacon);
619 vif_priv->beacon = NULL;
620 spin_unlock_bh(&ar->beacon_lock);
621
622 goto init;
623 }
624
	/* Because the AR9170 HW's MAC doesn't provide full support for
	 * multiple, independent interfaces [of different operation modes],
	 * we have to select ONE main interface [main mode of HW], but we
	 * can have multiple slaves [AKA: entries in the ACK-table].
	 *
	 * The first (from HEAD/TOP) interface in the ar->vif_list is
	 * always the main intf. All following intfs in this list
	 * are considered to be slave intfs.
	 */
634 main_vif = carl9170_get_main_vif(ar);
635
636 if (main_vif) {
637 switch (main_vif->type) {
638 case NL80211_IFTYPE_STATION:
639 if (vif->type == NL80211_IFTYPE_STATION)
640 break;
641
642 /* P2P GO [master] use-case
643 * Because the P2P GO station is selected dynamically
			 * by all participating peers of a Wi-Fi Direct network,
			 * the driver has to be able to change the main interface
646 * operating mode on the fly.
647 */
648 if (main_vif->p2p && vif->p2p &&
649 vif->type == NL80211_IFTYPE_AP) {
650 old_main = main_vif;
651 break;
652 }
653
654 err = -EBUSY;
655 rcu_read_unlock();
656
657 goto unlock;
658
659 case NL80211_IFTYPE_MESH_POINT:
660 case NL80211_IFTYPE_AP:
661 if ((vif->type == NL80211_IFTYPE_STATION) ||
662 (vif->type == NL80211_IFTYPE_WDS) ||
663 (vif->type == NL80211_IFTYPE_AP) ||
664 (vif->type == NL80211_IFTYPE_MESH_POINT))
665 break;
666
667 err = -EBUSY;
668 rcu_read_unlock();
669 goto unlock;
670
671 default:
672 rcu_read_unlock();
673 goto unlock;
674 }
675 }
676
677 vif_id = bitmap_find_free_region(&ar->vif_bitmap, ar->fw.vif_num, 0);
678
679 if (vif_id < 0) {
680 rcu_read_unlock();
681
682 err = -ENOSPC;
683 goto unlock;
684 }
685
686 BUG_ON(ar->vif_priv[vif_id].id != vif_id);
687
688 vif_priv->active = true;
689 vif_priv->id = vif_id;
690 vif_priv->enable_beacon = false;
691 ar->vifs++;
692 if (old_main) {
		/* We end up here if the main interface is being replaced.
		 * Put the new main interface at the HEAD of the list and the
		 * previous interface will automatically become second in line.
696 */
697 list_add_rcu(&vif_priv->list, &ar->vif_list);
698 } else {
		/* Add the new interface. If the list is empty, it will become
		 * the main interface, otherwise it will be a slave.
701 */
702 list_add_tail_rcu(&vif_priv->list, &ar->vif_list);
703 }
704 rcu_assign_pointer(ar->vif_priv[vif_id].vif, vif);
705
706init:
707 main_vif = carl9170_get_main_vif(ar);
708
709 if (main_vif == vif) {
710 rcu_assign_pointer(ar->beacon_iter, vif_priv);
711 rcu_read_unlock();
712
713 if (old_main) {
714 struct carl9170_vif_info *old_main_priv =
715 (void *) old_main->drv_priv;
716 /* downgrade old main intf to slave intf.
717 * NOTE: We are no longer under rcu_read_lock.
718 * But we are still holding ar->mutex, so the
719 * vif data [id, addr] is safe.
720 */
721 err = carl9170_mod_virtual_mac(ar, old_main_priv->id,
722 old_main->addr);
723 if (err)
724 goto unlock;
725 }
726
727 err = carl9170_init_interface(ar, vif);
728 if (err)
729 goto unlock;
730 } else {
731 rcu_read_unlock();
732 err = carl9170_mod_virtual_mac(ar, vif_id, vif->addr);
733
734 if (err)
735 goto unlock;
736 }
737
738 if (ar->fw.tx_seq_table) {
739 err = carl9170_write_reg(ar, ar->fw.tx_seq_table + vif_id * 4,
740 0);
741 if (err)
742 goto unlock;
743 }
744
745unlock:
746 if (err && (vif_id >= 0)) {
747 vif_priv->active = false;
748 bitmap_release_region(&ar->vif_bitmap, vif_id, 0);
749 ar->vifs--;
750 RCU_INIT_POINTER(ar->vif_priv[vif_id].vif, NULL);
751 list_del_rcu(&vif_priv->list);
752 mutex_unlock(&ar->mutex);
753 synchronize_rcu();
754 } else {
755 if (ar->vifs > 1)
756 ar->ps.off_override |= PS_OFF_VIF;
757
758 mutex_unlock(&ar->mutex);
759 }
760
761 return err;
762}
763
764static void carl9170_op_remove_interface(struct ieee80211_hw *hw,
765 struct ieee80211_vif *vif)
766{
767 struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
768 struct ieee80211_vif *main_vif;
769 struct ar9170 *ar = hw->priv;
770 unsigned int id;
771
772 mutex_lock(&ar->mutex);
773
774 if (WARN_ON_ONCE(!vif_priv->active))
775 goto unlock;
776
777 ar->vifs--;
778
779 rcu_read_lock();
780 main_vif = carl9170_get_main_vif(ar);
781
782 id = vif_priv->id;
783
784 vif_priv->active = false;
785 WARN_ON(vif_priv->enable_beacon);
786 vif_priv->enable_beacon = false;
787 list_del_rcu(&vif_priv->list);
788 RCU_INIT_POINTER(ar->vif_priv[id].vif, NULL);
789
790 if (vif == main_vif) {
791 rcu_read_unlock();
792
793 if (ar->vifs) {
794 WARN_ON(carl9170_init_interface(ar,
795 carl9170_get_main_vif(ar)));
796 } else {
797 carl9170_set_operating_mode(ar);
798 }
799 } else {
800 rcu_read_unlock();
801
802 WARN_ON(carl9170_mod_virtual_mac(ar, id, NULL));
803 }
804
805 carl9170_update_beacon(ar, false);
806 carl9170_flush_cab(ar, id);
807
808 spin_lock_bh(&ar->beacon_lock);
809 dev_kfree_skb_any(vif_priv->beacon);
810 vif_priv->beacon = NULL;
811 spin_unlock_bh(&ar->beacon_lock);
812
813 bitmap_release_region(&ar->vif_bitmap, id, 0);
814
815 carl9170_set_beacon_timers(ar);
816
817 if (ar->vifs == 1)
818 ar->ps.off_override &= ~PS_OFF_VIF;
819
820unlock:
821 mutex_unlock(&ar->mutex);
822
823 synchronize_rcu();
824}
825
826void carl9170_ps_check(struct ar9170 *ar)
827{
828 ieee80211_queue_work(ar->hw, &ar->ps_work);
829}
830
831/* caller must hold ar->mutex */
832static int carl9170_ps_update(struct ar9170 *ar)
833{
834 bool ps = false;
835 int err = 0;
836
837 if (!ar->ps.off_override)
838 ps = (ar->hw->conf.flags & IEEE80211_CONF_PS);
839
840 if (ps != ar->ps.state) {
841 err = carl9170_powersave(ar, ps);
842 if (err)
843 return err;
844
845 if (ar->ps.state && !ps) {
846 ar->ps.sleep_ms = jiffies_to_msecs(jiffies -
847 ar->ps.last_action);
848 }
849
850 if (ps)
851 ar->ps.last_slept = jiffies;
852
853 ar->ps.last_action = jiffies;
854 ar->ps.state = ps;
855 }
856
857 return 0;
858}
859
860static void carl9170_ps_work(struct work_struct *work)
861{
862 struct ar9170 *ar = container_of(work, struct ar9170,
863 ps_work);
864 mutex_lock(&ar->mutex);
865 if (IS_STARTED(ar))
866 WARN_ON_ONCE(carl9170_ps_update(ar) != 0);
867 mutex_unlock(&ar->mutex);
868}
869
870static int carl9170_update_survey(struct ar9170 *ar, bool flush, bool noise)
871{
872 int err;
873
874 if (noise) {
875 err = carl9170_get_noisefloor(ar);
876 if (err)
877 return err;
878 }
879
880 if (ar->fw.hw_counters) {
881 err = carl9170_collect_tally(ar);
882 if (err)
883 return err;
884 }
885
886 if (flush)
887 memset(&ar->tally, 0, sizeof(ar->tally));
888
889 return 0;
890}
891
892static void carl9170_stat_work(struct work_struct *work)
893{
894 struct ar9170 *ar = container_of(work, struct ar9170, stat_work.work);
895 int err;
896
897 mutex_lock(&ar->mutex);
898 err = carl9170_update_survey(ar, false, true);
899 mutex_unlock(&ar->mutex);
900
901 if (err)
902 return;
903
904 ieee80211_queue_delayed_work(ar->hw, &ar->stat_work,
905 round_jiffies(msecs_to_jiffies(CARL9170_STAT_WORK)));
906}
907
908static int carl9170_op_config(struct ieee80211_hw *hw, u32 changed)
909{
910 struct ar9170 *ar = hw->priv;
911 int err = 0;
912
913 mutex_lock(&ar->mutex);
914 if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
915 /* TODO */
916 err = 0;
917 }
918
919 if (changed & IEEE80211_CONF_CHANGE_PS) {
920 err = carl9170_ps_update(ar);
921 if (err)
922 goto out;
923 }
924
925 if (changed & IEEE80211_CONF_CHANGE_SMPS) {
926 /* TODO */
927 err = 0;
928 }
929
930 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
931 enum nl80211_channel_type channel_type =
932 cfg80211_get_chandef_type(&hw->conf.chandef);
933
934 /* adjust slot time for 5 GHz */
935 err = carl9170_set_slot_time(ar);
936 if (err)
937 goto out;
938
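		/*
		 * Collect the remaining statistics for the old channel and
		 * reset the tally before switching; a fresh noise-floor
		 * reading is taken once the new channel has been set.
		 */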
939 err = carl9170_update_survey(ar, true, false);
940 if (err)
941 goto out;
942
943 err = carl9170_set_channel(ar, hw->conf.chandef.chan,
944 channel_type);
945 if (err)
946 goto out;
947
948 err = carl9170_update_survey(ar, false, true);
949 if (err)
950 goto out;
951
952 err = carl9170_set_dyn_sifs_ack(ar);
953 if (err)
954 goto out;
955
956 err = carl9170_set_rts_cts_rate(ar);
957 if (err)
958 goto out;
959 }
960
961 if (changed & IEEE80211_CONF_CHANGE_POWER) {
962 err = carl9170_set_mac_tpc(ar, ar->hw->conf.chandef.chan);
963 if (err)
964 goto out;
965 }
966
967out:
968 mutex_unlock(&ar->mutex);
969 return err;
970}
971
972static u64 carl9170_op_prepare_multicast(struct ieee80211_hw *hw,
973 struct netdev_hw_addr_list *mc_list)
974{
975 struct netdev_hw_addr *ha;
976 u64 mchash;
977
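	/*
	 * The hardware uses a 64-bit multicast hash filter. The bit index
	 * for each address is taken from the top six bits of its last
	 * octet, so the broadcast address (last octet 0xff) always maps
	 * to bit 63.
	 */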
978 /* always get broadcast frames */
979 mchash = 1ULL << (0xff >> 2);
980
981 netdev_hw_addr_list_for_each(ha, mc_list)
982 mchash |= 1ULL << (ha->addr[5] >> 2);
983
984 return mchash;
985}
986
987static void carl9170_op_configure_filter(struct ieee80211_hw *hw,
988 unsigned int changed_flags,
989 unsigned int *new_flags,
990 u64 multicast)
991{
992 struct ar9170 *ar = hw->priv;
993
994 /* mask supported flags */
995 *new_flags &= FIF_ALLMULTI | ar->rx_filter_caps;
996
997 if (!IS_ACCEPTING_CMD(ar))
998 return;
999
1000 mutex_lock(&ar->mutex);
1001
1002 ar->filter_state = *new_flags;
1003 /*
1004 * We can support more by setting the sniffer bit and
	 * then checking the error flags later.
1006 */
1007
1008 if (*new_flags & FIF_ALLMULTI)
1009 multicast = ~0ULL;
1010
1011 if (multicast != ar->cur_mc_hash)
1012 WARN_ON(carl9170_update_multicast(ar, multicast));
1013
1014 if (changed_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS)) {
1015 ar->sniffer_enabled = !!(*new_flags &
1016 (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS));
1017
1018 WARN_ON(carl9170_set_operating_mode(ar));
1019 }
1020
1021 if (ar->fw.rx_filter && changed_flags & ar->rx_filter_caps) {
1022 u32 rx_filter = 0;
1023
1024 if (!ar->fw.ba_filter)
1025 rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;
1026
1027 if (!(*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL)))
1028 rx_filter |= CARL9170_RX_FILTER_BAD;
1029
1030 if (!(*new_flags & FIF_CONTROL))
1031 rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;
1032
1033 if (!(*new_flags & FIF_PSPOLL))
1034 rx_filter |= CARL9170_RX_FILTER_CTL_PSPOLL;
1035
1036 if (!(*new_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS))) {
1037 rx_filter |= CARL9170_RX_FILTER_OTHER_RA;
1038 rx_filter |= CARL9170_RX_FILTER_DECRY_FAIL;
1039 }
1040
1041 WARN_ON(carl9170_rx_filter(ar, rx_filter));
1042 }
1043
1044 mutex_unlock(&ar->mutex);
1045}
1046
1047
1048static void carl9170_op_bss_info_changed(struct ieee80211_hw *hw,
1049 struct ieee80211_vif *vif,
1050 struct ieee80211_bss_conf *bss_conf,
1051 u32 changed)
1052{
1053 struct ar9170 *ar = hw->priv;
1054 struct ath_common *common = &ar->common;
1055 int err = 0;
1056 struct carl9170_vif_info *vif_priv;
1057 struct ieee80211_vif *main_vif;
1058
1059 mutex_lock(&ar->mutex);
1060 vif_priv = (void *) vif->drv_priv;
1061 main_vif = carl9170_get_main_vif(ar);
1062 if (WARN_ON(!main_vif))
1063 goto out;
1064
1065 if (changed & BSS_CHANGED_BEACON_ENABLED) {
1066 struct carl9170_vif_info *iter;
1067 int i = 0;
1068
1069 vif_priv->enable_beacon = bss_conf->enable_beacon;
1070 rcu_read_lock();
1071 list_for_each_entry_rcu(iter, &ar->vif_list, list) {
1072 if (iter->active && iter->enable_beacon)
1073 i++;
1074
1075 }
1076 rcu_read_unlock();
1077
1078 ar->beacon_enabled = i;
1079 }
1080
1081 if (changed & BSS_CHANGED_BEACON) {
1082 err = carl9170_update_beacon(ar, false);
1083 if (err)
1084 goto out;
1085 }
1086
1087 if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON |
1088 BSS_CHANGED_BEACON_INT)) {
1089
1090 if (main_vif != vif) {
1091 bss_conf->beacon_int = main_vif->bss_conf.beacon_int;
1092 bss_conf->dtim_period = main_vif->bss_conf.dtim_period;
1093 }
1094
		/*
		 * Broadcast traffic that is held back until the DTIM beacon
		 * could otherwise look stuck to the queue watchdog, so a hard
		 * limit on beacon_int * dtim_period prevents false alarms.
		 */
1099 if (vif->type != NL80211_IFTYPE_STATION &&
1100 (bss_conf->beacon_int * bss_conf->dtim_period >=
1101 (CARL9170_QUEUE_STUCK_TIMEOUT / 2))) {
1102 err = -EINVAL;
1103 goto out;
1104 }
1105
1106 err = carl9170_set_beacon_timers(ar);
1107 if (err)
1108 goto out;
1109 }
1110
1111 if (changed & BSS_CHANGED_HT) {
1112 /* TODO */
1113 err = 0;
1114 if (err)
1115 goto out;
1116 }
1117
1118 if (main_vif != vif)
1119 goto out;
1120
1121 /*
1122 * The following settings can only be changed by the
1123 * master interface.
1124 */
1125
1126 if (changed & BSS_CHANGED_BSSID) {
1127 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
1128 err = carl9170_set_operating_mode(ar);
1129 if (err)
1130 goto out;
1131 }
1132
1133 if (changed & BSS_CHANGED_ASSOC) {
1134 ar->common.curaid = bss_conf->aid;
1135 err = carl9170_set_beacon_timers(ar);
1136 if (err)
1137 goto out;
1138 }
1139
1140 if (changed & BSS_CHANGED_ERP_SLOT) {
1141 err = carl9170_set_slot_time(ar);
1142 if (err)
1143 goto out;
1144 }
1145
1146 if (changed & BSS_CHANGED_BASIC_RATES) {
1147 err = carl9170_set_mac_rates(ar);
1148 if (err)
1149 goto out;
1150 }
1151
1152out:
1153 WARN_ON_ONCE(err && IS_STARTED(ar));
1154 mutex_unlock(&ar->mutex);
1155}
1156
1157static u64 carl9170_op_get_tsf(struct ieee80211_hw *hw,
1158 struct ieee80211_vif *vif)
1159{
1160 struct ar9170 *ar = hw->priv;
1161 struct carl9170_tsf_rsp tsf;
1162 int err;
1163
1164 mutex_lock(&ar->mutex);
1165 err = carl9170_exec_cmd(ar, CARL9170_CMD_READ_TSF,
1166 0, NULL, sizeof(tsf), &tsf);
1167 mutex_unlock(&ar->mutex);
1168 if (WARN_ON(err))
1169 return 0;
1170
1171 return le64_to_cpu(tsf.tsf_64);
1172}
1173
1174static int carl9170_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1175 struct ieee80211_vif *vif,
1176 struct ieee80211_sta *sta,
1177 struct ieee80211_key_conf *key)
1178{
1179 struct ar9170 *ar = hw->priv;
1180 int err = 0, i;
1181 u8 ktype;
1182
1183 if (ar->disable_offload || !vif)
1184 return -EOPNOTSUPP;
1185
	/* Fall back to software encryption whenever the driver is connected
	 * to more than one network.
	 *
	 * This is very unfortunate, because some machines cannot handle
	 * the high throughput of 802.11n networks.
	 */
1192
1193 if (!is_main_vif(ar, vif)) {
1194 mutex_lock(&ar->mutex);
1195 goto err_softw;
1196 }
1197
	/*
	 * While the hardware supports a *catch-all* key for offloading
	 * group-key en-/de-cryption, how the hardware decides which
	 * keyId maps to which key remains a mystery...
	 */
1203 if ((vif->type != NL80211_IFTYPE_STATION &&
1204 vif->type != NL80211_IFTYPE_ADHOC) &&
1205 !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
1206 return -EOPNOTSUPP;
1207
1208 switch (key->cipher) {
1209 case WLAN_CIPHER_SUITE_WEP40:
1210 ktype = AR9170_ENC_ALG_WEP64;
1211 break;
1212 case WLAN_CIPHER_SUITE_WEP104:
1213 ktype = AR9170_ENC_ALG_WEP128;
1214 break;
1215 case WLAN_CIPHER_SUITE_TKIP:
1216 ktype = AR9170_ENC_ALG_TKIP;
1217 break;
1218 case WLAN_CIPHER_SUITE_CCMP:
1219 ktype = AR9170_ENC_ALG_AESCCMP;
1220 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
1221 break;
1222 default:
1223 return -EOPNOTSUPP;
1224 }
1225
1226 mutex_lock(&ar->mutex);
1227 if (cmd == SET_KEY) {
1228 if (!IS_STARTED(ar)) {
1229 err = -EOPNOTSUPP;
1230 goto out;
1231 }
1232
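		/*
		 * Group keys go into the four fixed slots past the 64-entry
		 * pairwise key cache; pairwise keys take the first free slot
		 * in the cache.
		 */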
1233 if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
1234 sta = NULL;
1235
1236 i = 64 + key->keyidx;
1237 } else {
1238 for (i = 0; i < 64; i++)
1239 if (!(ar->usedkeys & BIT(i)))
1240 break;
1241 if (i == 64)
1242 goto err_softw;
1243 }
1244
1245 key->hw_key_idx = i;
1246
1247 err = carl9170_upload_key(ar, i, sta ? sta->addr : NULL,
1248 ktype, 0, key->key,
1249 min_t(u8, 16, key->keylen));
1250 if (err)
1251 goto out;
1252
1253 if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
1254 err = carl9170_upload_key(ar, i, sta ? sta->addr :
1255 NULL, ktype, 1,
1256 key->key + 16, 16);
1257 if (err)
1258 goto out;
1259
			/*
			 * The hardware is not capable of generating the
			 * MMIC for fragmented frames!
			 */
1264 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
1265 }
1266
1267 if (i < 64)
1268 ar->usedkeys |= BIT(i);
1269
1270 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
1271 } else {
1272 if (!IS_STARTED(ar)) {
1273 /* The device is gone... together with the key ;-) */
1274 err = 0;
1275 goto out;
1276 }
1277
1278 if (key->hw_key_idx < 64) {
1279 ar->usedkeys &= ~BIT(key->hw_key_idx);
1280 } else {
1281 err = carl9170_upload_key(ar, key->hw_key_idx, NULL,
1282 AR9170_ENC_ALG_NONE, 0,
1283 NULL, 0);
1284 if (err)
1285 goto out;
1286
1287 if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
1288 err = carl9170_upload_key(ar, key->hw_key_idx,
1289 NULL,
1290 AR9170_ENC_ALG_NONE,
1291 1, NULL, 0);
1292 if (err)
1293 goto out;
1294 }
1295
1296 }
1297
1298 err = carl9170_disable_key(ar, key->hw_key_idx);
1299 if (err)
1300 goto out;
1301 }
1302
1303out:
1304 mutex_unlock(&ar->mutex);
1305 return err;
1306
1307err_softw:
1308 if (!ar->rx_software_decryption) {
1309 ar->rx_software_decryption = true;
1310 carl9170_set_operating_mode(ar);
1311 }
1312 mutex_unlock(&ar->mutex);
1313 return -ENOSPC;
1314}
1315
1316static int carl9170_op_sta_add(struct ieee80211_hw *hw,
1317 struct ieee80211_vif *vif,
1318 struct ieee80211_sta *sta)
1319{
1320 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1321 unsigned int i;
1322
1323 atomic_set(&sta_info->pending_frames, 0);
1324
1325 if (sta->ht_cap.ht_supported) {
1326 if (sta->ht_cap.ampdu_density > 6) {
			/*
			 * The HW does not support the 16us AMPDU density
			 * this station requires, so no HT-Xmit for it.
			 */
1331
1332 return 0;
1333 }
1334
1335 for (i = 0; i < ARRAY_SIZE(sta_info->agg); i++)
1336 RCU_INIT_POINTER(sta_info->agg[i], NULL);
1337
1338 sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
1339 sta_info->ht_sta = true;
1340 }
1341
1342 return 0;
1343}
1344
1345static int carl9170_op_sta_remove(struct ieee80211_hw *hw,
1346 struct ieee80211_vif *vif,
1347 struct ieee80211_sta *sta)
1348{
1349 struct ar9170 *ar = hw->priv;
1350 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1351 unsigned int i;
1352 bool cleanup = false;
1353
1354 if (sta->ht_cap.ht_supported) {
1355
1356 sta_info->ht_sta = false;
1357
1358 rcu_read_lock();
1359 for (i = 0; i < ARRAY_SIZE(sta_info->agg); i++) {
1360 struct carl9170_sta_tid *tid_info;
1361
1362 tid_info = rcu_dereference(sta_info->agg[i]);
1363 RCU_INIT_POINTER(sta_info->agg[i], NULL);
1364
1365 if (!tid_info)
1366 continue;
1367
1368 spin_lock_bh(&ar->tx_ampdu_list_lock);
1369 if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
1370 tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
1371 spin_unlock_bh(&ar->tx_ampdu_list_lock);
1372 cleanup = true;
1373 }
1374 rcu_read_unlock();
1375
1376 if (cleanup)
1377 carl9170_ampdu_gc(ar);
1378 }
1379
1380 return 0;
1381}
1382
1383static int carl9170_op_conf_tx(struct ieee80211_hw *hw,
1384 struct ieee80211_vif *vif, u16 queue,
1385 const struct ieee80211_tx_queue_params *param)
1386{
1387 struct ar9170 *ar = hw->priv;
1388 int ret;
1389
1390 mutex_lock(&ar->mutex);
1391 if (queue < ar->hw->queues) {
1392 memcpy(&ar->edcf[ar9170_qmap[queue]], param, sizeof(*param));
1393 ret = carl9170_set_qos(ar);
1394 } else {
1395 ret = -EINVAL;
1396 }
1397
1398 mutex_unlock(&ar->mutex);
1399 return ret;
1400}
1401
1402static void carl9170_ampdu_work(struct work_struct *work)
1403{
1404 struct ar9170 *ar = container_of(work, struct ar9170,
1405 ampdu_work);
1406
1407 if (!IS_STARTED(ar))
1408 return;
1409
1410 mutex_lock(&ar->mutex);
1411 carl9170_ampdu_gc(ar);
1412 mutex_unlock(&ar->mutex);
1413}
1414
1415static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
1416 struct ieee80211_vif *vif,
1417 enum ieee80211_ampdu_mlme_action action,
1418 struct ieee80211_sta *sta,
1419 u16 tid, u16 *ssn, u8 buf_size)
1420{
1421 struct ar9170 *ar = hw->priv;
1422 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1423 struct carl9170_sta_tid *tid_info;
1424
1425 if (modparam_noht)
1426 return -EOPNOTSUPP;
1427
1428 switch (action) {
1429 case IEEE80211_AMPDU_TX_START:
1430 if (!sta_info->ht_sta)
1431 return -EOPNOTSUPP;
1432
1433 rcu_read_lock();
1434 if (rcu_dereference(sta_info->agg[tid])) {
1435 rcu_read_unlock();
1436 return -EBUSY;
1437 }
1438
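		/*
		 * GFP_ATOMIC, because we are still inside the RCU read-side
		 * critical section and must not sleep here.
		 */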
1439 tid_info = kzalloc(sizeof(struct carl9170_sta_tid),
1440 GFP_ATOMIC);
1441 if (!tid_info) {
1442 rcu_read_unlock();
1443 return -ENOMEM;
1444 }
1445
1446 tid_info->hsn = tid_info->bsn = tid_info->snx = (*ssn);
1447 tid_info->state = CARL9170_TID_STATE_PROGRESS;
1448 tid_info->tid = tid;
1449 tid_info->max = sta_info->ampdu_max_len;
1450 tid_info->sta = sta;
1451 tid_info->vif = vif;
1452
1453 INIT_LIST_HEAD(&tid_info->list);
1454 INIT_LIST_HEAD(&tid_info->tmp_list);
1455 skb_queue_head_init(&tid_info->queue);
1456 spin_lock_init(&tid_info->lock);
1457
1458 spin_lock_bh(&ar->tx_ampdu_list_lock);
1459 ar->tx_ampdu_list_len++;
1460 list_add_tail_rcu(&tid_info->list, &ar->tx_ampdu_list);
1461 rcu_assign_pointer(sta_info->agg[tid], tid_info);
1462 spin_unlock_bh(&ar->tx_ampdu_list_lock);
1463 rcu_read_unlock();
1464
1465 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1466 break;
1467
1468 case IEEE80211_AMPDU_TX_STOP_CONT:
1469 case IEEE80211_AMPDU_TX_STOP_FLUSH:
1470 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
1471 rcu_read_lock();
1472 tid_info = rcu_dereference(sta_info->agg[tid]);
1473 if (tid_info) {
1474 spin_lock_bh(&ar->tx_ampdu_list_lock);
1475 if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
1476 tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
1477 spin_unlock_bh(&ar->tx_ampdu_list_lock);
1478 }
1479
1480 RCU_INIT_POINTER(sta_info->agg[tid], NULL);
1481 rcu_read_unlock();
1482
1483 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1484 ieee80211_queue_work(ar->hw, &ar->ampdu_work);
1485 break;
1486
1487 case IEEE80211_AMPDU_TX_OPERATIONAL:
1488 rcu_read_lock();
1489 tid_info = rcu_dereference(sta_info->agg[tid]);
1490
1491 sta_info->stats[tid].clear = true;
1492 sta_info->stats[tid].req = false;
1493
1494 if (tid_info) {
1495 bitmap_zero(tid_info->bitmap, CARL9170_BAW_SIZE);
1496 tid_info->state = CARL9170_TID_STATE_IDLE;
1497 }
1498 rcu_read_unlock();
1499
1500 if (WARN_ON_ONCE(!tid_info))
1501 return -EFAULT;
1502
1503 break;
1504
1505 case IEEE80211_AMPDU_RX_START:
1506 case IEEE80211_AMPDU_RX_STOP:
1507 /* Handled by hardware */
1508 break;
1509
1510 default:
1511 return -EOPNOTSUPP;
1512 }
1513
1514 return 0;
1515}
1516
1517#ifdef CONFIG_CARL9170_WPC
1518static int carl9170_register_wps_button(struct ar9170 *ar)
1519{
1520 struct input_dev *input;
1521 int err;
1522
1523 if (!(ar->features & CARL9170_WPS_BUTTON))
1524 return 0;
1525
1526 input = input_allocate_device();
1527 if (!input)
1528 return -ENOMEM;
1529
1530 snprintf(ar->wps.name, sizeof(ar->wps.name), "%s WPS Button",
1531 wiphy_name(ar->hw->wiphy));
1532
1533 snprintf(ar->wps.phys, sizeof(ar->wps.phys),
1534 "ieee80211/%s/input0", wiphy_name(ar->hw->wiphy));
1535
1536 input->name = ar->wps.name;
1537 input->phys = ar->wps.phys;
1538 input->id.bustype = BUS_USB;
1539 input->dev.parent = &ar->hw->wiphy->dev;
1540
1541 input_set_capability(input, EV_KEY, KEY_WPS_BUTTON);
1542
1543 err = input_register_device(input);
1544 if (err) {
1545 input_free_device(input);
1546 return err;
1547 }
1548
1549 ar->wps.pbc = input;
1550 return 0;
1551}
1552#endif /* CONFIG_CARL9170_WPC */
1553
1554#ifdef CONFIG_CARL9170_HWRNG
1555static int carl9170_rng_get(struct ar9170 *ar)
1556{
1557
1558#define RW (CARL9170_MAX_CMD_PAYLOAD_LEN / sizeof(u32))
1559#define RB (CARL9170_MAX_CMD_PAYLOAD_LEN)
1560
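	/*
	 * Every word of the command payload requests a read of the same
	 * hardware RNG register, so a single round-trip returns RW fresh
	 * random words.
	 */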
1561 static const __le32 rng_load[RW] = {
1562 [0 ... (RW - 1)] = cpu_to_le32(AR9170_RAND_REG_NUM)};
1563
1564 u32 buf[RW];
1565
1566 unsigned int i, off = 0, transfer, count;
1567 int err;
1568
1569 BUILD_BUG_ON(RB > CARL9170_MAX_CMD_PAYLOAD_LEN);
1570
1571 if (!IS_ACCEPTING_CMD(ar) || !ar->rng.initialized)
1572 return -EAGAIN;
1573
1574 count = ARRAY_SIZE(ar->rng.cache);
1575 while (count) {
1576 err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
1577 RB, (u8 *) rng_load,
1578 RB, (u8 *) buf);
1579 if (err)
1580 return err;
1581
1582 transfer = min_t(unsigned int, count, RW);
1583 for (i = 0; i < transfer; i++)
1584 ar->rng.cache[off + i] = buf[i];
1585
1586 off += transfer;
1587 count -= transfer;
1588 }
1589
1590 ar->rng.cache_idx = 0;
1591
1592#undef RW
1593#undef RB
1594 return 0;
1595}
1596
1597static int carl9170_rng_read(struct hwrng *rng, u32 *data)
1598{
1599 struct ar9170 *ar = (struct ar9170 *)rng->priv;
1600 int ret = -EIO;
1601
1602 mutex_lock(&ar->mutex);
1603 if (ar->rng.cache_idx >= ARRAY_SIZE(ar->rng.cache)) {
1604 ret = carl9170_rng_get(ar);
1605 if (ret) {
1606 mutex_unlock(&ar->mutex);
1607 return ret;
1608 }
1609 }
1610
1611 *data = ar->rng.cache[ar->rng.cache_idx++];
1612 mutex_unlock(&ar->mutex);
1613
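	/*
	 * Each cache entry apparently holds only 16 bits of entropy, so
	 * two valid bytes are reported back to the hwrng core per read.
	 */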
1614 return sizeof(u16);
1615}
1616
1617static void carl9170_unregister_hwrng(struct ar9170 *ar)
1618{
1619 if (ar->rng.initialized) {
1620 hwrng_unregister(&ar->rng.rng);
1621 ar->rng.initialized = false;
1622 }
1623}
1624
1625static int carl9170_register_hwrng(struct ar9170 *ar)
1626{
1627 int err;
1628
1629 snprintf(ar->rng.name, ARRAY_SIZE(ar->rng.name),
1630 "%s_%s", KBUILD_MODNAME, wiphy_name(ar->hw->wiphy));
1631 ar->rng.rng.name = ar->rng.name;
1632 ar->rng.rng.data_read = carl9170_rng_read;
1633 ar->rng.rng.priv = (unsigned long)ar;
1634
1635 if (WARN_ON(ar->rng.initialized))
1636 return -EALREADY;
1637
1638 err = hwrng_register(&ar->rng.rng);
1639 if (err) {
1640 dev_err(&ar->udev->dev, "Failed to register the random "
1641 "number generator (%d)\n", err);
1642 return err;
1643 }
1644
1645 ar->rng.initialized = true;
1646
1647 err = carl9170_rng_get(ar);
1648 if (err) {
1649 carl9170_unregister_hwrng(ar);
1650 return err;
1651 }
1652
1653 return 0;
1654}
1655#endif /* CONFIG_CARL9170_HWRNG */
1656
1657static int carl9170_op_get_survey(struct ieee80211_hw *hw, int idx,
1658 struct survey_info *survey)
1659{
1660 struct ar9170 *ar = hw->priv;
1661 struct ieee80211_channel *chan;
1662 struct ieee80211_supported_band *band;
1663 int err, b, i;
1664
1665 chan = ar->channel;
1666 if (!chan)
1667 return -ENODEV;
1668
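	/*
	 * Only refresh the counters when the currently tuned channel is
	 * queried; other channels report their last cached survey data.
	 */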
1669 if (idx == chan->hw_value) {
1670 mutex_lock(&ar->mutex);
1671 err = carl9170_update_survey(ar, false, true);
1672 mutex_unlock(&ar->mutex);
1673 if (err)
1674 return err;
1675 }
1676
1677 for (b = 0; b < IEEE80211_NUM_BANDS; b++) {
1678 band = ar->hw->wiphy->bands[b];
1679
1680 if (!band)
1681 continue;
1682
1683 for (i = 0; i < band->n_channels; i++) {
1684 if (band->channels[i].hw_value == idx) {
1685 chan = &band->channels[i];
1686 goto found;
1687 }
1688 }
1689 }
1690 return -ENOENT;
1691
1692found:
1693 memcpy(survey, &ar->survey[idx], sizeof(*survey));
1694
1695 survey->channel = chan;
1696 survey->filled = SURVEY_INFO_NOISE_DBM;
1697
1698 if (ar->channel == chan)
1699 survey->filled |= SURVEY_INFO_IN_USE;
1700
1701 if (ar->fw.hw_counters) {
1702 survey->filled |= SURVEY_INFO_CHANNEL_TIME |
1703 SURVEY_INFO_CHANNEL_TIME_BUSY |
1704 SURVEY_INFO_CHANNEL_TIME_TX;
1705 }
1706
1707 return 0;
1708}
1709
1710static void carl9170_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
1711{
1712 struct ar9170 *ar = hw->priv;
1713 unsigned int vid;
1714
1715 mutex_lock(&ar->mutex);
1716 for_each_set_bit(vid, &ar->vif_bitmap, ar->fw.vif_num)
1717 carl9170_flush_cab(ar, vid);
1718
1719 carl9170_flush(ar, drop);
1720 mutex_unlock(&ar->mutex);
1721}
1722
1723static int carl9170_op_get_stats(struct ieee80211_hw *hw,
1724 struct ieee80211_low_level_stats *stats)
1725{
1726 struct ar9170 *ar = hw->priv;
1727
1728 memset(stats, 0, sizeof(*stats));
1729 stats->dot11ACKFailureCount = ar->tx_ack_failures;
1730 stats->dot11FCSErrorCount = ar->tx_fcs_errors;
1731 return 0;
1732}
1733
1734static void carl9170_op_sta_notify(struct ieee80211_hw *hw,
1735 struct ieee80211_vif *vif,
1736 enum sta_notify_cmd cmd,
1737 struct ieee80211_sta *sta)
1738{
1739 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1740
1741 switch (cmd) {
1742 case STA_NOTIFY_SLEEP:
1743 sta_info->sleeping = true;
1744 if (atomic_read(&sta_info->pending_frames))
1745 ieee80211_sta_block_awake(hw, sta, true);
1746 break;
1747
1748 case STA_NOTIFY_AWAKE:
1749 sta_info->sleeping = false;
1750 break;
1751 }
1752}
1753
1754static bool carl9170_tx_frames_pending(struct ieee80211_hw *hw)
1755{
1756 struct ar9170 *ar = hw->priv;
1757
1758 return !!atomic_read(&ar->tx_total_queued);
1759}
1760
1761static const struct ieee80211_ops carl9170_ops = {
1762 .start = carl9170_op_start,
1763 .stop = carl9170_op_stop,
1764 .tx = carl9170_op_tx,
1765 .flush = carl9170_op_flush,
1766 .add_interface = carl9170_op_add_interface,
1767 .remove_interface = carl9170_op_remove_interface,
1768 .config = carl9170_op_config,
1769 .prepare_multicast = carl9170_op_prepare_multicast,
1770 .configure_filter = carl9170_op_configure_filter,
1771 .conf_tx = carl9170_op_conf_tx,
1772 .bss_info_changed = carl9170_op_bss_info_changed,
1773 .get_tsf = carl9170_op_get_tsf,
1774 .set_key = carl9170_op_set_key,
1775 .sta_add = carl9170_op_sta_add,
1776 .sta_remove = carl9170_op_sta_remove,
1777 .sta_notify = carl9170_op_sta_notify,
1778 .get_survey = carl9170_op_get_survey,
1779 .get_stats = carl9170_op_get_stats,
1780 .ampdu_action = carl9170_op_ampdu_action,
1781 .tx_frames_pending = carl9170_tx_frames_pending,
1782};
1783
1784void *carl9170_alloc(size_t priv_size)
1785{
1786 struct ieee80211_hw *hw;
1787 struct ar9170 *ar;
1788 struct sk_buff *skb;
1789 int i;
1790
1791 /*
1792 * this buffer is used for rx stream reconstruction.
1793 * Under heavy load this device (or the transport layer?)
1794 * tends to split the streams into separate rx descriptors.
1795 */
1796
1797 skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL);
1798 if (!skb)
1799 goto err_nomem;
1800
1801 hw = ieee80211_alloc_hw(priv_size, &carl9170_ops);
1802 if (!hw)
1803 goto err_nomem;
1804
1805 ar = hw->priv;
1806 ar->hw = hw;
1807 ar->rx_failover = skb;
1808
1809 memset(&ar->rx_plcp, 0, sizeof(struct ar9170_rx_head));
1810 ar->rx_has_plcp = false;
1811
1812 /*
1813 * Here's a hidden pitfall!
1814 *
1815 * All 4 AC queues work perfectly well under _legacy_ operation.
	 * However, as soon as aggregation is enabled, the traffic flow
1817 * gets very bumpy. Therefore we have to _switch_ to a
1818 * software AC with a single HW queue.
1819 */
1820 hw->queues = __AR9170_NUM_TXQ;
1821
1822 mutex_init(&ar->mutex);
1823 spin_lock_init(&ar->beacon_lock);
1824 spin_lock_init(&ar->cmd_lock);
1825 spin_lock_init(&ar->tx_stats_lock);
1826 spin_lock_init(&ar->tx_ampdu_list_lock);
1827 spin_lock_init(&ar->mem_lock);
1828 spin_lock_init(&ar->state_lock);
1829 atomic_set(&ar->pending_restarts, 0);
1830 ar->vifs = 0;
1831 for (i = 0; i < ar->hw->queues; i++) {
1832 skb_queue_head_init(&ar->tx_status[i]);
1833 skb_queue_head_init(&ar->tx_pending[i]);
1834
1835 INIT_LIST_HEAD(&ar->bar_list[i]);
1836 spin_lock_init(&ar->bar_list_lock[i]);
1837 }
1838 INIT_WORK(&ar->ps_work, carl9170_ps_work);
1839 INIT_WORK(&ar->ping_work, carl9170_ping_work);
1840 INIT_WORK(&ar->restart_work, carl9170_restart_work);
1841 INIT_WORK(&ar->ampdu_work, carl9170_ampdu_work);
1842 INIT_DELAYED_WORK(&ar->stat_work, carl9170_stat_work);
1843 INIT_DELAYED_WORK(&ar->tx_janitor, carl9170_tx_janitor);
1844 INIT_LIST_HEAD(&ar->tx_ampdu_list);
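	/*
	 * Point the A-MPDU iterator at the (still empty) list head; the
	 * cast is only needed so the iterator can be used for list
	 * traversal before any real tid entry exists.
	 */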
1845 rcu_assign_pointer(ar->tx_ampdu_iter,
1846 (struct carl9170_sta_tid *) &ar->tx_ampdu_list);
1847
1848 bitmap_zero(&ar->vif_bitmap, ar->fw.vif_num);
1849 INIT_LIST_HEAD(&ar->vif_list);
1850 init_completion(&ar->tx_flush);
1851
1852 /* firmware decides which modes we support */
1853 hw->wiphy->interface_modes = 0;
1854
1855 hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
1856 IEEE80211_HW_MFP_CAPABLE |
1857 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
1858 IEEE80211_HW_SUPPORTS_PS |
1859 IEEE80211_HW_PS_NULLFUNC_STACK |
1860 IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC |
1861 IEEE80211_HW_SUPPORTS_RC_TABLE |
1862 IEEE80211_HW_SIGNAL_DBM |
1863 IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
1864
1865 if (!modparam_noht) {
1866 /*
		 * See the comment above for why we allow the user
		 * to disable HT via a module parameter.
1869 */
1870 hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
1871 }
1872
1873 hw->extra_tx_headroom = sizeof(struct _carl9170_tx_superframe);
1874 hw->sta_data_size = sizeof(struct carl9170_sta_info);
1875 hw->vif_data_size = sizeof(struct carl9170_vif_info);
1876
1877 hw->max_rates = CARL9170_TX_MAX_RATES;
1878 hw->max_rate_tries = CARL9170_TX_USER_RATE_TRIES;
1879
1880 for (i = 0; i < ARRAY_SIZE(ar->noise); i++)
1881 ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */
1882
1883 return ar;
1884
1885err_nomem:
1886 kfree_skb(skb);
1887 return ERR_PTR(-ENOMEM);
1888}
1889
1890static int carl9170_read_eeprom(struct ar9170 *ar)
1891{
1892#define RW 8 /* number of words to read at once */
1893#define RB (sizeof(u32) * RW)
1894 u8 *eeprom = (void *)&ar->eeprom;
1895 __le32 offsets[RW];
1896 int i, j, err;
1897
1898 BUILD_BUG_ON(sizeof(ar->eeprom) & 3);
1899
1900 BUILD_BUG_ON(RB > CARL9170_MAX_CMD_LEN - 4);
1901#ifndef __CHECKER__
	/* we don't want to handle a trailing remainder */
1903 BUILD_BUG_ON(sizeof(ar->eeprom) % RB);
1904#endif
1905
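	/*
	 * Fetch the EEPROM in chunks of RW 32-bit words; each chunk is
	 * requested by passing the absolute register offsets to the
	 * firmware's read-register command.
	 */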
1906 for (i = 0; i < sizeof(ar->eeprom) / RB; i++) {
1907 for (j = 0; j < RW; j++)
1908 offsets[j] = cpu_to_le32(AR9170_EEPROM_START +
1909 RB * i + 4 * j);
1910
1911 err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
1912 RB, (u8 *) &offsets,
1913 RB, eeprom + RB * i);
1914 if (err)
1915 return err;
1916 }
1917
1918#undef RW
1919#undef RB
1920 return 0;
1921}
1922
1923static int carl9170_parse_eeprom(struct ar9170 *ar)
1924{
1925 struct ath_regulatory *regulatory = &ar->common.regulatory;
1926 unsigned int rx_streams, tx_streams, tx_params = 0;
1927 int bands = 0;
1928 int chans = 0;
1929
1930 if (ar->eeprom.length == cpu_to_le16(0xffff))
1931 return -ENODATA;
1932
1933 rx_streams = hweight8(ar->eeprom.rx_mask);
1934 tx_streams = hweight8(ar->eeprom.tx_mask);
1935
1936 if (rx_streams != tx_streams) {
1937 tx_params = IEEE80211_HT_MCS_TX_RX_DIFF;
1938
1939 WARN_ON(!(tx_streams >= 1 && tx_streams <=
1940 IEEE80211_HT_MCS_TX_MAX_STREAMS));
1941
		tx_params |= (tx_streams - 1) <<
1943 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
1944
1945 carl9170_band_2GHz.ht_cap.mcs.tx_params |= tx_params;
1946 carl9170_band_5GHz.ht_cap.mcs.tx_params |= tx_params;
1947 }
1948
1949 if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) {
1950 ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
1951 &carl9170_band_2GHz;
1952 chans += carl9170_band_2GHz.n_channels;
1953 bands++;
1954 }
1955 if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) {
1956 ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
1957 &carl9170_band_5GHz;
1958 chans += carl9170_band_5GHz.n_channels;
1959 bands++;
1960 }
1961
1962 if (!bands)
1963 return -EINVAL;
1964
1965 ar->survey = kzalloc(sizeof(struct survey_info) * chans, GFP_KERNEL);
1966 if (!ar->survey)
1967 return -ENOMEM;
1968 ar->num_channels = chans;
1969
1970 regulatory->current_rd = le16_to_cpu(ar->eeprom.reg_domain[0]);
1971
1972 /* second part of wiphy init */
1973 SET_IEEE80211_PERM_ADDR(ar->hw, ar->eeprom.mac_address);
1974
1975 return 0;
1976}
1977
1978static void carl9170_reg_notifier(struct wiphy *wiphy,
1979 struct regulatory_request *request)
1980{
1981 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
1982 struct ar9170 *ar = hw->priv;
1983
1984 ath_reg_notifier_apply(wiphy, request, &ar->common.regulatory);
1985}
1986
1987int carl9170_register(struct ar9170 *ar)
1988{
1989 struct ath_regulatory *regulatory = &ar->common.regulatory;
1990 int err = 0, i;
1991
1992 if (WARN_ON(ar->mem_bitmap))
1993 return -EINVAL;
1994
1995 ar->mem_bitmap = kzalloc(roundup(ar->fw.mem_blocks, BITS_PER_LONG) *
1996 sizeof(unsigned long), GFP_KERNEL);
1997
1998 if (!ar->mem_bitmap)
1999 return -ENOMEM;
2000
2001 /* try to read EEPROM, init MAC addr */
2002 err = carl9170_read_eeprom(ar);
2003 if (err)
2004 return err;
2005
2006 err = carl9170_parse_eeprom(ar);
2007 if (err)
2008 return err;
2009
2010 err = ath_regd_init(regulatory, ar->hw->wiphy,
2011 carl9170_reg_notifier);
2012 if (err)
2013 return err;
2014
2015 if (modparam_noht) {
2016 carl9170_band_2GHz.ht_cap.ht_supported = false;
2017 carl9170_band_5GHz.ht_cap.ht_supported = false;
2018 }
2019
2020 for (i = 0; i < ar->fw.vif_num; i++) {
2021 ar->vif_priv[i].id = i;
2022 ar->vif_priv[i].vif = NULL;
2023 }
2024
2025 err = ieee80211_register_hw(ar->hw);
2026 if (err)
2027 return err;
2028
2029 /* mac80211 interface is now registered */
2030 ar->registered = true;
2031
2032 if (!ath_is_world_regd(regulatory))
2033 regulatory_hint(ar->hw->wiphy, regulatory->alpha2);
2034
2035#ifdef CONFIG_CARL9170_DEBUGFS
2036 carl9170_debugfs_register(ar);
2037#endif /* CONFIG_CARL9170_DEBUGFS */
2038
2039 err = carl9170_led_init(ar);
2040 if (err)
2041 goto err_unreg;
2042
2043#ifdef CONFIG_CARL9170_LEDS
2044 err = carl9170_led_register(ar);
2045 if (err)
2046 goto err_unreg;
2047#endif /* CONFIG_CARL9170_LEDS */
2048
2049#ifdef CONFIG_CARL9170_WPC
2050 err = carl9170_register_wps_button(ar);
2051 if (err)
2052 goto err_unreg;
2053#endif /* CONFIG_CARL9170_WPC */
2054
2055#ifdef CONFIG_CARL9170_HWRNG
2056 err = carl9170_register_hwrng(ar);
2057 if (err)
2058 goto err_unreg;
2059#endif /* CONFIG_CARL9170_HWRNG */
2060
2061 dev_info(&ar->udev->dev, "Atheros AR9170 is registered as '%s'\n",
2062 wiphy_name(ar->hw->wiphy));
2063
2064 return 0;
2065
2066err_unreg:
2067 carl9170_unregister(ar);
2068 return err;
2069}
2070
2071void carl9170_unregister(struct ar9170 *ar)
2072{
2073 if (!ar->registered)
2074 return;
2075
2076 ar->registered = false;
2077
2078#ifdef CONFIG_CARL9170_LEDS
2079 carl9170_led_unregister(ar);
2080#endif /* CONFIG_CARL9170_LEDS */
2081
2082#ifdef CONFIG_CARL9170_DEBUGFS
2083 carl9170_debugfs_unregister(ar);
2084#endif /* CONFIG_CARL9170_DEBUGFS */
2085
2086#ifdef CONFIG_CARL9170_WPC
2087 if (ar->wps.pbc) {
2088 input_unregister_device(ar->wps.pbc);
2089 ar->wps.pbc = NULL;
2090 }
2091#endif /* CONFIG_CARL9170_WPC */
2092
2093#ifdef CONFIG_CARL9170_HWRNG
2094 carl9170_unregister_hwrng(ar);
2095#endif /* CONFIG_CARL9170_HWRNG */
2096
2097 carl9170_cancel_worker(ar);
2098 cancel_work_sync(&ar->restart_work);
2099
2100 ieee80211_unregister_hw(ar->hw);
2101}
2102
2103void carl9170_free(struct ar9170 *ar)
2104{
2105 WARN_ON(ar->registered);
2106 WARN_ON(IS_INITIALIZED(ar));
2107
2108 kfree_skb(ar->rx_failover);
2109 ar->rx_failover = NULL;
2110
2111 kfree(ar->mem_bitmap);
2112 ar->mem_bitmap = NULL;
2113
2114 kfree(ar->survey);
2115 ar->survey = NULL;
2116
2117 mutex_destroy(&ar->mutex);
2118
2119 ieee80211_free_hw(ar->hw);
2120}