   1/*
   2 * Atheros CARL9170 driver
   3 *
   4 * mac80211 interaction code
   5 *
   6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
   7 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
   8 *
   9 * This program is free software; you can redistribute it and/or modify
  10 * it under the terms of the GNU General Public License as published by
  11 * the Free Software Foundation; either version 2 of the License, or
  12 * (at your option) any later version.
  13 *
  14 * This program is distributed in the hope that it will be useful,
  15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  17 * GNU General Public License for more details.
  18 *
  19 * You should have received a copy of the GNU General Public License
  20 * along with this program; see the file COPYING.  If not, see
  21 * http://www.gnu.org/licenses/.
  22 *
  23 * This file incorporates work covered by the following copyright and
  24 * permission notice:
  25 *    Copyright (c) 2007-2008 Atheros Communications, Inc.
  26 *
  27 *    Permission to use, copy, modify, and/or distribute this software for any
  28 *    purpose with or without fee is hereby granted, provided that the above
  29 *    copyright notice and this permission notice appear in all copies.
  30 *
  31 *    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  32 *    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  33 *    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  34 *    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  35 *    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  36 *    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  37 *    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  38 */
  39
  40#include <linux/init.h>
  41#include <linux/slab.h>
  42#include <linux/module.h>
  43#include <linux/etherdevice.h>
  44#include <linux/random.h>
  45#include <net/mac80211.h>
  46#include <net/cfg80211.h>
  47#include "hw.h"
  48#include "carl9170.h"
  49#include "cmd.h"
  50
  51static int modparam_nohwcrypt;
  52module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
  53MODULE_PARM_DESC(nohwcrypt, "Disable hardware crypto offload.");
  54
  55int modparam_noht;
  56module_param_named(noht, modparam_noht, int, S_IRUGO);
  57MODULE_PARM_DESC(noht, "Disable MPDU aggregation.");
  58
  59#define RATE(_bitrate, _hw_rate, _txpidx, _flags) {	\
  60	.bitrate	= (_bitrate),			\
  61	.flags		= (_flags),			\
  62	.hw_value	= (_hw_rate) | (_txpidx) << 4,	\
  63}
  64
  65struct ieee80211_rate __carl9170_ratetable[] = {
  66	RATE(10, 0, 0, 0),
  67	RATE(20, 1, 1, IEEE80211_RATE_SHORT_PREAMBLE),
  68	RATE(55, 2, 2, IEEE80211_RATE_SHORT_PREAMBLE),
  69	RATE(110, 3, 3, IEEE80211_RATE_SHORT_PREAMBLE),
  70	RATE(60, 0xb, 0, 0),
  71	RATE(90, 0xf, 0, 0),
  72	RATE(120, 0xa, 0, 0),
  73	RATE(180, 0xe, 0, 0),
  74	RATE(240, 0x9, 0, 0),
  75	RATE(360, 0xd, 1, 0),
  76	RATE(480, 0x8, 2, 0),
  77	RATE(540, 0xc, 3, 0),
  78};
  79#undef RATE
  80
  81#define carl9170_g_ratetable	(__carl9170_ratetable + 0)
  82#define carl9170_g_ratetable_size	12
  83#define carl9170_a_ratetable	(__carl9170_ratetable + 4)
  84#define carl9170_a_ratetable_size	8
  85
  86/*
  87 * NB: The hw_value is used as an index into the carl9170_phy_freq_params
  88 *     array in phy.c so that we don't have to do frequency lookups!
  89 */
  90#define CHAN(_freq, _idx) {		\
  91	.center_freq	= (_freq),	\
  92	.hw_value	= (_idx),	\
  93	.max_power	= 18, /* XXX */	\
  94}
  95
  96static struct ieee80211_channel carl9170_2ghz_chantable[] = {
  97	CHAN(2412,  0),
  98	CHAN(2417,  1),
  99	CHAN(2422,  2),
 100	CHAN(2427,  3),
 101	CHAN(2432,  4),
 102	CHAN(2437,  5),
 103	CHAN(2442,  6),
 104	CHAN(2447,  7),
 105	CHAN(2452,  8),
 106	CHAN(2457,  9),
 107	CHAN(2462, 10),
 108	CHAN(2467, 11),
 109	CHAN(2472, 12),
 110	CHAN(2484, 13),
 111};
 112
 113static struct ieee80211_channel carl9170_5ghz_chantable[] = {
 114	CHAN(4920, 14),
 115	CHAN(4940, 15),
 116	CHAN(4960, 16),
 117	CHAN(4980, 17),
 118	CHAN(5040, 18),
 119	CHAN(5060, 19),
 120	CHAN(5080, 20),
 121	CHAN(5180, 21),
 122	CHAN(5200, 22),
 123	CHAN(5220, 23),
 124	CHAN(5240, 24),
 125	CHAN(5260, 25),
 126	CHAN(5280, 26),
 127	CHAN(5300, 27),
 128	CHAN(5320, 28),
 129	CHAN(5500, 29),
 130	CHAN(5520, 30),
 131	CHAN(5540, 31),
 132	CHAN(5560, 32),
 133	CHAN(5580, 33),
 134	CHAN(5600, 34),
 135	CHAN(5620, 35),
 136	CHAN(5640, 36),
 137	CHAN(5660, 37),
 138	CHAN(5680, 38),
 139	CHAN(5700, 39),
 140	CHAN(5745, 40),
 141	CHAN(5765, 41),
 142	CHAN(5785, 42),
 143	CHAN(5805, 43),
 144	CHAN(5825, 44),
 145	CHAN(5170, 45),
 146	CHAN(5190, 46),
 147	CHAN(5210, 47),
 148	CHAN(5230, 48),
 149};
 150#undef CHAN
 151
 152#define CARL9170_HT_CAP							\
 153{									\
 154	.ht_supported	= true,						\
 155	.cap		= IEEE80211_HT_CAP_MAX_AMSDU |			\
 156			  IEEE80211_HT_CAP_SUP_WIDTH_20_40 |		\
 157			  IEEE80211_HT_CAP_SGI_40 |			\
 158			  IEEE80211_HT_CAP_DSSSCCK40 |			\
 159			  IEEE80211_HT_CAP_SM_PS,			\
 160	.ampdu_factor	= IEEE80211_HT_MAX_AMPDU_64K,			\
 161	.ampdu_density	= IEEE80211_HT_MPDU_DENSITY_8,			\
 162	.mcs		= {						\
 163		.rx_mask = { 0xff, 0xff, 0, 0, 0x1, 0, 0, 0, 0, 0, },	\
 164		.rx_highest = cpu_to_le16(300),				\
 165		.tx_params = IEEE80211_HT_MCS_TX_DEFINED,		\
 166	},								\
 167}
 168
 169static struct ieee80211_supported_band carl9170_band_2GHz = {
 170	.channels	= carl9170_2ghz_chantable,
 171	.n_channels	= ARRAY_SIZE(carl9170_2ghz_chantable),
 172	.bitrates	= carl9170_g_ratetable,
 173	.n_bitrates	= carl9170_g_ratetable_size,
 174	.ht_cap		= CARL9170_HT_CAP,
 175};
 176
 177static struct ieee80211_supported_band carl9170_band_5GHz = {
 178	.channels	= carl9170_5ghz_chantable,
 179	.n_channels	= ARRAY_SIZE(carl9170_5ghz_chantable),
 180	.bitrates	= carl9170_a_ratetable,
 181	.n_bitrates	= carl9170_a_ratetable_size,
 182	.ht_cap		= CARL9170_HT_CAP,
 183};
 184
 185static void carl9170_ampdu_gc(struct ar9170 *ar)
 186{
 187	struct carl9170_sta_tid *tid_info;
 188	LIST_HEAD(tid_gc);
 189
 190	rcu_read_lock();
 191	list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
 192		spin_lock_bh(&ar->tx_ampdu_list_lock);
 193		if (tid_info->state == CARL9170_TID_STATE_SHUTDOWN) {
 194			tid_info->state = CARL9170_TID_STATE_KILLED;
 195			list_del_rcu(&tid_info->list);
 196			ar->tx_ampdu_list_len--;
 197			list_add_tail(&tid_info->tmp_list, &tid_gc);
 198		}
 199		spin_unlock_bh(&ar->tx_ampdu_list_lock);
 200
 201	}
 202	rcu_assign_pointer(ar->tx_ampdu_iter, tid_info);
 203	rcu_read_unlock();
 204
 205	synchronize_rcu();
 206
 207	while (!list_empty(&tid_gc)) {
 208		struct sk_buff *skb;
 209		tid_info = list_first_entry(&tid_gc, struct carl9170_sta_tid,
 210					    tmp_list);
 211
 212		while ((skb = __skb_dequeue(&tid_info->queue)))
 213			carl9170_tx_status(ar, skb, false);
 214
 215		list_del_init(&tid_info->tmp_list);
 216		kfree(tid_info);
 217	}
 218}
 219
 220static void carl9170_flush(struct ar9170 *ar, bool drop_queued)
 221{
 222	if (drop_queued) {
 223		int i;
 224
 225		/*
 226		 * We can only drop frames which have not been uploaded
 227		 * to the device yet.
 228		 */
 229
 230		for (i = 0; i < ar->hw->queues; i++) {
 231			struct sk_buff *skb;
 232
 233			while ((skb = skb_dequeue(&ar->tx_pending[i]))) {
 234				struct ieee80211_tx_info *info;
 235
 236				info = IEEE80211_SKB_CB(skb);
 237				if (info->flags & IEEE80211_TX_CTL_AMPDU)
 238					atomic_dec(&ar->tx_ampdu_upload);
 239
 240				carl9170_tx_status(ar, skb, false);
 241			}
 242		}
 243	}
 244
 245	/* Wait for all other outstanding frames to timeout. */
 246	if (atomic_read(&ar->tx_total_queued))
 247		WARN_ON(wait_for_completion_timeout(&ar->tx_flush, HZ) == 0);
 248}
 249
 250static void carl9170_flush_ba(struct ar9170 *ar)
 251{
 252	struct sk_buff_head free;
 253	struct carl9170_sta_tid *tid_info;
 254	struct sk_buff *skb;
 255
 256	__skb_queue_head_init(&free);
 257
 258	rcu_read_lock();
 259	spin_lock_bh(&ar->tx_ampdu_list_lock);
 260	list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
 261		if (tid_info->state > CARL9170_TID_STATE_SUSPEND) {
 262			tid_info->state = CARL9170_TID_STATE_SUSPEND;
 263
 264			spin_lock(&tid_info->lock);
 265			while ((skb = __skb_dequeue(&tid_info->queue)))
 266				__skb_queue_tail(&free, skb);
 267			spin_unlock(&tid_info->lock);
 268		}
 269	}
 270	spin_unlock_bh(&ar->tx_ampdu_list_lock);
 271	rcu_read_unlock();
 272
 273	while ((skb = __skb_dequeue(&free)))
 274		carl9170_tx_status(ar, skb, false);
 275}
 276
 277static void carl9170_zap_queues(struct ar9170 *ar)
 278{
 279	struct carl9170_vif_info *cvif;
 280	unsigned int i;
 281
 282	carl9170_ampdu_gc(ar);
 283
 284	carl9170_flush_ba(ar);
 285	carl9170_flush(ar, true);
 286
 287	for (i = 0; i < ar->hw->queues; i++) {
 288		spin_lock_bh(&ar->tx_status[i].lock);
 289		while (!skb_queue_empty(&ar->tx_status[i])) {
 290			struct sk_buff *skb;
 291
 292			skb = skb_peek(&ar->tx_status[i]);
 293			carl9170_tx_get_skb(skb);
 294			spin_unlock_bh(&ar->tx_status[i].lock);
 295			carl9170_tx_drop(ar, skb);
 296			spin_lock_bh(&ar->tx_status[i].lock);
 297			carl9170_tx_put_skb(skb);
 298		}
 299		spin_unlock_bh(&ar->tx_status[i].lock);
 300	}
 301
 302	BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_SOFT < 1);
 303	BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD < CARL9170_NUM_TX_LIMIT_SOFT);
 304	BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD >= CARL9170_BAW_BITS);
 305
 306	/* reinitialize queues statistics */
 307	memset(&ar->tx_stats, 0, sizeof(ar->tx_stats));
 308	for (i = 0; i < ar->hw->queues; i++)
 309		ar->tx_stats[i].limit = CARL9170_NUM_TX_LIMIT_HARD;
 310
 311	for (i = 0; i < DIV_ROUND_UP(ar->fw.mem_blocks, BITS_PER_LONG); i++)
 312		ar->mem_bitmap[i] = 0;
 313
 314	rcu_read_lock();
 315	list_for_each_entry_rcu(cvif, &ar->vif_list, list) {
 316		spin_lock_bh(&ar->beacon_lock);
 317		dev_kfree_skb_any(cvif->beacon);
 318		cvif->beacon = NULL;
 319		spin_unlock_bh(&ar->beacon_lock);
 320	}
 321	rcu_read_unlock();
 322
 323	atomic_set(&ar->tx_ampdu_upload, 0);
 324	atomic_set(&ar->tx_ampdu_scheduler, 0);
 325	atomic_set(&ar->tx_total_pending, 0);
 326	atomic_set(&ar->tx_total_queued, 0);
 327	atomic_set(&ar->mem_free_blocks, ar->fw.mem_blocks);
 328}
 329
 330#define CARL9170_FILL_QUEUE(queue, ai_fs, cwmin, cwmax, _txop)		\
 331do {									\
 332	queue.aifs = ai_fs;						\
 333	queue.cw_min = cwmin;						\
 334	queue.cw_max = cwmax;						\
 335	queue.txop = _txop;						\
 336} while (0)
 337
 338static int carl9170_op_start(struct ieee80211_hw *hw)
 339{
 340	struct ar9170 *ar = hw->priv;
 341	int err, i;
 342
 343	mutex_lock(&ar->mutex);
 344
 345	carl9170_zap_queues(ar);
 346
 347	/* reset QoS defaults */
 348	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VO], 2, 3,     7, 47);
 349	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VI], 2, 7,    15, 94);
 350	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BE], 3, 15, 1023,  0);
 351	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BK], 7, 15, 1023,  0);
 352	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_SPECIAL], 2, 3, 7, 0);
 353
 354	ar->current_factor = ar->current_density = -1;
 355	/* "The first key is unique." */
 356	ar->usedkeys = 1;
 357	ar->filter_state = 0;
 358	ar->ps.last_action = jiffies;
 359	ar->ps.last_slept = jiffies;
 360	ar->erp_mode = CARL9170_ERP_AUTO;
 361	ar->rx_software_decryption = false;
 362	ar->disable_offload = false;
 363
 364	for (i = 0; i < ar->hw->queues; i++) {
 365		ar->queue_stop_timeout[i] = jiffies;
 366		ar->max_queue_stop_timeout[i] = 0;
 367	}
 368
 369	atomic_set(&ar->mem_allocs, 0);
 370
 371	err = carl9170_usb_open(ar);
 372	if (err)
 373		goto out;
 374
 375	err = carl9170_init_mac(ar);
 376	if (err)
 377		goto out;
 378
 379	err = carl9170_set_qos(ar);
 380	if (err)
 381		goto out;
 382
 383	if (ar->fw.rx_filter) {
 384		err = carl9170_rx_filter(ar, CARL9170_RX_FILTER_OTHER_RA |
 385			CARL9170_RX_FILTER_CTL_OTHER | CARL9170_RX_FILTER_BAD);
 386		if (err)
 387			goto out;
 388	}
 389
 390	err = carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER,
 391				 AR9170_DMA_TRIGGER_RXQ);
 392	if (err)
 393		goto out;
 394
 395	/* Clear key-cache */
 396	for (i = 0; i < AR9170_CAM_MAX_USER + 4; i++) {
 397		err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
 398					  0, NULL, 0);
 399		if (err)
 400			goto out;
 401
 402		err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
 403					  1, NULL, 0);
 404		if (err)
 405			goto out;
 406
 407		if (i < AR9170_CAM_MAX_USER) {
 408			err = carl9170_disable_key(ar, i);
 409			if (err)
 410				goto out;
 411		}
 412	}
 413
 414	carl9170_set_state_when(ar, CARL9170_IDLE, CARL9170_STARTED);
 415
 416	ieee80211_wake_queues(ar->hw);
 417	err = 0;
 418
 419out:
 420	mutex_unlock(&ar->mutex);
 421	return err;
 422}
 423
 424static void carl9170_cancel_worker(struct ar9170 *ar)
 425{
 426	cancel_delayed_work_sync(&ar->tx_janitor);
 427#ifdef CONFIG_CARL9170_LEDS
 428	cancel_delayed_work_sync(&ar->led_work);
 429#endif /* CONFIG_CARL9170_LEDS */
 430	cancel_work_sync(&ar->ps_work);
 431	cancel_work_sync(&ar->ping_work);
 432	cancel_work_sync(&ar->ampdu_work);
 433}
 434
 435static void carl9170_op_stop(struct ieee80211_hw *hw)
 436{
 437	struct ar9170 *ar = hw->priv;
 438
 439	carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);
 440
 441	ieee80211_stop_queues(ar->hw);
 442
 443	mutex_lock(&ar->mutex);
 444	if (IS_ACCEPTING_CMD(ar)) {
 445		rcu_assign_pointer(ar->beacon_iter, NULL);
 446
 447		carl9170_led_set_state(ar, 0);
 448
 449		/* stop DMA */
 450		carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER, 0);
 451		carl9170_usb_stop(ar);
 452	}
 453
 454	carl9170_zap_queues(ar);
 455	mutex_unlock(&ar->mutex);
 456
 457	carl9170_cancel_worker(ar);
 458}
 459
 460static void carl9170_restart_work(struct work_struct *work)
 461{
 462	struct ar9170 *ar = container_of(work, struct ar9170,
 463					 restart_work);
 464	int err;
 465
 466	ar->usedkeys = 0;
 467	ar->filter_state = 0;
 468	carl9170_cancel_worker(ar);
 469
 470	mutex_lock(&ar->mutex);
 471	err = carl9170_usb_restart(ar);
 472	if (net_ratelimit()) {
 473		if (err) {
 474			dev_err(&ar->udev->dev, "Failed to restart device "
  475				"(%d).\n", err);
  476		} else {
 477			dev_info(&ar->udev->dev, "device restarted "
 478				 "successfully.\n");
 479		}
 480	}
 481
 482	carl9170_zap_queues(ar);
 483	mutex_unlock(&ar->mutex);
 484	if (!err) {
 485		ar->restart_counter++;
 486		atomic_set(&ar->pending_restarts, 0);
 487
 488		ieee80211_restart_hw(ar->hw);
 489	} else {
 490		/*
 491		 * The reset was unsuccessful and the device seems to
 492		 * be dead. But there's still one option: a low-level
 493		 * usb subsystem reset...
 494		 */
 495
 496		carl9170_usb_reset(ar);
 497	}
 498}
 499
 500void carl9170_restart(struct ar9170 *ar, const enum carl9170_restart_reasons r)
 501{
 502	carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);
 503
 504	/*
 505	 * Sometimes, an error can trigger several different reset events.
 506	 * By ignoring these *surplus* reset events, the device won't be
 507	 * killed again, right after it has recovered.
 508	 */
 509	if (atomic_inc_return(&ar->pending_restarts) > 1) {
 510		dev_dbg(&ar->udev->dev, "ignoring restart (%d)\n", r);
 511		return;
 512	}
 513
 514	ieee80211_stop_queues(ar->hw);
 515
 516	dev_err(&ar->udev->dev, "restart device (%d)\n", r);
 517
 518	if (!WARN_ON(r == CARL9170_RR_NO_REASON) ||
 519	    !WARN_ON(r >= __CARL9170_RR_LAST))
 520		ar->last_reason = r;
 521
 522	if (!ar->registered)
 523		return;
 524
 525	if (IS_ACCEPTING_CMD(ar) && !ar->needs_full_reset)
 526		ieee80211_queue_work(ar->hw, &ar->restart_work);
 527	else
 528		carl9170_usb_reset(ar);
 529
 530	/*
  531	 * At this point, the device instance might have vanished or been
  532	 * disabled. So don't put any code here which accesses the ar9170
  533	 * struct without proper protection.
 534	 */
 535}
 536
 537static void carl9170_ping_work(struct work_struct *work)
 538{
 539	struct ar9170 *ar = container_of(work, struct ar9170, ping_work);
 540	int err;
 541
 542	if (!IS_STARTED(ar))
 543		return;
 544
 545	mutex_lock(&ar->mutex);
 546	err = carl9170_echo_test(ar, 0xdeadbeef);
 547	if (err)
 548		carl9170_restart(ar, CARL9170_RR_UNRESPONSIVE_DEVICE);
 549	mutex_unlock(&ar->mutex);
 550}
 551
 552static int carl9170_init_interface(struct ar9170 *ar,
 553				   struct ieee80211_vif *vif)
 554{
 555	struct ath_common *common = &ar->common;
 556	int err;
 557
 558	if (!vif) {
 559		WARN_ON_ONCE(IS_STARTED(ar));
 560		return 0;
 561	}
 562
 563	memcpy(common->macaddr, vif->addr, ETH_ALEN);
 564
 565	if (modparam_nohwcrypt ||
 566	    ((vif->type != NL80211_IFTYPE_STATION) &&
 567	     (vif->type != NL80211_IFTYPE_AP))) {
 568		ar->rx_software_decryption = true;
 569		ar->disable_offload = true;
 570	}
 571
 572	err = carl9170_set_operating_mode(ar);
 573	return err;
 574}
 575
 576static int carl9170_op_add_interface(struct ieee80211_hw *hw,
 577				     struct ieee80211_vif *vif)
 578{
 579	struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
 580	struct ieee80211_vif *main_vif;
 581	struct ar9170 *ar = hw->priv;
 582	int vif_id = -1, err = 0;
 583
 584	mutex_lock(&ar->mutex);
 585	rcu_read_lock();
 586	if (vif_priv->active) {
 587		/*
 588		 * Skip the interface structure initialization,
 589		 * if the vif survived the _restart call.
 590		 */
 591		vif_id = vif_priv->id;
 592		vif_priv->enable_beacon = false;
 593
 594		spin_lock_bh(&ar->beacon_lock);
 595		dev_kfree_skb_any(vif_priv->beacon);
 596		vif_priv->beacon = NULL;
 597		spin_unlock_bh(&ar->beacon_lock);
 598
 599		goto init;
 600	}
 601
 602	main_vif = carl9170_get_main_vif(ar);
 603
 604	if (main_vif) {
 605		switch (main_vif->type) {
 606		case NL80211_IFTYPE_STATION:
 607			if (vif->type == NL80211_IFTYPE_STATION)
 608				break;
 609
 610			err = -EBUSY;
 611			rcu_read_unlock();
 612
 613			goto unlock;
 614
 615		case NL80211_IFTYPE_AP:
 616			if ((vif->type == NL80211_IFTYPE_STATION) ||
 617			    (vif->type == NL80211_IFTYPE_WDS) ||
 618			    (vif->type == NL80211_IFTYPE_AP))
 619				break;
 620
 621			err = -EBUSY;
 622			rcu_read_unlock();
 623			goto unlock;
 624
 625		default:
 626			rcu_read_unlock();
 627			goto unlock;
 628		}
 629	}
 630
 631	vif_id = bitmap_find_free_region(&ar->vif_bitmap, ar->fw.vif_num, 0);
 632
 633	if (vif_id < 0) {
 634		rcu_read_unlock();
 635
 636		err = -ENOSPC;
 637		goto unlock;
 638	}
 639
 640	BUG_ON(ar->vif_priv[vif_id].id != vif_id);
 641
 642	vif_priv->active = true;
 643	vif_priv->id = vif_id;
 644	vif_priv->enable_beacon = false;
 645	ar->vifs++;
 646	list_add_tail_rcu(&vif_priv->list, &ar->vif_list);
 647	rcu_assign_pointer(ar->vif_priv[vif_id].vif, vif);
 648
 649init:
 650	if (carl9170_get_main_vif(ar) == vif) {
 651		rcu_assign_pointer(ar->beacon_iter, vif_priv);
 652		rcu_read_unlock();
 653
 654		err = carl9170_init_interface(ar, vif);
 655		if (err)
 656			goto unlock;
 657	} else {
 658		rcu_read_unlock();
 659		err = carl9170_mod_virtual_mac(ar, vif_id, vif->addr);
 660
 661		if (err)
 662			goto unlock;
 663	}
 664
 665	if (ar->fw.tx_seq_table) {
 666		err = carl9170_write_reg(ar, ar->fw.tx_seq_table + vif_id * 4,
 667					 0);
 668		if (err)
 669			goto unlock;
 670	}
 671
 672unlock:
 673	if (err && (vif_id >= 0)) {
 674		vif_priv->active = false;
 675		bitmap_release_region(&ar->vif_bitmap, vif_id, 0);
 676		ar->vifs--;
 677		rcu_assign_pointer(ar->vif_priv[vif_id].vif, NULL);
 678		list_del_rcu(&vif_priv->list);
 679		mutex_unlock(&ar->mutex);
 680		synchronize_rcu();
 681	} else {
 682		if (ar->vifs > 1)
 683			ar->ps.off_override |= PS_OFF_VIF;
 684
 685		mutex_unlock(&ar->mutex);
 686	}
 687
 688	return err;
 689}
 690
 691static void carl9170_op_remove_interface(struct ieee80211_hw *hw,
 692					 struct ieee80211_vif *vif)
 693{
 694	struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
 695	struct ieee80211_vif *main_vif;
 696	struct ar9170 *ar = hw->priv;
 697	unsigned int id;
 698
 699	mutex_lock(&ar->mutex);
 700
 701	if (WARN_ON_ONCE(!vif_priv->active))
 702		goto unlock;
 703
 704	ar->vifs--;
 705
 706	rcu_read_lock();
 707	main_vif = carl9170_get_main_vif(ar);
 708
 709	id = vif_priv->id;
 710
 711	vif_priv->active = false;
 712	WARN_ON(vif_priv->enable_beacon);
 713	vif_priv->enable_beacon = false;
 714	list_del_rcu(&vif_priv->list);
 715	rcu_assign_pointer(ar->vif_priv[id].vif, NULL);
 716
 717	if (vif == main_vif) {
 718		rcu_read_unlock();
 719
 720		if (ar->vifs) {
 721			WARN_ON(carl9170_init_interface(ar,
 722					carl9170_get_main_vif(ar)));
 723		} else {
 724			carl9170_set_operating_mode(ar);
 725		}
 726	} else {
 727		rcu_read_unlock();
 728
 729		WARN_ON(carl9170_mod_virtual_mac(ar, id, NULL));
 730	}
 731
 732	carl9170_update_beacon(ar, false);
 733	carl9170_flush_cab(ar, id);
 734
 735	spin_lock_bh(&ar->beacon_lock);
 736	dev_kfree_skb_any(vif_priv->beacon);
 737	vif_priv->beacon = NULL;
 738	spin_unlock_bh(&ar->beacon_lock);
 739
 740	bitmap_release_region(&ar->vif_bitmap, id, 0);
 741
 742	carl9170_set_beacon_timers(ar);
 743
 744	if (ar->vifs == 1)
 745		ar->ps.off_override &= ~PS_OFF_VIF;
 746
 747unlock:
 748	mutex_unlock(&ar->mutex);
 749
 750	synchronize_rcu();
 751}
 752
 753void carl9170_ps_check(struct ar9170 *ar)
 754{
 755	ieee80211_queue_work(ar->hw, &ar->ps_work);
 756}
 757
 758/* caller must hold ar->mutex */
 759static int carl9170_ps_update(struct ar9170 *ar)
 760{
 761	bool ps = false;
 762	int err = 0;
 763
 764	if (!ar->ps.off_override)
 765		ps = (ar->hw->conf.flags & IEEE80211_CONF_PS);
 766
 767	if (ps != ar->ps.state) {
 768		err = carl9170_powersave(ar, ps);
 769		if (err)
 770			return err;
 771
 772		if (ar->ps.state && !ps) {
 773			ar->ps.sleep_ms = jiffies_to_msecs(jiffies -
 774				ar->ps.last_action);
 775		}
 776
 777		if (ps)
 778			ar->ps.last_slept = jiffies;
 779
 780		ar->ps.last_action = jiffies;
 781		ar->ps.state = ps;
 782	}
 783
 784	return 0;
 785}
 786
 787static void carl9170_ps_work(struct work_struct *work)
 788{
 789	struct ar9170 *ar = container_of(work, struct ar9170,
 790					 ps_work);
 791	mutex_lock(&ar->mutex);
 792	if (IS_STARTED(ar))
 793		WARN_ON_ONCE(carl9170_ps_update(ar) != 0);
 794	mutex_unlock(&ar->mutex);
 795}
 796
 797
 798static int carl9170_op_config(struct ieee80211_hw *hw, u32 changed)
 799{
 800	struct ar9170 *ar = hw->priv;
 801	int err = 0;
 802
 803	mutex_lock(&ar->mutex);
 804	if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
 805		/* TODO */
 806		err = 0;
 807	}
 808
 809	if (changed & IEEE80211_CONF_CHANGE_PS) {
 810		err = carl9170_ps_update(ar);
 811		if (err)
 812			goto out;
 813	}
 814
 815	if (changed & IEEE80211_CONF_CHANGE_POWER) {
 816		/* TODO */
 817		err = 0;
 818	}
 819
 820	if (changed & IEEE80211_CONF_CHANGE_SMPS) {
 821		/* TODO */
 822		err = 0;
 823	}
 824
 825	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
 826		/* adjust slot time for 5 GHz */
 827		err = carl9170_set_slot_time(ar);
 828		if (err)
 829			goto out;
 830
 831		err = carl9170_set_channel(ar, hw->conf.channel,
 832			hw->conf.channel_type, CARL9170_RFI_NONE);
 833		if (err)
 834			goto out;
 835
 836		err = carl9170_set_dyn_sifs_ack(ar);
 837		if (err)
 838			goto out;
 839
 840		err = carl9170_set_rts_cts_rate(ar);
 841		if (err)
 842			goto out;
 843	}
 844
 845out:
 846	mutex_unlock(&ar->mutex);
 847	return err;
 848}
 849
 850static u64 carl9170_op_prepare_multicast(struct ieee80211_hw *hw,
 851					 struct netdev_hw_addr_list *mc_list)
 852{
 853	struct netdev_hw_addr *ha;
 854	u64 mchash;
 855
 856	/* always get broadcast frames */
 857	mchash = 1ULL << (0xff >> 2);
 858
 859	netdev_hw_addr_list_for_each(ha, mc_list)
 860		mchash |= 1ULL << (ha->addr[5] >> 2);
 861
 862	return mchash;
 863}
 864
 865static void carl9170_op_configure_filter(struct ieee80211_hw *hw,
 866					 unsigned int changed_flags,
 867					 unsigned int *new_flags,
 868					 u64 multicast)
 869{
 870	struct ar9170 *ar = hw->priv;
 871
 872	/* mask supported flags */
 873	*new_flags &= FIF_ALLMULTI | ar->rx_filter_caps;
 874
 875	if (!IS_ACCEPTING_CMD(ar))
 876		return;
 877
 878	mutex_lock(&ar->mutex);
 879
 880	ar->filter_state = *new_flags;
 881	/*
 882	 * We can support more by setting the sniffer bit and
 883	 * then checking the error flags, later.
 884	 */
 885
 886	if (*new_flags & FIF_ALLMULTI)
 887		multicast = ~0ULL;
 888
 889	if (multicast != ar->cur_mc_hash)
 890		WARN_ON(carl9170_update_multicast(ar, multicast));
 891
 892	if (changed_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS)) {
 893		ar->sniffer_enabled = !!(*new_flags &
 894			(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS));
 895
 896		WARN_ON(carl9170_set_operating_mode(ar));
 897	}
 898
 899	if (ar->fw.rx_filter && changed_flags & ar->rx_filter_caps) {
 900		u32 rx_filter = 0;
 901
 902		if (!(*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL)))
 903			rx_filter |= CARL9170_RX_FILTER_BAD;
 904
 905		if (!(*new_flags & FIF_CONTROL))
 906			rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;
 907
 908		if (!(*new_flags & FIF_PSPOLL))
 909			rx_filter |= CARL9170_RX_FILTER_CTL_PSPOLL;
 910
 911		if (!(*new_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS))) {
 912			rx_filter |= CARL9170_RX_FILTER_OTHER_RA;
 913			rx_filter |= CARL9170_RX_FILTER_DECRY_FAIL;
 914		}
 915
 916		WARN_ON(carl9170_rx_filter(ar, rx_filter));
 917	}
 918
 919	mutex_unlock(&ar->mutex);
 920}
 921
 922
 923static void carl9170_op_bss_info_changed(struct ieee80211_hw *hw,
 924					 struct ieee80211_vif *vif,
 925					 struct ieee80211_bss_conf *bss_conf,
 926					 u32 changed)
 927{
 928	struct ar9170 *ar = hw->priv;
 929	struct ath_common *common = &ar->common;
 930	int err = 0;
 931	struct carl9170_vif_info *vif_priv;
 932	struct ieee80211_vif *main_vif;
 933
 934	mutex_lock(&ar->mutex);
 935	vif_priv = (void *) vif->drv_priv;
 936	main_vif = carl9170_get_main_vif(ar);
 937	if (WARN_ON(!main_vif))
 938		goto out;
 939
 940	if (changed & BSS_CHANGED_BEACON_ENABLED) {
 941		struct carl9170_vif_info *iter;
 942		int i = 0;
 943
 944		vif_priv->enable_beacon = bss_conf->enable_beacon;
 945		rcu_read_lock();
 946		list_for_each_entry_rcu(iter, &ar->vif_list, list) {
 947			if (iter->active && iter->enable_beacon)
 948				i++;
 949
 950		}
 951		rcu_read_unlock();
 952
 953		ar->beacon_enabled = i;
 954	}
 955
 956	if (changed & BSS_CHANGED_BEACON) {
 957		err = carl9170_update_beacon(ar, false);
 958		if (err)
 959			goto out;
 960	}
 961
 962	if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON |
 963		       BSS_CHANGED_BEACON_INT)) {
 964
 965		if (main_vif != vif) {
 966			bss_conf->beacon_int = main_vif->bss_conf.beacon_int;
 967			bss_conf->dtim_period = main_vif->bss_conf.dtim_period;
 968		}
 969
 970		/*
 971		 * A hard limit on the broadcast traffic should therefore
 972		 * prevent false alarms.
 973		 */
 974		if (vif->type != NL80211_IFTYPE_STATION &&
 975		    (bss_conf->beacon_int * bss_conf->dtim_period >=
 976		     (CARL9170_QUEUE_STUCK_TIMEOUT / 2))) {
 977			err = -EINVAL;
 978			goto out;
 979		}
 980
 981		err = carl9170_set_beacon_timers(ar);
 982		if (err)
 983			goto out;
 984	}
 985
 986	if (changed & BSS_CHANGED_HT) {
 987		/* TODO */
 988		err = 0;
 989		if (err)
 990			goto out;
 991	}
 992
 993	if (main_vif != vif)
 994		goto out;
 995
 996	/*
 997	 * The following settings can only be changed by the
 998	 * master interface.
 999	 */
1000
1001	if (changed & BSS_CHANGED_BSSID) {
1002		memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
1003		err = carl9170_set_operating_mode(ar);
1004		if (err)
1005			goto out;
1006	}
1007
1008	if (changed & BSS_CHANGED_ASSOC) {
1009		ar->common.curaid = bss_conf->aid;
1010		err = carl9170_set_beacon_timers(ar);
1011		if (err)
1012			goto out;
1013	}
1014
1015	if (changed & BSS_CHANGED_ERP_SLOT) {
1016		err = carl9170_set_slot_time(ar);
1017		if (err)
1018			goto out;
1019	}
1020
1021	if (changed & BSS_CHANGED_BASIC_RATES) {
1022		err = carl9170_set_mac_rates(ar);
1023		if (err)
1024			goto out;
1025	}
1026
1027out:
1028	WARN_ON_ONCE(err && IS_STARTED(ar));
1029	mutex_unlock(&ar->mutex);
1030}
1031
1032static u64 carl9170_op_get_tsf(struct ieee80211_hw *hw)
1033{
1034	struct ar9170 *ar = hw->priv;
1035	struct carl9170_tsf_rsp tsf;
1036	int err;
1037
1038	mutex_lock(&ar->mutex);
1039	err = carl9170_exec_cmd(ar, CARL9170_CMD_READ_TSF,
1040				0, NULL, sizeof(tsf), &tsf);
1041	mutex_unlock(&ar->mutex);
1042	if (WARN_ON(err))
1043		return 0;
1044
1045	return le64_to_cpu(tsf.tsf_64);
1046}
1047
1048static int carl9170_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1049			       struct ieee80211_vif *vif,
1050			       struct ieee80211_sta *sta,
1051			       struct ieee80211_key_conf *key)
1052{
1053	struct ar9170 *ar = hw->priv;
1054	int err = 0, i;
1055	u8 ktype;
1056
1057	if (ar->disable_offload || !vif)
1058		return -EOPNOTSUPP;
1059
1060	/*
 1061	 * We have to fall back to software encryption whenever
 1062	 * the user chooses to participate in an IBSS or is connected
 1063	 * to more than one network.
 1064	 *
 1065	 * This is very unfortunate, because some machines cannot handle
 1066	 * the high throughput speed of 802.11n networks.
1067	 */
1068
1069	if (!is_main_vif(ar, vif)) {
1070		mutex_lock(&ar->mutex);
1071		goto err_softw;
1072	}
1073
1074	/*
 1075	 * While the hardware does support a *catch-all* key for offloading
 1076	 * group-key en-/decryption, the way the hardware decides which
 1077	 * keyId maps to which key remains a mystery...
1078	 */
1079	if ((vif->type != NL80211_IFTYPE_STATION &&
1080	     vif->type != NL80211_IFTYPE_ADHOC) &&
1081	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
1082		return -EOPNOTSUPP;
1083
1084	switch (key->cipher) {
1085	case WLAN_CIPHER_SUITE_WEP40:
1086		ktype = AR9170_ENC_ALG_WEP64;
1087		break;
1088	case WLAN_CIPHER_SUITE_WEP104:
1089		ktype = AR9170_ENC_ALG_WEP128;
1090		break;
1091	case WLAN_CIPHER_SUITE_TKIP:
1092		ktype = AR9170_ENC_ALG_TKIP;
1093		break;
1094	case WLAN_CIPHER_SUITE_CCMP:
1095		ktype = AR9170_ENC_ALG_AESCCMP;
1096		break;
1097	default:
1098		return -EOPNOTSUPP;
1099	}
1100
1101	mutex_lock(&ar->mutex);
1102	if (cmd == SET_KEY) {
1103		if (!IS_STARTED(ar)) {
1104			err = -EOPNOTSUPP;
1105			goto out;
1106		}
1107
1108		if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
1109			sta = NULL;
1110
1111			i = 64 + key->keyidx;
1112		} else {
1113			for (i = 0; i < 64; i++)
1114				if (!(ar->usedkeys & BIT(i)))
1115					break;
1116			if (i == 64)
1117				goto err_softw;
1118		}
1119
1120		key->hw_key_idx = i;
1121
1122		err = carl9170_upload_key(ar, i, sta ? sta->addr : NULL,
1123					  ktype, 0, key->key,
1124					  min_t(u8, 16, key->keylen));
1125		if (err)
1126			goto out;
1127
1128		if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
1129			err = carl9170_upload_key(ar, i, sta ? sta->addr :
1130						  NULL, ktype, 1,
1131						  key->key + 16, 16);
1132			if (err)
1133				goto out;
1134
1135			/*
 1136			 * The hardware is not capable of generating the MMIC
 1137			 * for fragmented frames!
1138			 */
1139			key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
1140		}
1141
1142		if (i < 64)
1143			ar->usedkeys |= BIT(i);
1144
1145		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
1146	} else {
1147		if (!IS_STARTED(ar)) {
1148			/* The device is gone... together with the key ;-) */
1149			err = 0;
1150			goto out;
1151		}
1152
1153		if (key->hw_key_idx < 64) {
1154			ar->usedkeys &= ~BIT(key->hw_key_idx);
1155		} else {
1156			err = carl9170_upload_key(ar, key->hw_key_idx, NULL,
1157						  AR9170_ENC_ALG_NONE, 0,
1158						  NULL, 0);
1159			if (err)
1160				goto out;
1161
1162			if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
1163				err = carl9170_upload_key(ar, key->hw_key_idx,
1164							  NULL,
1165							  AR9170_ENC_ALG_NONE,
1166							  1, NULL, 0);
1167				if (err)
1168					goto out;
1169			}
1170
1171		}
1172
1173		err = carl9170_disable_key(ar, key->hw_key_idx);
1174		if (err)
1175			goto out;
1176	}
1177
1178out:
1179	mutex_unlock(&ar->mutex);
1180	return err;
1181
1182err_softw:
1183	if (!ar->rx_software_decryption) {
1184		ar->rx_software_decryption = true;
1185		carl9170_set_operating_mode(ar);
1186	}
1187	mutex_unlock(&ar->mutex);
1188	return -ENOSPC;
1189}
1190
1191static int carl9170_op_sta_add(struct ieee80211_hw *hw,
1192			       struct ieee80211_vif *vif,
1193			       struct ieee80211_sta *sta)
1194{
1195	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1196	unsigned int i;
1197
1198	atomic_set(&sta_info->pending_frames, 0);
1199
1200	if (sta->ht_cap.ht_supported) {
1201		if (sta->ht_cap.ampdu_density > 6) {
1202			/*
 1203			 * The HW does not support a 16us AMPDU density.
 1204			 * No HT-Xmit for this station.
1205			 */
1206
1207			return 0;
1208		}
1209
1210		for (i = 0; i < CARL9170_NUM_TID; i++)
1211			rcu_assign_pointer(sta_info->agg[i], NULL);
1212
1213		sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
1214		sta_info->ht_sta = true;
1215	}
1216
1217	return 0;
1218}
1219
1220static int carl9170_op_sta_remove(struct ieee80211_hw *hw,
1221				struct ieee80211_vif *vif,
1222				struct ieee80211_sta *sta)
1223{
1224	struct ar9170 *ar = hw->priv;
1225	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1226	unsigned int i;
1227	bool cleanup = false;
1228
1229	if (sta->ht_cap.ht_supported) {
1230
1231		sta_info->ht_sta = false;
1232
1233		rcu_read_lock();
1234		for (i = 0; i < CARL9170_NUM_TID; i++) {
1235			struct carl9170_sta_tid *tid_info;
1236
1237			tid_info = rcu_dereference(sta_info->agg[i]);
1238			rcu_assign_pointer(sta_info->agg[i], NULL);
1239
1240			if (!tid_info)
1241				continue;
1242
1243			spin_lock_bh(&ar->tx_ampdu_list_lock);
1244			if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
1245				tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
1246			spin_unlock_bh(&ar->tx_ampdu_list_lock);
1247			cleanup = true;
1248		}
1249		rcu_read_unlock();
1250
1251		if (cleanup)
1252			carl9170_ampdu_gc(ar);
1253	}
1254
1255	return 0;
1256}
1257
1258static int carl9170_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
1259			       const struct ieee80211_tx_queue_params *param)
1260{
1261	struct ar9170 *ar = hw->priv;
1262	int ret;
1263
1264	mutex_lock(&ar->mutex);
1265	if (queue < ar->hw->queues) {
1266		memcpy(&ar->edcf[ar9170_qmap[queue]], param, sizeof(*param));
1267		ret = carl9170_set_qos(ar);
1268	} else {
1269		ret = -EINVAL;
1270	}
1271
1272	mutex_unlock(&ar->mutex);
1273	return ret;
1274}
1275
1276static void carl9170_ampdu_work(struct work_struct *work)
1277{
1278	struct ar9170 *ar = container_of(work, struct ar9170,
1279					 ampdu_work);
1280
1281	if (!IS_STARTED(ar))
1282		return;
1283
1284	mutex_lock(&ar->mutex);
1285	carl9170_ampdu_gc(ar);
1286	mutex_unlock(&ar->mutex);
1287}
1288
1289static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
1290				    struct ieee80211_vif *vif,
1291				    enum ieee80211_ampdu_mlme_action action,
1292				    struct ieee80211_sta *sta,
1293				    u16 tid, u16 *ssn, u8 buf_size)
1294{
1295	struct ar9170 *ar = hw->priv;
1296	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1297	struct carl9170_sta_tid *tid_info;
1298
1299	if (modparam_noht)
1300		return -EOPNOTSUPP;
1301
1302	switch (action) {
1303	case IEEE80211_AMPDU_TX_START:
1304		if (!sta_info->ht_sta)
1305			return -EOPNOTSUPP;
1306
1307		rcu_read_lock();
1308		if (rcu_dereference(sta_info->agg[tid])) {
1309			rcu_read_unlock();
1310			return -EBUSY;
1311		}
1312
1313		tid_info = kzalloc(sizeof(struct carl9170_sta_tid),
1314				   GFP_ATOMIC);
1315		if (!tid_info) {
1316			rcu_read_unlock();
1317			return -ENOMEM;
1318		}
1319
1320		tid_info->hsn = tid_info->bsn = tid_info->snx = (*ssn);
1321		tid_info->state = CARL9170_TID_STATE_PROGRESS;
1322		tid_info->tid = tid;
1323		tid_info->max = sta_info->ampdu_max_len;
1324
1325		INIT_LIST_HEAD(&tid_info->list);
1326		INIT_LIST_HEAD(&tid_info->tmp_list);
1327		skb_queue_head_init(&tid_info->queue);
1328		spin_lock_init(&tid_info->lock);
1329
1330		spin_lock_bh(&ar->tx_ampdu_list_lock);
1331		ar->tx_ampdu_list_len++;
1332		list_add_tail_rcu(&tid_info->list, &ar->tx_ampdu_list);
1333		rcu_assign_pointer(sta_info->agg[tid], tid_info);
1334		spin_unlock_bh(&ar->tx_ampdu_list_lock);
1335		rcu_read_unlock();
1336
1337		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1338		break;
1339
1340	case IEEE80211_AMPDU_TX_STOP:
1341		rcu_read_lock();
1342		tid_info = rcu_dereference(sta_info->agg[tid]);
1343		if (tid_info) {
1344			spin_lock_bh(&ar->tx_ampdu_list_lock);
1345			if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
1346				tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
1347			spin_unlock_bh(&ar->tx_ampdu_list_lock);
1348		}
1349
1350		rcu_assign_pointer(sta_info->agg[tid], NULL);
1351		rcu_read_unlock();
1352
1353		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1354		ieee80211_queue_work(ar->hw, &ar->ampdu_work);
1355		break;
1356
1357	case IEEE80211_AMPDU_TX_OPERATIONAL:
1358		rcu_read_lock();
1359		tid_info = rcu_dereference(sta_info->agg[tid]);
1360
1361		sta_info->stats[tid].clear = true;
1362		sta_info->stats[tid].req = false;
1363
1364		if (tid_info) {
1365			bitmap_zero(tid_info->bitmap, CARL9170_BAW_SIZE);
1366			tid_info->state = CARL9170_TID_STATE_IDLE;
1367		}
1368		rcu_read_unlock();
1369
1370		if (WARN_ON_ONCE(!tid_info))
1371			return -EFAULT;
1372
1373		break;
1374
1375	case IEEE80211_AMPDU_RX_START:
1376	case IEEE80211_AMPDU_RX_STOP:
1377		/* Handled by hardware */
1378		break;
1379
1380	default:
1381		return -EOPNOTSUPP;
1382	}
1383
1384	return 0;
1385}
1386
1387#ifdef CONFIG_CARL9170_WPC
1388static int carl9170_register_wps_button(struct ar9170 *ar)
1389{
1390	struct input_dev *input;
1391	int err;
1392
1393	if (!(ar->features & CARL9170_WPS_BUTTON))
1394		return 0;
1395
1396	input = input_allocate_device();
1397	if (!input)
1398		return -ENOMEM;
1399
1400	snprintf(ar->wps.name, sizeof(ar->wps.name), "%s WPS Button",
1401		 wiphy_name(ar->hw->wiphy));
1402
1403	snprintf(ar->wps.phys, sizeof(ar->wps.phys),
1404		 "ieee80211/%s/input0", wiphy_name(ar->hw->wiphy));
1405
1406	input->name = ar->wps.name;
1407	input->phys = ar->wps.phys;
1408	input->id.bustype = BUS_USB;
1409	input->dev.parent = &ar->hw->wiphy->dev;
1410
1411	input_set_capability(input, EV_KEY, KEY_WPS_BUTTON);
1412
1413	err = input_register_device(input);
1414	if (err) {
1415		input_free_device(input);
1416		return err;
1417	}
1418
1419	ar->wps.pbc = input;
1420	return 0;
1421}
1422#endif /* CONFIG_CARL9170_WPC */
1423
1424static int carl9170_op_get_survey(struct ieee80211_hw *hw, int idx,
1425				struct survey_info *survey)
1426{
1427	struct ar9170 *ar = hw->priv;
1428	int err;
1429
1430	if (idx != 0)
1431		return -ENOENT;
1432
1433	mutex_lock(&ar->mutex);
1434	err = carl9170_get_noisefloor(ar);
1435	mutex_unlock(&ar->mutex);
1436	if (err)
1437		return err;
1438
1439	survey->channel = ar->channel;
1440	survey->filled = SURVEY_INFO_NOISE_DBM;
1441	survey->noise = ar->noise[0];
1442	return 0;
1443}
1444
1445static void carl9170_op_flush(struct ieee80211_hw *hw, bool drop)
1446{
1447	struct ar9170 *ar = hw->priv;
1448	unsigned int vid;
1449
1450	mutex_lock(&ar->mutex);
1451	for_each_set_bit(vid, &ar->vif_bitmap, ar->fw.vif_num)
1452		carl9170_flush_cab(ar, vid);
1453
1454	carl9170_flush(ar, drop);
1455	mutex_unlock(&ar->mutex);
1456}
1457
1458static int carl9170_op_get_stats(struct ieee80211_hw *hw,
1459				 struct ieee80211_low_level_stats *stats)
1460{
1461	struct ar9170 *ar = hw->priv;
1462
1463	memset(stats, 0, sizeof(*stats));
1464	stats->dot11ACKFailureCount = ar->tx_ack_failures;
1465	stats->dot11FCSErrorCount = ar->tx_fcs_errors;
1466	return 0;
1467}
1468
1469static void carl9170_op_sta_notify(struct ieee80211_hw *hw,
1470				   struct ieee80211_vif *vif,
1471				   enum sta_notify_cmd cmd,
1472				   struct ieee80211_sta *sta)
1473{
1474	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1475
1476	switch (cmd) {
1477	case STA_NOTIFY_SLEEP:
1478		sta_info->sleeping = true;
1479		if (atomic_read(&sta_info->pending_frames))
1480			ieee80211_sta_block_awake(hw, sta, true);
1481		break;
1482
1483	case STA_NOTIFY_AWAKE:
1484		sta_info->sleeping = false;
1485		break;
1486	}
1487}
1488
1489static bool carl9170_tx_frames_pending(struct ieee80211_hw *hw)
1490{
1491	struct ar9170 *ar = hw->priv;
1492
1493	return !!atomic_read(&ar->tx_total_queued);
1494}
1495
1496static const struct ieee80211_ops carl9170_ops = {
1497	.start			= carl9170_op_start,
1498	.stop			= carl9170_op_stop,
1499	.tx			= carl9170_op_tx,
1500	.flush			= carl9170_op_flush,
1501	.add_interface		= carl9170_op_add_interface,
1502	.remove_interface	= carl9170_op_remove_interface,
1503	.config			= carl9170_op_config,
1504	.prepare_multicast	= carl9170_op_prepare_multicast,
1505	.configure_filter	= carl9170_op_configure_filter,
1506	.conf_tx		= carl9170_op_conf_tx,
1507	.bss_info_changed	= carl9170_op_bss_info_changed,
1508	.get_tsf		= carl9170_op_get_tsf,
1509	.set_key		= carl9170_op_set_key,
1510	.sta_add		= carl9170_op_sta_add,
1511	.sta_remove		= carl9170_op_sta_remove,
1512	.sta_notify		= carl9170_op_sta_notify,
1513	.get_survey		= carl9170_op_get_survey,
1514	.get_stats		= carl9170_op_get_stats,
1515	.ampdu_action		= carl9170_op_ampdu_action,
1516	.tx_frames_pending	= carl9170_tx_frames_pending,
1517};
1518
1519void *carl9170_alloc(size_t priv_size)
1520{
1521	struct ieee80211_hw *hw;
1522	struct ar9170 *ar;
1523	struct sk_buff *skb;
1524	int i;
1525
1526	/*
 1527	 * This buffer is used for rx stream reconstruction.
1528	 * Under heavy load this device (or the transport layer?)
1529	 * tends to split the streams into separate rx descriptors.
1530	 */
1531
1532	skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL);
1533	if (!skb)
1534		goto err_nomem;
1535
1536	hw = ieee80211_alloc_hw(priv_size, &carl9170_ops);
1537	if (!hw)
1538		goto err_nomem;
1539
1540	ar = hw->priv;
1541	ar->hw = hw;
1542	ar->rx_failover = skb;
1543
1544	memset(&ar->rx_plcp, 0, sizeof(struct ar9170_rx_head));
1545	ar->rx_has_plcp = false;
1546
1547	/*
1548	 * Here's a hidden pitfall!
1549	 *
1550	 * All 4 AC queues work perfectly well under _legacy_ operation.
 1551	 * However, as soon as aggregation is enabled, the traffic flow
1552	 * gets very bumpy. Therefore we have to _switch_ to a
1553	 * software AC with a single HW queue.
1554	 */
1555	hw->queues = __AR9170_NUM_TXQ;
1556
1557	mutex_init(&ar->mutex);
1558	spin_lock_init(&ar->beacon_lock);
1559	spin_lock_init(&ar->cmd_lock);
1560	spin_lock_init(&ar->tx_stats_lock);
1561	spin_lock_init(&ar->tx_ampdu_list_lock);
1562	spin_lock_init(&ar->mem_lock);
1563	spin_lock_init(&ar->state_lock);
1564	atomic_set(&ar->pending_restarts, 0);
1565	ar->vifs = 0;
1566	for (i = 0; i < ar->hw->queues; i++) {
1567		skb_queue_head_init(&ar->tx_status[i]);
1568		skb_queue_head_init(&ar->tx_pending[i]);
1569	}
1570	INIT_WORK(&ar->ps_work, carl9170_ps_work);
1571	INIT_WORK(&ar->ping_work, carl9170_ping_work);
1572	INIT_WORK(&ar->restart_work, carl9170_restart_work);
1573	INIT_WORK(&ar->ampdu_work, carl9170_ampdu_work);
1574	INIT_DELAYED_WORK(&ar->tx_janitor, carl9170_tx_janitor);
1575	INIT_LIST_HEAD(&ar->tx_ampdu_list);
1576	rcu_assign_pointer(ar->tx_ampdu_iter,
1577			   (struct carl9170_sta_tid *) &ar->tx_ampdu_list);
1578
1579	bitmap_zero(&ar->vif_bitmap, ar->fw.vif_num);
1580	INIT_LIST_HEAD(&ar->vif_list);
1581	init_completion(&ar->tx_flush);
1582
1583	/* firmware decides which modes we support */
1584	hw->wiphy->interface_modes = 0;
1585
1586	hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
1587		     IEEE80211_HW_REPORTS_TX_ACK_STATUS |
1588		     IEEE80211_HW_SUPPORTS_PS |
1589		     IEEE80211_HW_PS_NULLFUNC_STACK |
1590		     IEEE80211_HW_NEED_DTIM_PERIOD |
1591		     IEEE80211_HW_SIGNAL_DBM;
1592
1593	if (!modparam_noht) {
1594		/*
 1595		 * See the comment above for why we allow the user
 1596		 * to disable HT via a module parameter.
1597		 */
1598		hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
1599	}
1600
1601	hw->extra_tx_headroom = sizeof(struct _carl9170_tx_superframe);
1602	hw->sta_data_size = sizeof(struct carl9170_sta_info);
1603	hw->vif_data_size = sizeof(struct carl9170_vif_info);
1604
1605	hw->max_rates = CARL9170_TX_MAX_RATES;
1606	hw->max_rate_tries = CARL9170_TX_USER_RATE_TRIES;
1607
1608	for (i = 0; i < ARRAY_SIZE(ar->noise); i++)
1609		ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */
1610
1611	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
1612	return ar;
1613
1614err_nomem:
1615	kfree_skb(skb);
1616	return ERR_PTR(-ENOMEM);
1617}
1618
1619static int carl9170_read_eeprom(struct ar9170 *ar)
1620{
1621#define RW	8	/* number of words to read at once */
1622#define RB	(sizeof(u32) * RW)
1623	u8 *eeprom = (void *)&ar->eeprom;
1624	__le32 offsets[RW];
1625	int i, j, err;
1626
1627	BUILD_BUG_ON(sizeof(ar->eeprom) & 3);
1628
1629	BUILD_BUG_ON(RB > CARL9170_MAX_CMD_LEN - 4);
1630#ifndef __CHECKER__
1631	/* don't want to handle trailing remains */
1632	BUILD_BUG_ON(sizeof(ar->eeprom) % RB);
1633#endif
1634
1635	for (i = 0; i < sizeof(ar->eeprom) / RB; i++) {
1636		for (j = 0; j < RW; j++)
1637			offsets[j] = cpu_to_le32(AR9170_EEPROM_START +
1638						 RB * i + 4 * j);
1639
1640		err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
1641					RB, (u8 *) &offsets,
1642					RB, eeprom + RB * i);
1643		if (err)
1644			return err;
1645	}
1646
1647#undef RW
1648#undef RB
1649	return 0;
1650}
1651
1652static int carl9170_parse_eeprom(struct ar9170 *ar)
1653{
1654	struct ath_regulatory *regulatory = &ar->common.regulatory;
1655	unsigned int rx_streams, tx_streams, tx_params = 0;
1656	int bands = 0;
1657
1658	if (ar->eeprom.length == cpu_to_le16(0xffff))
1659		return -ENODATA;
1660
1661	rx_streams = hweight8(ar->eeprom.rx_mask);
1662	tx_streams = hweight8(ar->eeprom.tx_mask);
1663
1664	if (rx_streams != tx_streams) {
1665		tx_params = IEEE80211_HT_MCS_TX_RX_DIFF;
1666
1667		WARN_ON(!(tx_streams >= 1 && tx_streams <=
1668			IEEE80211_HT_MCS_TX_MAX_STREAMS));
1669
1670		tx_params = (tx_streams - 1) <<
1671			    IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
1672
1673		carl9170_band_2GHz.ht_cap.mcs.tx_params |= tx_params;
1674		carl9170_band_5GHz.ht_cap.mcs.tx_params |= tx_params;
1675	}
1676
1677	if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) {
1678		ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
1679			&carl9170_band_2GHz;
1680		bands++;
1681	}
1682	if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) {
1683		ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
1684			&carl9170_band_5GHz;
1685		bands++;
1686	}
1687
1688	/*
 1689	 * I measured this: a band switch takes roughly
 1690	 * 135 ms and a frequency switch about 80 ms.
 1691	 *
 1692	 * FIXME: measure these values again once EEPROM settings
 1693	 *	  are used; they will influence them!
1694	 */
1695	if (bands == 2)
1696		ar->hw->channel_change_time = 135 * 1000;
1697	else
1698		ar->hw->channel_change_time = 80 * 1000;
1699
1700	regulatory->current_rd = le16_to_cpu(ar->eeprom.reg_domain[0]);
1701	regulatory->current_rd_ext = le16_to_cpu(ar->eeprom.reg_domain[1]);
1702
1703	/* second part of wiphy init */
1704	SET_IEEE80211_PERM_ADDR(ar->hw, ar->eeprom.mac_address);
1705
1706	return bands ? 0 : -EINVAL;
1707}
1708
1709static int carl9170_reg_notifier(struct wiphy *wiphy,
1710				 struct regulatory_request *request)
1711{
1712	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
1713	struct ar9170 *ar = hw->priv;
1714
1715	return ath_reg_notifier_apply(wiphy, request, &ar->common.regulatory);
1716}
1717
1718int carl9170_register(struct ar9170 *ar)
1719{
1720	struct ath_regulatory *regulatory = &ar->common.regulatory;
1721	int err = 0, i;
1722
1723	if (WARN_ON(ar->mem_bitmap))
1724		return -EINVAL;
1725
1726	ar->mem_bitmap = kzalloc(roundup(ar->fw.mem_blocks, BITS_PER_LONG) *
1727				 sizeof(unsigned long), GFP_KERNEL);
1728
1729	if (!ar->mem_bitmap)
1730		return -ENOMEM;
1731
1732	/* try to read EEPROM, init MAC addr */
1733	err = carl9170_read_eeprom(ar);
1734	if (err)
1735		return err;
1736
1737	err = carl9170_fw_fix_eeprom(ar);
1738	if (err)
1739		return err;
1740
1741	err = carl9170_parse_eeprom(ar);
1742	if (err)
1743		return err;
1744
1745	err = ath_regd_init(regulatory, ar->hw->wiphy,
1746			    carl9170_reg_notifier);
1747	if (err)
1748		return err;
1749
1750	if (modparam_noht) {
1751		carl9170_band_2GHz.ht_cap.ht_supported = false;
1752		carl9170_band_5GHz.ht_cap.ht_supported = false;
1753	}
1754
1755	for (i = 0; i < ar->fw.vif_num; i++) {
1756		ar->vif_priv[i].id = i;
1757		ar->vif_priv[i].vif = NULL;
1758	}
1759
1760	err = ieee80211_register_hw(ar->hw);
1761	if (err)
1762		return err;
1763
1764	/* mac80211 interface is now registered */
1765	ar->registered = true;
1766
1767	if (!ath_is_world_regd(regulatory))
1768		regulatory_hint(ar->hw->wiphy, regulatory->alpha2);
1769
1770#ifdef CONFIG_CARL9170_DEBUGFS
1771	carl9170_debugfs_register(ar);
1772#endif /* CONFIG_CARL9170_DEBUGFS */
1773
1774	err = carl9170_led_init(ar);
1775	if (err)
1776		goto err_unreg;
1777
1778#ifdef CONFIG_CARL9170_LEDS
1779	err = carl9170_led_register(ar);
1780	if (err)
1781		goto err_unreg;
1782#endif /* CONFIG_CARL9170_LEDS */
1783
1784#ifdef CONFIG_CARL9170_WPC
1785	err = carl9170_register_wps_button(ar);
1786	if (err)
1787		goto err_unreg;
1788#endif /* CONFIG_CARL9170_WPC */
1789
1790	dev_info(&ar->udev->dev, "Atheros AR9170 is registered as '%s'\n",
1791		 wiphy_name(ar->hw->wiphy));
1792
1793	return 0;
1794
1795err_unreg:
1796	carl9170_unregister(ar);
1797	return err;
1798}
1799
1800void carl9170_unregister(struct ar9170 *ar)
1801{
1802	if (!ar->registered)
1803		return;
1804
1805	ar->registered = false;
1806
1807#ifdef CONFIG_CARL9170_LEDS
1808	carl9170_led_unregister(ar);
1809#endif /* CONFIG_CARL9170_LEDS */
1810
1811#ifdef CONFIG_CARL9170_DEBUGFS
1812	carl9170_debugfs_unregister(ar);
1813#endif /* CONFIG_CARL9170_DEBUGFS */
1814
1815#ifdef CONFIG_CARL9170_WPC
1816	if (ar->wps.pbc) {
1817		input_unregister_device(ar->wps.pbc);
1818		ar->wps.pbc = NULL;
1819	}
1820#endif /* CONFIG_CARL9170_WPC */
1821
1822	carl9170_cancel_worker(ar);
1823	cancel_work_sync(&ar->restart_work);
1824
1825	ieee80211_unregister_hw(ar->hw);
1826}
1827
1828void carl9170_free(struct ar9170 *ar)
1829{
1830	WARN_ON(ar->registered);
1831	WARN_ON(IS_INITIALIZED(ar));
1832
1833	kfree_skb(ar->rx_failover);
1834	ar->rx_failover = NULL;
1835
1836	kfree(ar->mem_bitmap);
1837	ar->mem_bitmap = NULL;
1838
1839	mutex_destroy(&ar->mutex);
1840
1841	ieee80211_free_hw(ar->hw);
1842}
v4.6
   1/*
   2 * Atheros CARL9170 driver
   3 *
   4 * mac80211 interaction code
   5 *
   6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
   7 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
   8 *
   9 * This program is free software; you can redistribute it and/or modify
  10 * it under the terms of the GNU General Public License as published by
  11 * the Free Software Foundation; either version 2 of the License, or
  12 * (at your option) any later version.
  13 *
  14 * This program is distributed in the hope that it will be useful,
  15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  17 * GNU General Public License for more details.
  18 *
  19 * You should have received a copy of the GNU General Public License
  20 * along with this program; see the file COPYING.  If not, see
  21 * http://www.gnu.org/licenses/.
  22 *
  23 * This file incorporates work covered by the following copyright and
  24 * permission notice:
  25 *    Copyright (c) 2007-2008 Atheros Communications, Inc.
  26 *
  27 *    Permission to use, copy, modify, and/or distribute this software for any
  28 *    purpose with or without fee is hereby granted, provided that the above
  29 *    copyright notice and this permission notice appear in all copies.
  30 *
  31 *    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  32 *    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  33 *    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  34 *    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  35 *    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  36 *    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  37 *    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  38 */
  39
 
  40#include <linux/slab.h>
  41#include <linux/module.h>
  42#include <linux/etherdevice.h>
  43#include <linux/random.h>
  44#include <net/mac80211.h>
  45#include <net/cfg80211.h>
  46#include "hw.h"
  47#include "carl9170.h"
  48#include "cmd.h"
  49
  50static bool modparam_nohwcrypt;
  51module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
  52MODULE_PARM_DESC(nohwcrypt, "Disable hardware crypto offload.");
  53
  54int modparam_noht;
  55module_param_named(noht, modparam_noht, int, S_IRUGO);
  56MODULE_PARM_DESC(noht, "Disable MPDU aggregation.");
  57
  58#define RATE(_bitrate, _hw_rate, _txpidx, _flags) {	\
  59	.bitrate	= (_bitrate),			\
  60	.flags		= (_flags),			\
  61	.hw_value	= (_hw_rate) | (_txpidx) << 4,	\
  62}
  63
  64struct ieee80211_rate __carl9170_ratetable[] = {
  65	RATE(10, 0, 0, 0),
  66	RATE(20, 1, 1, IEEE80211_RATE_SHORT_PREAMBLE),
  67	RATE(55, 2, 2, IEEE80211_RATE_SHORT_PREAMBLE),
  68	RATE(110, 3, 3, IEEE80211_RATE_SHORT_PREAMBLE),
  69	RATE(60, 0xb, 0, 0),
  70	RATE(90, 0xf, 0, 0),
  71	RATE(120, 0xa, 0, 0),
  72	RATE(180, 0xe, 0, 0),
  73	RATE(240, 0x9, 0, 0),
  74	RATE(360, 0xd, 1, 0),
  75	RATE(480, 0x8, 2, 0),
  76	RATE(540, 0xc, 3, 0),
  77};
  78#undef RATE
  79
  80#define carl9170_g_ratetable	(__carl9170_ratetable + 0)
  81#define carl9170_g_ratetable_size	12
  82#define carl9170_a_ratetable	(__carl9170_ratetable + 4)
  83#define carl9170_a_ratetable_size	8
  84
  85/*
  86 * NB: The hw_value is used as an index into the carl9170_phy_freq_params
  87 *     array in phy.c so that we don't have to do frequency lookups!
  88 */
  89#define CHAN(_freq, _idx) {		\
  90	.center_freq	= (_freq),	\
  91	.hw_value	= (_idx),	\
  92	.max_power	= 18, /* XXX */	\
  93}
  94
  95static struct ieee80211_channel carl9170_2ghz_chantable[] = {
  96	CHAN(2412,  0),
  97	CHAN(2417,  1),
  98	CHAN(2422,  2),
  99	CHAN(2427,  3),
 100	CHAN(2432,  4),
 101	CHAN(2437,  5),
 102	CHAN(2442,  6),
 103	CHAN(2447,  7),
 104	CHAN(2452,  8),
 105	CHAN(2457,  9),
 106	CHAN(2462, 10),
 107	CHAN(2467, 11),
 108	CHAN(2472, 12),
 109	CHAN(2484, 13),
 110};
 111
 112static struct ieee80211_channel carl9170_5ghz_chantable[] = {
 113	CHAN(4920, 14),
 114	CHAN(4940, 15),
 115	CHAN(4960, 16),
 116	CHAN(4980, 17),
 117	CHAN(5040, 18),
 118	CHAN(5060, 19),
 119	CHAN(5080, 20),
 120	CHAN(5180, 21),
 121	CHAN(5200, 22),
 122	CHAN(5220, 23),
 123	CHAN(5240, 24),
 124	CHAN(5260, 25),
 125	CHAN(5280, 26),
 126	CHAN(5300, 27),
 127	CHAN(5320, 28),
 128	CHAN(5500, 29),
 129	CHAN(5520, 30),
 130	CHAN(5540, 31),
 131	CHAN(5560, 32),
 132	CHAN(5580, 33),
 133	CHAN(5600, 34),
 134	CHAN(5620, 35),
 135	CHAN(5640, 36),
 136	CHAN(5660, 37),
 137	CHAN(5680, 38),
 138	CHAN(5700, 39),
 139	CHAN(5745, 40),
 140	CHAN(5765, 41),
 141	CHAN(5785, 42),
 142	CHAN(5805, 43),
 143	CHAN(5825, 44),
 144	CHAN(5170, 45),
 145	CHAN(5190, 46),
 146	CHAN(5210, 47),
 147	CHAN(5230, 48),
 148};
 149#undef CHAN
 150
 151#define CARL9170_HT_CAP							\
 152{									\
 153	.ht_supported	= true,						\
 154	.cap		= IEEE80211_HT_CAP_MAX_AMSDU |			\
 155			  IEEE80211_HT_CAP_SUP_WIDTH_20_40 |		\
 156			  IEEE80211_HT_CAP_SGI_40 |			\
 157			  IEEE80211_HT_CAP_DSSSCCK40 |			\
 158			  IEEE80211_HT_CAP_SM_PS,			\
 159	.ampdu_factor	= IEEE80211_HT_MAX_AMPDU_64K,			\
 160	.ampdu_density	= IEEE80211_HT_MPDU_DENSITY_8,			\
 161	.mcs		= {						\
 162		.rx_mask = { 0xff, 0xff, 0, 0, 0x1, 0, 0, 0, 0, 0, },	\
 163		.rx_highest = cpu_to_le16(300),				\
 164		.tx_params = IEEE80211_HT_MCS_TX_DEFINED,		\
 165	},								\
 166}
 167
 168static struct ieee80211_supported_band carl9170_band_2GHz = {
 169	.channels	= carl9170_2ghz_chantable,
 170	.n_channels	= ARRAY_SIZE(carl9170_2ghz_chantable),
 171	.bitrates	= carl9170_g_ratetable,
 172	.n_bitrates	= carl9170_g_ratetable_size,
 173	.ht_cap		= CARL9170_HT_CAP,
 174};
 175
 176static struct ieee80211_supported_band carl9170_band_5GHz = {
 177	.channels	= carl9170_5ghz_chantable,
 178	.n_channels	= ARRAY_SIZE(carl9170_5ghz_chantable),
 179	.bitrates	= carl9170_a_ratetable,
 180	.n_bitrates	= carl9170_a_ratetable_size,
 181	.ht_cap		= CARL9170_HT_CAP,
 182};
 183
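    /*
     * Reap TID sessions that have reached the SHUTDOWN state: mark them
     * KILLED and unlink them from ar->tx_ampdu_list under the list lock,
     * wait for an RCU grace period so that no TX path still sees them,
     * and only then flush their queued frames and free the tid_info.
     */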
 184static void carl9170_ampdu_gc(struct ar9170 *ar)
 185{
 186	struct carl9170_sta_tid *tid_info;
 187	LIST_HEAD(tid_gc);
 188
 189	rcu_read_lock();
 190	list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
 191		spin_lock_bh(&ar->tx_ampdu_list_lock);
 192		if (tid_info->state == CARL9170_TID_STATE_SHUTDOWN) {
 193			tid_info->state = CARL9170_TID_STATE_KILLED;
 194			list_del_rcu(&tid_info->list);
 195			ar->tx_ampdu_list_len--;
 196			list_add_tail(&tid_info->tmp_list, &tid_gc);
 197		}
 198		spin_unlock_bh(&ar->tx_ampdu_list_lock);
 199
 200	}
 201	rcu_assign_pointer(ar->tx_ampdu_iter, tid_info);
 202	rcu_read_unlock();
 203
 204	synchronize_rcu();
 205
 206	while (!list_empty(&tid_gc)) {
 207		struct sk_buff *skb;
 208		tid_info = list_first_entry(&tid_gc, struct carl9170_sta_tid,
 209					    tmp_list);
 210
 211		while ((skb = __skb_dequeue(&tid_info->queue)))
 212			carl9170_tx_status(ar, skb, false);
 213
 214		list_del_init(&tid_info->tmp_list);
 215		kfree(tid_info);
 216	}
 217}
 218
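    /*
     * Flush the TX path: optionally drop every frame that is still
     * sitting in the software tx_pending queues (frames already uploaded
     * to the device cannot be dropped) and then wait up to one second
     * for the remaining outstanding frames to complete.
     */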
 219static void carl9170_flush(struct ar9170 *ar, bool drop_queued)
 220{
 221	if (drop_queued) {
 222		int i;
 223
 224		/*
 225		 * We can only drop frames which have not been uploaded
 226		 * to the device yet.
 227		 */
 228
 229		for (i = 0; i < ar->hw->queues; i++) {
 230			struct sk_buff *skb;
 231
 232			while ((skb = skb_dequeue(&ar->tx_pending[i]))) {
 233				struct ieee80211_tx_info *info;
 234
 235				info = IEEE80211_SKB_CB(skb);
 236				if (info->flags & IEEE80211_TX_CTL_AMPDU)
 237					atomic_dec(&ar->tx_ampdu_upload);
 238
 239				carl9170_tx_status(ar, skb, false);
 240			}
 241		}
 242	}
 243
 244	/* Wait for all other outstanding frames to time out. */
 245	if (atomic_read(&ar->tx_total_queued))
 246		WARN_ON(wait_for_completion_timeout(&ar->tx_flush, HZ) == 0);
 247}
 248
 249static void carl9170_flush_ba(struct ar9170 *ar)
 250{
 251	struct sk_buff_head free;
 252	struct carl9170_sta_tid *tid_info;
 253	struct sk_buff *skb;
 254
 255	__skb_queue_head_init(&free);
 256
 257	rcu_read_lock();
 258	spin_lock_bh(&ar->tx_ampdu_list_lock);
 259	list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
 260		if (tid_info->state > CARL9170_TID_STATE_SUSPEND) {
 261			tid_info->state = CARL9170_TID_STATE_SUSPEND;
 262
 263			spin_lock(&tid_info->lock);
 264			while ((skb = __skb_dequeue(&tid_info->queue)))
 265				__skb_queue_tail(&free, skb);
 266			spin_unlock(&tid_info->lock);
 267		}
 268	}
 269	spin_unlock_bh(&ar->tx_ampdu_list_lock);
 270	rcu_read_unlock();
 271
 272	while ((skb = __skb_dequeue(&free)))
 273		carl9170_tx_status(ar, skb, false);
 274}
 275
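    /*
     * Bring the whole TX machinery back to a clean state: reap dead
     * ampdu sessions, drop pending BA and regular frames, purge the
     * tx_status queues and reset the per-queue statistics and limits,
     * the firmware memory bitmap, cached beacons and all bookkeeping
     * counters.
     */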
 276static void carl9170_zap_queues(struct ar9170 *ar)
 277{
 278	struct carl9170_vif_info *cvif;
 279	unsigned int i;
 280
 281	carl9170_ampdu_gc(ar);
 282
 283	carl9170_flush_ba(ar);
 284	carl9170_flush(ar, true);
 285
 286	for (i = 0; i < ar->hw->queues; i++) {
 287		spin_lock_bh(&ar->tx_status[i].lock);
 288		while (!skb_queue_empty(&ar->tx_status[i])) {
 289			struct sk_buff *skb;
 290
 291			skb = skb_peek(&ar->tx_status[i]);
 292			carl9170_tx_get_skb(skb);
 293			spin_unlock_bh(&ar->tx_status[i].lock);
 294			carl9170_tx_drop(ar, skb);
 295			spin_lock_bh(&ar->tx_status[i].lock);
 296			carl9170_tx_put_skb(skb);
 297		}
 298		spin_unlock_bh(&ar->tx_status[i].lock);
 299	}
 300
 301	BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_SOFT < 1);
 302	BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD < CARL9170_NUM_TX_LIMIT_SOFT);
 303	BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD >= CARL9170_BAW_BITS);
 304
 305	/* reinitialize queue statistics */
 306	memset(&ar->tx_stats, 0, sizeof(ar->tx_stats));
 307	for (i = 0; i < ar->hw->queues; i++)
 308		ar->tx_stats[i].limit = CARL9170_NUM_TX_LIMIT_HARD;
 309
 310	for (i = 0; i < DIV_ROUND_UP(ar->fw.mem_blocks, BITS_PER_LONG); i++)
 311		ar->mem_bitmap[i] = 0;
 312
 313	rcu_read_lock();
 314	list_for_each_entry_rcu(cvif, &ar->vif_list, list) {
 315		spin_lock_bh(&ar->beacon_lock);
 316		dev_kfree_skb_any(cvif->beacon);
 317		cvif->beacon = NULL;
 318		spin_unlock_bh(&ar->beacon_lock);
 319	}
 320	rcu_read_unlock();
 321
 322	atomic_set(&ar->tx_ampdu_upload, 0);
 323	atomic_set(&ar->tx_ampdu_scheduler, 0);
 324	atomic_set(&ar->tx_total_pending, 0);
 325	atomic_set(&ar->tx_total_queued, 0);
 326	atomic_set(&ar->mem_free_blocks, ar->fw.mem_blocks);
 327}
 328
 329#define CARL9170_FILL_QUEUE(queue, ai_fs, cwmin, cwmax, _txop)		\
 330do {									\
 331	queue.aifs = ai_fs;						\
 332	queue.cw_min = cwmin;						\
 333	queue.cw_max = cwmax;						\
 334	queue.txop = _txop;						\
 335} while (0)
 336
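    /*
     * mac80211 start() callback: reset the driver state and the QoS
     * defaults, open the USB transport, program the MAC, RX filter and
     * RX DMA, wipe the hardware key cache and finally move the device
     * from IDLE to STARTED.
     */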
 337static int carl9170_op_start(struct ieee80211_hw *hw)
 338{
 339	struct ar9170 *ar = hw->priv;
 340	int err, i;
 341
 342	mutex_lock(&ar->mutex);
 343
 344	carl9170_zap_queues(ar);
 345
 346	/* reset QoS defaults */
 347	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VO], 2, 3,     7, 47);
 348	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VI], 2, 7,    15, 94);
 349	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BE], 3, 15, 1023,  0);
 350	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BK], 7, 15, 1023,  0);
 351	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_SPECIAL], 2, 3, 7, 0);
 352
 353	ar->current_factor = ar->current_density = -1;
 354	/* "The first key is unique." */
 355	ar->usedkeys = 1;
 356	ar->filter_state = 0;
 357	ar->ps.last_action = jiffies;
 358	ar->ps.last_slept = jiffies;
 359	ar->erp_mode = CARL9170_ERP_AUTO;
 360
 361	/* Set "disable hw crypto offload" whenever the module parameter
 362	 * nohwcrypt is set or the firmware itself does not support offloading.
 363	 */
 364	ar->disable_offload = modparam_nohwcrypt |
 365		ar->fw.disable_offload_fw;
 366	ar->rx_software_decryption = ar->disable_offload;
 367
 368	for (i = 0; i < ar->hw->queues; i++) {
 369		ar->queue_stop_timeout[i] = jiffies;
 370		ar->max_queue_stop_timeout[i] = 0;
 371	}
 372
 373	atomic_set(&ar->mem_allocs, 0);
 374
 375	err = carl9170_usb_open(ar);
 376	if (err)
 377		goto out;
 378
 379	err = carl9170_init_mac(ar);
 380	if (err)
 381		goto out;
 382
 383	err = carl9170_set_qos(ar);
 384	if (err)
 385		goto out;
 386
 387	if (ar->fw.rx_filter) {
 388		err = carl9170_rx_filter(ar, CARL9170_RX_FILTER_OTHER_RA |
 389			CARL9170_RX_FILTER_CTL_OTHER | CARL9170_RX_FILTER_BAD);
 390		if (err)
 391			goto out;
 392	}
 393
 394	err = carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER,
 395				 AR9170_DMA_TRIGGER_RXQ);
 396	if (err)
 397		goto out;
 398
 399	/* Clear key-cache */
 400	for (i = 0; i < AR9170_CAM_MAX_USER + 4; i++) {
 401		err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
 402					  0, NULL, 0);
 403		if (err)
 404			goto out;
 405
 406		err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
 407					  1, NULL, 0);
 408		if (err)
 409			goto out;
 410
 411		if (i < AR9170_CAM_MAX_USER) {
 412			err = carl9170_disable_key(ar, i);
 413			if (err)
 414				goto out;
 415		}
 416	}
 417
 418	carl9170_set_state_when(ar, CARL9170_IDLE, CARL9170_STARTED);
 419
 420	ieee80211_queue_delayed_work(ar->hw, &ar->stat_work,
 421		round_jiffies(msecs_to_jiffies(CARL9170_STAT_WORK)));
 422
 423	ieee80211_wake_queues(ar->hw);
 424	err = 0;
 425
 426out:
 427	mutex_unlock(&ar->mutex);
 428	return err;
 429}
 430
 431static void carl9170_cancel_worker(struct ar9170 *ar)
 432{
 433	cancel_delayed_work_sync(&ar->stat_work);
 434	cancel_delayed_work_sync(&ar->tx_janitor);
 435#ifdef CONFIG_CARL9170_LEDS
 436	cancel_delayed_work_sync(&ar->led_work);
 437#endif /* CONFIG_CARL9170_LEDS */
 438	cancel_work_sync(&ar->ps_work);
 439	cancel_work_sync(&ar->ping_work);
 440	cancel_work_sync(&ar->ampdu_work);
 441}
 442
 443static void carl9170_op_stop(struct ieee80211_hw *hw)
 444{
 445	struct ar9170 *ar = hw->priv;
 446
 447	carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);
 448
 449	ieee80211_stop_queues(ar->hw);
 450
 451	mutex_lock(&ar->mutex);
 452	if (IS_ACCEPTING_CMD(ar)) {
 453		RCU_INIT_POINTER(ar->beacon_iter, NULL);
 454
 455		carl9170_led_set_state(ar, 0);
 456
 457		/* stop DMA */
 458		carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER, 0);
 459		carl9170_usb_stop(ar);
 460	}
 461
 462	carl9170_zap_queues(ar);
 463	mutex_unlock(&ar->mutex);
 464
 465	carl9170_cancel_worker(ar);
 466}
 467
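    /*
     * Deferred restart handler: drop the key and filter state, stop all
     * workers and try a fast firmware/USB restart. On success the stack
     * is told to rebuild its state via ieee80211_restart_hw(); if the
     * restart fails (or a full reset was forced), fall back to a
     * complete USB subsystem reset.
     */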
 468static void carl9170_restart_work(struct work_struct *work)
 469{
 470	struct ar9170 *ar = container_of(work, struct ar9170,
 471					 restart_work);
 472	int err = -EIO;
 473
 474	ar->usedkeys = 0;
 475	ar->filter_state = 0;
 476	carl9170_cancel_worker(ar);
 477
 478	mutex_lock(&ar->mutex);
 479	if (!ar->force_usb_reset) {
 480		err = carl9170_usb_restart(ar);
 481		if (net_ratelimit()) {
 482			if (err)
 483				dev_err(&ar->udev->dev, "Failed to restart device (%d).\n", err);
 484			else
 485				dev_info(&ar->udev->dev, "device restarted successfully.\n");
 486		}
 487	}
 488	carl9170_zap_queues(ar);
 489	mutex_unlock(&ar->mutex);
 490
 491	if (!err && !ar->force_usb_reset) {
 492		ar->restart_counter++;
 493		atomic_set(&ar->pending_restarts, 0);
 494
 495		ieee80211_restart_hw(ar->hw);
 496	} else {
 497		/*
 498		 * The reset was unsuccessful and the device seems to
 499		 * be dead. But there's still one option: a low-level
 500		 * usb subsystem reset...
 501		 */
 502
 503		carl9170_usb_reset(ar);
 504	}
 505}
 506
 507void carl9170_restart(struct ar9170 *ar, const enum carl9170_restart_reasons r)
 508{
 509	carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);
 510
 511	/*
 512	 * Sometimes, an error can trigger several different reset events.
 513	 * By ignoring these *surplus* reset events, the device won't be
 514	 * killed again, right after it has recovered.
 515	 */
 516	if (atomic_inc_return(&ar->pending_restarts) > 1) {
 517		dev_dbg(&ar->udev->dev, "ignoring restart (%d)\n", r);
 518		return;
 519	}
 520
 521	ieee80211_stop_queues(ar->hw);
 522
 523	dev_err(&ar->udev->dev, "restart device (%d)\n", r);
 524
 525	if (!WARN_ON(r == CARL9170_RR_NO_REASON) ||
 526	    !WARN_ON(r >= __CARL9170_RR_LAST))
 527		ar->last_reason = r;
 528
 529	if (!ar->registered)
 530		return;
 531
 532	if (!IS_ACCEPTING_CMD(ar) || ar->needs_full_reset)
 533		ar->force_usb_reset = true;
 534
 535	ieee80211_queue_work(ar->hw, &ar->restart_work);
 536
 537	/*
 538	 * At this point, the device instance might have vanished or been
 539	 * disabled. So, don't add any code here which accesses the ar9170
 540	 * struct without proper protection.
 541	 */
 542}
 543
 544static void carl9170_ping_work(struct work_struct *work)
 545{
 546	struct ar9170 *ar = container_of(work, struct ar9170, ping_work);
 547	int err;
 548
 549	if (!IS_STARTED(ar))
 550		return;
 551
 552	mutex_lock(&ar->mutex);
 553	err = carl9170_echo_test(ar, 0xdeadbeef);
 554	if (err)
 555		carl9170_restart(ar, CARL9170_RR_UNRESPONSIVE_DEVICE);
 556	mutex_unlock(&ar->mutex);
 557}
 558
 559static int carl9170_init_interface(struct ar9170 *ar,
 560				   struct ieee80211_vif *vif)
 561{
 562	struct ath_common *common = &ar->common;
 563	int err;
 564
 565	if (!vif) {
 566		WARN_ON_ONCE(IS_STARTED(ar));
 567		return 0;
 568	}
 569
 570	memcpy(common->macaddr, vif->addr, ETH_ALEN);
 571
 572	/* We have to fall back to software crypto, whenever
 573	 * the user chooses to participate in an IBSS. HW
 574	 * offload for IBSS RSN is not supported by this driver.
 575	 *
 576	 * NOTE: If the previous main interface has already
 577	 * disabled hw crypto offload, we have to keep this
 578	 * previous disable_offload setting as it was.
 579	 * Although ideally, we should notify mac80211 and tell
 580	 * it to forget about any HW crypto offload for now.
 581	 */
 582	ar->disable_offload |= ((vif->type != NL80211_IFTYPE_STATION) &&
 583	    (vif->type != NL80211_IFTYPE_AP));
 584
 585	/* While the driver supports HW offload in a single
 586	 * P2P client configuration, it doesn't support HW
 587	 * offload in the favoured, concurrent P2P GO+CLIENT
 588	 * configuration. Hence, HW offload will always be
 589	 * disabled for P2P.
 590	 */
 591	ar->disable_offload |= vif->p2p;
 592
 593	ar->rx_software_decryption = ar->disable_offload;
 594
 595	err = carl9170_set_operating_mode(ar);
 596	return err;
 597}
 598
 599static int carl9170_op_add_interface(struct ieee80211_hw *hw,
 600				     struct ieee80211_vif *vif)
 601{
 602	struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
 603	struct ieee80211_vif *main_vif, *old_main = NULL;
 604	struct ar9170 *ar = hw->priv;
 605	int vif_id = -1, err = 0;
 606
 607	mutex_lock(&ar->mutex);
 608	rcu_read_lock();
 609	if (vif_priv->active) {
 610		/*
 611		 * Skip the interface structure initialization,
 612		 * if the vif survived the _restart call.
 613		 */
 614		vif_id = vif_priv->id;
 615		vif_priv->enable_beacon = false;
 616
 617		spin_lock_bh(&ar->beacon_lock);
 618		dev_kfree_skb_any(vif_priv->beacon);
 619		vif_priv->beacon = NULL;
 620		spin_unlock_bh(&ar->beacon_lock);
 621
 622		goto init;
 623	}
 624
 625	/* Because the AR9170 HW's MAC doesn't provide full support for
 626	 * multiple, independent interfaces [of different operation modes],
 627	 * we have to select ONE main interface [main mode of HW], but we
 628	 * can have multiple slaves [AKA: entry in the ACK-table].
 629	 *
 630	 * The first (from HEAD/TOP) interface in the ar->vif_list is
 631	 * always the main intf. All following intfs in this list
 632	 * are considered to be slave intfs.
 633	 */
 634	main_vif = carl9170_get_main_vif(ar);
 635
 636	if (main_vif) {
 637		switch (main_vif->type) {
 638		case NL80211_IFTYPE_STATION:
 639			if (vif->type == NL80211_IFTYPE_STATION)
 640				break;
 641
 642			/* P2P GO [master] use-case
 643			 * Because the P2P GO station is selected dynamically
 644			 * by all participating peers of a WIFI Direct network,
 645			 * the driver has to be able to change the main interface
 646			 * operating mode on the fly.
 647			 */
 648			if (main_vif->p2p && vif->p2p &&
 649			    vif->type == NL80211_IFTYPE_AP) {
 650				old_main = main_vif;
 651				break;
 652			}
 653
 654			err = -EBUSY;
 655			rcu_read_unlock();
 656
 657			goto unlock;
 658
 659		case NL80211_IFTYPE_MESH_POINT:
 660		case NL80211_IFTYPE_AP:
 661			if ((vif->type == NL80211_IFTYPE_STATION) ||
 662			    (vif->type == NL80211_IFTYPE_WDS) ||
 663			    (vif->type == NL80211_IFTYPE_AP) ||
 664			    (vif->type == NL80211_IFTYPE_MESH_POINT))
 665				break;
 666
 667			err = -EBUSY;
 668			rcu_read_unlock();
 669			goto unlock;
 670
 671		default:
 672			rcu_read_unlock();
 673			goto unlock;
 674		}
 675	}
 676
 677	vif_id = bitmap_find_free_region(&ar->vif_bitmap, ar->fw.vif_num, 0);
 678
 679	if (vif_id < 0) {
 680		rcu_read_unlock();
 681
 682		err = -ENOSPC;
 683		goto unlock;
 684	}
 685
 686	BUG_ON(ar->vif_priv[vif_id].id != vif_id);
 687
 688	vif_priv->active = true;
 689	vif_priv->id = vif_id;
 690	vif_priv->enable_beacon = false;
 691	ar->vifs++;
 692	if (old_main) {
 693		/* We end up in here, if the main interface is being replaced.
 694		 * Put the new main interface at the HEAD of the list and the
 695		 * previous interface will automatically become second in line.
 696		 */
 697		list_add_rcu(&vif_priv->list, &ar->vif_list);
 698	} else {
 699		/* Add the new interface. If the list is empty, it will become
 700		 * the main interface, otherwise it will be a slave.
 701		 */
 702		list_add_tail_rcu(&vif_priv->list, &ar->vif_list);
 703	}
 704	rcu_assign_pointer(ar->vif_priv[vif_id].vif, vif);
 705
 706init:
 707	main_vif = carl9170_get_main_vif(ar);
 708
 709	if (main_vif == vif) {
 710		rcu_assign_pointer(ar->beacon_iter, vif_priv);
 711		rcu_read_unlock();
 712
 713		if (old_main) {
 714			struct carl9170_vif_info *old_main_priv =
 715				(void *) old_main->drv_priv;
 716			/* downgrade old main intf to slave intf.
 717			 * NOTE: We are no longer under rcu_read_lock.
 718			 * But we are still holding ar->mutex, so the
 719			 * vif data [id, addr] is safe.
 720			 */
 721			err = carl9170_mod_virtual_mac(ar, old_main_priv->id,
 722						       old_main->addr);
 723			if (err)
 724				goto unlock;
 725		}
 726
 727		err = carl9170_init_interface(ar, vif);
 728		if (err)
 729			goto unlock;
 730	} else {
 731		rcu_read_unlock();
 732		err = carl9170_mod_virtual_mac(ar, vif_id, vif->addr);
 733
 734		if (err)
 735			goto unlock;
 736	}
 737
 738	if (ar->fw.tx_seq_table) {
 739		err = carl9170_write_reg(ar, ar->fw.tx_seq_table + vif_id * 4,
 740					 0);
 741		if (err)
 742			goto unlock;
 743	}
 744
 745unlock:
 746	if (err && (vif_id >= 0)) {
 747		vif_priv->active = false;
 748		bitmap_release_region(&ar->vif_bitmap, vif_id, 0);
 749		ar->vifs--;
 750		RCU_INIT_POINTER(ar->vif_priv[vif_id].vif, NULL);
 751		list_del_rcu(&vif_priv->list);
 752		mutex_unlock(&ar->mutex);
 753		synchronize_rcu();
 754	} else {
 755		if (ar->vifs > 1)
 756			ar->ps.off_override |= PS_OFF_VIF;
 757
 758		mutex_unlock(&ar->mutex);
 759	}
 760
 761	return err;
 762}
 763
 764static void carl9170_op_remove_interface(struct ieee80211_hw *hw,
 765					 struct ieee80211_vif *vif)
 766{
 767	struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
 768	struct ieee80211_vif *main_vif;
 769	struct ar9170 *ar = hw->priv;
 770	unsigned int id;
 771
 772	mutex_lock(&ar->mutex);
 773
 774	if (WARN_ON_ONCE(!vif_priv->active))
 775		goto unlock;
 776
 777	ar->vifs--;
 778
 779	rcu_read_lock();
 780	main_vif = carl9170_get_main_vif(ar);
 781
 782	id = vif_priv->id;
 783
 784	vif_priv->active = false;
 785	WARN_ON(vif_priv->enable_beacon);
 786	vif_priv->enable_beacon = false;
 787	list_del_rcu(&vif_priv->list);
 788	RCU_INIT_POINTER(ar->vif_priv[id].vif, NULL);
 789
 790	if (vif == main_vif) {
 791		rcu_read_unlock();
 792
 793		if (ar->vifs) {
 794			WARN_ON(carl9170_init_interface(ar,
 795					carl9170_get_main_vif(ar)));
 796		} else {
 797			carl9170_set_operating_mode(ar);
 798		}
 799	} else {
 800		rcu_read_unlock();
 801
 802		WARN_ON(carl9170_mod_virtual_mac(ar, id, NULL));
 803	}
 804
 805	carl9170_update_beacon(ar, false);
 806	carl9170_flush_cab(ar, id);
 807
 808	spin_lock_bh(&ar->beacon_lock);
 809	dev_kfree_skb_any(vif_priv->beacon);
 810	vif_priv->beacon = NULL;
 811	spin_unlock_bh(&ar->beacon_lock);
 812
 813	bitmap_release_region(&ar->vif_bitmap, id, 0);
 814
 815	carl9170_set_beacon_timers(ar);
 816
 817	if (ar->vifs == 1)
 818		ar->ps.off_override &= ~PS_OFF_VIF;
 819
 820unlock:
 821	mutex_unlock(&ar->mutex);
 822
 823	synchronize_rcu();
 824}
 825
 826void carl9170_ps_check(struct ar9170 *ar)
 827{
 828	ieee80211_queue_work(ar->hw, &ar->ps_work);
 829}
 830
 831/* caller must hold ar->mutex */
 832static int carl9170_ps_update(struct ar9170 *ar)
 833{
 834	bool ps = false;
 835	int err = 0;
 836
 837	if (!ar->ps.off_override)
 838		ps = (ar->hw->conf.flags & IEEE80211_CONF_PS);
 839
 840	if (ps != ar->ps.state) {
 841		err = carl9170_powersave(ar, ps);
 842		if (err)
 843			return err;
 844
 845		if (ar->ps.state && !ps) {
 846			ar->ps.sleep_ms = jiffies_to_msecs(jiffies -
 847				ar->ps.last_action);
 848		}
 849
 850		if (ps)
 851			ar->ps.last_slept = jiffies;
 852
 853		ar->ps.last_action = jiffies;
 854		ar->ps.state = ps;
 855	}
 856
 857	return 0;
 858}
 859
 860static void carl9170_ps_work(struct work_struct *work)
 861{
 862	struct ar9170 *ar = container_of(work, struct ar9170,
 863					 ps_work);
 864	mutex_lock(&ar->mutex);
 865	if (IS_STARTED(ar))
 866		WARN_ON_ONCE(carl9170_ps_update(ar) != 0);
 867	mutex_unlock(&ar->mutex);
 868}
 869
 870static int carl9170_update_survey(struct ar9170 *ar, bool flush, bool noise)
 871{
 872	int err;
 873
 874	if (noise) {
 875		err = carl9170_get_noisefloor(ar);
 876		if (err)
 877			return err;
 878	}
 879
 880	if (ar->fw.hw_counters) {
 881		err = carl9170_collect_tally(ar);
 882		if (err)
 883			return err;
 884	}
 885
 886	if (flush)
 887		memset(&ar->tally, 0, sizeof(ar->tally));
 888
 889	return 0;
 890}
 891
 892static void carl9170_stat_work(struct work_struct *work)
 893{
 894	struct ar9170 *ar = container_of(work, struct ar9170, stat_work.work);
 895	int err;
 896
 897	mutex_lock(&ar->mutex);
 898	err = carl9170_update_survey(ar, false, true);
 899	mutex_unlock(&ar->mutex);
 900
 901	if (err)
 902		return;
 903
 904	ieee80211_queue_delayed_work(ar->hw, &ar->stat_work,
 905		round_jiffies(msecs_to_jiffies(CARL9170_STAT_WORK)));
 906}
 907
 908static int carl9170_op_config(struct ieee80211_hw *hw, u32 changed)
 909{
 910	struct ar9170 *ar = hw->priv;
 911	int err = 0;
 912
 913	mutex_lock(&ar->mutex);
 914	if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
 915		/* TODO */
 916		err = 0;
 917	}
 918
 919	if (changed & IEEE80211_CONF_CHANGE_PS) {
 920		err = carl9170_ps_update(ar);
 921		if (err)
 922			goto out;
 923	}
 924
 925	if (changed & IEEE80211_CONF_CHANGE_SMPS) {
 926		/* TODO */
 927		err = 0;
 928	}
 929
 930	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
 931		enum nl80211_channel_type channel_type =
 932			cfg80211_get_chandef_type(&hw->conf.chandef);
 933
 934		/* adjust slot time for 5 GHz */
 935		err = carl9170_set_slot_time(ar);
 936		if (err)
 937			goto out;
 938
 939		err = carl9170_update_survey(ar, true, false);
 940		if (err)
 941			goto out;
 942
 943		err = carl9170_set_channel(ar, hw->conf.chandef.chan,
 944					   channel_type);
 945		if (err)
 946			goto out;
 947
 948		err = carl9170_update_survey(ar, false, true);
 949		if (err)
 950			goto out;
 951
 952		err = carl9170_set_dyn_sifs_ack(ar);
 953		if (err)
 954			goto out;
 955
 956		err = carl9170_set_rts_cts_rate(ar);
 957		if (err)
 958			goto out;
 959	}
 960
 961	if (changed & IEEE80211_CONF_CHANGE_POWER) {
 962		err = carl9170_set_mac_tpc(ar, ar->hw->conf.chandef.chan);
 963		if (err)
 964			goto out;
 965	}
 966
 967out:
 968	mutex_unlock(&ar->mutex);
 969	return err;
 970}
 971
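    /*
     * Build the 64-bit multicast hash that is later programmed through
     * carl9170_update_multicast(): each address maps to bit
     * (addr[5] >> 2). The broadcast address (last octet 0xff) therefore
     * always sets bit 63, and e.g. a group address ending in 0x2c would
     * set bit 0x2c >> 2 = 11.
     */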
 972static u64 carl9170_op_prepare_multicast(struct ieee80211_hw *hw,
 973					 struct netdev_hw_addr_list *mc_list)
 974{
 975	struct netdev_hw_addr *ha;
 976	u64 mchash;
 977
 978	/* always get broadcast frames */
 979	mchash = 1ULL << (0xff >> 2);
 980
 981	netdev_hw_addr_list_for_each(ha, mc_list)
 982		mchash |= 1ULL << (ha->addr[5] >> 2);
 983
 984	return mchash;
 985}
 986
 987static void carl9170_op_configure_filter(struct ieee80211_hw *hw,
 988					 unsigned int changed_flags,
 989					 unsigned int *new_flags,
 990					 u64 multicast)
 991{
 992	struct ar9170 *ar = hw->priv;
 993
 994	/* mask supported flags */
 995	*new_flags &= FIF_ALLMULTI | ar->rx_filter_caps;
 996
 997	if (!IS_ACCEPTING_CMD(ar))
 998		return;
 999
1000	mutex_lock(&ar->mutex);
1001
1002	ar->filter_state = *new_flags;
1003	/*
1004	 * We can support more by setting the sniffer bit and
1005	 * then checking the error flags, later.
1006	 */
1007
1008	if (*new_flags & FIF_ALLMULTI)
1009		multicast = ~0ULL;
1010
1011	if (multicast != ar->cur_mc_hash)
1012		WARN_ON(carl9170_update_multicast(ar, multicast));
1013
1014	if (changed_flags & FIF_OTHER_BSS) {
1015		ar->sniffer_enabled = !!(*new_flags & FIF_OTHER_BSS);
1016
1017		WARN_ON(carl9170_set_operating_mode(ar));
1018	}
1019
1020	if (ar->fw.rx_filter && changed_flags & ar->rx_filter_caps) {
1021		u32 rx_filter = 0;
1022
1023		if (!ar->fw.ba_filter)
1024			rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;
1025
1026		if (!(*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL)))
1027			rx_filter |= CARL9170_RX_FILTER_BAD;
1028
1029		if (!(*new_flags & FIF_CONTROL))
1030			rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;
1031
1032		if (!(*new_flags & FIF_PSPOLL))
1033			rx_filter |= CARL9170_RX_FILTER_CTL_PSPOLL;
1034
1035		if (!(*new_flags & FIF_OTHER_BSS)) {
1036			rx_filter |= CARL9170_RX_FILTER_OTHER_RA;
1037			rx_filter |= CARL9170_RX_FILTER_DECRY_FAIL;
1038		}
1039
1040		WARN_ON(carl9170_rx_filter(ar, rx_filter));
1041	}
1042
1043	mutex_unlock(&ar->mutex);
1044}
1045
1046
1047static void carl9170_op_bss_info_changed(struct ieee80211_hw *hw,
1048					 struct ieee80211_vif *vif,
1049					 struct ieee80211_bss_conf *bss_conf,
1050					 u32 changed)
1051{
1052	struct ar9170 *ar = hw->priv;
1053	struct ath_common *common = &ar->common;
1054	int err = 0;
1055	struct carl9170_vif_info *vif_priv;
1056	struct ieee80211_vif *main_vif;
1057
1058	mutex_lock(&ar->mutex);
1059	vif_priv = (void *) vif->drv_priv;
1060	main_vif = carl9170_get_main_vif(ar);
1061	if (WARN_ON(!main_vif))
1062		goto out;
1063
1064	if (changed & BSS_CHANGED_BEACON_ENABLED) {
1065		struct carl9170_vif_info *iter;
1066		int i = 0;
1067
1068		vif_priv->enable_beacon = bss_conf->enable_beacon;
1069		rcu_read_lock();
1070		list_for_each_entry_rcu(iter, &ar->vif_list, list) {
1071			if (iter->active && iter->enable_beacon)
1072				i++;
1073
1074		}
1075		rcu_read_unlock();
1076
1077		ar->beacon_enabled = i;
1078	}
1079
1080	if (changed & BSS_CHANGED_BEACON) {
1081		err = carl9170_update_beacon(ar, false);
1082		if (err)
1083			goto out;
1084	}
1085
1086	if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON |
1087		       BSS_CHANGED_BEACON_INT)) {
1088
1089		if (main_vif != vif) {
1090			bss_conf->beacon_int = main_vif->bss_conf.beacon_int;
1091			bss_conf->dtim_period = main_vif->bss_conf.dtim_period;
1092		}
1093
1094		/*
1095		 * A hard limit on beacon_int * dtim_period keeps slowly-drained
1096		 * broadcast (CAB) traffic from triggering false stuck-queue alarms.
1097		 */
1098		if (vif->type != NL80211_IFTYPE_STATION &&
1099		    (bss_conf->beacon_int * bss_conf->dtim_period >=
1100		     (CARL9170_QUEUE_STUCK_TIMEOUT / 2))) {
1101			err = -EINVAL;
1102			goto out;
1103		}
1104
1105		err = carl9170_set_beacon_timers(ar);
1106		if (err)
1107			goto out;
1108	}
1109
1110	if (changed & BSS_CHANGED_HT) {
1111		/* TODO */
1112		err = 0;
1113		if (err)
1114			goto out;
1115	}
1116
1117	if (main_vif != vif)
1118		goto out;
1119
1120	/*
1121	 * The following settings can only be changed by the
1122	 * master interface.
1123	 */
1124
1125	if (changed & BSS_CHANGED_BSSID) {
1126		memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
1127		err = carl9170_set_operating_mode(ar);
1128		if (err)
1129			goto out;
1130	}
1131
1132	if (changed & BSS_CHANGED_ASSOC) {
1133		ar->common.curaid = bss_conf->aid;
1134		err = carl9170_set_beacon_timers(ar);
1135		if (err)
1136			goto out;
1137	}
1138
1139	if (changed & BSS_CHANGED_ERP_SLOT) {
1140		err = carl9170_set_slot_time(ar);
1141		if (err)
1142			goto out;
1143	}
1144
1145	if (changed & BSS_CHANGED_BASIC_RATES) {
1146		err = carl9170_set_mac_rates(ar);
1147		if (err)
1148			goto out;
1149	}
1150
1151out:
1152	WARN_ON_ONCE(err && IS_STARTED(ar));
1153	mutex_unlock(&ar->mutex);
1154}
1155
1156static u64 carl9170_op_get_tsf(struct ieee80211_hw *hw,
1157			       struct ieee80211_vif *vif)
1158{
1159	struct ar9170 *ar = hw->priv;
1160	struct carl9170_tsf_rsp tsf;
1161	int err;
1162
1163	mutex_lock(&ar->mutex);
1164	err = carl9170_exec_cmd(ar, CARL9170_CMD_READ_TSF,
1165				0, NULL, sizeof(tsf), &tsf);
1166	mutex_unlock(&ar->mutex);
1167	if (WARN_ON(err))
1168		return 0;
1169
1170	return le64_to_cpu(tsf.tsf_64);
1171}
1172
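    /*
     * Hardware key cache layout as used below: pairwise keys are placed
     * in the first 64 slots and tracked in the ar->usedkeys bitmap,
     * while group keys go into the dedicated slots at 64 + keyidx.
     * When no free slot is left (or a slave interface is involved) the
     * driver falls back to software crypto via err_softw.
     */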
1173static int carl9170_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1174			       struct ieee80211_vif *vif,
1175			       struct ieee80211_sta *sta,
1176			       struct ieee80211_key_conf *key)
1177{
1178	struct ar9170 *ar = hw->priv;
1179	int err = 0, i;
1180	u8 ktype;
1181
1182	if (ar->disable_offload || !vif)
1183		return -EOPNOTSUPP;
1184
1185	/* Fall back to software encryption whenever the driver is connected
1186	 * to more than one network.
1187	 *
1188	 * This is very unfortunate, because some machines cannot handle
1189	 * the high throughput speed of 802.11n networks.
1190	 */
1191
1192	if (!is_main_vif(ar, vif)) {
1193		mutex_lock(&ar->mutex);
1194		goto err_softw;
1195	}
1196
1197	/*
1198	 * While the hardware supports a *catch-all* key for offloading
1199	 * group-key en-/de-cryption, the way the hardware decides which
1200	 * keyId maps to which key remains a mystery...
1201	 */
1202	if ((vif->type != NL80211_IFTYPE_STATION &&
1203	     vif->type != NL80211_IFTYPE_ADHOC) &&
1204	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
1205		return -EOPNOTSUPP;
1206
1207	switch (key->cipher) {
1208	case WLAN_CIPHER_SUITE_WEP40:
1209		ktype = AR9170_ENC_ALG_WEP64;
1210		break;
1211	case WLAN_CIPHER_SUITE_WEP104:
1212		ktype = AR9170_ENC_ALG_WEP128;
1213		break;
1214	case WLAN_CIPHER_SUITE_TKIP:
1215		ktype = AR9170_ENC_ALG_TKIP;
1216		break;
1217	case WLAN_CIPHER_SUITE_CCMP:
1218		ktype = AR9170_ENC_ALG_AESCCMP;
1219		key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
1220		break;
1221	default:
1222		return -EOPNOTSUPP;
1223	}
1224
1225	mutex_lock(&ar->mutex);
1226	if (cmd == SET_KEY) {
1227		if (!IS_STARTED(ar)) {
1228			err = -EOPNOTSUPP;
1229			goto out;
1230		}
1231
1232		if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
1233			sta = NULL;
1234
1235			i = 64 + key->keyidx;
1236		} else {
1237			for (i = 0; i < 64; i++)
1238				if (!(ar->usedkeys & BIT(i)))
1239					break;
1240			if (i == 64)
1241				goto err_softw;
1242		}
1243
1244		key->hw_key_idx = i;
1245
1246		err = carl9170_upload_key(ar, i, sta ? sta->addr : NULL,
1247					  ktype, 0, key->key,
1248					  min_t(u8, 16, key->keylen));
1249		if (err)
1250			goto out;
1251
1252		if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
1253			err = carl9170_upload_key(ar, i, sta ? sta->addr :
1254						  NULL, ktype, 1,
1255						  key->key + 16, 16);
1256			if (err)
1257				goto out;
1258
1259			/*
1260			 * the hardware is not capable of generating the MMIC
1261			 * for fragmented frames!
1262			 */
1263			key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
1264		}
1265
1266		if (i < 64)
1267			ar->usedkeys |= BIT(i);
1268
1269		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
1270	} else {
1271		if (!IS_STARTED(ar)) {
1272			/* The device is gone... together with the key ;-) */
1273			err = 0;
1274			goto out;
1275		}
1276
1277		if (key->hw_key_idx < 64) {
1278			ar->usedkeys &= ~BIT(key->hw_key_idx);
1279		} else {
1280			err = carl9170_upload_key(ar, key->hw_key_idx, NULL,
1281						  AR9170_ENC_ALG_NONE, 0,
1282						  NULL, 0);
1283			if (err)
1284				goto out;
1285
1286			if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
1287				err = carl9170_upload_key(ar, key->hw_key_idx,
1288							  NULL,
1289							  AR9170_ENC_ALG_NONE,
1290							  1, NULL, 0);
1291				if (err)
1292					goto out;
1293			}
1294
1295		}
1296
1297		err = carl9170_disable_key(ar, key->hw_key_idx);
1298		if (err)
1299			goto out;
1300	}
1301
1302out:
1303	mutex_unlock(&ar->mutex);
1304	return err;
1305
1306err_softw:
1307	if (!ar->rx_software_decryption) {
1308		ar->rx_software_decryption = true;
1309		carl9170_set_operating_mode(ar);
1310	}
1311	mutex_unlock(&ar->mutex);
1312	return -ENOSPC;
1313}
1314
1315static int carl9170_op_sta_add(struct ieee80211_hw *hw,
1316			       struct ieee80211_vif *vif,
1317			       struct ieee80211_sta *sta)
1318{
1319	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1320	unsigned int i;
1321
1322	atomic_set(&sta_info->pending_frames, 0);
1323
1324	if (sta->ht_cap.ht_supported) {
1325		if (sta->ht_cap.ampdu_density > 6) {
1326			/*
1327			 * HW does not support 16us AMPDU density.
1328			 * No HT-Xmit for this station.
1329			 */
1330
1331			return 0;
1332		}
1333
1334		for (i = 0; i < ARRAY_SIZE(sta_info->agg); i++)
1335			RCU_INIT_POINTER(sta_info->agg[i], NULL);
1336
1337		sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
1338		sta_info->ht_sta = true;
1339	}
1340
1341	return 0;
1342}
1343
1344static int carl9170_op_sta_remove(struct ieee80211_hw *hw,
1345				struct ieee80211_vif *vif,
1346				struct ieee80211_sta *sta)
1347{
1348	struct ar9170 *ar = hw->priv;
1349	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1350	unsigned int i;
1351	bool cleanup = false;
1352
1353	if (sta->ht_cap.ht_supported) {
1354
1355		sta_info->ht_sta = false;
1356
1357		rcu_read_lock();
1358		for (i = 0; i < ARRAY_SIZE(sta_info->agg); i++) {
1359			struct carl9170_sta_tid *tid_info;
1360
1361			tid_info = rcu_dereference(sta_info->agg[i]);
1362			RCU_INIT_POINTER(sta_info->agg[i], NULL);
1363
1364			if (!tid_info)
1365				continue;
1366
1367			spin_lock_bh(&ar->tx_ampdu_list_lock);
1368			if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
1369				tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
1370			spin_unlock_bh(&ar->tx_ampdu_list_lock);
1371			cleanup = true;
1372		}
1373		rcu_read_unlock();
1374
1375		if (cleanup)
1376			carl9170_ampdu_gc(ar);
1377	}
1378
1379	return 0;
1380}
1381
1382static int carl9170_op_conf_tx(struct ieee80211_hw *hw,
1383			       struct ieee80211_vif *vif, u16 queue,
1384			       const struct ieee80211_tx_queue_params *param)
1385{
1386	struct ar9170 *ar = hw->priv;
1387	int ret;
1388
1389	mutex_lock(&ar->mutex);
1390	if (queue < ar->hw->queues) {
1391		memcpy(&ar->edcf[ar9170_qmap[queue]], param, sizeof(*param));
1392		ret = carl9170_set_qos(ar);
1393	} else {
1394		ret = -EINVAL;
1395	}
1396
1397	mutex_unlock(&ar->mutex);
1398	return ret;
1399}
1400
1401static void carl9170_ampdu_work(struct work_struct *work)
1402{
1403	struct ar9170 *ar = container_of(work, struct ar9170,
1404					 ampdu_work);
1405
1406	if (!IS_STARTED(ar))
1407		return;
1408
1409	mutex_lock(&ar->mutex);
1410	carl9170_ampdu_gc(ar);
1411	mutex_unlock(&ar->mutex);
1412}
1413
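    /*
     * TX aggregation session handling: TX_START allocates a
     * carl9170_sta_tid and links it into ar->tx_ampdu_list, the STOP
     * variants merely mark the session as SHUTDOWN and defer the actual
     * teardown to carl9170_ampdu_gc() (via ampdu_work), and
     * TX_OPERATIONAL resets the block-ack window bitmap. RX aggregation
     * is handled entirely by the hardware.
     */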
1414static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
1415				    struct ieee80211_vif *vif,
1416				    struct ieee80211_ampdu_params *params)
1417{
1418	struct ieee80211_sta *sta = params->sta;
1419	enum ieee80211_ampdu_mlme_action action = params->action;
1420	u16 tid = params->tid;
1421	u16 *ssn = &params->ssn;
1422	struct ar9170 *ar = hw->priv;
1423	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1424	struct carl9170_sta_tid *tid_info;
1425
1426	if (modparam_noht)
1427		return -EOPNOTSUPP;
1428
1429	switch (action) {
1430	case IEEE80211_AMPDU_TX_START:
1431		if (!sta_info->ht_sta)
1432			return -EOPNOTSUPP;
1433
1434		tid_info = kzalloc(sizeof(struct carl9170_sta_tid),
1435				   GFP_ATOMIC);
1436		if (!tid_info)
1437			return -ENOMEM;
1438
1439		tid_info->hsn = tid_info->bsn = tid_info->snx = (*ssn);
1440		tid_info->state = CARL9170_TID_STATE_PROGRESS;
1441		tid_info->tid = tid;
1442		tid_info->max = sta_info->ampdu_max_len;
1443		tid_info->sta = sta;
1444		tid_info->vif = vif;
1445
1446		INIT_LIST_HEAD(&tid_info->list);
1447		INIT_LIST_HEAD(&tid_info->tmp_list);
1448		skb_queue_head_init(&tid_info->queue);
1449		spin_lock_init(&tid_info->lock);
1450
1451		spin_lock_bh(&ar->tx_ampdu_list_lock);
1452		ar->tx_ampdu_list_len++;
1453		list_add_tail_rcu(&tid_info->list, &ar->tx_ampdu_list);
1454		rcu_assign_pointer(sta_info->agg[tid], tid_info);
1455		spin_unlock_bh(&ar->tx_ampdu_list_lock);
1456
1457		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1458		break;
1459
1460	case IEEE80211_AMPDU_TX_STOP_CONT:
1461	case IEEE80211_AMPDU_TX_STOP_FLUSH:
1462	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
1463		rcu_read_lock();
1464		tid_info = rcu_dereference(sta_info->agg[tid]);
1465		if (tid_info) {
1466			spin_lock_bh(&ar->tx_ampdu_list_lock);
1467			if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
1468				tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
1469			spin_unlock_bh(&ar->tx_ampdu_list_lock);
1470		}
1471
1472		RCU_INIT_POINTER(sta_info->agg[tid], NULL);
1473		rcu_read_unlock();
1474
1475		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1476		ieee80211_queue_work(ar->hw, &ar->ampdu_work);
1477		break;
1478
1479	case IEEE80211_AMPDU_TX_OPERATIONAL:
1480		rcu_read_lock();
1481		tid_info = rcu_dereference(sta_info->agg[tid]);
1482
1483		sta_info->stats[tid].clear = true;
1484		sta_info->stats[tid].req = false;
1485
1486		if (tid_info) {
1487			bitmap_zero(tid_info->bitmap, CARL9170_BAW_SIZE);
1488			tid_info->state = CARL9170_TID_STATE_IDLE;
1489		}
1490		rcu_read_unlock();
1491
1492		if (WARN_ON_ONCE(!tid_info))
1493			return -EFAULT;
1494
1495		break;
1496
1497	case IEEE80211_AMPDU_RX_START:
1498	case IEEE80211_AMPDU_RX_STOP:
1499		/* Handled by hardware */
1500		break;
1501
1502	default:
1503		return -EOPNOTSUPP;
1504	}
1505
1506	return 0;
1507}
1508
1509#ifdef CONFIG_CARL9170_WPC
1510static int carl9170_register_wps_button(struct ar9170 *ar)
1511{
1512	struct input_dev *input;
1513	int err;
1514
1515	if (!(ar->features & CARL9170_WPS_BUTTON))
1516		return 0;
1517
1518	input = input_allocate_device();
1519	if (!input)
1520		return -ENOMEM;
1521
1522	snprintf(ar->wps.name, sizeof(ar->wps.name), "%s WPS Button",
1523		 wiphy_name(ar->hw->wiphy));
1524
1525	snprintf(ar->wps.phys, sizeof(ar->wps.phys),
1526		 "ieee80211/%s/input0", wiphy_name(ar->hw->wiphy));
1527
1528	input->name = ar->wps.name;
1529	input->phys = ar->wps.phys;
1530	input->id.bustype = BUS_USB;
1531	input->dev.parent = &ar->hw->wiphy->dev;
1532
1533	input_set_capability(input, EV_KEY, KEY_WPS_BUTTON);
1534
1535	err = input_register_device(input);
1536	if (err) {
1537		input_free_device(input);
1538		return err;
1539	}
1540
1541	ar->wps.pbc = input;
1542	return 0;
1543}
1544#endif /* CONFIG_CARL9170_WPC */
1545
1546#ifdef CONFIG_CARL9170_HWRNG
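    /*
     * Refill the driver's random number cache: each CARL9170_CMD_RREG
     * command reads the AR9170_RAND_REG_NUM register RW times, so the
     * cache is filled one command payload worth of words at a time
     * until ARRAY_SIZE(ar->rng.cache) entries have been fetched.
     */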
1547static int carl9170_rng_get(struct ar9170 *ar)
1548{
1549
1550#define RW	(CARL9170_MAX_CMD_PAYLOAD_LEN / sizeof(u32))
1551#define RB	(CARL9170_MAX_CMD_PAYLOAD_LEN)
1552
1553	static const __le32 rng_load[RW] = {
1554		[0 ... (RW - 1)] = cpu_to_le32(AR9170_RAND_REG_NUM)};
1555
1556	u32 buf[RW];
1557
1558	unsigned int i, off = 0, transfer, count;
1559	int err;
1560
1561	BUILD_BUG_ON(RB > CARL9170_MAX_CMD_PAYLOAD_LEN);
1562
1563	if (!IS_ACCEPTING_CMD(ar) || !ar->rng.initialized)
1564		return -EAGAIN;
1565
1566	count = ARRAY_SIZE(ar->rng.cache);
1567	while (count) {
1568		err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
1569					RB, (u8 *) rng_load,
1570					RB, (u8 *) buf);
1571		if (err)
1572			return err;
1573
1574		transfer = min_t(unsigned int, count, RW);
1575		for (i = 0; i < transfer; i++)
1576			ar->rng.cache[off + i] = buf[i];
1577
1578		off += transfer;
1579		count -= transfer;
1580	}
1581
1582	ar->rng.cache_idx = 0;
1583
1584#undef RW
1585#undef RB
1586	return 0;
1587}
1588
1589static int carl9170_rng_read(struct hwrng *rng, u32 *data)
1590{
1591	struct ar9170 *ar = (struct ar9170 *)rng->priv;
1592	int ret = -EIO;
1593
1594	mutex_lock(&ar->mutex);
1595	if (ar->rng.cache_idx >= ARRAY_SIZE(ar->rng.cache)) {
1596		ret = carl9170_rng_get(ar);
1597		if (ret) {
1598			mutex_unlock(&ar->mutex);
1599			return ret;
1600		}
1601	}
1602
1603	*data = ar->rng.cache[ar->rng.cache_idx++];
1604	mutex_unlock(&ar->mutex);
1605
1606	return sizeof(u16);
1607}
1608
1609static void carl9170_unregister_hwrng(struct ar9170 *ar)
1610{
1611	if (ar->rng.initialized) {
1612		hwrng_unregister(&ar->rng.rng);
1613		ar->rng.initialized = false;
1614	}
1615}
1616
1617static int carl9170_register_hwrng(struct ar9170 *ar)
1618{
1619	int err;
1620
1621	snprintf(ar->rng.name, ARRAY_SIZE(ar->rng.name),
1622		 "%s_%s", KBUILD_MODNAME, wiphy_name(ar->hw->wiphy));
1623	ar->rng.rng.name = ar->rng.name;
1624	ar->rng.rng.data_read = carl9170_rng_read;
1625	ar->rng.rng.priv = (unsigned long)ar;
1626
1627	if (WARN_ON(ar->rng.initialized))
1628		return -EALREADY;
1629
1630	err = hwrng_register(&ar->rng.rng);
1631	if (err) {
1632		dev_err(&ar->udev->dev, "Failed to register the random "
1633			"number generator (%d)\n", err);
1634		return err;
1635	}
1636
1637	ar->rng.initialized = true;
1638
1639	err = carl9170_rng_get(ar);
1640	if (err) {
1641		carl9170_unregister_hwrng(ar);
1642		return err;
1643	}
1644
1645	return 0;
1646}
1647#endif /* CONFIG_CARL9170_HWRNG */
1648
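    /*
     * The survey index is the channel's hw_value. The hardware tallies
     * are only refreshed when the queried index matches the currently
     * tuned channel; all other entries just return whatever is cached
     * in ar->survey[].
     */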
1649static int carl9170_op_get_survey(struct ieee80211_hw *hw, int idx,
1650				struct survey_info *survey)
1651{
1652	struct ar9170 *ar = hw->priv;
1653	struct ieee80211_channel *chan;
1654	struct ieee80211_supported_band *band;
1655	int err, b, i;
1656
1657	chan = ar->channel;
1658	if (!chan)
1659		return -ENODEV;
1660
1661	if (idx == chan->hw_value) {
1662		mutex_lock(&ar->mutex);
1663		err = carl9170_update_survey(ar, false, true);
1664		mutex_unlock(&ar->mutex);
1665		if (err)
1666			return err;
1667	}
1668
1669	for (b = 0; b < IEEE80211_NUM_BANDS; b++) {
1670		band = ar->hw->wiphy->bands[b];
1671
1672		if (!band)
1673			continue;
1674
1675		for (i = 0; i < band->n_channels; i++) {
1676			if (band->channels[i].hw_value == idx) {
1677				chan = &band->channels[i];
1678				goto found;
1679			}
1680		}
1681	}
1682	return -ENOENT;
1683
1684found:
1685	memcpy(survey, &ar->survey[idx], sizeof(*survey));
1686
1687	survey->channel = chan;
1688	survey->filled = SURVEY_INFO_NOISE_DBM;
1689
1690	if (ar->channel == chan)
1691		survey->filled |= SURVEY_INFO_IN_USE;
1692
1693	if (ar->fw.hw_counters) {
1694		survey->filled |= SURVEY_INFO_TIME |
1695				  SURVEY_INFO_TIME_BUSY |
1696				  SURVEY_INFO_TIME_TX;
1697	}
1698
1699	return 0;
1700}
1701
1702static void carl9170_op_flush(struct ieee80211_hw *hw,
1703			      struct ieee80211_vif *vif,
1704			      u32 queues, bool drop)
1705{
1706	struct ar9170 *ar = hw->priv;
1707	unsigned int vid;
1708
1709	mutex_lock(&ar->mutex);
1710	for_each_set_bit(vid, &ar->vif_bitmap, ar->fw.vif_num)
1711		carl9170_flush_cab(ar, vid);
1712
1713	carl9170_flush(ar, drop);
1714	mutex_unlock(&ar->mutex);
1715}
1716
1717static int carl9170_op_get_stats(struct ieee80211_hw *hw,
1718				 struct ieee80211_low_level_stats *stats)
1719{
1720	struct ar9170 *ar = hw->priv;
1721
1722	memset(stats, 0, sizeof(*stats));
1723	stats->dot11ACKFailureCount = ar->tx_ack_failures;
1724	stats->dot11FCSErrorCount = ar->tx_fcs_errors;
1725	return 0;
1726}
1727
1728static void carl9170_op_sta_notify(struct ieee80211_hw *hw,
1729				   struct ieee80211_vif *vif,
1730				   enum sta_notify_cmd cmd,
1731				   struct ieee80211_sta *sta)
1732{
1733	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1734
1735	switch (cmd) {
1736	case STA_NOTIFY_SLEEP:
1737		sta_info->sleeping = true;
1738		if (atomic_read(&sta_info->pending_frames))
1739			ieee80211_sta_block_awake(hw, sta, true);
1740		break;
1741
1742	case STA_NOTIFY_AWAKE:
1743		sta_info->sleeping = false;
1744		break;
1745	}
1746}
1747
1748static bool carl9170_tx_frames_pending(struct ieee80211_hw *hw)
1749{
1750	struct ar9170 *ar = hw->priv;
1751
1752	return !!atomic_read(&ar->tx_total_queued);
1753}
1754
1755static const struct ieee80211_ops carl9170_ops = {
1756	.start			= carl9170_op_start,
1757	.stop			= carl9170_op_stop,
1758	.tx			= carl9170_op_tx,
1759	.flush			= carl9170_op_flush,
1760	.add_interface		= carl9170_op_add_interface,
1761	.remove_interface	= carl9170_op_remove_interface,
1762	.config			= carl9170_op_config,
1763	.prepare_multicast	= carl9170_op_prepare_multicast,
1764	.configure_filter	= carl9170_op_configure_filter,
1765	.conf_tx		= carl9170_op_conf_tx,
1766	.bss_info_changed	= carl9170_op_bss_info_changed,
1767	.get_tsf		= carl9170_op_get_tsf,
1768	.set_key		= carl9170_op_set_key,
1769	.sta_add		= carl9170_op_sta_add,
1770	.sta_remove		= carl9170_op_sta_remove,
1771	.sta_notify		= carl9170_op_sta_notify,
1772	.get_survey		= carl9170_op_get_survey,
1773	.get_stats		= carl9170_op_get_stats,
1774	.ampdu_action		= carl9170_op_ampdu_action,
1775	.tx_frames_pending	= carl9170_tx_frames_pending,
1776};
1777
1778void *carl9170_alloc(size_t priv_size)
1779{
1780	struct ieee80211_hw *hw;
1781	struct ar9170 *ar;
1782	struct sk_buff *skb;
1783	int i;
1784
1785	/*
1786	 * This buffer is used for rx stream reconstruction.
1787	 * Under heavy load this device (or the transport layer?)
1788	 * tends to split the streams into separate rx descriptors.
1789	 */
1790
1791	skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL);
1792	if (!skb)
1793		goto err_nomem;
1794
1795	hw = ieee80211_alloc_hw(priv_size, &carl9170_ops);
1796	if (!hw)
1797		goto err_nomem;
1798
1799	ar = hw->priv;
1800	ar->hw = hw;
1801	ar->rx_failover = skb;
1802
1803	memset(&ar->rx_plcp, 0, sizeof(struct ar9170_rx_head));
1804	ar->rx_has_plcp = false;
1805
1806	/*
1807	 * Here's a hidden pitfall!
1808	 *
1809	 * All 4 AC queues work perfectly well under _legacy_ operation.
1810	 * However, as soon as aggregation is enabled, the traffic flow
1811	 * gets very bumpy. Therefore we have to _switch_ to a
1812	 * software AC with a single HW queue.
1813	 */
1814	hw->queues = __AR9170_NUM_TXQ;
1815
1816	mutex_init(&ar->mutex);
1817	spin_lock_init(&ar->beacon_lock);
1818	spin_lock_init(&ar->cmd_lock);
1819	spin_lock_init(&ar->tx_stats_lock);
1820	spin_lock_init(&ar->tx_ampdu_list_lock);
1821	spin_lock_init(&ar->mem_lock);
1822	spin_lock_init(&ar->state_lock);
1823	atomic_set(&ar->pending_restarts, 0);
1824	ar->vifs = 0;
1825	for (i = 0; i < ar->hw->queues; i++) {
1826		skb_queue_head_init(&ar->tx_status[i]);
1827		skb_queue_head_init(&ar->tx_pending[i]);
1828
1829		INIT_LIST_HEAD(&ar->bar_list[i]);
1830		spin_lock_init(&ar->bar_list_lock[i]);
1831	}
1832	INIT_WORK(&ar->ps_work, carl9170_ps_work);
1833	INIT_WORK(&ar->ping_work, carl9170_ping_work);
1834	INIT_WORK(&ar->restart_work, carl9170_restart_work);
1835	INIT_WORK(&ar->ampdu_work, carl9170_ampdu_work);
1836	INIT_DELAYED_WORK(&ar->stat_work, carl9170_stat_work);
1837	INIT_DELAYED_WORK(&ar->tx_janitor, carl9170_tx_janitor);
1838	INIT_LIST_HEAD(&ar->tx_ampdu_list);
1839	rcu_assign_pointer(ar->tx_ampdu_iter,
1840			   (struct carl9170_sta_tid *) &ar->tx_ampdu_list);
1841
1842	bitmap_zero(&ar->vif_bitmap, ar->fw.vif_num);
1843	INIT_LIST_HEAD(&ar->vif_list);
1844	init_completion(&ar->tx_flush);
1845
1846	/* firmware decides which modes we support */
1847	hw->wiphy->interface_modes = 0;
1848
1849	ieee80211_hw_set(hw, RX_INCLUDES_FCS);
1850	ieee80211_hw_set(hw, MFP_CAPABLE);
1851	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
1852	ieee80211_hw_set(hw, SUPPORTS_PS);
1853	ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
1854	ieee80211_hw_set(hw, NEED_DTIM_BEFORE_ASSOC);
1855	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
1856	ieee80211_hw_set(hw, SIGNAL_DBM);
1857	ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
1858
1859	if (!modparam_noht) {
1860		/*
1861		 * see the comment above for why we allow the user
1862		 * to disable HT via a module parameter.
1863		 */
1864		ieee80211_hw_set(hw, AMPDU_AGGREGATION);
1865	}
1866
1867	hw->extra_tx_headroom = sizeof(struct _carl9170_tx_superframe);
1868	hw->sta_data_size = sizeof(struct carl9170_sta_info);
1869	hw->vif_data_size = sizeof(struct carl9170_vif_info);
1870
1871	hw->max_rates = CARL9170_TX_MAX_RATES;
1872	hw->max_rate_tries = CARL9170_TX_USER_RATE_TRIES;
1873
1874	for (i = 0; i < ARRAY_SIZE(ar->noise); i++)
1875		ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */
1876
1877	return ar;
1878
1879err_nomem:
1880	kfree_skb(skb);
1881	return ERR_PTR(-ENOMEM);
1882}
1883
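    /*
     * Fetch the EEPROM image in bursts: each CARL9170_CMD_RREG command
     * carries RW (8) consecutive 32-bit register offsets starting at
     * AR9170_EEPROM_START, and the RB bytes returned are copied straight
     * into ar->eeprom.
     */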
1884static int carl9170_read_eeprom(struct ar9170 *ar)
1885{
1886#define RW	8	/* number of words to read at once */
1887#define RB	(sizeof(u32) * RW)
1888	u8 *eeprom = (void *)&ar->eeprom;
1889	__le32 offsets[RW];
1890	int i, j, err;
1891
1892	BUILD_BUG_ON(sizeof(ar->eeprom) & 3);
1893
1894	BUILD_BUG_ON(RB > CARL9170_MAX_CMD_LEN - 4);
1895#ifndef __CHECKER__
1896	/* we don't want to handle a trailing remainder */
1897	BUILD_BUG_ON(sizeof(ar->eeprom) % RB);
1898#endif
1899
1900	for (i = 0; i < sizeof(ar->eeprom) / RB; i++) {
1901		for (j = 0; j < RW; j++)
1902			offsets[j] = cpu_to_le32(AR9170_EEPROM_START +
1903						 RB * i + 4 * j);
1904
1905		err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
1906					RB, (u8 *) &offsets,
1907					RB, eeprom + RB * i);
1908		if (err)
1909			return err;
1910	}
1911
1912#undef RW
1913#undef RB
1914	return 0;
1915}
1916
1917static int carl9170_parse_eeprom(struct ar9170 *ar)
1918{
1919	struct ath_regulatory *regulatory = &ar->common.regulatory;
1920	unsigned int rx_streams, tx_streams, tx_params = 0;
1921	int bands = 0;
1922	int chans = 0;
1923
1924	if (ar->eeprom.length == cpu_to_le16(0xffff))
1925		return -ENODATA;
1926
1927	rx_streams = hweight8(ar->eeprom.rx_mask);
1928	tx_streams = hweight8(ar->eeprom.tx_mask);
1929
1930	if (rx_streams != tx_streams) {
1931		tx_params = IEEE80211_HT_MCS_TX_RX_DIFF;
1932
1933		WARN_ON(!(tx_streams >= 1 && tx_streams <=
1934			IEEE80211_HT_MCS_TX_MAX_STREAMS));
1935
1936		tx_params = (tx_streams - 1) <<
1937			    IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
1938
1939		carl9170_band_2GHz.ht_cap.mcs.tx_params |= tx_params;
1940		carl9170_band_5GHz.ht_cap.mcs.tx_params |= tx_params;
1941	}
1942
1943	if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) {
1944		ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
1945			&carl9170_band_2GHz;
1946		chans += carl9170_band_2GHz.n_channels;
1947		bands++;
1948	}
1949	if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) {
1950		ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
1951			&carl9170_band_5GHz;
1952		chans += carl9170_band_5GHz.n_channels;
1953		bands++;
1954	}
1955
1956	if (!bands)
1957		return -EINVAL;
1958
1959	ar->survey = kzalloc(sizeof(struct survey_info) * chans, GFP_KERNEL);
1960	if (!ar->survey)
1961		return -ENOMEM;
1962	ar->num_channels = chans;
1963
1964	regulatory->current_rd = le16_to_cpu(ar->eeprom.reg_domain[0]);
1965
1966	/* second part of wiphy init */
1967	SET_IEEE80211_PERM_ADDR(ar->hw, ar->eeprom.mac_address);
1968
1969	return 0;
1970}
1971
1972static void carl9170_reg_notifier(struct wiphy *wiphy,
1973				  struct regulatory_request *request)
1974{
1975	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
1976	struct ar9170 *ar = hw->priv;
1977
1978	ath_reg_notifier_apply(wiphy, request, &ar->common.regulatory);
1979}
1980
1981int carl9170_register(struct ar9170 *ar)
1982{
1983	struct ath_regulatory *regulatory = &ar->common.regulatory;
1984	int err = 0, i;
1985
1986	if (WARN_ON(ar->mem_bitmap))
1987		return -EINVAL;
1988
1989	ar->mem_bitmap = kzalloc(roundup(ar->fw.mem_blocks, BITS_PER_LONG) *
1990				 sizeof(unsigned long), GFP_KERNEL);
1991
1992	if (!ar->mem_bitmap)
1993		return -ENOMEM;
1994
1995	/* try to read EEPROM, init MAC addr */
1996	err = carl9170_read_eeprom(ar);
1997	if (err)
1998		return err;
1999
2000	err = carl9170_parse_eeprom(ar);
2001	if (err)
2002		return err;
2003
2004	err = ath_regd_init(regulatory, ar->hw->wiphy,
2005			    carl9170_reg_notifier);
2006	if (err)
2007		return err;
2008
2009	if (modparam_noht) {
2010		carl9170_band_2GHz.ht_cap.ht_supported = false;
2011		carl9170_band_5GHz.ht_cap.ht_supported = false;
2012	}
2013
2014	for (i = 0; i < ar->fw.vif_num; i++) {
2015		ar->vif_priv[i].id = i;
2016		ar->vif_priv[i].vif = NULL;
2017	}
2018
2019	err = ieee80211_register_hw(ar->hw);
2020	if (err)
2021		return err;
2022
2023	/* mac80211 interface is now registered */
2024	ar->registered = true;
2025
2026	if (!ath_is_world_regd(regulatory))
2027		regulatory_hint(ar->hw->wiphy, regulatory->alpha2);
2028
2029#ifdef CONFIG_CARL9170_DEBUGFS
2030	carl9170_debugfs_register(ar);
2031#endif /* CONFIG_CARL9170_DEBUGFS */
2032
2033	err = carl9170_led_init(ar);
2034	if (err)
2035		goto err_unreg;
2036
2037#ifdef CONFIG_CARL9170_LEDS
2038	err = carl9170_led_register(ar);
2039	if (err)
2040		goto err_unreg;
2041#endif /* CONFIG_CARL9170_LEDS */
2042
2043#ifdef CONFIG_CARL9170_WPC
2044	err = carl9170_register_wps_button(ar);
2045	if (err)
2046		goto err_unreg;
2047#endif /* CONFIG_CARL9170_WPC */
2048
2049#ifdef CONFIG_CARL9170_HWRNG
2050	err = carl9170_register_hwrng(ar);
2051	if (err)
2052		goto err_unreg;
2053#endif /* CONFIG_CARL9170_HWRNG */
2054
2055	dev_info(&ar->udev->dev, "Atheros AR9170 is registered as '%s'\n",
2056		 wiphy_name(ar->hw->wiphy));
2057
2058	return 0;
2059
2060err_unreg:
2061	carl9170_unregister(ar);
2062	return err;
2063}
2064
2065void carl9170_unregister(struct ar9170 *ar)
2066{
2067	if (!ar->registered)
2068		return;
2069
2070	ar->registered = false;
2071
2072#ifdef CONFIG_CARL9170_LEDS
2073	carl9170_led_unregister(ar);
2074#endif /* CONFIG_CARL9170_LEDS */
2075
2076#ifdef CONFIG_CARL9170_DEBUGFS
2077	carl9170_debugfs_unregister(ar);
2078#endif /* CONFIG_CARL9170_DEBUGFS */
2079
2080#ifdef CONFIG_CARL9170_WPC
2081	if (ar->wps.pbc) {
2082		input_unregister_device(ar->wps.pbc);
2083		ar->wps.pbc = NULL;
2084	}
2085#endif /* CONFIG_CARL9170_WPC */
2086
2087#ifdef CONFIG_CARL9170_HWRNG
2088	carl9170_unregister_hwrng(ar);
2089#endif /* CONFIG_CARL9170_HWRNG */
2090
2091	carl9170_cancel_worker(ar);
2092	cancel_work_sync(&ar->restart_work);
2093
2094	ieee80211_unregister_hw(ar->hw);
2095}
2096
2097void carl9170_free(struct ar9170 *ar)
2098{
2099	WARN_ON(ar->registered);
2100	WARN_ON(IS_INITIALIZED(ar));
2101
2102	kfree_skb(ar->rx_failover);
2103	ar->rx_failover = NULL;
2104
2105	kfree(ar->mem_bitmap);
2106	ar->mem_bitmap = NULL;
2107
2108	kfree(ar->survey);
2109	ar->survey = NULL;
2110
2111	mutex_destroy(&ar->mutex);
2112
2113	ieee80211_free_hw(ar->hw);
2114}