1/*
2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/dma-mapping.h>
18#include <linux/slab.h>
19#include <linux/ath9k_platform.h>
20
21#include "ath9k.h"
22
23static char *dev_info = "ath9k";
24
25MODULE_AUTHOR("Atheros Communications");
26MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
27MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
28MODULE_LICENSE("Dual BSD/GPL");
29
30static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
31module_param_named(debug, ath9k_debug, uint, 0);
32MODULE_PARM_DESC(debug, "Debugging mask");
33
34int ath9k_modparam_nohwcrypt;
35module_param_named(nohwcrypt, ath9k_modparam_nohwcrypt, int, 0444);
36MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
37
38int led_blink;
39module_param_named(blink, led_blink, int, 0444);
40MODULE_PARM_DESC(blink, "Enable LED blink on activity");
41
42static int ath9k_btcoex_enable;
43module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
44MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
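/*
 * Example only (an illustrative invocation, not part of the driver): when
 * built as a module, the parameters declared above can be set at load time,
 * e.g.
 *   modprobe ath9k nohwcrypt=1 blink=1 btcoex_enable=1 debug=<mask>
 */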
45
46bool is_ath9k_unloaded;
47/* We use the hw_value as an index into our private channel structure */
48
49#define CHAN2G(_freq, _idx) { \
50 .band = IEEE80211_BAND_2GHZ, \
51 .center_freq = (_freq), \
52 .hw_value = (_idx), \
53 .max_power = 20, \
54}
55
56#define CHAN5G(_freq, _idx) { \
57 .band = IEEE80211_BAND_5GHZ, \
58 .center_freq = (_freq), \
59 .hw_value = (_idx), \
60 .max_power = 20, \
61}
62
63/* Some 2 GHz radios are actually tunable from 2312-2732 MHz
64 * in 5 MHz steps. To keep this table static we only list the
65 * channels for which we know we have calibration data on all
66 * cards. */
67static const struct ieee80211_channel ath9k_2ghz_chantable[] = {
68 CHAN2G(2412, 0), /* Channel 1 */
69 CHAN2G(2417, 1), /* Channel 2 */
70 CHAN2G(2422, 2), /* Channel 3 */
71 CHAN2G(2427, 3), /* Channel 4 */
72 CHAN2G(2432, 4), /* Channel 5 */
73 CHAN2G(2437, 5), /* Channel 6 */
74 CHAN2G(2442, 6), /* Channel 7 */
75 CHAN2G(2447, 7), /* Channel 8 */
76 CHAN2G(2452, 8), /* Channel 9 */
77 CHAN2G(2457, 9), /* Channel 10 */
78 CHAN2G(2462, 10), /* Channel 11 */
79 CHAN2G(2467, 11), /* Channel 12 */
80 CHAN2G(2472, 12), /* Channel 13 */
81 CHAN2G(2484, 13), /* Channel 14 */
82};
83
84/* Some 5 GHz radios are actually tunable from XXXX-YYYY MHz
85 * in 5 MHz steps. To keep this table static we only list the
86 * channels for which we know we have calibration data on all
87 * cards. */
88static const struct ieee80211_channel ath9k_5ghz_chantable[] = {
89 /* _We_ call this UNII 1 */
90 CHAN5G(5180, 14), /* Channel 36 */
91 CHAN5G(5200, 15), /* Channel 40 */
92 CHAN5G(5220, 16), /* Channel 44 */
93 CHAN5G(5240, 17), /* Channel 48 */
94 /* _We_ call this UNII 2 */
95 CHAN5G(5260, 18), /* Channel 52 */
96 CHAN5G(5280, 19), /* Channel 56 */
97 CHAN5G(5300, 20), /* Channel 60 */
98 CHAN5G(5320, 21), /* Channel 64 */
99 /* _We_ call this "Middle band" */
100 CHAN5G(5500, 22), /* Channel 100 */
101 CHAN5G(5520, 23), /* Channel 104 */
102 CHAN5G(5540, 24), /* Channel 108 */
103 CHAN5G(5560, 25), /* Channel 112 */
104 CHAN5G(5580, 26), /* Channel 116 */
105 CHAN5G(5600, 27), /* Channel 120 */
106 CHAN5G(5620, 28), /* Channel 124 */
107 CHAN5G(5640, 29), /* Channel 128 */
108 CHAN5G(5660, 30), /* Channel 132 */
109 CHAN5G(5680, 31), /* Channel 136 */
110 CHAN5G(5700, 32), /* Channel 140 */
111 /* _We_ call this UNII 3 */
112 CHAN5G(5745, 33), /* Channel 149 */
113 CHAN5G(5765, 34), /* Channel 153 */
114 CHAN5G(5785, 35), /* Channel 157 */
115 CHAN5G(5805, 36), /* Channel 161 */
116 CHAN5G(5825, 37), /* Channel 165 */
117};
118
119/* Atheros hardware rate code addition for short preamble */
120#define SHPCHECK(__hw_rate, __flags) \
121 ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04 ) : 0)
122
123#define RATE(_bitrate, _hw_rate, _flags) { \
124 .bitrate = (_bitrate), \
125 .flags = (_flags), \
126 .hw_value = (_hw_rate), \
127 .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
128}
129
130static struct ieee80211_rate ath9k_legacy_rates[] = {
131 RATE(10, 0x1b, 0),
132 RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
133 RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
134 RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
135 RATE(60, 0x0b, 0),
136 RATE(90, 0x0f, 0),
137 RATE(120, 0x0a, 0),
138 RATE(180, 0x0e, 0),
139 RATE(240, 0x09, 0),
140 RATE(360, 0x0d, 0),
141 RATE(480, 0x08, 0),
142 RATE(540, 0x0c, 0),
143};
144
145#ifdef CONFIG_MAC80211_LEDS
146static const struct ieee80211_tpt_blink ath9k_tpt_blink[] = {
147 { .throughput = 0 * 1024, .blink_time = 334 },
148 { .throughput = 1 * 1024, .blink_time = 260 },
149 { .throughput = 5 * 1024, .blink_time = 220 },
150 { .throughput = 10 * 1024, .blink_time = 190 },
151 { .throughput = 20 * 1024, .blink_time = 170 },
152 { .throughput = 50 * 1024, .blink_time = 150 },
153 { .throughput = 70 * 1024, .blink_time = 130 },
154 { .throughput = 100 * 1024, .blink_time = 110 },
155 { .throughput = 200 * 1024, .blink_time = 80 },
156 { .throughput = 300 * 1024, .blink_time = 50 },
157};
158#endif
159
160static void ath9k_deinit_softc(struct ath_softc *sc);
161
162/*
163 * Register reads and writes share the same lock. We do this to serialize
164 * register accesses on Atheros 802.11n PCI devices only. This is required
165 * because the FIFO on these devices can only sanely accept two requests.
166 */
167
168static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
169{
170 struct ath_hw *ah = (struct ath_hw *) hw_priv;
171 struct ath_common *common = ath9k_hw_common(ah);
172 struct ath_softc *sc = (struct ath_softc *) common->priv;
173
174 if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
175 unsigned long flags;
176 spin_lock_irqsave(&sc->sc_serial_rw, flags);
177 iowrite32(val, sc->mem + reg_offset);
178 spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
179 } else
180 iowrite32(val, sc->mem + reg_offset);
181}
182
183static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
184{
185 struct ath_hw *ah = (struct ath_hw *) hw_priv;
186 struct ath_common *common = ath9k_hw_common(ah);
187 struct ath_softc *sc = (struct ath_softc *) common->priv;
188 u32 val;
189
190 if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
191 unsigned long flags;
192 spin_lock_irqsave(&sc->sc_serial_rw, flags);
193 val = ioread32(sc->mem + reg_offset);
194 spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
195 } else
196 val = ioread32(sc->mem + reg_offset);
197 return val;
198}
199
200static unsigned int __ath9k_reg_rmw(struct ath_softc *sc, u32 reg_offset,
201 u32 set, u32 clr)
202{
203 u32 val;
204
205 val = ioread32(sc->mem + reg_offset);
206 val &= ~clr;
207 val |= set;
208 iowrite32(val, sc->mem + reg_offset);
209
210 return val;
211}
212
213static unsigned int ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 clr)
214{
215 struct ath_hw *ah = (struct ath_hw *) hw_priv;
216 struct ath_common *common = ath9k_hw_common(ah);
217 struct ath_softc *sc = (struct ath_softc *) common->priv;
218 unsigned long uninitialized_var(flags);
219 u32 val;
220
221 if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
222 spin_lock_irqsave(&sc->sc_serial_rw, flags);
223 val = __ath9k_reg_rmw(sc, reg_offset, set, clr);
224 spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
225 } else
226 val = __ath9k_reg_rmw(sc, reg_offset, set, clr);
227
228 return val;
229}
230
231/**************************/
232/* Initialization */
233/**************************/
234
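/*
 * Fill in the HT capabilities advertised to mac80211: 20/40 MHz operation,
 * short GI, STBC/LDPC where the hardware supports them, and an MCS set
 * sized to the number of TX/RX streams derived from the chainmasks.
 */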
235static void setup_ht_cap(struct ath_softc *sc,
236 struct ieee80211_sta_ht_cap *ht_info)
237{
238 struct ath_hw *ah = sc->sc_ah;
239 struct ath_common *common = ath9k_hw_common(ah);
240 u8 tx_streams, rx_streams;
241 int i, max_streams;
242
243 ht_info->ht_supported = true;
244 ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
245 IEEE80211_HT_CAP_SM_PS |
246 IEEE80211_HT_CAP_SGI_40 |
247 IEEE80211_HT_CAP_DSSSCCK40;
248
249 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
250 ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
251
252 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
253 ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
254
255 ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
256 ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
257
258 if (AR_SREV_9330(ah) || AR_SREV_9485(ah))
259 max_streams = 1;
260 else if (AR_SREV_9300_20_OR_LATER(ah))
261 max_streams = 3;
262 else
263 max_streams = 2;
264
265 if (AR_SREV_9280_20_OR_LATER(ah)) {
266 if (max_streams >= 2)
267 ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
268 ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
269 }
270
271 /* set up supported mcs set */
272 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
273 tx_streams = ath9k_cmn_count_streams(common->tx_chainmask, max_streams);
274 rx_streams = ath9k_cmn_count_streams(common->rx_chainmask, max_streams);
275
276 ath_dbg(common, ATH_DBG_CONFIG,
277 "TX streams %d, RX streams: %d\n",
278 tx_streams, rx_streams);
279
280 if (tx_streams != rx_streams) {
281 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
282 ht_info->mcs.tx_params |= ((tx_streams - 1) <<
283 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
284 }
285
286 for (i = 0; i < rx_streams; i++)
287 ht_info->mcs.rx_mask[i] = 0xff;
288
289 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
290}
291
292static int ath9k_reg_notifier(struct wiphy *wiphy,
293 struct regulatory_request *request)
294{
295 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
296 struct ath_softc *sc = hw->priv;
297 struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);
298
299 return ath_reg_notifier_apply(wiphy, request, reg);
300}
301
302/*
303 * Allocate both the DMA descriptor memory and the ath_buf structures
304 * that point into it. These hold the descriptors used by the
305 * hardware.
306 */
307int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
308 struct list_head *head, const char *name,
309 int nbuf, int ndesc, bool is_tx)
310{
311 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
312 u8 *ds;
313 struct ath_buf *bf;
314 int i, bsize, error, desc_len;
315
316 ath_dbg(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
317 name, nbuf, ndesc);
318
319 INIT_LIST_HEAD(head);
320
321 if (is_tx)
322 desc_len = sc->sc_ah->caps.tx_desc_len;
323 else
324 desc_len = sizeof(struct ath_desc);
325
326 /* ath_desc must be a multiple of DWORDs */
327 if ((desc_len % 4) != 0) {
328 ath_err(common, "ath_desc not DWORD aligned\n");
329 BUG_ON((desc_len % 4) != 0);
330 error = -ENOMEM;
331 goto fail;
332 }
333
334 dd->dd_desc_len = desc_len * nbuf * ndesc;
335
336 /*
337 * Need additional DMA memory because we can't use
338 * descriptors that cross the 4K page boundary. Assume
339 * one skipped descriptor per 4K page.
340 */
341 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
342 u32 ndesc_skipped =
343 ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
344 u32 dma_len;
345
346 while (ndesc_skipped) {
347 dma_len = ndesc_skipped * desc_len;
348 dd->dd_desc_len += dma_len;
349
350 ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
351 }
352 }
353
354 /* allocate descriptors */
355 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
356 &dd->dd_desc_paddr, GFP_KERNEL);
357 if (dd->dd_desc == NULL) {
358 error = -ENOMEM;
359 goto fail;
360 }
361 ds = (u8 *) dd->dd_desc;
362 ath_dbg(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
363 name, ds, (u32) dd->dd_desc_len,
364 ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
365
366 /* allocate buffers */
367 bsize = sizeof(struct ath_buf) * nbuf;
368 bf = kzalloc(bsize, GFP_KERNEL);
369 if (bf == NULL) {
370 error = -ENOMEM;
371 goto fail2;
372 }
373 dd->dd_bufptr = bf;
374
375 for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
376 bf->bf_desc = ds;
377 bf->bf_daddr = DS2PHYS(dd, ds);
378
379 if (!(sc->sc_ah->caps.hw_caps &
380 ATH9K_HW_CAP_4KB_SPLITTRANS)) {
381 /*
382 * Skip descriptor addresses which can cause 4KB
383 * boundary crossing (addr + length) with a 32 dword
384 * descriptor fetch.
385 */
386 while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
387 BUG_ON((caddr_t) bf->bf_desc >=
388 ((caddr_t) dd->dd_desc +
389 dd->dd_desc_len));
390
391 ds += (desc_len * ndesc);
392 bf->bf_desc = ds;
393 bf->bf_daddr = DS2PHYS(dd, ds);
394 }
395 }
396 list_add_tail(&bf->list, head);
397 }
398 return 0;
399fail2:
400 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
401 dd->dd_desc_paddr);
402fail:
403 memset(dd, 0, sizeof(*dd));
404 return error;
405}
406
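/*
 * Reset every hardware key cache slot (some parts do not clear them on
 * power up) and note whether the hardware can combine TX and RX MIC keys
 * in a single cache entry.
 */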
407void ath9k_init_crypto(struct ath_softc *sc)
408{
409 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
410 int i = 0;
411
412 /* Get the hardware key cache size. */
413 common->keymax = AR_KEYTABLE_SIZE;
414
415 /*
416 * Reset the key cache since some parts do not
417 * reset the contents on initial power up.
418 */
419 for (i = 0; i < common->keymax; i++)
420 ath_hw_keyreset(common, (u16) i);
421
422 /*
423 * Check whether separate key cache entries
424 * are required to handle both TX and RX MIC keys.
425 * With split MIC keys the number of stations is limited
426 * to 27; otherwise it is 59.
427 */
428 if (sc->sc_ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA)
429 common->crypt_caps |= ATH_CRYPT_CAP_MIC_COMBINED;
430}
431
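/*
 * Set up Bluetooth coexistence according to the scheme the hardware
 * reports: none, 2-wire, or 3-wire. The 3-wire case also sets up a coex
 * timer, ties the coex hardware to the BE TX queue and defaults to the
 * "stomp low" policy.
 */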
432static int ath9k_init_btcoex(struct ath_softc *sc)
433{
434 struct ath_txq *txq;
435 int r;
436
437 switch (sc->sc_ah->btcoex_hw.scheme) {
438 case ATH_BTCOEX_CFG_NONE:
439 break;
440 case ATH_BTCOEX_CFG_2WIRE:
441 ath9k_hw_btcoex_init_2wire(sc->sc_ah);
442 break;
443 case ATH_BTCOEX_CFG_3WIRE:
444 ath9k_hw_btcoex_init_3wire(sc->sc_ah);
445 r = ath_init_btcoex_timer(sc);
446 if (r)
447 return -1;
448 txq = sc->tx.txq_map[WME_AC_BE];
449 ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
450 sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
451 break;
452 default:
453 WARN_ON(1);
454 break;
455 }
456
457 return 0;
458}
459
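/*
 * Set up the beacon queue, the CAB (content-after-beacon) queue and one
 * data TX queue per WME access category.
 */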
460static int ath9k_init_queues(struct ath_softc *sc)
461{
462 int i = 0;
463
464 sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
465 sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
466
467 sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
468 ath_cabq_update(sc);
469
470 for (i = 0; i < WME_NUM_AC; i++) {
471 sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
472 sc->tx.txq_map[i]->mac80211_qnum = i;
473 }
474 return 0;
475}
476
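/*
 * Copy the static channel tables into per-device band structures and
 * attach the legacy rate table (the 5 GHz band skips the four CCK rates).
 */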
477static int ath9k_init_channels_rates(struct ath_softc *sc)
478{
479 void *channels;
480
481 BUILD_BUG_ON(ARRAY_SIZE(ath9k_2ghz_chantable) +
482 ARRAY_SIZE(ath9k_5ghz_chantable) !=
483 ATH9K_NUM_CHANNELS);
484
485 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) {
486 channels = kmemdup(ath9k_2ghz_chantable,
487 sizeof(ath9k_2ghz_chantable), GFP_KERNEL);
488 if (!channels)
489 return -ENOMEM;
490
491 sc->sbands[IEEE80211_BAND_2GHZ].channels = channels;
492 sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
493 sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
494 ARRAY_SIZE(ath9k_2ghz_chantable);
495 sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
496 sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
497 ARRAY_SIZE(ath9k_legacy_rates);
498 }
499
500 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) {
501 channels = kmemdup(ath9k_5ghz_chantable,
502 sizeof(ath9k_5ghz_chantable), GFP_KERNEL);
503 if (!channels) {
504 if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
505 kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
506 return -ENOMEM;
507 }
508
509 sc->sbands[IEEE80211_BAND_5GHZ].channels = channels;
510 sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
511 sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
512 ARRAY_SIZE(ath9k_5ghz_chantable);
513 sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
514 ath9k_legacy_rates + 4;
515 sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
516 ARRAY_SIZE(ath9k_legacy_rates) - 4;
517 }
518 return 0;
519}
520
521static void ath9k_init_misc(struct ath_softc *sc)
522{
523 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
524 int i = 0;
525 setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
526
527 sc->config.txpowlimit = ATH_TXPOWER_MAX;
528
529 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
530 sc->sc_flags |= SC_OP_TXAGGR;
531 sc->sc_flags |= SC_OP_RXAGGR;
532 }
533
534 common->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
535 common->rx_chainmask = sc->sc_ah->caps.rx_chainmask;
536
537 ath9k_hw_set_diversity(sc->sc_ah, true);
538 sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);
539
540 memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
541
542 sc->beacon.slottime = ATH9K_SLOT_TIME_9;
543
544 for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++)
545 sc->beacon.bslot[i] = NULL;
546
547 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
548 sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;
549}
550
551static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
552 const struct ath_bus_ops *bus_ops)
553{
554 struct ath9k_platform_data *pdata = sc->dev->platform_data;
555 struct ath_hw *ah = NULL;
556 struct ath_common *common;
557 int ret = 0, i;
558 int csz = 0;
559
560 ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
561 if (!ah)
562 return -ENOMEM;
563
564 ah->hw = sc->hw;
565 ah->hw_version.devid = devid;
566 ah->hw_version.subsysid = subsysid;
567 ah->reg_ops.read = ath9k_ioread32;
568 ah->reg_ops.write = ath9k_iowrite32;
569 ah->reg_ops.rmw = ath9k_reg_rmw;
570 sc->sc_ah = ah;
571
572 if (!pdata) {
573 ah->ah_flags |= AH_USE_EEPROM;
574 sc->sc_ah->led_pin = -1;
575 } else {
576 sc->sc_ah->gpio_mask = pdata->gpio_mask;
577 sc->sc_ah->gpio_val = pdata->gpio_val;
578 sc->sc_ah->led_pin = pdata->led_pin;
579 ah->is_clk_25mhz = pdata->is_clk_25mhz;
580 ah->get_mac_revision = pdata->get_mac_revision;
581 ah->external_reset = pdata->external_reset;
582 }
583
584 common = ath9k_hw_common(ah);
585 common->ops = &ah->reg_ops;
586 common->bus_ops = bus_ops;
587 common->ah = ah;
588 common->hw = sc->hw;
589 common->priv = sc;
590 common->debug_mask = ath9k_debug;
591 common->btcoex_enabled = ath9k_btcoex_enable == 1;
592 common->disable_ani = false;
593 spin_lock_init(&common->cc_lock);
594
595 spin_lock_init(&sc->sc_serial_rw);
596 spin_lock_init(&sc->sc_pm_lock);
597 mutex_init(&sc->mutex);
598#ifdef CONFIG_ATH9K_DEBUGFS
599 spin_lock_init(&sc->nodes_lock);
600 INIT_LIST_HEAD(&sc->nodes);
601#endif
602 tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
603 tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
604 (unsigned long)sc);
605
606 /*
607 * Cache line size is used to size and align various
608 * structures used to communicate with the hardware.
609 */
610 ath_read_cachesize(common, &csz);
611 common->cachelsz = csz << 2; /* convert to bytes */
612
613 /* Initializes the hardware for all supported chipsets */
614 ret = ath9k_hw_init(ah);
615 if (ret)
616 goto err_hw;
617
618 if (pdata && pdata->macaddr)
619 memcpy(common->macaddr, pdata->macaddr, ETH_ALEN);
620
621 ret = ath9k_init_queues(sc);
622 if (ret)
623 goto err_queues;
624
625 ret = ath9k_init_btcoex(sc);
626 if (ret)
627 goto err_btcoex;
628
629 ret = ath9k_init_channels_rates(sc);
630 if (ret)
631 goto err_btcoex;
632
633 ath9k_init_crypto(sc);
634 ath9k_init_misc(sc);
635
636 return 0;
637
638err_btcoex:
639 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
640 if (ATH_TXQ_SETUP(sc, i))
641 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
642err_queues:
643 ath9k_hw_deinit(ah);
644err_hw:
645
646 kfree(ah);
647 sc->sc_ah = NULL;
648
649 return ret;
650}
651
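/*
 * Walk each channel in the band and let the hardware compute the
 * regulatory TX power limit for it; the result is reflected back into the
 * mac80211 channel's max_power (max_power_level appears to be in half-dBm
 * units, hence the divide by two).
 */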
652static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
653{
654 struct ieee80211_supported_band *sband;
655 struct ieee80211_channel *chan;
656 struct ath_hw *ah = sc->sc_ah;
657 struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
658 int i;
659
660 sband = &sc->sbands[band];
661 for (i = 0; i < sband->n_channels; i++) {
662 chan = &sband->channels[i];
663 ah->curchan = &ah->channels[chan->hw_value];
664 ath9k_cmn_update_ichannel(ah->curchan, chan, NL80211_CHAN_HT20);
665 ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true);
666 chan->max_power = reg->max_power_level / 2;
667 }
668}
669
670static void ath9k_init_txpower_limits(struct ath_softc *sc)
671{
672 struct ath_hw *ah = sc->sc_ah;
673 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
674 struct ath9k_channel *curchan = ah->curchan;
675
676 ah->txchainmask = common->tx_chainmask;
677 if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
678 ath9k_init_band_txpower(sc, IEEE80211_BAND_2GHZ);
679 if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
680 ath9k_init_band_txpower(sc, IEEE80211_BAND_5GHZ);
681
682 ah->curchan = curchan;
683}
684
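/*
 * Advertise the device's capabilities to mac80211: hardware flags,
 * supported interface types, queue/rate limits, the per-band channel and
 * rate tables, and HT capabilities where the hardware supports HT.
 */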
685void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
686{
687 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
688
689 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
690 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
691 IEEE80211_HW_SIGNAL_DBM |
692 IEEE80211_HW_SUPPORTS_PS |
693 IEEE80211_HW_PS_NULLFUNC_STACK |
694 IEEE80211_HW_SPECTRUM_MGMT |
695 IEEE80211_HW_REPORTS_TX_ACK_STATUS;
696
697 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
698 hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
699
700 if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || ath9k_modparam_nohwcrypt)
701 hw->flags |= IEEE80211_HW_MFP_CAPABLE;
702
703 hw->wiphy->interface_modes =
704 BIT(NL80211_IFTYPE_P2P_GO) |
705 BIT(NL80211_IFTYPE_P2P_CLIENT) |
706 BIT(NL80211_IFTYPE_AP) |
707 BIT(NL80211_IFTYPE_WDS) |
708 BIT(NL80211_IFTYPE_STATION) |
709 BIT(NL80211_IFTYPE_ADHOC) |
710 BIT(NL80211_IFTYPE_MESH_POINT);
711
712 if (AR_SREV_5416(sc->sc_ah))
713 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
714
715 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
716
717 hw->queues = 4;
718 hw->max_rates = 4;
719 hw->channel_change_time = 5000;
720 hw->max_listen_interval = 10;
721 hw->max_rate_tries = 10;
722 hw->sta_data_size = sizeof(struct ath_node);
723 hw->vif_data_size = sizeof(struct ath_vif);
724
725#ifdef CONFIG_ATH9K_RATE_CONTROL
726 hw->rate_control_algorithm = "ath9k_rate_control";
727#endif
728
729 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
730 hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
731 &sc->sbands[IEEE80211_BAND_2GHZ];
732 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
733 hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
734 &sc->sbands[IEEE80211_BAND_5GHZ];
735
736 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
737 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
738 setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
739 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
740 setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
741 }
742
743 SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
744}
745
746int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
747 const struct ath_bus_ops *bus_ops)
748{
749 struct ieee80211_hw *hw = sc->hw;
750 struct ath_common *common;
751 struct ath_hw *ah;
752 int error = 0;
753 struct ath_regulatory *reg;
754
755 /* Bring up device */
756 error = ath9k_init_softc(devid, sc, subsysid, bus_ops);
757 if (error != 0)
758 goto error_init;
759
760 ah = sc->sc_ah;
761 common = ath9k_hw_common(ah);
762 ath9k_set_hw_capab(sc, hw);
763
764 /* Initialize regulatory */
765 error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
766 ath9k_reg_notifier);
767 if (error)
768 goto error_regd;
769
770 reg = &common->regulatory;
771
772 /* Setup TX DMA */
773 error = ath_tx_init(sc, ATH_TXBUF);
774 if (error != 0)
775 goto error_tx;
776
777 /* Setup RX DMA */
778 error = ath_rx_init(sc, ATH_RXBUF);
779 if (error != 0)
780 goto error_rx;
781
782 ath9k_init_txpower_limits(sc);
783
784#ifdef CONFIG_MAC80211_LEDS
785 /* must be initialized before ieee80211_register_hw */
786 sc->led_cdev.default_trigger = ieee80211_create_tpt_led_trigger(sc->hw,
787 IEEE80211_TPT_LEDTRIG_FL_RADIO, ath9k_tpt_blink,
788 ARRAY_SIZE(ath9k_tpt_blink));
789#endif
790
791 /* Register with mac80211 */
792 error = ieee80211_register_hw(hw);
793 if (error)
794 goto error_register;
795
796 error = ath9k_init_debug(ah);
797 if (error) {
798 ath_err(common, "Unable to create debugfs files\n");
799 goto error_world;
800 }
801
802 /* Handle world regulatory */
803 if (!ath_is_world_regd(reg)) {
804 error = regulatory_hint(hw->wiphy, reg->alpha2);
805 if (error)
806 goto error_world;
807 }
808
809 INIT_WORK(&sc->hw_check_work, ath_hw_check);
810 INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
811 INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
812 sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
813
814 ath_init_leds(sc);
815 ath_start_rfkill_poll(sc);
816
817 return 0;
818
819error_world:
820 ieee80211_unregister_hw(hw);
821error_register:
822 ath_rx_cleanup(sc);
823error_rx:
824 ath_tx_cleanup(sc);
825error_tx:
826 /* Nothing */
827error_regd:
828 ath9k_deinit_softc(sc);
829error_init:
830 return error;
831}
832
833/*****************************/
834/* De-Initialization */
835/*****************************/
836
837static void ath9k_deinit_softc(struct ath_softc *sc)
838{
839 int i = 0;
840
841 if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
842 kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
843
844 if (sc->sbands[IEEE80211_BAND_5GHZ].channels)
845 kfree(sc->sbands[IEEE80211_BAND_5GHZ].channels);
846
847 if ((sc->btcoex.no_stomp_timer) &&
848 sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
849 ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);
850
851 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
852 if (ATH_TXQ_SETUP(sc, i))
853 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
854
855 ath9k_hw_deinit(sc->sc_ah);
856
857 kfree(sc->sc_ah);
858 sc->sc_ah = NULL;
859}
860
861void ath9k_deinit_device(struct ath_softc *sc)
862{
863 struct ieee80211_hw *hw = sc->hw;
864
865 ath9k_ps_wakeup(sc);
866
867 wiphy_rfkill_stop_polling(sc->hw->wiphy);
868 ath_deinit_leds(sc);
869
870 ath9k_ps_restore(sc);
871
872 ieee80211_unregister_hw(hw);
873 ath_rx_cleanup(sc);
874 ath_tx_cleanup(sc);
875 ath9k_deinit_softc(sc);
876}
877
878void ath_descdma_cleanup(struct ath_softc *sc,
879 struct ath_descdma *dd,
880 struct list_head *head)
881{
882 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
883 dd->dd_desc_paddr);
884
885 INIT_LIST_HEAD(head);
886 kfree(dd->dd_bufptr);
887 memset(dd, 0, sizeof(*dd));
888}
889
890/************************/
891/* Module Hooks */
892/************************/
893
894static int __init ath9k_init(void)
895{
896 int error;
897
898 /* Register rate control algorithm */
899 error = ath_rate_control_register();
900 if (error != 0) {
901 printk(KERN_ERR
902 "ath9k: Unable to register rate control "
903 "algorithm: %d\n",
904 error);
905 goto err_out;
906 }
907
908 error = ath_pci_init();
909 if (error < 0) {
910 printk(KERN_ERR
911 "ath9k: No PCI devices found, driver not installed.\n");
912 error = -ENODEV;
913 goto err_rate_unregister;
914 }
915
916 error = ath_ahb_init();
917 if (error < 0) {
918 error = -ENODEV;
919 goto err_pci_exit;
920 }
921
922 return 0;
923
924 err_pci_exit:
925 ath_pci_exit();
926
927 err_rate_unregister:
928 ath_rate_control_unregister();
929 err_out:
930 return error;
931}
932module_init(ath9k_init);
933
934static void __exit ath9k_exit(void)
935{
936 is_ath9k_unloaded = true;
937 ath_ahb_exit();
938 ath_pci_exit();
939 ath_rate_control_unregister();
940 printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
941}
942module_exit(ath9k_exit);
1/*
2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19#include <linux/dma-mapping.h>
20#include <linux/slab.h>
21#include <linux/ath9k_platform.h>
22#include <linux/module.h>
23
24#include "ath9k.h"
25
26static char *dev_info = "ath9k";
27
28MODULE_AUTHOR("Atheros Communications");
29MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
30MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
31MODULE_LICENSE("Dual BSD/GPL");
32
33static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
34module_param_named(debug, ath9k_debug, uint, 0);
35MODULE_PARM_DESC(debug, "Debugging mask");
36
37int ath9k_modparam_nohwcrypt;
38module_param_named(nohwcrypt, ath9k_modparam_nohwcrypt, int, 0444);
39MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
40
41int led_blink;
42module_param_named(blink, led_blink, int, 0444);
43MODULE_PARM_DESC(blink, "Enable LED blink on activity");
44
45static int ath9k_btcoex_enable;
46module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
47MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
48
49bool is_ath9k_unloaded;
50/* We use the hw_value as an index into our private channel structure */
51
52#define CHAN2G(_freq, _idx) { \
53 .band = IEEE80211_BAND_2GHZ, \
54 .center_freq = (_freq), \
55 .hw_value = (_idx), \
56 .max_power = 20, \
57}
58
59#define CHAN5G(_freq, _idx) { \
60 .band = IEEE80211_BAND_5GHZ, \
61 .center_freq = (_freq), \
62 .hw_value = (_idx), \
63 .max_power = 20, \
64}
65
66/* Some 2 GHz radios are actually tunable from 2312-2732 MHz
67 * in 5 MHz steps. To keep this table static we only list the
68 * channels for which we know we have calibration data on all
69 * cards. */
70static const struct ieee80211_channel ath9k_2ghz_chantable[] = {
71 CHAN2G(2412, 0), /* Channel 1 */
72 CHAN2G(2417, 1), /* Channel 2 */
73 CHAN2G(2422, 2), /* Channel 3 */
74 CHAN2G(2427, 3), /* Channel 4 */
75 CHAN2G(2432, 4), /* Channel 5 */
76 CHAN2G(2437, 5), /* Channel 6 */
77 CHAN2G(2442, 6), /* Channel 7 */
78 CHAN2G(2447, 7), /* Channel 8 */
79 CHAN2G(2452, 8), /* Channel 9 */
80 CHAN2G(2457, 9), /* Channel 10 */
81 CHAN2G(2462, 10), /* Channel 11 */
82 CHAN2G(2467, 11), /* Channel 12 */
83 CHAN2G(2472, 12), /* Channel 13 */
84 CHAN2G(2484, 13), /* Channel 14 */
85};
86
87/* Some 5 GHz radios are actually tunable from XXXX-YYYY MHz
88 * in 5 MHz steps. To keep this table static we only list the
89 * channels for which we know we have calibration data on all
90 * cards. */
91static const struct ieee80211_channel ath9k_5ghz_chantable[] = {
92 /* _We_ call this UNII 1 */
93 CHAN5G(5180, 14), /* Channel 36 */
94 CHAN5G(5200, 15), /* Channel 40 */
95 CHAN5G(5220, 16), /* Channel 44 */
96 CHAN5G(5240, 17), /* Channel 48 */
97 /* _We_ call this UNII 2 */
98 CHAN5G(5260, 18), /* Channel 52 */
99 CHAN5G(5280, 19), /* Channel 56 */
100 CHAN5G(5300, 20), /* Channel 60 */
101 CHAN5G(5320, 21), /* Channel 64 */
102 /* _We_ call this "Middle band" */
103 CHAN5G(5500, 22), /* Channel 100 */
104 CHAN5G(5520, 23), /* Channel 104 */
105 CHAN5G(5540, 24), /* Channel 108 */
106 CHAN5G(5560, 25), /* Channel 112 */
107 CHAN5G(5580, 26), /* Channel 116 */
108 CHAN5G(5600, 27), /* Channel 120 */
109 CHAN5G(5620, 28), /* Channel 124 */
110 CHAN5G(5640, 29), /* Channel 128 */
111 CHAN5G(5660, 30), /* Channel 132 */
112 CHAN5G(5680, 31), /* Channel 136 */
113 CHAN5G(5700, 32), /* Channel 140 */
114 /* _We_ call this UNII 3 */
115 CHAN5G(5745, 33), /* Channel 149 */
116 CHAN5G(5765, 34), /* Channel 153 */
117 CHAN5G(5785, 35), /* Channel 157 */
118 CHAN5G(5805, 36), /* Channel 161 */
119 CHAN5G(5825, 37), /* Channel 165 */
120};
121
122/* Atheros hardware rate code addition for short preamble */
123#define SHPCHECK(__hw_rate, __flags) \
124 ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04 ) : 0)
125
126#define RATE(_bitrate, _hw_rate, _flags) { \
127 .bitrate = (_bitrate), \
128 .flags = (_flags), \
129 .hw_value = (_hw_rate), \
130 .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
131}
132
133static struct ieee80211_rate ath9k_legacy_rates[] = {
134 RATE(10, 0x1b, 0),
135 RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
136 RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
137 RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
138 RATE(60, 0x0b, 0),
139 RATE(90, 0x0f, 0),
140 RATE(120, 0x0a, 0),
141 RATE(180, 0x0e, 0),
142 RATE(240, 0x09, 0),
143 RATE(360, 0x0d, 0),
144 RATE(480, 0x08, 0),
145 RATE(540, 0x0c, 0),
146};
147
148#ifdef CONFIG_MAC80211_LEDS
149static const struct ieee80211_tpt_blink ath9k_tpt_blink[] = {
150 { .throughput = 0 * 1024, .blink_time = 334 },
151 { .throughput = 1 * 1024, .blink_time = 260 },
152 { .throughput = 5 * 1024, .blink_time = 220 },
153 { .throughput = 10 * 1024, .blink_time = 190 },
154 { .throughput = 20 * 1024, .blink_time = 170 },
155 { .throughput = 50 * 1024, .blink_time = 150 },
156 { .throughput = 70 * 1024, .blink_time = 130 },
157 { .throughput = 100 * 1024, .blink_time = 110 },
158 { .throughput = 200 * 1024, .blink_time = 80 },
159 { .throughput = 300 * 1024, .blink_time = 50 },
160};
161#endif
162
163static void ath9k_deinit_softc(struct ath_softc *sc);
164
165/*
166 * Register reads and writes share the same lock. We do this to serialize
167 * register accesses on Atheros 802.11n PCI devices only. This is required
168 * because the FIFO on these devices can only sanely accept two requests.
169 */
170
171static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
172{
173 struct ath_hw *ah = (struct ath_hw *) hw_priv;
174 struct ath_common *common = ath9k_hw_common(ah);
175 struct ath_softc *sc = (struct ath_softc *) common->priv;
176
177 if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_ON) {
178 unsigned long flags;
179 spin_lock_irqsave(&sc->sc_serial_rw, flags);
180 iowrite32(val, sc->mem + reg_offset);
181 spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
182 } else
183 iowrite32(val, sc->mem + reg_offset);
184}
185
186static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
187{
188 struct ath_hw *ah = (struct ath_hw *) hw_priv;
189 struct ath_common *common = ath9k_hw_common(ah);
190 struct ath_softc *sc = (struct ath_softc *) common->priv;
191 u32 val;
192
193 if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_ON) {
194 unsigned long flags;
195 spin_lock_irqsave(&sc->sc_serial_rw, flags);
196 val = ioread32(sc->mem + reg_offset);
197 spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
198 } else
199 val = ioread32(sc->mem + reg_offset);
200 return val;
201}
202
203static unsigned int __ath9k_reg_rmw(struct ath_softc *sc, u32 reg_offset,
204 u32 set, u32 clr)
205{
206 u32 val;
207
208 val = ioread32(sc->mem + reg_offset);
209 val &= ~clr;
210 val |= set;
211 iowrite32(val, sc->mem + reg_offset);
212
213 return val;
214}
215
216static unsigned int ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 clr)
217{
218 struct ath_hw *ah = (struct ath_hw *) hw_priv;
219 struct ath_common *common = ath9k_hw_common(ah);
220 struct ath_softc *sc = (struct ath_softc *) common->priv;
221 unsigned long uninitialized_var(flags);
222 u32 val;
223
224 if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_ON) {
225 spin_lock_irqsave(&sc->sc_serial_rw, flags);
226 val = __ath9k_reg_rmw(sc, reg_offset, set, clr);
227 spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
228 } else
229 val = __ath9k_reg_rmw(sc, reg_offset, set, clr);
230
231 return val;
232}
233
234/**************************/
235/* Initialization */
236/**************************/
237
238static void setup_ht_cap(struct ath_softc *sc,
239 struct ieee80211_sta_ht_cap *ht_info)
240{
241 struct ath_hw *ah = sc->sc_ah;
242 struct ath_common *common = ath9k_hw_common(ah);
243 u8 tx_streams, rx_streams;
244 int i, max_streams;
245
246 ht_info->ht_supported = true;
247 ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
248 IEEE80211_HT_CAP_SM_PS |
249 IEEE80211_HT_CAP_SGI_40 |
250 IEEE80211_HT_CAP_DSSSCCK40;
251
252 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
253 ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
254
255 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
256 ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
257
258 ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
259 ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
260
261 if (AR_SREV_9330(ah) || AR_SREV_9485(ah))
262 max_streams = 1;
263 else if (AR_SREV_9462(ah))
264 max_streams = 2;
265 else if (AR_SREV_9300_20_OR_LATER(ah))
266 max_streams = 3;
267 else
268 max_streams = 2;
269
270 if (AR_SREV_9280_20_OR_LATER(ah)) {
271 if (max_streams >= 2)
272 ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
273 ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
274 }
275
276 /* set up supported mcs set */
277 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
278 tx_streams = ath9k_cmn_count_streams(ah->txchainmask, max_streams);
279 rx_streams = ath9k_cmn_count_streams(ah->rxchainmask, max_streams);
280
281 ath_dbg(common, CONFIG, "TX streams %d, RX streams: %d\n",
282 tx_streams, rx_streams);
283
284 if (tx_streams != rx_streams) {
285 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
286 ht_info->mcs.tx_params |= ((tx_streams - 1) <<
287 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
288 }
289
290 for (i = 0; i < rx_streams; i++)
291 ht_info->mcs.rx_mask[i] = 0xff;
292
293 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
294}
295
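/*
 * Apply the regulatory update and, if a channel is already configured,
 * recompute and program the TX power limit for it.
 */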
296static int ath9k_reg_notifier(struct wiphy *wiphy,
297 struct regulatory_request *request)
298{
299 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
300 struct ath_softc *sc = hw->priv;
301 struct ath_hw *ah = sc->sc_ah;
302 struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
303 int ret;
304
305 ret = ath_reg_notifier_apply(wiphy, request, reg);
306
307 /* Set tx power */
308 if (ah->curchan) {
309 sc->config.txpowlimit = 2 * ah->curchan->chan->max_power;
310 ath9k_ps_wakeup(sc);
311 ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit, false);
312 sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
313 ath9k_ps_restore(sc);
314 }
315
316 return ret;
317}
318
319/*
320 * Allocate both the DMA descriptor memory and the ath_buf structures
321 * that point into it. These hold the descriptors used by the
322 * hardware.
323 */
324int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
325 struct list_head *head, const char *name,
326 int nbuf, int ndesc, bool is_tx)
327{
328 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
329 u8 *ds;
330 struct ath_buf *bf;
331 int i, bsize, error, desc_len;
332
333 ath_dbg(common, CONFIG, "%s DMA: %u buffers %u desc/buf\n",
334 name, nbuf, ndesc);
335
336 INIT_LIST_HEAD(head);
337
338 if (is_tx)
339 desc_len = sc->sc_ah->caps.tx_desc_len;
340 else
341 desc_len = sizeof(struct ath_desc);
342
343 /* ath_desc must be a multiple of DWORDs */
344 if ((desc_len % 4) != 0) {
345 ath_err(common, "ath_desc not DWORD aligned\n");
346 BUG_ON((desc_len % 4) != 0);
347 error = -ENOMEM;
348 goto fail;
349 }
350
351 dd->dd_desc_len = desc_len * nbuf * ndesc;
352
353 /*
354 * Need additional DMA memory because we can't use
355 * descriptors that cross the 4K page boundary. Assume
356 * one skipped descriptor per 4K page.
357 */
358 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
359 u32 ndesc_skipped =
360 ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
361 u32 dma_len;
362
363 while (ndesc_skipped) {
364 dma_len = ndesc_skipped * desc_len;
365 dd->dd_desc_len += dma_len;
366
367 ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
368 }
369 }
370
371 /* allocate descriptors */
372 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
373 &dd->dd_desc_paddr, GFP_KERNEL);
374 if (dd->dd_desc == NULL) {
375 error = -ENOMEM;
376 goto fail;
377 }
378 ds = (u8 *) dd->dd_desc;
379 ath_dbg(common, CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
380 name, ds, (u32) dd->dd_desc_len,
381 ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
382
383 /* allocate buffers */
384 bsize = sizeof(struct ath_buf) * nbuf;
385 bf = kzalloc(bsize, GFP_KERNEL);
386 if (bf == NULL) {
387 error = -ENOMEM;
388 goto fail2;
389 }
390 dd->dd_bufptr = bf;
391
392 for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
393 bf->bf_desc = ds;
394 bf->bf_daddr = DS2PHYS(dd, ds);
395
396 if (!(sc->sc_ah->caps.hw_caps &
397 ATH9K_HW_CAP_4KB_SPLITTRANS)) {
398 /*
399 * Skip descriptor addresses which can cause 4KB
400 * boundary crossing (addr + length) with a 32 dword
401 * descriptor fetch.
402 */
403 while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
404 BUG_ON((caddr_t) bf->bf_desc >=
405 ((caddr_t) dd->dd_desc +
406 dd->dd_desc_len));
407
408 ds += (desc_len * ndesc);
409 bf->bf_desc = ds;
410 bf->bf_daddr = DS2PHYS(dd, ds);
411 }
412 }
413 list_add_tail(&bf->list, head);
414 }
415 return 0;
416fail2:
417 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
418 dd->dd_desc_paddr);
419fail:
420 memset(dd, 0, sizeof(*dd));
421 return error;
422}
423
424static int ath9k_init_queues(struct ath_softc *sc)
425{
426 int i = 0;
427
428 sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
429 sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
430
431 sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
432 ath_cabq_update(sc);
433
434 for (i = 0; i < WME_NUM_AC; i++) {
435 sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
436 sc->tx.txq_map[i]->mac80211_qnum = i;
437 }
438 return 0;
439}
440
441static int ath9k_init_channels_rates(struct ath_softc *sc)
442{
443 void *channels;
444
445 BUILD_BUG_ON(ARRAY_SIZE(ath9k_2ghz_chantable) +
446 ARRAY_SIZE(ath9k_5ghz_chantable) !=
447 ATH9K_NUM_CHANNELS);
448
449 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) {
450 channels = kmemdup(ath9k_2ghz_chantable,
451 sizeof(ath9k_2ghz_chantable), GFP_KERNEL);
452 if (!channels)
453 return -ENOMEM;
454
455 sc->sbands[IEEE80211_BAND_2GHZ].channels = channels;
456 sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
457 sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
458 ARRAY_SIZE(ath9k_2ghz_chantable);
459 sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
460 sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
461 ARRAY_SIZE(ath9k_legacy_rates);
462 }
463
464 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) {
465 channels = kmemdup(ath9k_5ghz_chantable,
466 sizeof(ath9k_5ghz_chantable), GFP_KERNEL);
467 if (!channels) {
468 if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
469 kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
470 return -ENOMEM;
471 }
472
473 sc->sbands[IEEE80211_BAND_5GHZ].channels = channels;
474 sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
475 sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
476 ARRAY_SIZE(ath9k_5ghz_chantable);
477 sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
478 ath9k_legacy_rates + 4;
479 sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
480 ARRAY_SIZE(ath9k_legacy_rates) - 4;
481 }
482 return 0;
483}
484
485static void ath9k_init_misc(struct ath_softc *sc)
486{
487 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
488 int i = 0;
489
490 setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
491
492 sc->config.txpowlimit = ATH_TXPOWER_MAX;
493 memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
494 sc->beacon.slottime = ATH9K_SLOT_TIME_9;
495
496 for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++)
497 sc->beacon.bslot[i] = NULL;
498
499 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
500 sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;
501}
502
503static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
504 const struct ath_bus_ops *bus_ops)
505{
506 struct ath9k_platform_data *pdata = sc->dev->platform_data;
507 struct ath_hw *ah = NULL;
508 struct ath_common *common;
509 int ret = 0, i;
510 int csz = 0;
511
512 ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
513 if (!ah)
514 return -ENOMEM;
515
516 ah->hw = sc->hw;
517 ah->hw_version.devid = devid;
518 ah->reg_ops.read = ath9k_ioread32;
519 ah->reg_ops.write = ath9k_iowrite32;
520 ah->reg_ops.rmw = ath9k_reg_rmw;
521 atomic_set(&ah->intr_ref_cnt, -1);
522 sc->sc_ah = ah;
523
524 sc->dfs_detector = dfs_pattern_detector_init(NL80211_DFS_UNSET);
525
526 if (!pdata) {
527 ah->ah_flags |= AH_USE_EEPROM;
528 sc->sc_ah->led_pin = -1;
529 } else {
530 sc->sc_ah->gpio_mask = pdata->gpio_mask;
531 sc->sc_ah->gpio_val = pdata->gpio_val;
532 sc->sc_ah->led_pin = pdata->led_pin;
533 ah->is_clk_25mhz = pdata->is_clk_25mhz;
534 ah->get_mac_revision = pdata->get_mac_revision;
535 ah->external_reset = pdata->external_reset;
536 }
537
538 common = ath9k_hw_common(ah);
539 common->ops = &ah->reg_ops;
540 common->bus_ops = bus_ops;
541 common->ah = ah;
542 common->hw = sc->hw;
543 common->priv = sc;
544 common->debug_mask = ath9k_debug;
545 common->btcoex_enabled = ath9k_btcoex_enable == 1;
546 common->disable_ani = false;
547 spin_lock_init(&common->cc_lock);
548
549 spin_lock_init(&sc->sc_serial_rw);
550 spin_lock_init(&sc->sc_pm_lock);
551 mutex_init(&sc->mutex);
552#ifdef CONFIG_ATH9K_DEBUGFS
553 spin_lock_init(&sc->nodes_lock);
554 INIT_LIST_HEAD(&sc->nodes);
555#endif
556#ifdef CONFIG_ATH9K_MAC_DEBUG
557 spin_lock_init(&sc->debug.samp_lock);
558#endif
559 tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
560 tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
561 (unsigned long)sc);
562
563 /*
564 * Cache line size is used to size and align various
565 * structures used to communicate with the hardware.
566 */
567 ath_read_cachesize(common, &csz);
568 common->cachelsz = csz << 2; /* convert to bytes */
569
570 /* Initializes the hardware for all supported chipsets */
571 ret = ath9k_hw_init(ah);
572 if (ret)
573 goto err_hw;
574
575 if (pdata && pdata->macaddr)
576 memcpy(common->macaddr, pdata->macaddr, ETH_ALEN);
577
578 ret = ath9k_init_queues(sc);
579 if (ret)
580 goto err_queues;
581
582 ret = ath9k_init_btcoex(sc);
583 if (ret)
584 goto err_btcoex;
585
586 ret = ath9k_init_channels_rates(sc);
587 if (ret)
588 goto err_btcoex;
589
590 ath9k_cmn_init_crypto(sc->sc_ah);
591 ath9k_init_misc(sc);
592
593 return 0;
594
595err_btcoex:
596 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
597 if (ATH_TXQ_SETUP(sc, i))
598 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
599err_queues:
600 ath9k_hw_deinit(ah);
601err_hw:
602
603 kfree(ah);
604 sc->sc_ah = NULL;
605
606 return ret;
607}
608
609static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
610{
611 struct ieee80211_supported_band *sband;
612 struct ieee80211_channel *chan;
613 struct ath_hw *ah = sc->sc_ah;
614 int i;
615
616 sband = &sc->sbands[band];
617 for (i = 0; i < sband->n_channels; i++) {
618 chan = &sband->channels[i];
619 ah->curchan = &ah->channels[chan->hw_value];
620 ath9k_cmn_update_ichannel(ah->curchan, chan, NL80211_CHAN_HT20);
621 ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true);
622 }
623}
624
625static void ath9k_init_txpower_limits(struct ath_softc *sc)
626{
627 struct ath_hw *ah = sc->sc_ah;
628 struct ath9k_channel *curchan = ah->curchan;
629
630 if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
631 ath9k_init_band_txpower(sc, IEEE80211_BAND_2GHZ);
632 if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
633 ath9k_init_band_txpower(sc, IEEE80211_BAND_5GHZ);
634
635 ah->curchan = curchan;
636}
637
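/*
 * Rebuild the advertised HT capabilities for each supported band, e.g.
 * after the TX/RX chainmask configuration has changed.
 */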
638void ath9k_reload_chainmask_settings(struct ath_softc *sc)
639{
640 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT))
641 return;
642
643 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
644 setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
645 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
646 setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
647}
648
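/*
 * Interface combinations advertised to mac80211: effectively unlimited
 * station/P2P-client/WDS interfaces plus up to eight AP/P2P-GO (and mesh,
 * if enabled) interfaces, all on a single channel.
 */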
649static const struct ieee80211_iface_limit if_limits[] = {
650 { .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) |
651 BIT(NL80211_IFTYPE_P2P_CLIENT) |
652 BIT(NL80211_IFTYPE_WDS) },
653 { .max = 8, .types =
654#ifdef CONFIG_MAC80211_MESH
655 BIT(NL80211_IFTYPE_MESH_POINT) |
656#endif
657 BIT(NL80211_IFTYPE_AP) |
658 BIT(NL80211_IFTYPE_P2P_GO) },
659};
660
661static const struct ieee80211_iface_combination if_comb = {
662 .limits = if_limits,
663 .n_limits = ARRAY_SIZE(if_limits),
664 .max_interfaces = 2048,
665 .num_different_channels = 1,
666};
667
668void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
669{
670 struct ath_hw *ah = sc->sc_ah;
671 struct ath_common *common = ath9k_hw_common(ah);
672
673 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
674 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
675 IEEE80211_HW_SIGNAL_DBM |
676 IEEE80211_HW_SUPPORTS_PS |
677 IEEE80211_HW_PS_NULLFUNC_STACK |
678 IEEE80211_HW_SPECTRUM_MGMT |
679 IEEE80211_HW_REPORTS_TX_ACK_STATUS;
680
681 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
682 hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
683
684 if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || ath9k_modparam_nohwcrypt)
685 hw->flags |= IEEE80211_HW_MFP_CAPABLE;
686
687 hw->wiphy->interface_modes =
688 BIT(NL80211_IFTYPE_P2P_GO) |
689 BIT(NL80211_IFTYPE_P2P_CLIENT) |
690 BIT(NL80211_IFTYPE_AP) |
691 BIT(NL80211_IFTYPE_WDS) |
692 BIT(NL80211_IFTYPE_STATION) |
693 BIT(NL80211_IFTYPE_ADHOC) |
694 BIT(NL80211_IFTYPE_MESH_POINT);
695
696 hw->wiphy->iface_combinations = &if_comb;
697 hw->wiphy->n_iface_combinations = 1;
698
699 if (AR_SREV_5416(sc->sc_ah))
700 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
701
702 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
703 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
704 hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
705
706 hw->queues = 4;
707 hw->max_rates = 4;
708 hw->channel_change_time = 5000;
709 hw->max_listen_interval = 1;
710 hw->max_rate_tries = 10;
711 hw->sta_data_size = sizeof(struct ath_node);
712 hw->vif_data_size = sizeof(struct ath_vif);
713
714 hw->wiphy->available_antennas_rx = BIT(ah->caps.max_rxchains) - 1;
715 hw->wiphy->available_antennas_tx = BIT(ah->caps.max_txchains) - 1;
716
717 /* single chain devices with rx diversity */
718 if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
719 hw->wiphy->available_antennas_rx = BIT(0) | BIT(1);
720
721 sc->ant_rx = hw->wiphy->available_antennas_rx;
722 sc->ant_tx = hw->wiphy->available_antennas_tx;
723
724#ifdef CONFIG_ATH9K_RATE_CONTROL
725 hw->rate_control_algorithm = "ath9k_rate_control";
726#endif
727
728 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
729 hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
730 &sc->sbands[IEEE80211_BAND_2GHZ];
731 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
732 hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
733 &sc->sbands[IEEE80211_BAND_5GHZ];
734
735 ath9k_reload_chainmask_settings(sc);
736
737 SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
738}
739
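/*
 * Full device bring-up: initialize the softc and hardware, register the
 * regulatory hooks, set up TX/RX DMA, compute TX power limits, and finally
 * register with mac80211 and create the debugfs entries.
 */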
740int ath9k_init_device(u16 devid, struct ath_softc *sc,
741 const struct ath_bus_ops *bus_ops)
742{
743 struct ieee80211_hw *hw = sc->hw;
744 struct ath_common *common;
745 struct ath_hw *ah;
746 int error = 0;
747 struct ath_regulatory *reg;
748
749 /* Bring up device */
750 error = ath9k_init_softc(devid, sc, bus_ops);
751 if (error != 0)
752 goto error_init;
753
754 ah = sc->sc_ah;
755 common = ath9k_hw_common(ah);
756 ath9k_set_hw_capab(sc, hw);
757
758 /* Initialize regulatory */
759 error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
760 ath9k_reg_notifier);
761 if (error)
762 goto error_regd;
763
764 reg = &common->regulatory;
765
766 /* Setup TX DMA */
767 error = ath_tx_init(sc, ATH_TXBUF);
768 if (error != 0)
769 goto error_tx;
770
771 /* Setup RX DMA */
772 error = ath_rx_init(sc, ATH_RXBUF);
773 if (error != 0)
774 goto error_rx;
775
776 ath9k_init_txpower_limits(sc);
777
778#ifdef CONFIG_MAC80211_LEDS
779 /* must be initialized before ieee80211_register_hw */
780 sc->led_cdev.default_trigger = ieee80211_create_tpt_led_trigger(sc->hw,
781 IEEE80211_TPT_LEDTRIG_FL_RADIO, ath9k_tpt_blink,
782 ARRAY_SIZE(ath9k_tpt_blink));
783#endif
784
785 INIT_WORK(&sc->hw_reset_work, ath_reset_work);
786 INIT_WORK(&sc->hw_check_work, ath_hw_check);
787 INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
788 INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
789
790 /* Register with mac80211 */
791 error = ieee80211_register_hw(hw);
792 if (error)
793 goto error_register;
794
795 error = ath9k_init_debug(ah);
796 if (error) {
797 ath_err(common, "Unable to create debugfs files\n");
798 goto error_world;
799 }
800
801 /* Handle world regulatory */
802 if (!ath_is_world_regd(reg)) {
803 error = regulatory_hint(hw->wiphy, reg->alpha2);
804 if (error)
805 goto error_world;
806 }
807
808 setup_timer(&sc->rx_poll_timer, ath_rx_poll, (unsigned long)sc);
809 sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
810
811 ath_init_leds(sc);
812 ath_start_rfkill_poll(sc);
813
814 return 0;
815
816error_world:
817 ieee80211_unregister_hw(hw);
818error_register:
819 ath_rx_cleanup(sc);
820error_rx:
821 ath_tx_cleanup(sc);
822error_tx:
823 /* Nothing */
824error_regd:
825 ath9k_deinit_softc(sc);
826error_init:
827 return error;
828}
829
830/*****************************/
831/* De-Initialization */
832/*****************************/
833
834static void ath9k_deinit_softc(struct ath_softc *sc)
835{
836 int i = 0;
837
838 if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
839 kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
840
841 if (sc->sbands[IEEE80211_BAND_5GHZ].channels)
842 kfree(sc->sbands[IEEE80211_BAND_5GHZ].channels);
843
844 ath9k_deinit_btcoex(sc);
845
846 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
847 if (ATH_TXQ_SETUP(sc, i))
848 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
849
850 ath9k_hw_deinit(sc->sc_ah);
851 if (sc->dfs_detector != NULL)
852 sc->dfs_detector->exit(sc->dfs_detector);
853
854 kfree(sc->sc_ah);
855 sc->sc_ah = NULL;
856}
857
858void ath9k_deinit_device(struct ath_softc *sc)
859{
860 struct ieee80211_hw *hw = sc->hw;
861
862 ath9k_ps_wakeup(sc);
863
864 wiphy_rfkill_stop_polling(sc->hw->wiphy);
865 ath_deinit_leds(sc);
866
867 ath9k_ps_restore(sc);
868
869 ieee80211_unregister_hw(hw);
870 ath_rx_cleanup(sc);
871 ath_tx_cleanup(sc);
872 ath9k_deinit_softc(sc);
873}
874
875void ath_descdma_cleanup(struct ath_softc *sc,
876 struct ath_descdma *dd,
877 struct list_head *head)
878{
879 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
880 dd->dd_desc_paddr);
881
882 INIT_LIST_HEAD(head);
883 kfree(dd->dd_bufptr);
884 memset(dd, 0, sizeof(*dd));
885}
886
887/************************/
888/* Module Hooks */
889/************************/
890
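/*
 * Module init: register the rate control algorithm first, then the PCI
 * and AHB bus glue; ath9k_exit() tears these down in the reverse order.
 */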
891static int __init ath9k_init(void)
892{
893 int error;
894
895 /* Register rate control algorithm */
896 error = ath_rate_control_register();
897 if (error != 0) {
898 pr_err("Unable to register rate control algorithm: %d\n",
899 error);
900 goto err_out;
901 }
902
903 error = ath_pci_init();
904 if (error < 0) {
905 pr_err("No PCI devices found, driver not installed\n");
906 error = -ENODEV;
907 goto err_rate_unregister;
908 }
909
910 error = ath_ahb_init();
911 if (error < 0) {
912 error = -ENODEV;
913 goto err_pci_exit;
914 }
915
916 return 0;
917
918 err_pci_exit:
919 ath_pci_exit();
920
921 err_rate_unregister:
922 ath_rate_control_unregister();
923 err_out:
924 return error;
925}
926module_init(ath9k_init);
927
928static void __exit ath9k_exit(void)
929{
930 is_ath9k_unloaded = true;
931 ath_ahb_exit();
932 ath_pci_exit();
933 ath_rate_control_unregister();
934 pr_info("%s: Driver unloaded\n", dev_info);
935}
936module_exit(ath9k_exit);