/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/ath9k_platform.h>

#include "ath9k.h"

static char *dev_info = "ath9k";

MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
module_param_named(debug, ath9k_debug, uint, 0);
MODULE_PARM_DESC(debug, "Debugging mask");

int ath9k_modparam_nohwcrypt;
module_param_named(nohwcrypt, ath9k_modparam_nohwcrypt, int, 0444);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");

int led_blink;
module_param_named(blink, led_blink, int, 0444);
MODULE_PARM_DESC(blink, "Enable LED blink on activity");

static int ath9k_btcoex_enable;
module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");

bool is_ath9k_unloaded;
/* We use the hw_value as an index into our private channel structure */

#define CHAN2G(_freq, _idx)  { \
	.band = IEEE80211_BAND_2GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}

#define CHAN5G(_freq, _idx) { \
	.band = IEEE80211_BAND_5GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}
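
/*
 * Example (illustrative only): CHAN2G(2412, 0) expands to the channel 1
 * initializer below, so hw_value 0 is the index used for 2412 MHz:
 *
 *	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2412,
 *	  .hw_value = 0, .max_power = 20 }
 */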

/* Some 2 GHz radios are actually tunable from 2312-2732 MHz
 * in 5 MHz steps; we only list the channels we know we have
 * calibration data for on all cards, to keep this table static. */
static const struct ieee80211_channel ath9k_2ghz_chantable[] = {
	CHAN2G(2412, 0), /* Channel 1 */
	CHAN2G(2417, 1), /* Channel 2 */
	CHAN2G(2422, 2), /* Channel 3 */
	CHAN2G(2427, 3), /* Channel 4 */
	CHAN2G(2432, 4), /* Channel 5 */
	CHAN2G(2437, 5), /* Channel 6 */
	CHAN2G(2442, 6), /* Channel 7 */
	CHAN2G(2447, 7), /* Channel 8 */
	CHAN2G(2452, 8), /* Channel 9 */
	CHAN2G(2457, 9), /* Channel 10 */
	CHAN2G(2462, 10), /* Channel 11 */
	CHAN2G(2467, 11), /* Channel 12 */
	CHAN2G(2472, 12), /* Channel 13 */
	CHAN2G(2484, 13), /* Channel 14 */
};

/* Some 5 GHz radios are actually tunable from XXXX-YYYY
 * in 5 MHz steps; we only list the channels we know we have
 * calibration data for on all cards, to keep this table static. */
static const struct ieee80211_channel ath9k_5ghz_chantable[] = {
	/* _We_ call this UNII 1 */
	CHAN5G(5180, 14), /* Channel 36 */
	CHAN5G(5200, 15), /* Channel 40 */
	CHAN5G(5220, 16), /* Channel 44 */
	CHAN5G(5240, 17), /* Channel 48 */
	/* _We_ call this UNII 2 */
	CHAN5G(5260, 18), /* Channel 52 */
	CHAN5G(5280, 19), /* Channel 56 */
	CHAN5G(5300, 20), /* Channel 60 */
	CHAN5G(5320, 21), /* Channel 64 */
	/* _We_ call this "Middle band" */
	CHAN5G(5500, 22), /* Channel 100 */
	CHAN5G(5520, 23), /* Channel 104 */
	CHAN5G(5540, 24), /* Channel 108 */
	CHAN5G(5560, 25), /* Channel 112 */
	CHAN5G(5580, 26), /* Channel 116 */
	CHAN5G(5600, 27), /* Channel 120 */
	CHAN5G(5620, 28), /* Channel 124 */
	CHAN5G(5640, 29), /* Channel 128 */
	CHAN5G(5660, 30), /* Channel 132 */
	CHAN5G(5680, 31), /* Channel 136 */
	CHAN5G(5700, 32), /* Channel 140 */
	/* _We_ call this UNII 3 */
	CHAN5G(5745, 33), /* Channel 149 */
	CHAN5G(5765, 34), /* Channel 153 */
	CHAN5G(5785, 35), /* Channel 157 */
	CHAN5G(5805, 36), /* Channel 161 */
	CHAN5G(5825, 37), /* Channel 165 */
};

/* Atheros hardware rate code addition for short preamble */
#define SHPCHECK(__hw_rate, __flags) \
	((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)

#define RATE(_bitrate, _hw_rate, _flags) { \
	.bitrate = (_bitrate), \
	.flags = (_flags), \
	.hw_value = (_hw_rate), \
	.hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
}

static struct ieee80211_rate ath9k_legacy_rates[] = {
	RATE(10, 0x1b, 0),
	RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(60, 0x0b, 0),
	RATE(90, 0x0f, 0),
	RATE(120, 0x0a, 0),
	RATE(180, 0x0e, 0),
	RATE(240, 0x09, 0),
	RATE(360, 0x0d, 0),
	RATE(480, 0x08, 0),
	RATE(540, 0x0c, 0),
};
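
/*
 * Note (informational): the first four entries above are the 2 GHz-only
 * CCK rates (1-11 Mbps); ath9k_init_channels_rates() below registers
 * ath9k_legacy_rates + 4 for the 5 GHz band so that only the eight OFDM
 * rates are advertised there.  hw_value_short is the short-preamble
 * variant of the hardware rate code (hw_value | 0x04) for the rates
 * that allow short preamble.
 */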

#ifdef CONFIG_MAC80211_LEDS
static const struct ieee80211_tpt_blink ath9k_tpt_blink[] = {
	{ .throughput = 0 * 1024, .blink_time = 334 },
	{ .throughput = 1 * 1024, .blink_time = 260 },
	{ .throughput = 5 * 1024, .blink_time = 220 },
	{ .throughput = 10 * 1024, .blink_time = 190 },
	{ .throughput = 20 * 1024, .blink_time = 170 },
	{ .throughput = 50 * 1024, .blink_time = 150 },
	{ .throughput = 70 * 1024, .blink_time = 130 },
	{ .throughput = 100 * 1024, .blink_time = 110 },
	{ .throughput = 200 * 1024, .blink_time = 80 },
	{ .throughput = 300 * 1024, .blink_time = 50 },
};
#endif

static void ath9k_deinit_softc(struct ath_softc *sc);

/*
 * Register reads and writes share the same lock. We do this to
 * serialize reads and writes on Atheros 802.11n PCI devices only.
 * This is required as the FIFO on these devices can sanely accept
 * only 2 requests.
 */

static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_softc *sc = (struct ath_softc *) common->priv;

	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
		unsigned long flags;
		spin_lock_irqsave(&sc->sc_serial_rw, flags);
		iowrite32(val, sc->mem + reg_offset);
		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
	} else
		iowrite32(val, sc->mem + reg_offset);
}

static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_softc *sc = (struct ath_softc *) common->priv;
	u32 val;

	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
		unsigned long flags;
		spin_lock_irqsave(&sc->sc_serial_rw, flags);
		val = ioread32(sc->mem + reg_offset);
		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
	} else
		val = ioread32(sc->mem + reg_offset);
	return val;
}

static unsigned int ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 clr)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_softc *sc = (struct ath_softc *) common->priv;
	unsigned long uninitialized_var(flags);
	u32 val;

	if (ah->config.serialize_regmode == SER_REG_MODE_ON)
		spin_lock_irqsave(&sc->sc_serial_rw, flags);

	val = ioread32(sc->mem + reg_offset);
	val &= ~clr;
	val |= set;
	iowrite32(val, sc->mem + reg_offset);

	if (ah->config.serialize_regmode == SER_REG_MODE_ON)
		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);

	return val;
}
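
/*
 * Note (informational): unlike a read followed by a separate write,
 * ath9k_reg_rmw() holds sc_serial_rw across the whole read-modify-write
 * sequence when serialization is enabled, so no other register access
 * can slip in between.  The rest of the driver reaches these helpers
 * indirectly through ah->reg_ops (installed in ath9k_init_softc()
 * below) rather than calling them directly.
 */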

/**************************/
/*     Initialization     */
/**************************/

static void setup_ht_cap(struct ath_softc *sc,
			 struct ieee80211_sta_ht_cap *ht_info)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	u8 tx_streams, rx_streams;
	int i, max_streams;

	ht_info->ht_supported = true;
	ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		       IEEE80211_HT_CAP_SM_PS |
		       IEEE80211_HT_CAP_SGI_40 |
		       IEEE80211_HT_CAP_DSSSCCK40;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
		ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
		ht_info->cap |= IEEE80211_HT_CAP_SGI_20;

	ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
	ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;

	if (AR_SREV_9485(ah))
		max_streams = 1;
	else if (AR_SREV_9300_20_OR_LATER(ah))
		max_streams = 3;
	else
		max_streams = 2;

	if (AR_SREV_9280_20_OR_LATER(ah)) {
		if (max_streams >= 2)
			ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
		ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
	}

	/* set up supported mcs set */
	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
	tx_streams = ath9k_cmn_count_streams(common->tx_chainmask, max_streams);
	rx_streams = ath9k_cmn_count_streams(common->rx_chainmask, max_streams);

	ath_dbg(common, ATH_DBG_CONFIG,
		"TX streams %d, RX streams: %d\n",
		tx_streams, rx_streams);

	if (tx_streams != rx_streams) {
		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
		ht_info->mcs.tx_params |= ((tx_streams - 1) <<
				IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
	}

	for (i = 0; i < rx_streams; i++)
		ht_info->mcs.rx_mask[i] = 0xff;

	ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
}
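
/*
 * Note (informational): each 0xff byte written into mcs.rx_mask[] above
 * advertises the eight MCS indices of one spatial stream, so a device
 * with rx_streams == 2 ends up advertising MCS 0-15 and a 3-stream
 * device MCS 0-23.
 */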

static int ath9k_reg_notifier(struct wiphy *wiphy,
			      struct regulatory_request *request)
{
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct ath_softc *sc = hw->priv;
	struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);

	return ath_reg_notifier_apply(wiphy, request, reg);
}

/*
 * Allocate both the DMA descriptor structure and the buffers it
 * contains.  These hold the descriptors used by the system.
 */
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
		      struct list_head *head, const char *name,
		      int nbuf, int ndesc, bool is_tx)
{
#define	DS2PHYS(_dd, _ds)						\
	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u8 *ds;
	struct ath_buf *bf;
	int i, bsize, error, desc_len;

	ath_dbg(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
		name, nbuf, ndesc);

	INIT_LIST_HEAD(head);

	if (is_tx)
		desc_len = sc->sc_ah->caps.tx_desc_len;
	else
		desc_len = sizeof(struct ath_desc);

	/* ath_desc must be a multiple of DWORDs */
	if ((desc_len % 4) != 0) {
		ath_err(common, "ath_desc not DWORD aligned\n");
		BUG_ON((desc_len % 4) != 0);
		error = -ENOMEM;
		goto fail;
	}

	dd->dd_desc_len = desc_len * nbuf * ndesc;

	/*
	 * Need additional DMA memory because we can't use
	 * descriptors that cross the 4K page boundary. Assume
	 * one skipped descriptor per 4K page.
	 */
	if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
		u32 ndesc_skipped =
			ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
		u32 dma_len;

		while (ndesc_skipped) {
			dma_len = ndesc_skipped * desc_len;
			dd->dd_desc_len += dma_len;

			ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
		}
	}

	/* allocate descriptors */
	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
					 &dd->dd_desc_paddr, GFP_KERNEL);
	if (dd->dd_desc == NULL) {
		error = -ENOMEM;
		goto fail;
	}
	ds = (u8 *) dd->dd_desc;
	ath_dbg(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
		name, ds, (u32) dd->dd_desc_len,
		ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

	/* allocate buffers */
	bsize = sizeof(struct ath_buf) * nbuf;
	bf = kzalloc(bsize, GFP_KERNEL);
	if (bf == NULL) {
		error = -ENOMEM;
		goto fail2;
	}
	dd->dd_bufptr = bf;

	for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
		bf->bf_desc = ds;
		bf->bf_daddr = DS2PHYS(dd, ds);

		if (!(sc->sc_ah->caps.hw_caps &
		      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
			/*
			 * Skip descriptor addresses which can cause 4KB
			 * boundary crossing (addr + length) with a 32 dword
			 * descriptor fetch.
			 */
			while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
				BUG_ON((caddr_t) bf->bf_desc >=
				       ((caddr_t) dd->dd_desc +
					dd->dd_desc_len));

				ds += (desc_len * ndesc);
				bf->bf_desc = ds;
				bf->bf_daddr = DS2PHYS(dd, ds);
			}
		}
		list_add_tail(&bf->list, head);
	}
	return 0;
fail2:
	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);
fail:
	memset(dd, 0, sizeof(*dd));
	return error;
#undef ATH_DESC_4KB_BOUND_CHECK
#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
#undef DS2PHYS
}
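
/*
 * Worked example for the 4 KB padding above (illustrative numbers only):
 * with desc_len = 64, nbuf = 512 and ndesc = 1 the initial request is
 * 32768 bytes, i.e. 8 full 4 KB pages.  The first loop pass therefore
 * adds 8 * 64 = 512 bytes for the descriptors that would have straddled
 * a page boundary; 512 / 4096 == 0, so the loop stops after one
 * iteration and dd_desc_len becomes 33280 bytes.
 */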

void ath9k_init_crypto(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int i = 0;

	/* Get the hardware key cache size. */
	common->keymax = AR_KEYTABLE_SIZE;

	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < common->keymax; i++)
		ath_hw_keyreset(common, (u16) i);

	/*
	 * Check whether the separate key cache entries
	 * are required to handle both tx+rx MIC keys.
	 * With split mic keys the number of stations is limited to 27;
	 * otherwise 59.
	 */
	if (sc->sc_ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA)
		common->crypt_caps |= ATH_CRYPT_CAP_MIC_COMBINED;
}

static int ath9k_init_btcoex(struct ath_softc *sc)
{
	struct ath_txq *txq;
	int r;

	switch (sc->sc_ah->btcoex_hw.scheme) {
	case ATH_BTCOEX_CFG_NONE:
		break;
	case ATH_BTCOEX_CFG_2WIRE:
		ath9k_hw_btcoex_init_2wire(sc->sc_ah);
		break;
	case ATH_BTCOEX_CFG_3WIRE:
		ath9k_hw_btcoex_init_3wire(sc->sc_ah);
		r = ath_init_btcoex_timer(sc);
		if (r)
			return -1;
		txq = sc->tx.txq_map[WME_AC_BE];
		ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
		sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
		break;
	default:
		WARN_ON(1);
		break;
	}

	return 0;
}

static int ath9k_init_queues(struct ath_softc *sc)
{
	int i = 0;

	sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
	sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);

	sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
	ath_cabq_update(sc);

	for (i = 0; i < WME_NUM_AC; i++) {
		sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
		sc->tx.txq_map[i]->mac80211_qnum = i;
	}
	return 0;
}

static int ath9k_init_channels_rates(struct ath_softc *sc)
{
	void *channels;

	BUILD_BUG_ON(ARRAY_SIZE(ath9k_2ghz_chantable) +
		     ARRAY_SIZE(ath9k_5ghz_chantable) !=
		     ATH9K_NUM_CHANNELS);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) {
		channels = kmemdup(ath9k_2ghz_chantable,
			sizeof(ath9k_2ghz_chantable), GFP_KERNEL);
		if (!channels)
			return -ENOMEM;

		sc->sbands[IEEE80211_BAND_2GHZ].channels = channels;
		sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
		sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
			ARRAY_SIZE(ath9k_2ghz_chantable);
		sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
		sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
			ARRAY_SIZE(ath9k_legacy_rates);
	}

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) {
		channels = kmemdup(ath9k_5ghz_chantable,
			sizeof(ath9k_5ghz_chantable), GFP_KERNEL);
		if (!channels) {
			if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
				kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
			return -ENOMEM;
		}

		sc->sbands[IEEE80211_BAND_5GHZ].channels = channels;
		sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
		sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
			ARRAY_SIZE(ath9k_5ghz_chantable);
		sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
			ath9k_legacy_rates + 4;
		sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
			ARRAY_SIZE(ath9k_legacy_rates) - 4;
	}
	return 0;
}

static void ath9k_init_misc(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int i = 0;
	setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);

	sc->config.txpowlimit = ATH_TXPOWER_MAX;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
		sc->sc_flags |= SC_OP_TXAGGR;
		sc->sc_flags |= SC_OP_RXAGGR;
	}

	common->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
	common->rx_chainmask = sc->sc_ah->caps.rx_chainmask;

	ath9k_hw_set_diversity(sc->sc_ah, true);
	sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);

	memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);

	sc->beacon.slottime = ATH9K_SLOT_TIME_9;

	for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++)
		sc->beacon.bslot[i] = NULL;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
		sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;
}

static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
			    const struct ath_bus_ops *bus_ops)
{
	struct ath9k_platform_data *pdata = sc->dev->platform_data;
	struct ath_hw *ah = NULL;
	struct ath_common *common;
	int ret = 0, i;
	int csz = 0;

	ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
	if (!ah)
		return -ENOMEM;

	ah->hw = sc->hw;
	ah->hw_version.devid = devid;
	ah->hw_version.subsysid = subsysid;
	ah->reg_ops.read = ath9k_ioread32;
	ah->reg_ops.write = ath9k_iowrite32;
	ah->reg_ops.rmw = ath9k_reg_rmw;
	sc->sc_ah = ah;

	if (!pdata) {
		ah->ah_flags |= AH_USE_EEPROM;
		sc->sc_ah->led_pin = -1;
	} else {
		sc->sc_ah->gpio_mask = pdata->gpio_mask;
		sc->sc_ah->gpio_val = pdata->gpio_val;
		sc->sc_ah->led_pin = pdata->led_pin;
		ah->is_clk_25mhz = pdata->is_clk_25mhz;
	}

	common = ath9k_hw_common(ah);
	common->ops = &ah->reg_ops;
	common->bus_ops = bus_ops;
	common->ah = ah;
	common->hw = sc->hw;
	common->priv = sc;
	common->debug_mask = ath9k_debug;
	common->btcoex_enabled = ath9k_btcoex_enable == 1;
	common->disable_ani = false;
	spin_lock_init(&common->cc_lock);

	spin_lock_init(&sc->sc_serial_rw);
	spin_lock_init(&sc->sc_pm_lock);
	mutex_init(&sc->mutex);
#ifdef CONFIG_ATH9K_DEBUGFS
	spin_lock_init(&sc->nodes_lock);
	INIT_LIST_HEAD(&sc->nodes);
#endif
	tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
	tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
		     (unsigned long)sc);

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	ath_read_cachesize(common, &csz);
	common->cachelsz = csz << 2; /* convert to bytes */

	/* Initializes the hardware for all supported chipsets */
	ret = ath9k_hw_init(ah);
	if (ret)
		goto err_hw;

	if (pdata && pdata->macaddr)
		memcpy(common->macaddr, pdata->macaddr, ETH_ALEN);

	ret = ath9k_init_queues(sc);
	if (ret)
		goto err_queues;

	ret = ath9k_init_btcoex(sc);
	if (ret)
		goto err_btcoex;

	ret = ath9k_init_channels_rates(sc);
	if (ret)
		goto err_btcoex;

	ath9k_init_crypto(sc);
	ath9k_init_misc(sc);

	return 0;

err_btcoex:
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
err_queues:
	ath9k_hw_deinit(ah);
err_hw:

	kfree(ah);
	sc->sc_ah = NULL;

	return ret;
}

static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
{
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *chan;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
	int i;

	sband = &sc->sbands[band];
	for (i = 0; i < sband->n_channels; i++) {
		chan = &sband->channels[i];
		ah->curchan = &ah->channels[chan->hw_value];
		ath9k_cmn_update_ichannel(ah->curchan, chan, NL80211_CHAN_HT20);
		ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true);
		chan->max_power = reg->max_power_level / 2;
	}
}

static void ath9k_init_txpower_limits(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_channel *curchan = ah->curchan;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
		ath9k_init_band_txpower(sc, IEEE80211_BAND_2GHZ);
	if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
		ath9k_init_band_txpower(sc, IEEE80211_BAND_5GHZ);

	ah->curchan = curchan;
}
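
/*
 * Note (informational): ath_regulatory's max_power_level above is kept
 * in half-dBm steps, which is why ath9k_init_band_txpower() divides it
 * by two before handing the per-channel limit to mac80211, whose
 * max_power field is in dBm.
 */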

void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
		IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
		IEEE80211_HW_SIGNAL_DBM |
		IEEE80211_HW_SUPPORTS_PS |
		IEEE80211_HW_PS_NULLFUNC_STACK |
		IEEE80211_HW_SPECTRUM_MGMT |
		IEEE80211_HW_REPORTS_TX_ACK_STATUS;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
		hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;

	if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || ath9k_modparam_nohwcrypt)
		hw->flags |= IEEE80211_HW_MFP_CAPABLE;

	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_WDS) |
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC) |
		BIT(NL80211_IFTYPE_MESH_POINT);

	if (AR_SREV_5416(sc->sc_ah))
		hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;

	hw->queues = 4;
	hw->max_rates = 4;
	hw->channel_change_time = 5000;
	hw->max_listen_interval = 10;
	hw->max_rate_tries = 10;
	hw->sta_data_size = sizeof(struct ath_node);
	hw->vif_data_size = sizeof(struct ath_vif);

#ifdef CONFIG_ATH9K_RATE_CONTROL
	hw->rate_control_algorithm = "ath9k_rate_control";
#endif

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
		hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
			&sc->sbands[IEEE80211_BAND_2GHZ];
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
		hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
			&sc->sbands[IEEE80211_BAND_5GHZ];

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
			setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
			setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
	}

	SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
}

int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
		      const struct ath_bus_ops *bus_ops)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ath_common *common;
	struct ath_hw *ah;
	int error = 0;
	struct ath_regulatory *reg;

	/* Bring up device */
	error = ath9k_init_softc(devid, sc, subsysid, bus_ops);
	if (error != 0)
		goto error_init;

	ah = sc->sc_ah;
	common = ath9k_hw_common(ah);
	ath9k_set_hw_capab(sc, hw);

	/* Initialize regulatory */
	error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
			      ath9k_reg_notifier);
	if (error)
		goto error_regd;

	reg = &common->regulatory;

	/* Setup TX DMA */
	error = ath_tx_init(sc, ATH_TXBUF);
	if (error != 0)
		goto error_tx;

	/* Setup RX DMA */
	error = ath_rx_init(sc, ATH_RXBUF);
	if (error != 0)
		goto error_rx;

	ath9k_init_txpower_limits(sc);

#ifdef CONFIG_MAC80211_LEDS
	/* must be initialized before ieee80211_register_hw */
	sc->led_cdev.default_trigger = ieee80211_create_tpt_led_trigger(sc->hw,
		IEEE80211_TPT_LEDTRIG_FL_RADIO, ath9k_tpt_blink,
		ARRAY_SIZE(ath9k_tpt_blink));
#endif

	/* Register with mac80211 */
	error = ieee80211_register_hw(hw);
	if (error)
		goto error_register;

	error = ath9k_init_debug(ah);
	if (error) {
		ath_err(common, "Unable to create debugfs files\n");
		goto error_world;
	}

	/* Handle world regulatory */
	if (!ath_is_world_regd(reg)) {
		error = regulatory_hint(hw->wiphy, reg->alpha2);
		if (error)
			goto error_world;
	}

	INIT_WORK(&sc->hw_check_work, ath_hw_check);
	INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
	INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
	sc->last_rssi = ATH_RSSI_DUMMY_MARKER;

	ath_init_leds(sc);
	ath_start_rfkill_poll(sc);

	return 0;

error_world:
	ieee80211_unregister_hw(hw);
error_register:
	ath_rx_cleanup(sc);
error_rx:
	ath_tx_cleanup(sc);
error_tx:
	/* Nothing */
error_regd:
	ath9k_deinit_softc(sc);
error_init:
	return error;
}

/*****************************/
/*     De-Initialization     */
/*****************************/

static void ath9k_deinit_softc(struct ath_softc *sc)
{
	int i = 0;

	if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
		kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);

	if (sc->sbands[IEEE80211_BAND_5GHZ].channels)
		kfree(sc->sbands[IEEE80211_BAND_5GHZ].channels);

	if ((sc->btcoex.no_stomp_timer) &&
	    sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
		ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);

	ath9k_hw_deinit(sc->sc_ah);

	kfree(sc->sc_ah);
	sc->sc_ah = NULL;
}

void ath9k_deinit_device(struct ath_softc *sc)
{
	struct ieee80211_hw *hw = sc->hw;

	ath9k_ps_wakeup(sc);

	wiphy_rfkill_stop_polling(sc->hw->wiphy);
	ath_deinit_leds(sc);

	ath9k_ps_restore(sc);

	ieee80211_unregister_hw(hw);
	ath_rx_cleanup(sc);
	ath_tx_cleanup(sc);
	ath9k_deinit_softc(sc);
}

void ath_descdma_cleanup(struct ath_softc *sc,
			 struct ath_descdma *dd,
			 struct list_head *head)
{
	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);

	INIT_LIST_HEAD(head);
	kfree(dd->dd_bufptr);
	memset(dd, 0, sizeof(*dd));
}

/************************/
/*     Module Hooks     */
/************************/

static int __init ath9k_init(void)
{
	int error;

	/* Register rate control algorithm */
	error = ath_rate_control_register();
	if (error != 0) {
		printk(KERN_ERR
			"ath9k: Unable to register rate control "
			"algorithm: %d\n",
			error);
		goto err_out;
	}

	error = ath_pci_init();
	if (error < 0) {
		printk(KERN_ERR
			"ath9k: No PCI devices found, driver not installed.\n");
		error = -ENODEV;
		goto err_rate_unregister;
	}

	error = ath_ahb_init();
	if (error < 0) {
		error = -ENODEV;
		goto err_pci_exit;
	}

	return 0;

 err_pci_exit:
	ath_pci_exit();

 err_rate_unregister:
	ath_rate_control_unregister();
 err_out:
	return error;
}
module_init(ath9k_init);

static void __exit ath9k_exit(void)
{
	is_ath9k_unloaded = true;
	ath_ahb_exit();
	ath_pci_exit();
	ath_rate_control_unregister();
	printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
}
module_exit(ath9k_exit);