/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/ath9k_platform.h>

#include "ath9k.h"

static char *dev_info = "ath9k";

MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
module_param_named(debug, ath9k_debug, uint, 0);
MODULE_PARM_DESC(debug, "Debugging mask");

int ath9k_modparam_nohwcrypt;
module_param_named(nohwcrypt, ath9k_modparam_nohwcrypt, int, 0444);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");

int led_blink;
module_param_named(blink, led_blink, int, 0444);
MODULE_PARM_DESC(blink, "Enable LED blink on activity");

static int ath9k_btcoex_enable;
module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");

bool is_ath9k_unloaded;
/* We use the hw_value as an index into our private channel structure */

#define CHAN2G(_freq, _idx)  { \
        .band = IEEE80211_BAND_2GHZ, \
        .center_freq = (_freq), \
        .hw_value = (_idx), \
        .max_power = 20, \
}

#define CHAN5G(_freq, _idx) { \
        .band = IEEE80211_BAND_5GHZ, \
        .center_freq = (_freq), \
        .hw_value = (_idx), \
        .max_power = 20, \
}

/*
 * Some 2 GHz radios are actually tunable from 2312-2732 MHz
 * in 5 MHz steps. We only support the channels for which we
 * know we have calibration data on all cards, so this table
 * can stay static.
 */
static const struct ieee80211_channel ath9k_2ghz_chantable[] = {
        CHAN2G(2412, 0), /* Channel 1 */
        CHAN2G(2417, 1), /* Channel 2 */
        CHAN2G(2422, 2), /* Channel 3 */
        CHAN2G(2427, 3), /* Channel 4 */
        CHAN2G(2432, 4), /* Channel 5 */
        CHAN2G(2437, 5), /* Channel 6 */
        CHAN2G(2442, 6), /* Channel 7 */
        CHAN2G(2447, 7), /* Channel 8 */
        CHAN2G(2452, 8), /* Channel 9 */
        CHAN2G(2457, 9), /* Channel 10 */
        CHAN2G(2462, 10), /* Channel 11 */
        CHAN2G(2467, 11), /* Channel 12 */
        CHAN2G(2472, 12), /* Channel 13 */
        CHAN2G(2484, 13), /* Channel 14 */
};

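/*
 * Note: the hw_value indices continue from the 2 GHz table into the
 * 5 GHz table below; ath9k_init_channels_rates() checks at build time
 * that the two tables together add up to ATH9K_NUM_CHANNELS.
 */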

/*
 * Some 5 GHz radios are actually tunable from XXXX-YYYY MHz
 * in 5 MHz steps. We only support the channels for which we
 * know we have calibration data on all cards, so this table
 * can stay static.
 */
static const struct ieee80211_channel ath9k_5ghz_chantable[] = {
        /* _We_ call this UNII 1 */
        CHAN5G(5180, 14), /* Channel 36 */
        CHAN5G(5200, 15), /* Channel 40 */
        CHAN5G(5220, 16), /* Channel 44 */
        CHAN5G(5240, 17), /* Channel 48 */
        /* _We_ call this UNII 2 */
        CHAN5G(5260, 18), /* Channel 52 */
        CHAN5G(5280, 19), /* Channel 56 */
        CHAN5G(5300, 20), /* Channel 60 */
        CHAN5G(5320, 21), /* Channel 64 */
        /* _We_ call this "Middle band" */
        CHAN5G(5500, 22), /* Channel 100 */
        CHAN5G(5520, 23), /* Channel 104 */
        CHAN5G(5540, 24), /* Channel 108 */
        CHAN5G(5560, 25), /* Channel 112 */
        CHAN5G(5580, 26), /* Channel 116 */
        CHAN5G(5600, 27), /* Channel 120 */
        CHAN5G(5620, 28), /* Channel 124 */
        CHAN5G(5640, 29), /* Channel 128 */
        CHAN5G(5660, 30), /* Channel 132 */
        CHAN5G(5680, 31), /* Channel 136 */
        CHAN5G(5700, 32), /* Channel 140 */
        /* _We_ call this UNII 3 */
        CHAN5G(5745, 33), /* Channel 149 */
        CHAN5G(5765, 34), /* Channel 153 */
        CHAN5G(5785, 35), /* Channel 157 */
        CHAN5G(5805, 36), /* Channel 161 */
        CHAN5G(5825, 37), /* Channel 165 */
};

/* Atheros hardware rate code addition for short preamble */
#define SHPCHECK(__hw_rate, __flags) \
        ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)

#define RATE(_bitrate, _hw_rate, _flags) { \
        .bitrate = (_bitrate), \
        .flags = (_flags), \
        .hw_value = (_hw_rate), \
        .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
}

static struct ieee80211_rate ath9k_legacy_rates[] = {
        RATE(10, 0x1b, 0),
        RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
        RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
        RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
        RATE(60, 0x0b, 0),
        RATE(90, 0x0f, 0),
        RATE(120, 0x0a, 0),
        RATE(180, 0x0e, 0),
        RATE(240, 0x09, 0),
        RATE(360, 0x0d, 0),
        RATE(480, 0x08, 0),
        RATE(540, 0x0c, 0),
};

#ifdef CONFIG_MAC80211_LEDS
static const struct ieee80211_tpt_blink ath9k_tpt_blink[] = {
        { .throughput = 0 * 1024, .blink_time = 334 },
        { .throughput = 1 * 1024, .blink_time = 260 },
        { .throughput = 5 * 1024, .blink_time = 220 },
        { .throughput = 10 * 1024, .blink_time = 190 },
        { .throughput = 20 * 1024, .blink_time = 170 },
        { .throughput = 50 * 1024, .blink_time = 150 },
        { .throughput = 70 * 1024, .blink_time = 130 },
        { .throughput = 100 * 1024, .blink_time = 110 },
        { .throughput = 200 * 1024, .blink_time = 80 },
        { .throughput = 300 * 1024, .blink_time = 50 },
};
#endif

static void ath9k_deinit_softc(struct ath_softc *sc);

/*
 * Register reads and writes share the same lock. We do this to serialize
 * reads and writes on Atheros 802.11n PCI devices only. This is required
 * as the FIFO on these devices can only sanely accept two requests at a time.
 */
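/*
 * These accessors are installed as ah->reg_ops.read/.write/.rmw in
 * ath9k_init_softc() below.
 */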

static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
{
        struct ath_hw *ah = (struct ath_hw *) hw_priv;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_softc *sc = (struct ath_softc *) common->priv;

        if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
                unsigned long flags;
                spin_lock_irqsave(&sc->sc_serial_rw, flags);
                iowrite32(val, sc->mem + reg_offset);
                spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
        } else
                iowrite32(val, sc->mem + reg_offset);
}

static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
{
        struct ath_hw *ah = (struct ath_hw *) hw_priv;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_softc *sc = (struct ath_softc *) common->priv;
        u32 val;

        if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
                unsigned long flags;
                spin_lock_irqsave(&sc->sc_serial_rw, flags);
                val = ioread32(sc->mem + reg_offset);
                spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
        } else
                val = ioread32(sc->mem + reg_offset);
        return val;
}

static unsigned int __ath9k_reg_rmw(struct ath_softc *sc, u32 reg_offset,
                                    u32 set, u32 clr)
{
        u32 val;

        val = ioread32(sc->mem + reg_offset);
        val &= ~clr;
        val |= set;
        iowrite32(val, sc->mem + reg_offset);

        return val;
}

static unsigned int ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 clr)
{
        struct ath_hw *ah = (struct ath_hw *) hw_priv;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_softc *sc = (struct ath_softc *) common->priv;
        unsigned long uninitialized_var(flags);
        u32 val;

        if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
                spin_lock_irqsave(&sc->sc_serial_rw, flags);
                val = __ath9k_reg_rmw(sc, reg_offset, set, clr);
                spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
        } else
                val = __ath9k_reg_rmw(sc, reg_offset, set, clr);

        return val;
}

/**************************/
/*     Initialization     */
/**************************/

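/*
 * Describe the HT capabilities we advertise to mac80211: 20/40 MHz
 * operation, SM power save, short GI, A-MPDU limits, STBC on AR9280 and
 * later, and an MCS set derived from the TX/RX chainmasks (up to three
 * streams on AR9300-family chips, one on AR9330/AR9485).
 */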
static void setup_ht_cap(struct ath_softc *sc,
                         struct ieee80211_sta_ht_cap *ht_info)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        u8 tx_streams, rx_streams;
        int i, max_streams;

        ht_info->ht_supported = true;
        ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
                       IEEE80211_HT_CAP_SM_PS |
                       IEEE80211_HT_CAP_SGI_40 |
                       IEEE80211_HT_CAP_DSSSCCK40;

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
                ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
                ht_info->cap |= IEEE80211_HT_CAP_SGI_20;

        ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
        ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;

        if (AR_SREV_9330(ah) || AR_SREV_9485(ah))
                max_streams = 1;
        else if (AR_SREV_9300_20_OR_LATER(ah))
                max_streams = 3;
        else
                max_streams = 2;

        if (AR_SREV_9280_20_OR_LATER(ah)) {
                if (max_streams >= 2)
                        ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
                ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
        }

        /* set up supported mcs set */
        memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
        tx_streams = ath9k_cmn_count_streams(common->tx_chainmask, max_streams);
        rx_streams = ath9k_cmn_count_streams(common->rx_chainmask, max_streams);

        ath_dbg(common, ATH_DBG_CONFIG,
                "TX streams %d, RX streams: %d\n",
                tx_streams, rx_streams);

        if (tx_streams != rx_streams) {
                ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
                ht_info->mcs.tx_params |= ((tx_streams - 1) <<
                                IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
        }

        for (i = 0; i < rx_streams; i++)
                ht_info->mcs.rx_mask[i] = 0xff;

        ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
}

static int ath9k_reg_notifier(struct wiphy *wiphy,
                              struct regulatory_request *request)
{
        struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
        struct ath_softc *sc = hw->priv;
        struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);

        return ath_reg_notifier_apply(wiphy, request, reg);
}

/*
 * This function will allocate both the DMA descriptor structure, and the
 * buffers it contains. These are used to contain the descriptors used
 * by the system.
 */
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
                      struct list_head *head, const char *name,
                      int nbuf, int ndesc, bool is_tx)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        u8 *ds;
        struct ath_buf *bf;
        int i, bsize, error, desc_len;

        ath_dbg(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
                name, nbuf, ndesc);

        INIT_LIST_HEAD(head);

        if (is_tx)
                desc_len = sc->sc_ah->caps.tx_desc_len;
        else
                desc_len = sizeof(struct ath_desc);

        /* ath_desc must be a multiple of DWORDs */
        if ((desc_len % 4) != 0) {
                ath_err(common, "ath_desc not DWORD aligned\n");
                BUG_ON((desc_len % 4) != 0);
                error = -ENOMEM;
                goto fail;
        }

        dd->dd_desc_len = desc_len * nbuf * ndesc;

        /*
         * Need additional DMA memory because we can't use
         * descriptors that cross the 4K page boundary. Assume
         * one skipped descriptor per 4K page.
         */
        if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
                u32 ndesc_skipped =
                        ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
                u32 dma_len;

                while (ndesc_skipped) {
                        dma_len = ndesc_skipped * desc_len;
                        dd->dd_desc_len += dma_len;

                        ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
                }
        }

        /* allocate descriptors */
        dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
                                         &dd->dd_desc_paddr, GFP_KERNEL);
        if (dd->dd_desc == NULL) {
                error = -ENOMEM;
                goto fail;
        }
        ds = (u8 *) dd->dd_desc;
        ath_dbg(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
                name, ds, (u32) dd->dd_desc_len,
                ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

        /* allocate buffers */
        bsize = sizeof(struct ath_buf) * nbuf;
        bf = kzalloc(bsize, GFP_KERNEL);
        if (bf == NULL) {
                error = -ENOMEM;
                goto fail2;
        }
        dd->dd_bufptr = bf;

        for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
                bf->bf_desc = ds;
                bf->bf_daddr = DS2PHYS(dd, ds);

                if (!(sc->sc_ah->caps.hw_caps &
                      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
                        /*
                         * Skip descriptor addresses which can cause 4KB
                         * boundary crossing (addr + length) with a 32 dword
                         * descriptor fetch.
                         */
                        while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
                                BUG_ON((caddr_t) bf->bf_desc >=
                                       ((caddr_t) dd->dd_desc +
                                        dd->dd_desc_len));

                                ds += (desc_len * ndesc);
                                bf->bf_desc = ds;
                                bf->bf_daddr = DS2PHYS(dd, ds);
                        }
                }
                list_add_tail(&bf->list, head);
        }
        return 0;
fail2:
        dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
                          dd->dd_desc_paddr);
fail:
        memset(dd, 0, sizeof(*dd));
        return error;
}

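/*
 * Reset the hardware key cache (its contents are not guaranteed to be
 * cleared on power-up) and record whether TX and RX TKIP MIC keys can
 * share a single key cache entry (ATH_CRYPT_CAP_MIC_COMBINED).
 */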
void ath9k_init_crypto(struct ath_softc *sc)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        int i = 0;

        /* Get the hardware key cache size. */
        common->keymax = AR_KEYTABLE_SIZE;

        /*
         * Reset the key cache since some parts do not
         * reset the contents on initial power up.
         */
        for (i = 0; i < common->keymax; i++)
                ath_hw_keyreset(common, (u16) i);

        /*
         * Check whether the separate key cache entries
         * are required to handle both tx+rx MIC keys.
         * With split mic keys the number of stations is limited
         * to 27 otherwise 59.
         */
        if (sc->sc_ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA)
                common->crypt_caps |= ATH_CRYPT_CAP_MIC_COMBINED;
}

static int ath9k_init_btcoex(struct ath_softc *sc)
{
        struct ath_txq *txq;
        int r;

        switch (sc->sc_ah->btcoex_hw.scheme) {
        case ATH_BTCOEX_CFG_NONE:
                break;
        case ATH_BTCOEX_CFG_2WIRE:
                ath9k_hw_btcoex_init_2wire(sc->sc_ah);
                break;
        case ATH_BTCOEX_CFG_3WIRE:
                ath9k_hw_btcoex_init_3wire(sc->sc_ah);
                r = ath_init_btcoex_timer(sc);
                if (r)
                        return -1;
                txq = sc->tx.txq_map[WME_AC_BE];
                ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
                sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
                break;
        default:
                WARN_ON(1);
                break;
        }

        return 0;
}

static int ath9k_init_queues(struct ath_softc *sc)
{
        int i = 0;

        sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
        sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);

        sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
        ath_cabq_update(sc);

        for (i = 0; i < WME_NUM_AC; i++) {
                sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
                sc->tx.txq_map[i]->mac80211_qnum = i;
        }
        return 0;
}

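/*
 * Copy the static channel tables into the per-device sband structures and
 * attach the legacy bitrates. The 5 GHz band skips the first four entries
 * of ath9k_legacy_rates (the 1/2/5.5/11 Mbps CCK rates).
 */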
static int ath9k_init_channels_rates(struct ath_softc *sc)
{
        void *channels;

        BUILD_BUG_ON(ARRAY_SIZE(ath9k_2ghz_chantable) +
                     ARRAY_SIZE(ath9k_5ghz_chantable) !=
                     ATH9K_NUM_CHANNELS);

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) {
                channels = kmemdup(ath9k_2ghz_chantable,
                                   sizeof(ath9k_2ghz_chantable), GFP_KERNEL);
                if (!channels)
                        return -ENOMEM;

                sc->sbands[IEEE80211_BAND_2GHZ].channels = channels;
                sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
                sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
                        ARRAY_SIZE(ath9k_2ghz_chantable);
                sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
                sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
                        ARRAY_SIZE(ath9k_legacy_rates);
        }

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) {
                channels = kmemdup(ath9k_5ghz_chantable,
                                   sizeof(ath9k_5ghz_chantable), GFP_KERNEL);
                if (!channels) {
                        if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
                                kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
                        return -ENOMEM;
                }

                sc->sbands[IEEE80211_BAND_5GHZ].channels = channels;
                sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
                sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
                        ARRAY_SIZE(ath9k_5ghz_chantable);
                sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
                        ath9k_legacy_rates + 4;
                sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
                        ARRAY_SIZE(ath9k_legacy_rates) - 4;
        }
        return 0;
}

static void ath9k_init_misc(struct ath_softc *sc)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        int i = 0;
        setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);

        sc->config.txpowlimit = ATH_TXPOWER_MAX;

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
                sc->sc_flags |= SC_OP_TXAGGR;
                sc->sc_flags |= SC_OP_RXAGGR;
        }

        common->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
        common->rx_chainmask = sc->sc_ah->caps.rx_chainmask;

        ath9k_hw_set_diversity(sc->sc_ah, true);
        sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);

        memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);

        sc->beacon.slottime = ATH9K_SLOT_TIME_9;

        for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++)
                sc->beacon.bslot[i] = NULL;

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
                sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;
}

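/*
 * Allocate and set up the ath_hw instance for this device: install the
 * register accessors, apply any platform data (MAC address, GPIO/LED
 * configuration, clock and reset hooks), initialize locks and tasklets,
 * then bring up the hardware, TX queues, btcoex and channel/rate tables.
 */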
static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
                            const struct ath_bus_ops *bus_ops)
{
        struct ath9k_platform_data *pdata = sc->dev->platform_data;
        struct ath_hw *ah = NULL;
        struct ath_common *common;
        int ret = 0, i;
        int csz = 0;

        ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
        if (!ah)
                return -ENOMEM;

        ah->hw = sc->hw;
        ah->hw_version.devid = devid;
        ah->hw_version.subsysid = subsysid;
        ah->reg_ops.read = ath9k_ioread32;
        ah->reg_ops.write = ath9k_iowrite32;
        ah->reg_ops.rmw = ath9k_reg_rmw;
        sc->sc_ah = ah;

        if (!pdata) {
                ah->ah_flags |= AH_USE_EEPROM;
                sc->sc_ah->led_pin = -1;
        } else {
                sc->sc_ah->gpio_mask = pdata->gpio_mask;
                sc->sc_ah->gpio_val = pdata->gpio_val;
                sc->sc_ah->led_pin = pdata->led_pin;
                ah->is_clk_25mhz = pdata->is_clk_25mhz;
                ah->get_mac_revision = pdata->get_mac_revision;
                ah->external_reset = pdata->external_reset;
        }

        common = ath9k_hw_common(ah);
        common->ops = &ah->reg_ops;
        common->bus_ops = bus_ops;
        common->ah = ah;
        common->hw = sc->hw;
        common->priv = sc;
        common->debug_mask = ath9k_debug;
        common->btcoex_enabled = ath9k_btcoex_enable == 1;
        common->disable_ani = false;
        spin_lock_init(&common->cc_lock);

        spin_lock_init(&sc->sc_serial_rw);
        spin_lock_init(&sc->sc_pm_lock);
        mutex_init(&sc->mutex);
#ifdef CONFIG_ATH9K_DEBUGFS
        spin_lock_init(&sc->nodes_lock);
        INIT_LIST_HEAD(&sc->nodes);
#endif
        tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
        tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
                     (unsigned long)sc);

        /*
         * Cache line size is used to size and align various
         * structures used to communicate with the hardware.
         */
        ath_read_cachesize(common, &csz);
        common->cachelsz = csz << 2; /* convert to bytes */

        /* Initializes the hardware for all supported chipsets */
        ret = ath9k_hw_init(ah);
        if (ret)
                goto err_hw;

        if (pdata && pdata->macaddr)
                memcpy(common->macaddr, pdata->macaddr, ETH_ALEN);

        ret = ath9k_init_queues(sc);
        if (ret)
                goto err_queues;

        ret = ath9k_init_btcoex(sc);
        if (ret)
                goto err_btcoex;

        ret = ath9k_init_channels_rates(sc);
        if (ret)
                goto err_btcoex;

        ath9k_init_crypto(sc);
        ath9k_init_misc(sc);

        return 0;

err_btcoex:
        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
                if (ATH_TXQ_SETUP(sc, i))
                        ath_tx_cleanupq(sc, &sc->tx.txq[i]);
err_queues:
        ath9k_hw_deinit(ah);
err_hw:
        kfree(ah);
        sc->sc_ah = NULL;

        return ret;
}

static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
{
        struct ieee80211_supported_band *sband;
        struct ieee80211_channel *chan;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
        int i;

        sband = &sc->sbands[band];
        for (i = 0; i < sband->n_channels; i++) {
                chan = &sband->channels[i];
                ah->curchan = &ah->channels[chan->hw_value];
                ath9k_cmn_update_ichannel(ah->curchan, chan, NL80211_CHAN_HT20);
                ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true);
                chan->max_power = reg->max_power_level / 2;
        }
}

static void ath9k_init_txpower_limits(struct ath_softc *sc)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath9k_channel *curchan = ah->curchan;

        ah->txchainmask = common->tx_chainmask;
        if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
                ath9k_init_band_txpower(sc, IEEE80211_BAND_2GHZ);
        if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
                ath9k_init_band_txpower(sc, IEEE80211_BAND_5GHZ);

        ah->curchan = curchan;
}

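/*
 * Advertise driver capabilities to mac80211: hardware flags, supported
 * interface modes, queue and rate limits, the rate control algorithm and
 * the per-band channel/rate tables (plus HT caps where supported).
 */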
void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);

        hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
                IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
                IEEE80211_HW_SIGNAL_DBM |
                IEEE80211_HW_SUPPORTS_PS |
                IEEE80211_HW_PS_NULLFUNC_STACK |
                IEEE80211_HW_SPECTRUM_MGMT |
                IEEE80211_HW_REPORTS_TX_ACK_STATUS;

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
                hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;

        if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || ath9k_modparam_nohwcrypt)
                hw->flags |= IEEE80211_HW_MFP_CAPABLE;

        hw->wiphy->interface_modes =
                BIT(NL80211_IFTYPE_P2P_GO) |
                BIT(NL80211_IFTYPE_P2P_CLIENT) |
                BIT(NL80211_IFTYPE_AP) |
                BIT(NL80211_IFTYPE_WDS) |
                BIT(NL80211_IFTYPE_STATION) |
                BIT(NL80211_IFTYPE_ADHOC) |
                BIT(NL80211_IFTYPE_MESH_POINT);

        if (AR_SREV_5416(sc->sc_ah))
                hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

        hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;

        hw->queues = 4;
        hw->max_rates = 4;
        hw->channel_change_time = 5000;
        hw->max_listen_interval = 10;
        hw->max_rate_tries = 10;
        hw->sta_data_size = sizeof(struct ath_node);
        hw->vif_data_size = sizeof(struct ath_vif);

#ifdef CONFIG_ATH9K_RATE_CONTROL
        hw->rate_control_algorithm = "ath9k_rate_control";
#endif

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
                hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
                        &sc->sbands[IEEE80211_BAND_2GHZ];
        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
                hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
                        &sc->sbands[IEEE80211_BAND_5GHZ];

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
                if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
                        setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
                if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
                        setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
        }

        SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
}

int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
                      const struct ath_bus_ops *bus_ops)
{
        struct ieee80211_hw *hw = sc->hw;
        struct ath_common *common;
        struct ath_hw *ah;
        int error = 0;
        struct ath_regulatory *reg;

        /* Bring up device */
        error = ath9k_init_softc(devid, sc, subsysid, bus_ops);
        if (error != 0)
                goto error_init;

        ah = sc->sc_ah;
        common = ath9k_hw_common(ah);
        ath9k_set_hw_capab(sc, hw);

        /* Initialize regulatory */
        error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
                              ath9k_reg_notifier);
        if (error)
                goto error_regd;

        reg = &common->regulatory;

        /* Setup TX DMA */
        error = ath_tx_init(sc, ATH_TXBUF);
        if (error != 0)
                goto error_tx;

        /* Setup RX DMA */
        error = ath_rx_init(sc, ATH_RXBUF);
        if (error != 0)
                goto error_rx;

        ath9k_init_txpower_limits(sc);

#ifdef CONFIG_MAC80211_LEDS
        /* must be initialized before ieee80211_register_hw */
        sc->led_cdev.default_trigger = ieee80211_create_tpt_led_trigger(sc->hw,
                IEEE80211_TPT_LEDTRIG_FL_RADIO, ath9k_tpt_blink,
                ARRAY_SIZE(ath9k_tpt_blink));
#endif

        /* Register with mac80211 */
        error = ieee80211_register_hw(hw);
        if (error)
                goto error_register;

        error = ath9k_init_debug(ah);
        if (error) {
                ath_err(common, "Unable to create debugfs files\n");
                goto error_world;
        }

        /* Handle world regulatory */
        if (!ath_is_world_regd(reg)) {
                error = regulatory_hint(hw->wiphy, reg->alpha2);
                if (error)
                        goto error_world;
        }

        INIT_WORK(&sc->hw_check_work, ath_hw_check);
        INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
        INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
        sc->last_rssi = ATH_RSSI_DUMMY_MARKER;

        ath_init_leds(sc);
        ath_start_rfkill_poll(sc);

        return 0;

error_world:
        ieee80211_unregister_hw(hw);
error_register:
        ath_rx_cleanup(sc);
error_rx:
        ath_tx_cleanup(sc);
error_tx:
        /* Nothing */
error_regd:
        ath9k_deinit_softc(sc);
error_init:
        return error;
}

/*****************************/
/*     De-Initialization     */
/*****************************/

static void ath9k_deinit_softc(struct ath_softc *sc)
{
        int i = 0;

        if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
                kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);

        if (sc->sbands[IEEE80211_BAND_5GHZ].channels)
                kfree(sc->sbands[IEEE80211_BAND_5GHZ].channels);

        if ((sc->btcoex.no_stomp_timer) &&
            sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
                ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);

        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
                if (ATH_TXQ_SETUP(sc, i))
                        ath_tx_cleanupq(sc, &sc->tx.txq[i]);

        ath9k_hw_deinit(sc->sc_ah);

        kfree(sc->sc_ah);
        sc->sc_ah = NULL;
}

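/*
 * Tear down everything set up by ath9k_init_device(): stop rfkill polling
 * and LEDs (with the chip awake), unregister from mac80211, and release
 * the RX/TX DMA state and the softc-level resources.
 */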
void ath9k_deinit_device(struct ath_softc *sc)
{
        struct ieee80211_hw *hw = sc->hw;

        ath9k_ps_wakeup(sc);

        wiphy_rfkill_stop_polling(sc->hw->wiphy);
        ath_deinit_leds(sc);

        ath9k_ps_restore(sc);

        ieee80211_unregister_hw(hw);
        ath_rx_cleanup(sc);
        ath_tx_cleanup(sc);
        ath9k_deinit_softc(sc);
}

void ath_descdma_cleanup(struct ath_softc *sc,
                         struct ath_descdma *dd,
                         struct list_head *head)
{
        dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
                          dd->dd_desc_paddr);

        INIT_LIST_HEAD(head);
        kfree(dd->dd_bufptr);
        memset(dd, 0, sizeof(*dd));
}

/************************/
/*     Module Hooks     */
/************************/

static int __init ath9k_init(void)
{
        int error;

        /* Register rate control algorithm */
        error = ath_rate_control_register();
        if (error != 0) {
                printk(KERN_ERR
                        "ath9k: Unable to register rate control algorithm: %d\n",
                        error);
                goto err_out;
        }

        error = ath_pci_init();
        if (error < 0) {
                printk(KERN_ERR
                        "ath9k: No PCI devices found, driver not installed.\n");
                error = -ENODEV;
                goto err_rate_unregister;
        }

        error = ath_ahb_init();
        if (error < 0) {
                error = -ENODEV;
                goto err_pci_exit;
        }

        return 0;

 err_pci_exit:
        ath_pci_exit();

 err_rate_unregister:
        ath_rate_control_unregister();
 err_out:
        return error;
}
module_init(ath9k_init);

static void __exit ath9k_exit(void)
{
        is_ath9k_unloaded = true;
        ath_ahb_exit();
        ath_pci_exit();
        ath_rate_control_unregister();
        printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
}
module_exit(ath9k_exit);