/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/ath9k_platform.h>
#include <linux/module.h>
#include <linux/relay.h>
#include <net/ieee80211_radiotap.h>

#include "ath9k.h"

struct ath9k_eeprom_ctx {
	struct completion complete;
	struct ath_hw *ah;
};

static char *dev_info = "ath9k";

MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
module_param_named(debug, ath9k_debug, uint, 0);
MODULE_PARM_DESC(debug, "Debugging mask");

int ath9k_modparam_nohwcrypt;
module_param_named(nohwcrypt, ath9k_modparam_nohwcrypt, int, 0444);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");

int ath9k_led_blink;
module_param_named(blink, ath9k_led_blink, int, 0444);
MODULE_PARM_DESC(blink, "Enable LED blink on activity");

static int ath9k_btcoex_enable;
module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");

static int ath9k_bt_ant_diversity;
module_param_named(bt_ant_diversity, ath9k_bt_ant_diversity, int, 0444);
MODULE_PARM_DESC(bt_ant_diversity, "Enable WLAN/BT RX antenna diversity");

static int ath9k_ps_enable;
module_param_named(ps_enable, ath9k_ps_enable, int, 0444);
MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave");

#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT

int ath9k_use_chanctx;
module_param_named(use_chanctx, ath9k_use_chanctx, int, 0444);
MODULE_PARM_DESC(use_chanctx, "Enable channel context for concurrency");

#endif /* CONFIG_ATH9K_CHANNEL_CONTEXT */

bool is_ath9k_unloaded;

#ifdef CONFIG_MAC80211_LEDS
static const struct ieee80211_tpt_blink ath9k_tpt_blink[] = {
	{ .throughput = 0 * 1024, .blink_time = 334 },
	{ .throughput = 1 * 1024, .blink_time = 260 },
	{ .throughput = 5 * 1024, .blink_time = 220 },
	{ .throughput = 10 * 1024, .blink_time = 190 },
	{ .throughput = 20 * 1024, .blink_time = 170 },
	{ .throughput = 50 * 1024, .blink_time = 150 },
	{ .throughput = 70 * 1024, .blink_time = 130 },
	{ .throughput = 100 * 1024, .blink_time = 110 },
	{ .throughput = 200 * 1024, .blink_time = 80 },
	{ .throughput = 300 * 1024, .blink_time = 50 },
};
#endif

static void ath9k_deinit_softc(struct ath_softc *sc);

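/*
 * Power-save wrappers exposed to the shared ath layer: they are registered
 * as ath9k_ps_ops (below) in common->ps_ops so common code can hold the
 * chip awake around register access.
 */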
static void ath9k_op_ps_wakeup(struct ath_common *common)
{
	ath9k_ps_wakeup((struct ath_softc *) common->priv);
}

static void ath9k_op_ps_restore(struct ath_common *common)
{
	ath9k_ps_restore((struct ath_softc *) common->priv);
}

static struct ath_ps_ops ath9k_ps_ops = {
	.wakeup = ath9k_op_ps_wakeup,
	.restore = ath9k_op_ps_restore,
};

/*
 * Reads and writes share the same lock. We do this to serialize register
 * access on Atheros 802.11n PCI devices only, as the FIFO on these devices
 * can only sanely accept two requests at a time.
 */

static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_softc *sc = (struct ath_softc *) common->priv;

	if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_ON) {
		unsigned long flags;
		spin_lock_irqsave(&sc->sc_serial_rw, flags);
		iowrite32(val, sc->mem + reg_offset);
		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
	} else
		iowrite32(val, sc->mem + reg_offset);
}

static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_softc *sc = (struct ath_softc *) common->priv;
	u32 val;

	if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_ON) {
		unsigned long flags;
		spin_lock_irqsave(&sc->sc_serial_rw, flags);
		val = ioread32(sc->mem + reg_offset);
		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
	} else
		val = ioread32(sc->mem + reg_offset);
	return val;
}

static void ath9k_multi_ioread32(void *hw_priv, u32 *addr,
				 u32 *val, u16 count)
{
	int i;

	for (i = 0; i < count; i++)
		val[i] = ath9k_ioread32(hw_priv, addr[i]);
}

static unsigned int __ath9k_reg_rmw(struct ath_softc *sc, u32 reg_offset,
				    u32 set, u32 clr)
{
	u32 val;

	val = ioread32(sc->mem + reg_offset);
	val &= ~clr;
	val |= set;
	iowrite32(val, sc->mem + reg_offset);

	return val;
}

static unsigned int ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 clr)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_softc *sc = (struct ath_softc *) common->priv;
	unsigned long uninitialized_var(flags);
	u32 val;

	if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_ON) {
		spin_lock_irqsave(&sc->sc_serial_rw, flags);
		val = __ath9k_reg_rmw(sc, reg_offset, set, clr);
		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
	} else
		val = __ath9k_reg_rmw(sc, reg_offset, set, clr);

	return val;
}

/**************************/
/*     Initialization     */
/**************************/

static void ath9k_reg_notifier(struct wiphy *wiphy,
			       struct regulatory_request *request)
{
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_regulatory *reg = ath9k_hw_regulatory(ah);

	ath_reg_notifier_apply(wiphy, request, reg);

	/* Set tx power */
	if (!ah->curchan)
		return;

	sc->cur_chan->txpower = 2 * ah->curchan->chan->max_power;
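	/*
	 * Note: chan->max_power from the regulatory core is in dBm, while
	 * the hardware tx power limit is programmed in 0.5 dBm steps,
	 * hence the factor of two above.
	 */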
	ath9k_ps_wakeup(sc);
	ath9k_hw_set_txpowerlimit(ah, sc->cur_chan->txpower, false);
	ath9k_cmn_update_txpow(ah, sc->cur_chan->cur_txpower,
			       sc->cur_chan->txpower,
			       &sc->cur_chan->cur_txpower);
	/* synchronize DFS detector if regulatory domain changed */
	if (sc->dfs_detector != NULL)
		sc->dfs_detector->set_dfs_domain(sc->dfs_detector,
						 request->dfs_region);
	ath9k_ps_restore(sc);
}

/*
 * This function allocates both the DMA descriptor structure and the
 * buffers it contains. These hold the descriptors used by the system.
 */
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
		      struct list_head *head, const char *name,
		      int nbuf, int ndesc, bool is_tx)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u8 *ds;
	int i, bsize, desc_len;

	ath_dbg(common, CONFIG, "%s DMA: %u buffers %u desc/buf\n",
		name, nbuf, ndesc);

	INIT_LIST_HEAD(head);

	if (is_tx)
		desc_len = sc->sc_ah->caps.tx_desc_len;
	else
		desc_len = sizeof(struct ath_desc);

	/* ath_desc must be a multiple of DWORDs */
	if ((desc_len % 4) != 0) {
		ath_err(common, "ath_desc not DWORD aligned\n");
		BUG_ON((desc_len % 4) != 0);
		return -ENOMEM;
	}

	dd->dd_desc_len = desc_len * nbuf * ndesc;

	/*
	 * Need additional DMA memory because we can't use
	 * descriptors that cross the 4K page boundary. Assume
	 * one skipped descriptor per 4K page.
	 */
	if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
		u32 ndesc_skipped =
			ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
		u32 dma_len;

		while (ndesc_skipped) {
			dma_len = ndesc_skipped * desc_len;
			dd->dd_desc_len += dma_len;

			ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
		}
	}

	/* allocate descriptors */
	dd->dd_desc = dmam_alloc_coherent(sc->dev, dd->dd_desc_len,
					  &dd->dd_desc_paddr, GFP_KERNEL);
	if (!dd->dd_desc)
		return -ENOMEM;

	ds = (u8 *) dd->dd_desc;
	ath_dbg(common, CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
		name, ds, (u32) dd->dd_desc_len,
		ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

	/* allocate buffers */
	if (is_tx) {
		struct ath_buf *bf;

		bsize = sizeof(struct ath_buf) * nbuf;
		bf = devm_kzalloc(sc->dev, bsize, GFP_KERNEL);
		if (!bf)
			return -ENOMEM;

		for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
			bf->bf_desc = ds;
			bf->bf_daddr = DS2PHYS(dd, ds);

			if (!(sc->sc_ah->caps.hw_caps &
			      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
				/*
				 * Skip descriptor addresses which can cause 4KB
				 * boundary crossing (addr + length) with a 32 dword
				 * descriptor fetch.
				 */
				while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
					BUG_ON((caddr_t) bf->bf_desc >=
					       ((caddr_t) dd->dd_desc +
						dd->dd_desc_len));

					ds += (desc_len * ndesc);
					bf->bf_desc = ds;
					bf->bf_daddr = DS2PHYS(dd, ds);
				}
			}
			list_add_tail(&bf->list, head);
		}
	} else {
		struct ath_rxbuf *bf;

		bsize = sizeof(struct ath_rxbuf) * nbuf;
		bf = devm_kzalloc(sc->dev, bsize, GFP_KERNEL);
		if (!bf)
			return -ENOMEM;

		for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
			bf->bf_desc = ds;
			bf->bf_daddr = DS2PHYS(dd, ds);

			if (!(sc->sc_ah->caps.hw_caps &
			      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
				/*
				 * Skip descriptor addresses which can cause 4KB
				 * boundary crossing (addr + length) with a 32 dword
				 * descriptor fetch.
				 */
				while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
					BUG_ON((caddr_t) bf->bf_desc >=
					       ((caddr_t) dd->dd_desc +
						dd->dd_desc_len));

					ds += (desc_len * ndesc);
					bf->bf_desc = ds;
					bf->bf_daddr = DS2PHYS(dd, ds);
				}
			}
			list_add_tail(&bf->list, head);
		}
	}
	return 0;
}

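/*
 * Set up the TX queues: the beacon queue, the CAB (content-after-beacon)
 * queue, the UAPSD queue and one data queue per WMM access category.
 */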
static int ath9k_init_queues(struct ath_softc *sc)
{
	int i = 0;

	sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
	sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
	ath_cabq_update(sc);

	sc->tx.uapsdq = ath_txq_setup(sc, ATH9K_TX_QUEUE_UAPSD, 0);

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
		sc->tx.txq_map[i]->mac80211_qnum = i;
		sc->tx.txq_max_pending[i] = ATH_MAX_QDEPTH;
	}
	return 0;
}

static void ath9k_init_misc(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int i = 0;

	setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);

	common->last_rssi = ATH_RSSI_DUMMY_MARKER;
	memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
	sc->beacon.slottime = ATH9K_SLOT_TIME_9;

	for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++)
		sc->beacon.bslot[i] = NULL;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
		sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;

	sc->spec_priv.ah = sc->sc_ah;
	sc->spec_priv.spec_config.enabled = 0;
	sc->spec_priv.spec_config.short_repeat = true;
	sc->spec_priv.spec_config.count = 8;
	sc->spec_priv.spec_config.endless = false;
	sc->spec_priv.spec_config.period = 0xFF;
	sc->spec_priv.spec_config.fft_period = 0xF;
}

static void ath9k_init_pcoem_platform(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_common *common = ath9k_hw_common(ah);

	if (!IS_ENABLED(CONFIG_ATH9K_PCOEM))
		return;

	if (common->bus_ops->ath_bus_type != ATH_PCI)
		return;

	if (sc->driver_data & (ATH9K_PCI_CUS198 |
			       ATH9K_PCI_CUS230)) {
		ah->config.xlna_gpio = 9;
		ah->config.xatten_margin_cfg = true;
		ah->config.alt_mingainidx = true;
		ah->config.ant_ctrl_comm2g_switch_enable = 0x000BBB88;
		sc->ant_comb.low_rssi_thresh = 20;
		sc->ant_comb.fast_div_bias = 3;

		ath_info(common, "Set parameters for %s\n",
			 (sc->driver_data & ATH9K_PCI_CUS198) ?
			 "CUS198" : "CUS230");
	}

	if (sc->driver_data & ATH9K_PCI_CUS217)
		ath_info(common, "CUS217 card detected\n");

	if (sc->driver_data & ATH9K_PCI_CUS252)
		ath_info(common, "CUS252 card detected\n");

	if (sc->driver_data & ATH9K_PCI_AR9565_1ANT)
		ath_info(common, "WB335 1-ANT card detected\n");

	if (sc->driver_data & ATH9K_PCI_AR9565_2ANT)
		ath_info(common, "WB335 2-ANT card detected\n");

	if (sc->driver_data & ATH9K_PCI_KILLER)
		ath_info(common, "Killer Wireless card detected\n");

	/*
	 * Some WB335 cards do not support antenna diversity. Since
	 * we use a hardcoded value for AR9565 instead of using the
	 * EEPROM/OTP data, remove the combining feature from
	 * the HW capabilities bitmap.
	 */
	if (sc->driver_data & (ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_AR9565_2ANT)) {
		if (!(sc->driver_data & ATH9K_PCI_BT_ANT_DIV))
			pCap->hw_caps &= ~ATH9K_HW_CAP_ANT_DIV_COMB;
	}

	if (sc->driver_data & ATH9K_PCI_BT_ANT_DIV) {
		pCap->hw_caps |= ATH9K_HW_CAP_BT_ANT_DIV;
		ath_info(common, "Set BT/WLAN RX diversity capability\n");
	}

	if (sc->driver_data & ATH9K_PCI_D3_L1_WAR) {
		ah->config.pcie_waen = 0x0040473b;
		ath_info(common, "Enable WAR for ASPM D3/L1\n");
	}

	/*
	 * The default value of pll_pwrsave is 1.
	 * For certain AR9485 cards, it is set to 0.
	 * For AR9462, AR9565 it's set to 7.
	 */
	ah->config.pll_pwrsave = 1;

	if (sc->driver_data & ATH9K_PCI_NO_PLL_PWRSAVE) {
		ah->config.pll_pwrsave = 0;
		ath_info(common, "Disable PLL PowerSave\n");
	}

	if (sc->driver_data & ATH9K_PCI_LED_ACT_HI)
		ah->config.led_active_high = true;
}

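/*
 * Some SoC/platform builds provide the EEPROM contents as a firmware blob.
 * The firmware request is asynchronous, so a completion is used to wait
 * for the callback before hardware init continues.
 */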
static void ath9k_eeprom_request_cb(const struct firmware *eeprom_blob,
				    void *ctx)
{
	struct ath9k_eeprom_ctx *ec = ctx;

	if (eeprom_blob)
		ec->ah->eeprom_blob = eeprom_blob;

	complete(&ec->complete);
}

static int ath9k_eeprom_request(struct ath_softc *sc, const char *name)
{
	struct ath9k_eeprom_ctx ec;
	struct ath_hw *ah = sc->sc_ah;
	int err;

	/* try to load the EEPROM content asynchronously */
	init_completion(&ec.complete);
	ec.ah = sc->sc_ah;

	err = request_firmware_nowait(THIS_MODULE, 1, name, sc->dev, GFP_KERNEL,
				      &ec, ath9k_eeprom_request_cb);
	if (err < 0) {
		ath_err(ath9k_hw_common(ah),
			"EEPROM request failed\n");
		return err;
	}

	wait_for_completion(&ec.complete);

	if (!ah->eeprom_blob) {
		ath_err(ath9k_hw_common(ah),
			"Unable to load EEPROM file %s\n", name);
		return -EINVAL;
	}

	return 0;
}

static void ath9k_eeprom_release(struct ath_softc *sc)
{
	release_firmware(sc->sc_ah->eeprom_blob);
}

static int ath9k_init_soc_platform(struct ath_softc *sc)
{
	struct ath9k_platform_data *pdata = sc->dev->platform_data;
	struct ath_hw *ah = sc->sc_ah;
	int ret = 0;

	if (!pdata)
		return 0;

	if (pdata->eeprom_name) {
		ret = ath9k_eeprom_request(sc, pdata->eeprom_name);
		if (ret)
			return ret;
	}

	if (pdata->tx_gain_buffalo)
		ah->config.tx_gain_buffalo = true;

	return ret;
}

static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
			    const struct ath_bus_ops *bus_ops)
{
	struct ath9k_platform_data *pdata = sc->dev->platform_data;
	struct ath_hw *ah = NULL;
	struct ath9k_hw_capabilities *pCap;
	struct ath_common *common;
	int ret = 0, i;
	int csz = 0;

	ah = devm_kzalloc(sc->dev, sizeof(struct ath_hw), GFP_KERNEL);
	if (!ah)
		return -ENOMEM;

	ah->dev = sc->dev;
	ah->hw = sc->hw;
	ah->hw_version.devid = devid;
	ah->reg_ops.read = ath9k_ioread32;
	ah->reg_ops.multi_read = ath9k_multi_ioread32;
	ah->reg_ops.write = ath9k_iowrite32;
	ah->reg_ops.rmw = ath9k_reg_rmw;
	pCap = &ah->caps;

	common = ath9k_hw_common(ah);

	/* Will be cleared in ath9k_start() */
	set_bit(ATH_OP_INVALID, &common->op_flags);

	sc->sc_ah = ah;
	sc->dfs_detector = dfs_pattern_detector_init(common, NL80211_DFS_UNSET);
	sc->tx99_power = MAX_RATE_POWER + 1;
	init_waitqueue_head(&sc->tx_wait);
	sc->cur_chan = &sc->chanctx[0];
	if (!ath9k_is_chanctx_enabled())
		sc->cur_chan->hw_queue_base = 0;

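	/*
	 * Without platform data (or when it asks for the real EEPROM),
	 * read the calibration data from EEPROM; otherwise take GPIO,
	 * LED, clock and board quirks from the platform data.
	 */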
	if (!pdata || pdata->use_eeprom) {
		ah->ah_flags |= AH_USE_EEPROM;
		sc->sc_ah->led_pin = -1;
	} else {
		sc->sc_ah->gpio_mask = pdata->gpio_mask;
		sc->sc_ah->gpio_val = pdata->gpio_val;
		sc->sc_ah->led_pin = pdata->led_pin;
		ah->is_clk_25mhz = pdata->is_clk_25mhz;
		ah->get_mac_revision = pdata->get_mac_revision;
		ah->external_reset = pdata->external_reset;
		ah->disable_2ghz = pdata->disable_2ghz;
		ah->disable_5ghz = pdata->disable_5ghz;
		if (!pdata->endian_check)
			ah->ah_flags |= AH_NO_EEP_SWAP;
	}

	common->ops = &ah->reg_ops;
	common->bus_ops = bus_ops;
	common->ps_ops = &ath9k_ps_ops;
	common->ah = ah;
	common->hw = sc->hw;
	common->priv = sc;
	common->debug_mask = ath9k_debug;
	common->btcoex_enabled = ath9k_btcoex_enable == 1;
	common->disable_ani = false;

	/*
	 * Platform quirks.
	 */
	ath9k_init_pcoem_platform(sc);

	ret = ath9k_init_soc_platform(sc);
	if (ret)
		return ret;

	/*
	 * Enable WLAN/BT RX Antenna diversity only when:
	 *
	 * - BTCOEX is disabled.
	 * - the user manually requests the feature.
	 * - the HW cap is set using the platform data.
	 */
	if (!common->btcoex_enabled && ath9k_bt_ant_diversity &&
	    (pCap->hw_caps & ATH9K_HW_CAP_BT_ANT_DIV))
		common->bt_ant_diversity = 1;

	spin_lock_init(&common->cc_lock);
	spin_lock_init(&sc->sc_serial_rw);
	spin_lock_init(&sc->sc_pm_lock);
	spin_lock_init(&sc->chan_lock);
	mutex_init(&sc->mutex);
	tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
	tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet,
		     (unsigned long)sc);

	setup_timer(&sc->sleep_timer, ath_ps_full_sleep, (unsigned long)sc);
	INIT_WORK(&sc->hw_reset_work, ath_reset_work);
	INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
	INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);

	ath9k_init_channel_context(sc);

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
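	/* The bus glue reports the cache line size in 32-bit words. */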
	ath_read_cachesize(common, &csz);
	common->cachelsz = csz << 2; /* convert to bytes */

	/* Initializes the hardware for all supported chipsets */
	ret = ath9k_hw_init(ah);
	if (ret)
		goto err_hw;

	if (pdata && pdata->macaddr)
		memcpy(common->macaddr, pdata->macaddr, ETH_ALEN);

	ret = ath9k_init_queues(sc);
	if (ret)
		goto err_queues;

	ret = ath9k_init_btcoex(sc);
	if (ret)
		goto err_btcoex;

	ret = ath9k_cmn_init_channels_rates(common);
	if (ret)
		goto err_btcoex;

	ret = ath9k_init_p2p(sc);
	if (ret)
		goto err_btcoex;

	ath9k_cmn_init_crypto(sc->sc_ah);
	ath9k_init_misc(sc);
	ath_fill_led_pin(sc);
	ath_chanctx_init(sc);
	ath9k_offchannel_init(sc);

	if (common->bus_ops->aspm_init)
		common->bus_ops->aspm_init(common);

	return 0;

err_btcoex:
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
err_queues:
	ath9k_hw_deinit(ah);
err_hw:
	ath9k_eeprom_release(sc);
	dev_kfree_skb_any(sc->tx99_skb);
	return ret;
}

static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
{
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *chan;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct cfg80211_chan_def chandef;
	int i;

	sband = &common->sbands[band];
	for (i = 0; i < sband->n_channels; i++) {
		chan = &sband->channels[i];
		ah->curchan = &ah->channels[chan->hw_value];
		cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_HT20);
		ath9k_cmn_get_channel(sc->hw, ah, &chandef);
		ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true);
	}
}

static void ath9k_init_txpower_limits(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_channel *curchan = ah->curchan;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
		ath9k_init_band_txpower(sc, IEEE80211_BAND_2GHZ);
	if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
		ath9k_init_band_txpower(sc, IEEE80211_BAND_5GHZ);

	ah->curchan = curchan;
}

static const struct ieee80211_iface_limit if_limits[] = {
	{ .max = 2048,	.types = BIT(NL80211_IFTYPE_STATION) },
	{ .max = 8,	.types =
#ifdef CONFIG_MAC80211_MESH
				 BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
				 BIT(NL80211_IFTYPE_AP) },
	{ .max = 1,	.types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
				 BIT(NL80211_IFTYPE_P2P_GO) },
};

static const struct ieee80211_iface_limit wds_limits[] = {
	{ .max = 2048,	.types = BIT(NL80211_IFTYPE_WDS) },
};

#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT

static const struct ieee80211_iface_limit if_limits_multi[] = {
	{ .max = 2,	.types = BIT(NL80211_IFTYPE_STATION) |
				 BIT(NL80211_IFTYPE_AP) |
				 BIT(NL80211_IFTYPE_P2P_CLIENT) |
				 BIT(NL80211_IFTYPE_P2P_GO) },
	{ .max = 1,	.types = BIT(NL80211_IFTYPE_ADHOC) },
};

static const struct ieee80211_iface_combination if_comb_multi[] = {
	{
		.limits = if_limits_multi,
		.n_limits = ARRAY_SIZE(if_limits_multi),
		.max_interfaces = 2,
		.num_different_channels = 2,
		.beacon_int_infra_match = true,
	},
};

#endif /* CONFIG_ATH9K_CHANNEL_CONTEXT */

static const struct ieee80211_iface_limit if_dfs_limits[] = {
	{ .max = 1,	.types = BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
				 BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
				 BIT(NL80211_IFTYPE_ADHOC) },
};

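/*
 * Default interface combinations. The DFS combination is advertised only
 * when CONFIG_ATH9K_DFS_CERTIFIED is set and is limited to a single
 * beaconing interface with radar detection.
 */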
static const struct ieee80211_iface_combination if_comb[] = {
	{
		.limits = if_limits,
		.n_limits = ARRAY_SIZE(if_limits),
		.max_interfaces = 2048,
		.num_different_channels = 1,
		.beacon_int_infra_match = true,
	},
	{
		.limits = wds_limits,
		.n_limits = ARRAY_SIZE(wds_limits),
		.max_interfaces = 2048,
		.num_different_channels = 1,
		.beacon_int_infra_match = true,
	},
#ifdef CONFIG_ATH9K_DFS_CERTIFIED
	{
		.limits = if_dfs_limits,
		.n_limits = ARRAY_SIZE(if_dfs_limits),
		.max_interfaces = 1,
		.num_different_channels = 1,
		.beacon_int_infra_match = true,
		.radar_detect_widths =	BIT(NL80211_CHAN_WIDTH_20_NOHT) |
					BIT(NL80211_CHAN_WIDTH_20) |
					BIT(NL80211_CHAN_WIDTH_40),
	}
#endif
};

#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
static void ath9k_set_mcc_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	if (!ath9k_is_chanctx_enabled())
		return;

	ieee80211_hw_set(hw, QUEUE_CONTROL);
	hw->queues = ATH9K_NUM_TX_QUEUES;
	hw->offchannel_tx_hw_queue = hw->queues - 1;
	hw->wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_WDS);
	hw->wiphy->iface_combinations = if_comb_multi;
	hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb_multi);
	hw->wiphy->max_scan_ssids = 255;
	hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
	hw->wiphy->max_remain_on_channel_duration = 10000;
	hw->chanctx_data_size = sizeof(void *);
	hw->extra_beacon_tailroom =
		sizeof(struct ieee80211_p2p_noa_attr) + 9;

	ath_dbg(common, CHAN_CTX, "Use channel contexts\n");
}
#endif /* CONFIG_ATH9K_CHANNEL_CONTEXT */

static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
	ieee80211_hw_set(hw, SPECTRUM_MGMT);
	ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, RX_INCLUDES_FCS);
	ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);

	if (ath9k_ps_enable)
		ieee80211_hw_set(hw, SUPPORTS_PS);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
		ieee80211_hw_set(hw, AMPDU_AGGREGATION);

		if (AR_SREV_9280_20_OR_LATER(ah))
			hw->radiotap_mcs_details |=
				IEEE80211_RADIOTAP_MCS_HAVE_STBC;
	}

	if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || ath9k_modparam_nohwcrypt)
		ieee80211_hw_set(hw, MFP_CAPABLE);

	hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR |
			       NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
			       NL80211_FEATURE_P2P_GO_CTWIN;

	if (!config_enabled(CONFIG_ATH9K_TX99)) {
		hw->wiphy->interface_modes =
			BIT(NL80211_IFTYPE_P2P_GO) |
			BIT(NL80211_IFTYPE_P2P_CLIENT) |
			BIT(NL80211_IFTYPE_AP) |
			BIT(NL80211_IFTYPE_STATION) |
			BIT(NL80211_IFTYPE_ADHOC) |
			BIT(NL80211_IFTYPE_MESH_POINT) |
			BIT(NL80211_IFTYPE_WDS);

		hw->wiphy->iface_combinations = if_comb;
		hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
	}

	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
	hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
	hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_5_10_MHZ;
	hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
	hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;

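	/*
	 * Four hardware data queues, one per WMM access category; the
	 * channel-context setup overrides this in ath9k_set_mcc_capab().
	 */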
	hw->queues = 4;
	hw->max_rates = 4;
	hw->max_listen_interval = 10;
	hw->max_rate_tries = 10;
	hw->sta_data_size = sizeof(struct ath_node);
	hw->vif_data_size = sizeof(struct ath_vif);

	hw->wiphy->available_antennas_rx = BIT(ah->caps.max_rxchains) - 1;
	hw->wiphy->available_antennas_tx = BIT(ah->caps.max_txchains) - 1;

	/* single chain devices with rx diversity */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
		hw->wiphy->available_antennas_rx = BIT(0) | BIT(1);

	sc->ant_rx = hw->wiphy->available_antennas_rx;
	sc->ant_tx = hw->wiphy->available_antennas_tx;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
		hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
			&common->sbands[IEEE80211_BAND_2GHZ];
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
		hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
			&common->sbands[IEEE80211_BAND_5GHZ];

#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
	ath9k_set_mcc_capab(sc, hw);
#endif
	ath9k_init_wow(hw);
	ath9k_cmn_reload_chainmask(ah);

	SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
}

int ath9k_init_device(u16 devid, struct ath_softc *sc,
		      const struct ath_bus_ops *bus_ops)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ath_common *common;
	struct ath_hw *ah;
	int error = 0;
	struct ath_regulatory *reg;

	/* Bring up device */
	error = ath9k_init_softc(devid, sc, bus_ops);
	if (error)
		return error;

	ah = sc->sc_ah;
	common = ath9k_hw_common(ah);
	ath9k_set_hw_capab(sc, hw);

	/* Initialize regulatory */
	error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
			      ath9k_reg_notifier);
	if (error)
		goto deinit;

	reg = &common->regulatory;

	/* Setup TX DMA */
	error = ath_tx_init(sc, ATH_TXBUF);
	if (error != 0)
		goto deinit;

	/* Setup RX DMA */
	error = ath_rx_init(sc, ATH_RXBUF);
	if (error != 0)
		goto deinit;

	ath9k_init_txpower_limits(sc);

#ifdef CONFIG_MAC80211_LEDS
	/* must be initialized before ieee80211_register_hw */
	sc->led_cdev.default_trigger = ieee80211_create_tpt_led_trigger(sc->hw,
		IEEE80211_TPT_LEDTRIG_FL_RADIO, ath9k_tpt_blink,
		ARRAY_SIZE(ath9k_tpt_blink));
#endif

	/* Register with mac80211 */
	error = ieee80211_register_hw(hw);
	if (error)
		goto rx_cleanup;

	error = ath9k_init_debug(ah);
	if (error) {
		ath_err(common, "Unable to create debugfs files\n");
		goto unregister;
	}

	/* Handle world regulatory */
	if (!ath_is_world_regd(reg)) {
		error = regulatory_hint(hw->wiphy, reg->alpha2);
		if (error)
			goto debug_cleanup;
	}

	ath_init_leds(sc);
	ath_start_rfkill_poll(sc);

	return 0;

debug_cleanup:
	ath9k_deinit_debug(sc);
unregister:
	ieee80211_unregister_hw(hw);
rx_cleanup:
	ath_rx_cleanup(sc);
deinit:
	ath9k_deinit_softc(sc);
	return error;
}

/*****************************/
/*     De-Initialization     */
/*****************************/

static void ath9k_deinit_softc(struct ath_softc *sc)
{
	int i = 0;

	ath9k_deinit_p2p(sc);
	ath9k_deinit_btcoex(sc);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);

	del_timer_sync(&sc->sleep_timer);
	ath9k_hw_deinit(sc->sc_ah);
	if (sc->dfs_detector != NULL)
		sc->dfs_detector->exit(sc->dfs_detector);

	ath9k_eeprom_release(sc);
}

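/*
 * Full device teardown: stop LEDs and rfkill polling, unregister from
 * mac80211, release the RX DMA buffers and undo ath9k_init_softc().
 */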
void ath9k_deinit_device(struct ath_softc *sc)
{
	struct ieee80211_hw *hw = sc->hw;

	ath9k_ps_wakeup(sc);

	wiphy_rfkill_stop_polling(sc->hw->wiphy);
	ath_deinit_leds(sc);

	ath9k_ps_restore(sc);

	ath9k_deinit_debug(sc);
	ath9k_deinit_wow(hw);
	ieee80211_unregister_hw(hw);
	ath_rx_cleanup(sc);
	ath9k_deinit_softc(sc);
}

/************************/
/*     Module Hooks     */
/************************/

static int __init ath9k_init(void)
{
	int error;

	error = ath_pci_init();
	if (error < 0) {
		pr_err("No PCI devices found, driver not installed\n");
		error = -ENODEV;
		goto err_out;
	}

	error = ath_ahb_init();
	if (error < 0) {
		error = -ENODEV;
		goto err_pci_exit;
	}

	return 0;

err_pci_exit:
	ath_pci_exit();
err_out:
	return error;
}
module_init(ath9k_init);

static void __exit ath9k_exit(void)
{
	is_ath9k_unloaded = true;
	ath_ahb_exit();
	ath_pci_exit();
	pr_info("%s: Driver unloaded\n", dev_info);
}
module_exit(ath9k_exit);