1 /* 2 * Copyright (c) 2008-2011 Atheros Communications Inc. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for any 5 * purpose with or without fee is hereby granted, provided that the above 6 * copyright notice and this permission notice appear in all copies. 7 * 8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 15 */ 16 17 #include <linux/nl80211.h> 18 #include <linux/delay.h> 19 #include "ath9k.h" 20 #include "btcoex.h" 21 22 static void ath9k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 23 u32 queues, bool drop); 24 25 u8 ath9k_parse_mpdudensity(u8 mpdudensity) 26 { 27 /* 28 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing": 29 * 0 for no restriction 30 * 1 for 1/4 us 31 * 2 for 1/2 us 32 * 3 for 1 us 33 * 4 for 2 us 34 * 5 for 4 us 35 * 6 for 8 us 36 * 7 for 16 us 37 */ 38 switch (mpdudensity) { 39 case 0: 40 return 0; 41 case 1: 42 case 2: 43 case 3: 44 /* Our lower layer calculations limit our precision to 45 1 microsecond */ 46 return 1; 47 case 4: 48 return 2; 49 case 5: 50 return 4; 51 case 6: 52 return 8; 53 case 7: 54 return 16; 55 default: 56 return 0; 57 } 58 } 59 60 static bool ath9k_has_pending_frames(struct ath_softc *sc, struct ath_txq *txq, 61 bool sw_pending) 62 { 63 bool pending = false; 64 65 spin_lock_bh(&txq->axq_lock); 66 67 if (txq->axq_depth) { 68 pending = true; 69 goto out; 70 } 71 72 if (!sw_pending) 73 goto out; 74 75 if (txq->mac80211_qnum >= 0) { 76 struct ath_acq *acq; 77 78 acq = &sc->cur_chan->acq[txq->mac80211_qnum]; 79 if (!list_empty(&acq->acq_new) || !list_empty(&acq->acq_old)) 80 pending = true; 81 } 82 out: 83 spin_unlock_bh(&txq->axq_lock); 84 return pending; 85 } 86 87 static bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode) 88 { 89 unsigned long flags; 90 bool ret; 91 92 spin_lock_irqsave(&sc->sc_pm_lock, flags); 93 ret = ath9k_hw_setpower(sc->sc_ah, mode); 94 spin_unlock_irqrestore(&sc->sc_pm_lock, flags); 95 96 return ret; 97 } 98 99 void ath_ps_full_sleep(struct timer_list *t) 100 { 101 struct ath_softc *sc = from_timer(sc, t, sleep_timer); 102 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 103 unsigned long flags; 104 bool reset; 105 106 spin_lock_irqsave(&common->cc_lock, flags); 107 ath_hw_cycle_counters_update(common); 108 spin_unlock_irqrestore(&common->cc_lock, flags); 109 110 ath9k_hw_setrxabort(sc->sc_ah, 1); 111 ath9k_hw_stopdmarecv(sc->sc_ah, &reset); 112 113 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP); 114 } 115 116 void ath9k_ps_wakeup(struct ath_softc *sc) 117 { 118 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 119 unsigned long flags; 120 enum ath9k_power_mode power_mode; 121 122 spin_lock_irqsave(&sc->sc_pm_lock, flags); 123 if (++sc->ps_usecount != 1) 124 goto unlock; 125 126 del_timer_sync(&sc->sleep_timer); 127 power_mode = sc->sc_ah->power_mode; 128 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE); 129 130 /* 131 * While the hardware is asleep, the cycle counters contain no 132 * useful data. Better clear them now so that they don't mess up 133 * survey data results. 
134 */ 135 if (power_mode != ATH9K_PM_AWAKE) { 136 spin_lock(&common->cc_lock); 137 ath_hw_cycle_counters_update(common); 138 memset(&common->cc_survey, 0, sizeof(common->cc_survey)); 139 memset(&common->cc_ani, 0, sizeof(common->cc_ani)); 140 spin_unlock(&common->cc_lock); 141 } 142 143 unlock: 144 spin_unlock_irqrestore(&sc->sc_pm_lock, flags); 145 } 146 147 void ath9k_ps_restore(struct ath_softc *sc) 148 { 149 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 150 enum ath9k_power_mode mode; 151 unsigned long flags; 152 153 spin_lock_irqsave(&sc->sc_pm_lock, flags); 154 if (--sc->ps_usecount != 0) 155 goto unlock; 156 157 if (sc->ps_idle) { 158 mod_timer(&sc->sleep_timer, jiffies + HZ / 10); 159 goto unlock; 160 } 161 162 if (sc->ps_enabled && 163 !(sc->ps_flags & (PS_WAIT_FOR_BEACON | 164 PS_WAIT_FOR_CAB | 165 PS_WAIT_FOR_PSPOLL_DATA | 166 PS_WAIT_FOR_TX_ACK | 167 PS_WAIT_FOR_ANI))) { 168 mode = ATH9K_PM_NETWORK_SLEEP; 169 if (ath9k_hw_btcoex_is_enabled(sc->sc_ah)) 170 ath9k_btcoex_stop_gen_timer(sc); 171 } else { 172 goto unlock; 173 } 174 175 spin_lock(&common->cc_lock); 176 ath_hw_cycle_counters_update(common); 177 spin_unlock(&common->cc_lock); 178 179 ath9k_hw_setpower(sc->sc_ah, mode); 180 181 unlock: 182 spin_unlock_irqrestore(&sc->sc_pm_lock, flags); 183 } 184 185 static void __ath_cancel_work(struct ath_softc *sc) 186 { 187 cancel_work_sync(&sc->paprd_work); 188 cancel_delayed_work_sync(&sc->hw_check_work); 189 cancel_delayed_work_sync(&sc->hw_pll_work); 190 191 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT 192 if (ath9k_hw_mci_is_enabled(sc->sc_ah)) 193 cancel_work_sync(&sc->mci_work); 194 #endif 195 } 196 197 void ath_cancel_work(struct ath_softc *sc) 198 { 199 __ath_cancel_work(sc); 200 cancel_work_sync(&sc->hw_reset_work); 201 } 202 203 void ath_restart_work(struct ath_softc *sc) 204 { 205 ieee80211_queue_delayed_work(sc->hw, &sc->hw_check_work, 206 msecs_to_jiffies(ATH_HW_CHECK_POLL_INT)); 207 208 if (AR_SREV_9340(sc->sc_ah) || AR_SREV_9330(sc->sc_ah)) 209 ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, 210 msecs_to_jiffies(ATH_PLL_WORK_INTERVAL)); 211 212 ath_start_ani(sc); 213 } 214 215 static bool ath_prepare_reset(struct ath_softc *sc) 216 { 217 struct ath_hw *ah = sc->sc_ah; 218 bool ret = true; 219 220 ieee80211_stop_queues(sc->hw); 221 ath_stop_ani(sc); 222 ath9k_hw_disable_interrupts(ah); 223 224 if (AR_SREV_9300_20_OR_LATER(ah)) { 225 ret &= ath_stoprecv(sc); 226 ret &= ath_drain_all_txq(sc); 227 } else { 228 ret &= ath_drain_all_txq(sc); 229 ret &= ath_stoprecv(sc); 230 } 231 232 return ret; 233 } 234 235 static bool ath_complete_reset(struct ath_softc *sc, bool start) 236 { 237 struct ath_hw *ah = sc->sc_ah; 238 struct ath_common *common = ath9k_hw_common(ah); 239 unsigned long flags; 240 241 ath9k_calculate_summary_state(sc, sc->cur_chan); 242 ath_startrecv(sc); 243 ath9k_cmn_update_txpow(ah, sc->cur_chan->cur_txpower, 244 sc->cur_chan->txpower, 245 &sc->cur_chan->cur_txpower); 246 clear_bit(ATH_OP_HW_RESET, &common->op_flags); 247 248 if (!sc->cur_chan->offchannel && start) { 249 /* restore per chanctx TSF timer */ 250 if (sc->cur_chan->tsf_val) { 251 u32 offset; 252 253 offset = ath9k_hw_get_tsf_offset(&sc->cur_chan->tsf_ts, 254 NULL); 255 ath9k_hw_settsf64(ah, sc->cur_chan->tsf_val + offset); 256 } 257 258 259 if (!test_bit(ATH_OP_BEACONS, &common->op_flags)) 260 goto work; 261 262 if (ah->opmode == NL80211_IFTYPE_STATION && 263 test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags)) { 264 spin_lock_irqsave(&sc->sc_pm_lock, flags); 265 sc->ps_flags |= 
PS_BEACON_SYNC | PS_WAIT_FOR_BEACON; 266 spin_unlock_irqrestore(&sc->sc_pm_lock, flags); 267 } else { 268 ath9k_set_beacon(sc); 269 } 270 work: 271 ath_restart_work(sc); 272 ath_txq_schedule_all(sc); 273 } 274 275 sc->gtt_cnt = 0; 276 277 ath9k_hw_set_interrupts(ah); 278 ath9k_hw_enable_interrupts(ah); 279 ieee80211_wake_queues(sc->hw); 280 ath9k_p2p_ps_timer(sc); 281 282 return true; 283 } 284 285 static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan) 286 { 287 struct ath_hw *ah = sc->sc_ah; 288 struct ath_common *common = ath9k_hw_common(ah); 289 struct ath9k_hw_cal_data *caldata = NULL; 290 bool fastcc = true; 291 int r; 292 293 __ath_cancel_work(sc); 294 295 disable_irq(sc->irq); 296 tasklet_disable(&sc->intr_tq); 297 tasklet_disable(&sc->bcon_tasklet); 298 spin_lock_bh(&sc->sc_pcu_lock); 299 300 if (!sc->cur_chan->offchannel) { 301 fastcc = false; 302 caldata = &sc->cur_chan->caldata; 303 } 304 305 if (!hchan) { 306 fastcc = false; 307 hchan = ah->curchan; 308 } 309 310 if (!hchan) { 311 fastcc = false; 312 hchan = ath9k_cmn_get_channel(sc->hw, ah, &sc->cur_chan->chandef); 313 } 314 315 if (!ath_prepare_reset(sc)) 316 fastcc = false; 317 318 if (ath9k_is_chanctx_enabled()) 319 fastcc = false; 320 321 spin_lock_bh(&sc->chan_lock); 322 sc->cur_chandef = sc->cur_chan->chandef; 323 spin_unlock_bh(&sc->chan_lock); 324 325 ath_dbg(common, CONFIG, "Reset to %u MHz, HT40: %d fastcc: %d\n", 326 hchan->channel, IS_CHAN_HT40(hchan), fastcc); 327 328 r = ath9k_hw_reset(ah, hchan, caldata, fastcc); 329 if (r) { 330 ath_err(common, 331 "Unable to reset channel, reset status %d\n", r); 332 333 ath9k_hw_enable_interrupts(ah); 334 ath9k_queue_reset(sc, RESET_TYPE_BB_HANG); 335 336 goto out; 337 } 338 339 if (ath9k_hw_mci_is_enabled(sc->sc_ah) && 340 sc->cur_chan->offchannel) 341 ath9k_mci_set_txpower(sc, true, false); 342 343 if (!ath_complete_reset(sc, true)) 344 r = -EIO; 345 346 out: 347 enable_irq(sc->irq); 348 spin_unlock_bh(&sc->sc_pcu_lock); 349 tasklet_enable(&sc->bcon_tasklet); 350 tasklet_enable(&sc->intr_tq); 351 352 return r; 353 } 354 355 static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta, 356 struct ieee80211_vif *vif) 357 { 358 struct ath_node *an; 359 an = (struct ath_node *)sta->drv_priv; 360 361 an->sc = sc; 362 an->sta = sta; 363 an->vif = vif; 364 memset(&an->key_idx, 0, sizeof(an->key_idx)); 365 366 ath_tx_node_init(sc, an); 367 368 ath_dynack_node_init(sc->sc_ah, an); 369 } 370 371 static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta) 372 { 373 struct ath_node *an = (struct ath_node *)sta->drv_priv; 374 ath_tx_node_cleanup(sc, an); 375 376 ath_dynack_node_deinit(sc->sc_ah, an); 377 } 378 379 void ath9k_tasklet(struct tasklet_struct *t) 380 { 381 struct ath_softc *sc = from_tasklet(sc, t, intr_tq); 382 struct ath_hw *ah = sc->sc_ah; 383 struct ath_common *common = ath9k_hw_common(ah); 384 enum ath_reset_type type; 385 unsigned long flags; 386 u32 status; 387 u32 rxmask; 388 389 spin_lock_irqsave(&sc->intr_lock, flags); 390 status = sc->intrstatus; 391 sc->intrstatus = 0; 392 spin_unlock_irqrestore(&sc->intr_lock, flags); 393 394 ath9k_ps_wakeup(sc); 395 spin_lock(&sc->sc_pcu_lock); 396 397 if (status & ATH9K_INT_FATAL) { 398 type = RESET_TYPE_FATAL_INT; 399 ath9k_queue_reset(sc, type); 400 ath_dbg(common, RESET, "FATAL: Skipping interrupts\n"); 401 goto out; 402 } 403 404 if ((ah->config.hw_hang_checks & HW_BB_WATCHDOG) && 405 (status & ATH9K_INT_BB_WATCHDOG)) { 406 spin_lock_irqsave(&common->cc_lock, 
flags); 407 ath_hw_cycle_counters_update(common); 408 ar9003_hw_bb_watchdog_dbg_info(ah); 409 spin_unlock_irqrestore(&common->cc_lock, flags); 410 411 if (ar9003_hw_bb_watchdog_check(ah)) { 412 type = RESET_TYPE_BB_WATCHDOG; 413 ath9k_queue_reset(sc, type); 414 415 ath_dbg(common, RESET, 416 "BB_WATCHDOG: Skipping interrupts\n"); 417 goto out; 418 } 419 } 420 421 if (status & ATH9K_INT_GTT) { 422 sc->gtt_cnt++; 423 424 if ((sc->gtt_cnt >= MAX_GTT_CNT) && !ath9k_hw_check_alive(ah)) { 425 type = RESET_TYPE_TX_GTT; 426 ath9k_queue_reset(sc, type); 427 ath_dbg(common, RESET, 428 "GTT: Skipping interrupts\n"); 429 goto out; 430 } 431 } 432 433 spin_lock_irqsave(&sc->sc_pm_lock, flags); 434 if ((status & ATH9K_INT_TSFOOR) && sc->ps_enabled) { 435 /* 436 * TSF sync does not look correct; remain awake to sync with 437 * the next Beacon. 438 */ 439 ath_dbg(common, PS, "TSFOOR - Sync with next Beacon\n"); 440 sc->ps_flags |= PS_WAIT_FOR_BEACON | PS_BEACON_SYNC; 441 } 442 spin_unlock_irqrestore(&sc->sc_pm_lock, flags); 443 444 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) 445 rxmask = (ATH9K_INT_RXHP | ATH9K_INT_RXLP | ATH9K_INT_RXEOL | 446 ATH9K_INT_RXORN); 447 else 448 rxmask = (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN); 449 450 if (status & rxmask) { 451 /* Check for high priority Rx first */ 452 if ((ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) && 453 (status & ATH9K_INT_RXHP)) 454 ath_rx_tasklet(sc, 0, true); 455 456 ath_rx_tasklet(sc, 0, false); 457 } 458 459 if (status & ATH9K_INT_TX) { 460 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { 461 /* 462 * For EDMA chips, TX completion is enabled for the 463 * beacon queue, so if a beacon has been transmitted 464 * successfully after a GTT interrupt, the GTT counter 465 * gets reset to zero here. 466 */ 467 sc->gtt_cnt = 0; 468 469 ath_tx_edma_tasklet(sc); 470 } else { 471 ath_tx_tasklet(sc); 472 } 473 474 wake_up(&sc->tx_wait); 475 } 476 477 if (status & ATH9K_INT_GENTIMER) 478 ath_gen_timer_isr(sc->sc_ah); 479 480 ath9k_btcoex_handle_interrupt(sc, status); 481 482 /* re-enable hardware interrupt */ 483 ath9k_hw_resume_interrupts(ah); 484 out: 485 spin_unlock(&sc->sc_pcu_lock); 486 ath9k_ps_restore(sc); 487 } 488 489 irqreturn_t ath_isr(int irq, void *dev) 490 { 491 #define SCHED_INTR ( \ 492 ATH9K_INT_FATAL | \ 493 ATH9K_INT_BB_WATCHDOG | \ 494 ATH9K_INT_RXORN | \ 495 ATH9K_INT_RXEOL | \ 496 ATH9K_INT_RX | \ 497 ATH9K_INT_RXLP | \ 498 ATH9K_INT_RXHP | \ 499 ATH9K_INT_TX | \ 500 ATH9K_INT_BMISS | \ 501 ATH9K_INT_CST | \ 502 ATH9K_INT_GTT | \ 503 ATH9K_INT_TSFOOR | \ 504 ATH9K_INT_GENTIMER | \ 505 ATH9K_INT_MCI) 506 507 struct ath_softc *sc = dev; 508 struct ath_hw *ah = sc->sc_ah; 509 struct ath_common *common = ath9k_hw_common(ah); 510 enum ath9k_int status; 511 u32 sync_cause = 0; 512 bool sched = false; 513 514 /* 515 * The hardware is not ready/present, don't 516 * touch anything. Note this can happen early 517 * on if the IRQ is shared. 518 */ 519 if (!ah || test_bit(ATH_OP_INVALID, &common->op_flags)) 520 return IRQ_NONE; 521 522 /* shared irq, not for us */ 523 if (!ath9k_hw_intrpend(ah)) 524 return IRQ_NONE; 525 526 /* 527 * Figure out the reason(s) for the interrupt. Note 528 * that the hal returns a pseudo-ISR that may include 529 * bits we haven't explicitly enabled so we mask the 530 * value to insure we only process bits we requested. 
531 */ 532 ath9k_hw_getisr(ah, &status, &sync_cause); /* NB: clears ISR too */ 533 ath9k_debug_sync_cause(sc, sync_cause); 534 status &= ah->imask; /* discard unasked-for bits */ 535 536 if (test_bit(ATH_OP_HW_RESET, &common->op_flags)) { 537 ath9k_hw_kill_interrupts(sc->sc_ah); 538 return IRQ_HANDLED; 539 } 540 541 /* 542 * If there are no status bits set, then this interrupt was not 543 * for me (should have been caught above). 544 */ 545 if (!status) 546 return IRQ_NONE; 547 548 /* Cache the status */ 549 spin_lock(&sc->intr_lock); 550 sc->intrstatus |= status; 551 spin_unlock(&sc->intr_lock); 552 553 if (status & SCHED_INTR) 554 sched = true; 555 556 /* 557 * If a FATAL interrupt is received, we have to reset the chip 558 * immediately. 559 */ 560 if (status & ATH9K_INT_FATAL) 561 goto chip_reset; 562 563 if ((ah->config.hw_hang_checks & HW_BB_WATCHDOG) && 564 (status & ATH9K_INT_BB_WATCHDOG)) 565 goto chip_reset; 566 567 if (status & ATH9K_INT_SWBA) 568 tasklet_schedule(&sc->bcon_tasklet); 569 570 if (status & ATH9K_INT_TXURN) 571 ath9k_hw_updatetxtriglevel(ah, true); 572 573 if (status & ATH9K_INT_RXEOL) { 574 ah->imask &= ~(ATH9K_INT_RXEOL | ATH9K_INT_RXORN); 575 ath9k_hw_set_interrupts(ah); 576 } 577 578 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) 579 if (status & ATH9K_INT_TIM_TIMER) { 580 if (ATH_DBG_WARN_ON_ONCE(sc->ps_idle)) 581 goto chip_reset; 582 /* Clear RxAbort bit so that we can 583 * receive frames */ 584 ath9k_setpower(sc, ATH9K_PM_AWAKE); 585 spin_lock(&sc->sc_pm_lock); 586 ath9k_hw_setrxabort(sc->sc_ah, 0); 587 sc->ps_flags |= PS_WAIT_FOR_BEACON; 588 spin_unlock(&sc->sc_pm_lock); 589 } 590 591 chip_reset: 592 593 ath_debug_stat_interrupt(sc, status); 594 595 if (sched) { 596 /* turn off every interrupt */ 597 ath9k_hw_kill_interrupts(ah); 598 tasklet_schedule(&sc->intr_tq); 599 } 600 601 return IRQ_HANDLED; 602 603 #undef SCHED_INTR 604 } 605 606 /* 607 * This function is called when a HW reset cannot be deferred 608 * and has to be immediate. 609 */ 610 int ath_reset(struct ath_softc *sc, struct ath9k_channel *hchan) 611 { 612 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 613 int r; 614 615 ath9k_hw_kill_interrupts(sc->sc_ah); 616 set_bit(ATH_OP_HW_RESET, &common->op_flags); 617 618 ath9k_ps_wakeup(sc); 619 r = ath_reset_internal(sc, hchan); 620 ath9k_ps_restore(sc); 621 622 return r; 623 } 624 625 /* 626 * When a HW reset can be deferred, it is added to the 627 * hw_reset_work workqueue, but we set ATH_OP_HW_RESET before 628 * queueing. 
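 * Setting the flag first makes ath_isr() back off (it only kills the
 * interrupts and returns IRQ_HANDLED) until ath_reset_work() has run.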
629 */ 630 void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type) 631 { 632 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 633 #ifdef CONFIG_ATH9K_DEBUGFS 634 RESET_STAT_INC(sc, type); 635 #endif 636 ath9k_hw_kill_interrupts(sc->sc_ah); 637 set_bit(ATH_OP_HW_RESET, &common->op_flags); 638 ieee80211_queue_work(sc->hw, &sc->hw_reset_work); 639 } 640 641 void ath_reset_work(struct work_struct *work) 642 { 643 struct ath_softc *sc = container_of(work, struct ath_softc, hw_reset_work); 644 645 ath9k_ps_wakeup(sc); 646 ath_reset_internal(sc, NULL); 647 ath9k_ps_restore(sc); 648 } 649 650 /**********************/ 651 /* mac80211 callbacks */ 652 /**********************/ 653 654 static int ath9k_start(struct ieee80211_hw *hw) 655 { 656 struct ath_softc *sc = hw->priv; 657 struct ath_hw *ah = sc->sc_ah; 658 struct ath_common *common = ath9k_hw_common(ah); 659 struct ieee80211_channel *curchan = sc->cur_chan->chandef.chan; 660 struct ath_chanctx *ctx = sc->cur_chan; 661 struct ath9k_channel *init_channel; 662 int r; 663 664 ath_dbg(common, CONFIG, 665 "Starting driver with initial channel: %d MHz\n", 666 curchan->center_freq); 667 668 ath9k_ps_wakeup(sc); 669 mutex_lock(&sc->mutex); 670 671 init_channel = ath9k_cmn_get_channel(hw, ah, &ctx->chandef); 672 sc->cur_chandef = hw->conf.chandef; 673 674 /* Reset SERDES registers */ 675 ath9k_hw_configpcipowersave(ah, false); 676 677 /* 678 * The basic interface to setting the hardware in a good 679 * state is ``reset''. On return the hardware is known to 680 * be powered up and with interrupts disabled. This must 681 * be followed by initialization of the appropriate bits 682 * and then setup of the interrupt mask. 683 */ 684 spin_lock_bh(&sc->sc_pcu_lock); 685 686 atomic_set(&ah->intr_ref_cnt, -1); 687 688 r = ath9k_hw_reset(ah, init_channel, ah->caldata, false); 689 if (r) { 690 ath_err(common, 691 "Unable to reset hardware; reset status %d (freq %u MHz)\n", 692 r, curchan->center_freq); 693 ah->reset_power_on = false; 694 } 695 696 /* Setup our intr mask. */ 697 ah->imask = ATH9K_INT_TX | ATH9K_INT_RXEOL | 698 ATH9K_INT_RXORN | ATH9K_INT_FATAL | 699 ATH9K_INT_GLOBAL; 700 701 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) 702 ah->imask |= ATH9K_INT_RXHP | 703 ATH9K_INT_RXLP; 704 else 705 ah->imask |= ATH9K_INT_RX; 706 707 if (ah->config.hw_hang_checks & HW_BB_WATCHDOG) 708 ah->imask |= ATH9K_INT_BB_WATCHDOG; 709 710 /* 711 * Enable GTT interrupts only for AR9003/AR9004 chips 712 * for now. 713 */ 714 if (AR_SREV_9300_20_OR_LATER(ah)) 715 ah->imask |= ATH9K_INT_GTT; 716 717 if (ah->caps.hw_caps & ATH9K_HW_CAP_HT) 718 ah->imask |= ATH9K_INT_CST; 719 720 ath_mci_enable(sc); 721 722 clear_bit(ATH_OP_INVALID, &common->op_flags); 723 sc->sc_ah->is_monitoring = false; 724 725 if (!ath_complete_reset(sc, false)) 726 ah->reset_power_on = false; 727 728 if (ah->led_pin >= 0) { 729 ath9k_hw_set_gpio(ah, ah->led_pin, 730 (ah->config.led_active_high) ? 1 : 0); 731 ath9k_hw_gpio_request_out(ah, ah->led_pin, NULL, 732 AR_GPIO_OUTPUT_MUX_AS_OUTPUT); 733 } 734 735 /* 736 * Reset key cache to sane defaults (all entries cleared) instead of 737 * semi-random values after suspend/resume. 
738 */ 739 ath9k_cmn_init_crypto(sc->sc_ah); 740 741 ath9k_hw_reset_tsf(ah); 742 743 spin_unlock_bh(&sc->sc_pcu_lock); 744 745 ath9k_rng_start(sc); 746 747 mutex_unlock(&sc->mutex); 748 749 ath9k_ps_restore(sc); 750 751 return 0; 752 } 753 754 static void ath9k_tx(struct ieee80211_hw *hw, 755 struct ieee80211_tx_control *control, 756 struct sk_buff *skb) 757 { 758 struct ath_softc *sc = hw->priv; 759 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 760 struct ath_tx_control txctl; 761 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 762 unsigned long flags; 763 764 if (sc->ps_enabled) { 765 /* 766 * mac80211 does not set PM field for normal data frames, so we 767 * need to update that based on the current PS mode. 768 */ 769 if (ieee80211_is_data(hdr->frame_control) && 770 !ieee80211_is_nullfunc(hdr->frame_control) && 771 !ieee80211_has_pm(hdr->frame_control)) { 772 ath_dbg(common, PS, 773 "Add PM=1 for a TX frame while in PS mode\n"); 774 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); 775 } 776 } 777 778 if (unlikely(sc->sc_ah->power_mode == ATH9K_PM_NETWORK_SLEEP)) { 779 /* 780 * We are using PS-Poll and mac80211 can request TX while in 781 * power save mode. Need to wake up hardware for the TX to be 782 * completed and if needed, also for RX of buffered frames. 783 */ 784 ath9k_ps_wakeup(sc); 785 spin_lock_irqsave(&sc->sc_pm_lock, flags); 786 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) 787 ath9k_hw_setrxabort(sc->sc_ah, 0); 788 if (ieee80211_is_pspoll(hdr->frame_control)) { 789 ath_dbg(common, PS, 790 "Sending PS-Poll to pick a buffered frame\n"); 791 sc->ps_flags |= PS_WAIT_FOR_PSPOLL_DATA; 792 } else { 793 ath_dbg(common, PS, "Wake up to complete TX\n"); 794 sc->ps_flags |= PS_WAIT_FOR_TX_ACK; 795 } 796 /* 797 * The actual restore operation will happen only after 798 * the ps_flags bit is cleared. We are just dropping 799 * the ps_usecount here. 
		 */
		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
		ath9k_ps_restore(sc);
	}

	/*
	 * Cannot tx while the hardware is in full sleep, it first needs a full
	 * chip reset to recover from that
	 */
	if (unlikely(sc->sc_ah->power_mode == ATH9K_PM_FULL_SLEEP)) {
		ath_err(common, "TX while HW is in FULL_SLEEP mode\n");
		goto exit;
	}

	memset(&txctl, 0, sizeof(struct ath_tx_control));
	txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)];
	txctl.sta = control->sta;

	ath_dbg(common, XMIT, "transmitting packet, skb: %p\n", skb);

	if (ath_tx_start(hw, skb, &txctl) != 0) {
		ath_dbg(common, XMIT, "TX failed\n");
		TX_STAT_INC(sc, txctl.txq->axq_qnum, txfailed);
		goto exit;
	}

	return;
exit:
	ieee80211_free_txskb(hw, skb);
}

static bool ath9k_txq_list_has_key(struct list_head *txq_list, u32 keyix)
{
	struct ath_buf *bf;
	struct ieee80211_tx_info *txinfo;
	struct ath_frame_info *fi;

	list_for_each_entry(bf, txq_list, list) {
		if (bf->bf_state.stale || !bf->bf_mpdu)
			continue;

		txinfo = IEEE80211_SKB_CB(bf->bf_mpdu);
		fi = (struct ath_frame_info *)&txinfo->status.status_driver_data[0];
		if (fi->keyix == keyix)
			return true;
	}

	return false;
}

static bool ath9k_txq_has_key(struct ath_softc *sc, u32 keyix)
{
	struct ath_hw *ah = sc->sc_ah;
	int i, j;
	struct ath_txq *txq;
	bool key_in_use = false;

	for (i = 0; !key_in_use && i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;
		txq = &sc->tx.txq[i];
		if (!txq->axq_depth)
			continue;
		if (!ath9k_hw_numtxpending(ah, txq->axq_qnum))
			continue;

		ath_txq_lock(sc, txq);
		key_in_use = ath9k_txq_list_has_key(&txq->axq_q, keyix);
		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			int idx = txq->txq_tailidx;

			for (j = 0; !key_in_use &&
			     !list_empty(&txq->txq_fifo[idx]) &&
			     j < ATH_TXFIFO_DEPTH; j++) {
				key_in_use = ath9k_txq_list_has_key(
					&txq->txq_fifo[idx], keyix);
				INCR(idx, ATH_TXFIFO_DEPTH);
			}
		}
		ath_txq_unlock(sc, txq);
	}

	return key_in_use;
}

static void ath9k_pending_key_del(struct ath_softc *sc, u8 keyix)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	if (!test_bit(keyix, ah->pending_del_keymap) ||
	    ath9k_txq_has_key(sc, keyix))
		return;

	/* No more TXQ frames point to this key cache entry, so delete it. */
	clear_bit(keyix, ah->pending_del_keymap);
	ath_key_delete(common, keyix);
}

static void ath9k_stop(struct ieee80211_hw *hw)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	bool prev_idle;
	int i;

	ath9k_deinit_channel_context(sc);

	mutex_lock(&sc->mutex);

	ath9k_rng_stop(sc);

	ath_cancel_work(sc);

	if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
		ath_dbg(common, ANY, "Device not present\n");
		mutex_unlock(&sc->mutex);
		return;
	}

	/* Ensure HW is awake when we try to shut it down. */
	ath9k_ps_wakeup(sc);

	spin_lock_bh(&sc->sc_pcu_lock);

	/* prevent tasklets from enabling interrupts once we disable them */
	ah->imask &= ~ATH9K_INT_GLOBAL;

	/* make sure h/w will not generate any interrupt
	 * before setting the invalid flag.
	 */
	ath9k_hw_disable_interrupts(ah);

	spin_unlock_bh(&sc->sc_pcu_lock);

	/* we can now sync irq and kill any running tasklets, since we already
	 * disabled interrupts and are not holding a spin lock */
	synchronize_irq(sc->irq);
	tasklet_kill(&sc->intr_tq);
	tasklet_kill(&sc->bcon_tasklet);

	prev_idle = sc->ps_idle;
	sc->ps_idle = true;

	spin_lock_bh(&sc->sc_pcu_lock);

	if (ah->led_pin >= 0) {
		ath9k_hw_set_gpio(ah, ah->led_pin,
				  (ah->config.led_active_high) ? 0 : 1);
		ath9k_hw_gpio_request_in(ah, ah->led_pin, NULL);
	}

	ath_prepare_reset(sc);

	if (sc->rx.frag) {
		dev_kfree_skb_any(sc->rx.frag);
		sc->rx.frag = NULL;
	}

	if (!ah->curchan)
		ah->curchan = ath9k_cmn_get_channel(hw, ah,
						    &sc->cur_chan->chandef);

	ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);

	set_bit(ATH_OP_INVALID, &common->op_flags);

	ath9k_hw_phy_disable(ah);

	ath9k_hw_configpcipowersave(ah, true);

	spin_unlock_bh(&sc->sc_pcu_lock);

	for (i = 0; i < ATH_KEYMAX; i++)
		ath9k_pending_key_del(sc, i);

	/* Clear key cache entries explicitly to get rid of any potentially
	 * remaining keys.
	 */
	ath9k_cmn_init_crypto(sc->sc_ah);

	ath9k_ps_restore(sc);

	sc->ps_idle = prev_idle;

	mutex_unlock(&sc->mutex);

	ath_dbg(common, CONFIG, "Driver halt\n");
}

static bool ath9k_uses_beacons(int type)
{
	switch (type) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_ADHOC:
	case NL80211_IFTYPE_MESH_POINT:
		return true;
	default:
		return false;
	}
}

static void ath9k_vif_iter_set_beacon(struct ath9k_vif_iter_data *iter_data,
				      struct ieee80211_vif *vif)
{
	/* Use the first (configured) interface, but preferring AP interfaces.
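	 * If a later interface in the list is an AP while the current choice
	 * is not, the AP vif takes over as the primary beacon vif.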
	 */
	if (!iter_data->primary_beacon_vif) {
		iter_data->primary_beacon_vif = vif;
	} else {
		if (iter_data->primary_beacon_vif->type != NL80211_IFTYPE_AP &&
		    vif->type == NL80211_IFTYPE_AP)
			iter_data->primary_beacon_vif = vif;
	}

	iter_data->beacons = true;
	iter_data->nbcnvifs += 1;
}

static void ath9k_vif_iter(struct ath9k_vif_iter_data *iter_data,
			   u8 *mac, struct ieee80211_vif *vif)
{
	struct ath_vif *avp = (struct ath_vif *)vif->drv_priv;
	int i;

	if (iter_data->has_hw_macaddr) {
		for (i = 0; i < ETH_ALEN; i++)
			iter_data->mask[i] &=
				~(iter_data->hw_macaddr[i] ^ mac[i]);
	} else {
		memcpy(iter_data->hw_macaddr, mac, ETH_ALEN);
		iter_data->has_hw_macaddr = true;
	}

	if (!vif->bss_conf.use_short_slot)
		iter_data->slottime = 20;

	switch (vif->type) {
	case NL80211_IFTYPE_AP:
		iter_data->naps++;
		if (vif->bss_conf.enable_beacon)
			ath9k_vif_iter_set_beacon(iter_data, vif);
		break;
	case NL80211_IFTYPE_STATION:
		iter_data->nstations++;
		if (avp->assoc && !iter_data->primary_sta)
			iter_data->primary_sta = vif;
		break;
	case NL80211_IFTYPE_OCB:
		iter_data->nocbs++;
		break;
	case NL80211_IFTYPE_ADHOC:
		iter_data->nadhocs++;
		if (vif->bss_conf.enable_beacon)
			ath9k_vif_iter_set_beacon(iter_data, vif);
		break;
	case NL80211_IFTYPE_MESH_POINT:
		iter_data->nmeshes++;
		if (vif->bss_conf.enable_beacon)
			ath9k_vif_iter_set_beacon(iter_data, vif);
		break;
	default:
		break;
	}
}

static void ath9k_update_bssid_mask(struct ath_softc *sc,
				    struct ath_chanctx *ctx,
				    struct ath9k_vif_iter_data *iter_data)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_vif *avp;
	int i;

	if (!ath9k_is_chanctx_enabled())
		return;

	list_for_each_entry(avp, &ctx->vifs, list) {
		if (ctx->nvifs_assigned != 1)
			continue;

		if (!iter_data->has_hw_macaddr)
			continue;

		ether_addr_copy(common->curbssid, avp->bssid);

		/* perm_addr will be used as the p2p device address. */
		for (i = 0; i < ETH_ALEN; i++)
			iter_data->mask[i] &=
				~(iter_data->hw_macaddr[i] ^
				  sc->hw->wiphy->perm_addr[i]);
	}
}

/* Called with sc->mutex held. */
void ath9k_calculate_iter_data(struct ath_softc *sc,
			       struct ath_chanctx *ctx,
			       struct ath9k_vif_iter_data *iter_data)
{
	struct ath_vif *avp;

	/*
	 * The hardware will use primary station addr together with the
	 * BSSID mask when matching addresses.
	 */
	memset(iter_data, 0, sizeof(*iter_data));
	eth_broadcast_addr(iter_data->mask);
	iter_data->slottime = 9;

	list_for_each_entry(avp, &ctx->vifs, list)
		ath9k_vif_iter(iter_data, avp->vif->addr, avp->vif);

	ath9k_update_bssid_mask(sc, ctx, iter_data);
}

static void ath9k_set_assoc_state(struct ath_softc *sc,
				  struct ieee80211_vif *vif, bool changed)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_vif *avp = (struct ath_vif *)vif->drv_priv;
	unsigned long flags;

	set_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags);

	ether_addr_copy(common->curbssid, avp->bssid);
	common->curaid = avp->aid;
	ath9k_hw_write_associd(sc->sc_ah);

	if (changed) {
		common->last_rssi = ATH_RSSI_DUMMY_MARKER;
		sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;

		spin_lock_irqsave(&sc->sc_pm_lock, flags);
		sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
	}

	if (ath9k_hw_mci_is_enabled(sc->sc_ah))
		ath9k_mci_update_wlan_channels(sc, false);

	ath_dbg(common, CONFIG,
		"Primary Station interface: %pM, BSSID: %pM\n",
		vif->addr, common->curbssid);
}

#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
static void ath9k_set_offchannel_state(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_vif *vif = NULL;

	ath9k_ps_wakeup(sc);

	if (sc->offchannel.state < ATH_OFFCHANNEL_ROC_START)
		vif = sc->offchannel.scan_vif;
	else
		vif = sc->offchannel.roc_vif;

	if (WARN_ON(!vif))
		goto exit;

	eth_zero_addr(common->curbssid);
	eth_broadcast_addr(common->bssidmask);
	memcpy(common->macaddr, vif->addr, ETH_ALEN);
	common->curaid = 0;
	ah->opmode = vif->type;
	ah->imask &= ~ATH9K_INT_SWBA;
	ah->imask &= ~ATH9K_INT_TSFOOR;
	ah->slottime = 9;

	ath_hw_setbssidmask(common);
	ath9k_hw_setopmode(ah);
	ath9k_hw_write_associd(sc->sc_ah);
	ath9k_hw_set_interrupts(ah);
	ath9k_hw_init_global_settings(ah);

exit:
	ath9k_ps_restore(sc);
}
#endif

/* Called with sc->mutex held.
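 * Recomputes the opmode, BSSID mask, primary station/beacon vif and the
 * interrupt mask from the set of vifs attached to this channel context.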
*/ 1182 void ath9k_calculate_summary_state(struct ath_softc *sc, 1183 struct ath_chanctx *ctx) 1184 { 1185 struct ath_hw *ah = sc->sc_ah; 1186 struct ath_common *common = ath9k_hw_common(ah); 1187 struct ath9k_vif_iter_data iter_data; 1188 1189 ath_chanctx_check_active(sc, ctx); 1190 1191 if (ctx != sc->cur_chan) 1192 return; 1193 1194 #ifdef CONFIG_ATH9K_CHANNEL_CONTEXT 1195 if (ctx == &sc->offchannel.chan) 1196 return ath9k_set_offchannel_state(sc); 1197 #endif 1198 1199 ath9k_ps_wakeup(sc); 1200 ath9k_calculate_iter_data(sc, ctx, &iter_data); 1201 1202 if (iter_data.has_hw_macaddr) 1203 memcpy(common->macaddr, iter_data.hw_macaddr, ETH_ALEN); 1204 1205 memcpy(common->bssidmask, iter_data.mask, ETH_ALEN); 1206 ath_hw_setbssidmask(common); 1207 1208 if (iter_data.naps > 0) { 1209 ath9k_hw_set_tsfadjust(ah, true); 1210 ah->opmode = NL80211_IFTYPE_AP; 1211 } else { 1212 ath9k_hw_set_tsfadjust(ah, false); 1213 if (iter_data.beacons) 1214 ath9k_beacon_ensure_primary_slot(sc); 1215 1216 if (iter_data.nmeshes) 1217 ah->opmode = NL80211_IFTYPE_MESH_POINT; 1218 else if (iter_data.nocbs) 1219 ah->opmode = NL80211_IFTYPE_OCB; 1220 else if (iter_data.nadhocs) 1221 ah->opmode = NL80211_IFTYPE_ADHOC; 1222 else 1223 ah->opmode = NL80211_IFTYPE_STATION; 1224 } 1225 1226 ath9k_hw_setopmode(ah); 1227 1228 ctx->switch_after_beacon = false; 1229 if ((iter_data.nstations + iter_data.nadhocs + iter_data.nmeshes) > 0) 1230 ah->imask |= ATH9K_INT_TSFOOR; 1231 else { 1232 ah->imask &= ~ATH9K_INT_TSFOOR; 1233 if (iter_data.naps == 1 && iter_data.beacons) 1234 ctx->switch_after_beacon = true; 1235 } 1236 1237 if (ah->opmode == NL80211_IFTYPE_STATION) { 1238 bool changed = (iter_data.primary_sta != ctx->primary_sta); 1239 1240 if (iter_data.primary_sta) { 1241 iter_data.primary_beacon_vif = iter_data.primary_sta; 1242 iter_data.beacons = true; 1243 ath9k_set_assoc_state(sc, iter_data.primary_sta, 1244 changed); 1245 ctx->primary_sta = iter_data.primary_sta; 1246 } else { 1247 ctx->primary_sta = NULL; 1248 eth_zero_addr(common->curbssid); 1249 common->curaid = 0; 1250 ath9k_hw_write_associd(sc->sc_ah); 1251 if (ath9k_hw_mci_is_enabled(sc->sc_ah)) 1252 ath9k_mci_update_wlan_channels(sc, true); 1253 } 1254 } 1255 sc->nbcnvifs = iter_data.nbcnvifs; 1256 ath9k_beacon_config(sc, iter_data.primary_beacon_vif, 1257 iter_data.beacons); 1258 ath9k_hw_set_interrupts(ah); 1259 1260 if (ah->slottime != iter_data.slottime) { 1261 ah->slottime = iter_data.slottime; 1262 ath9k_hw_init_global_settings(ah); 1263 } 1264 1265 if (iter_data.primary_sta) 1266 set_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags); 1267 else 1268 clear_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags); 1269 1270 ath_dbg(common, CONFIG, 1271 "macaddr: %pM, bssid: %pM, bssidmask: %pM\n", 1272 common->macaddr, common->curbssid, common->bssidmask); 1273 1274 ath9k_ps_restore(sc); 1275 } 1276 1277 static void ath9k_tpc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) 1278 { 1279 int *power = data; 1280 1281 if (vif->bss_conf.txpower == INT_MIN) 1282 return; 1283 1284 if (*power < vif->bss_conf.txpower) 1285 *power = vif->bss_conf.txpower; 1286 } 1287 1288 /* Called with sc->mutex held. */ 1289 void ath9k_set_txpower(struct ath_softc *sc, struct ieee80211_vif *vif) 1290 { 1291 int power; 1292 struct ath_hw *ah = sc->sc_ah; 1293 struct ath_regulatory *reg = ath9k_hw_regulatory(ah); 1294 1295 ath9k_ps_wakeup(sc); 1296 if (ah->tpc_enabled) { 1297 power = (vif) ? 
vif->bss_conf.txpower : -1; 1298 ieee80211_iterate_active_interfaces_atomic( 1299 sc->hw, IEEE80211_IFACE_ITER_RESUME_ALL, 1300 ath9k_tpc_vif_iter, &power); 1301 if (power == -1) 1302 power = sc->hw->conf.power_level; 1303 } else { 1304 power = sc->hw->conf.power_level; 1305 } 1306 sc->cur_chan->txpower = 2 * power; 1307 ath9k_hw_set_txpowerlimit(ah, sc->cur_chan->txpower, false); 1308 sc->cur_chan->cur_txpower = reg->max_power_level; 1309 ath9k_ps_restore(sc); 1310 } 1311 1312 static void ath9k_assign_hw_queues(struct ieee80211_hw *hw, 1313 struct ieee80211_vif *vif) 1314 { 1315 int i; 1316 1317 if (!ath9k_is_chanctx_enabled()) 1318 return; 1319 1320 for (i = 0; i < IEEE80211_NUM_ACS; i++) 1321 vif->hw_queue[i] = i; 1322 1323 if (vif->type == NL80211_IFTYPE_AP || 1324 vif->type == NL80211_IFTYPE_MESH_POINT) 1325 vif->cab_queue = hw->queues - 2; 1326 else 1327 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE; 1328 } 1329 1330 static int ath9k_add_interface(struct ieee80211_hw *hw, 1331 struct ieee80211_vif *vif) 1332 { 1333 struct ath_softc *sc = hw->priv; 1334 struct ath_hw *ah = sc->sc_ah; 1335 struct ath_common *common = ath9k_hw_common(ah); 1336 struct ath_vif *avp = (void *)vif->drv_priv; 1337 struct ath_node *an = &avp->mcast_node; 1338 1339 mutex_lock(&sc->mutex); 1340 if (IS_ENABLED(CONFIG_ATH9K_TX99)) { 1341 if (sc->cur_chan->nvifs >= 1) { 1342 mutex_unlock(&sc->mutex); 1343 return -EOPNOTSUPP; 1344 } 1345 sc->tx99_vif = vif; 1346 } 1347 1348 ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type); 1349 sc->cur_chan->nvifs++; 1350 1351 if (vif->type == NL80211_IFTYPE_STATION && ath9k_is_chanctx_enabled()) 1352 vif->driver_flags |= IEEE80211_VIF_GET_NOA_UPDATE; 1353 1354 if (ath9k_uses_beacons(vif->type)) 1355 ath9k_beacon_assign_slot(sc, vif); 1356 1357 avp->vif = vif; 1358 if (!ath9k_is_chanctx_enabled()) { 1359 avp->chanctx = sc->cur_chan; 1360 list_add_tail(&avp->list, &avp->chanctx->vifs); 1361 } 1362 1363 ath9k_calculate_summary_state(sc, avp->chanctx); 1364 1365 ath9k_assign_hw_queues(hw, vif); 1366 1367 ath9k_set_txpower(sc, vif); 1368 1369 an->sc = sc; 1370 an->sta = NULL; 1371 an->vif = vif; 1372 an->no_ps_filter = true; 1373 ath_tx_node_init(sc, an); 1374 1375 mutex_unlock(&sc->mutex); 1376 return 0; 1377 } 1378 1379 static int ath9k_change_interface(struct ieee80211_hw *hw, 1380 struct ieee80211_vif *vif, 1381 enum nl80211_iftype new_type, 1382 bool p2p) 1383 { 1384 struct ath_softc *sc = hw->priv; 1385 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1386 struct ath_vif *avp = (void *)vif->drv_priv; 1387 1388 mutex_lock(&sc->mutex); 1389 1390 if (IS_ENABLED(CONFIG_ATH9K_TX99)) { 1391 mutex_unlock(&sc->mutex); 1392 return -EOPNOTSUPP; 1393 } 1394 1395 ath_dbg(common, CONFIG, "Change Interface\n"); 1396 1397 if (ath9k_uses_beacons(vif->type)) 1398 ath9k_beacon_remove_slot(sc, vif); 1399 1400 vif->type = new_type; 1401 vif->p2p = p2p; 1402 1403 if (ath9k_uses_beacons(vif->type)) 1404 ath9k_beacon_assign_slot(sc, vif); 1405 1406 ath9k_assign_hw_queues(hw, vif); 1407 ath9k_calculate_summary_state(sc, avp->chanctx); 1408 1409 ath9k_set_txpower(sc, vif); 1410 1411 mutex_unlock(&sc->mutex); 1412 return 0; 1413 } 1414 1415 static void ath9k_remove_interface(struct ieee80211_hw *hw, 1416 struct ieee80211_vif *vif) 1417 { 1418 struct ath_softc *sc = hw->priv; 1419 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1420 struct ath_vif *avp = (void *)vif->drv_priv; 1421 1422 ath_dbg(common, CONFIG, "Detach Interface\n"); 1423 1424 mutex_lock(&sc->mutex); 1425 1426 
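	/* Drop any P2P powersave state that still points at this vif. */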
ath9k_p2p_remove_vif(sc, vif); 1427 1428 sc->cur_chan->nvifs--; 1429 sc->tx99_vif = NULL; 1430 if (!ath9k_is_chanctx_enabled()) 1431 list_del(&avp->list); 1432 1433 if (ath9k_uses_beacons(vif->type)) 1434 ath9k_beacon_remove_slot(sc, vif); 1435 1436 ath_tx_node_cleanup(sc, &avp->mcast_node); 1437 1438 ath9k_calculate_summary_state(sc, avp->chanctx); 1439 1440 ath9k_set_txpower(sc, NULL); 1441 1442 mutex_unlock(&sc->mutex); 1443 } 1444 1445 static void ath9k_enable_ps(struct ath_softc *sc) 1446 { 1447 struct ath_hw *ah = sc->sc_ah; 1448 struct ath_common *common = ath9k_hw_common(ah); 1449 1450 if (IS_ENABLED(CONFIG_ATH9K_TX99)) 1451 return; 1452 1453 sc->ps_enabled = true; 1454 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) { 1455 if ((ah->imask & ATH9K_INT_TIM_TIMER) == 0) { 1456 ah->imask |= ATH9K_INT_TIM_TIMER; 1457 ath9k_hw_set_interrupts(ah); 1458 } 1459 ath9k_hw_setrxabort(ah, 1); 1460 } 1461 ath_dbg(common, PS, "PowerSave enabled\n"); 1462 } 1463 1464 static void ath9k_disable_ps(struct ath_softc *sc) 1465 { 1466 struct ath_hw *ah = sc->sc_ah; 1467 struct ath_common *common = ath9k_hw_common(ah); 1468 1469 if (IS_ENABLED(CONFIG_ATH9K_TX99)) 1470 return; 1471 1472 sc->ps_enabled = false; 1473 ath9k_hw_setpower(ah, ATH9K_PM_AWAKE); 1474 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) { 1475 ath9k_hw_setrxabort(ah, 0); 1476 sc->ps_flags &= ~(PS_WAIT_FOR_BEACON | 1477 PS_WAIT_FOR_CAB | 1478 PS_WAIT_FOR_PSPOLL_DATA | 1479 PS_WAIT_FOR_TX_ACK); 1480 if (ah->imask & ATH9K_INT_TIM_TIMER) { 1481 ah->imask &= ~ATH9K_INT_TIM_TIMER; 1482 ath9k_hw_set_interrupts(ah); 1483 } 1484 } 1485 ath_dbg(common, PS, "PowerSave disabled\n"); 1486 } 1487 1488 static int ath9k_config(struct ieee80211_hw *hw, u32 changed) 1489 { 1490 struct ath_softc *sc = hw->priv; 1491 struct ath_hw *ah = sc->sc_ah; 1492 struct ath_common *common = ath9k_hw_common(ah); 1493 struct ieee80211_conf *conf = &hw->conf; 1494 struct ath_chanctx *ctx = sc->cur_chan; 1495 1496 ath9k_ps_wakeup(sc); 1497 mutex_lock(&sc->mutex); 1498 1499 if (changed & IEEE80211_CONF_CHANGE_IDLE) { 1500 sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE); 1501 if (sc->ps_idle) { 1502 ath_cancel_work(sc); 1503 ath9k_stop_btcoex(sc); 1504 } else { 1505 ath9k_start_btcoex(sc); 1506 /* 1507 * The chip needs a reset to properly wake up from 1508 * full sleep 1509 */ 1510 ath_chanctx_set_channel(sc, ctx, &ctx->chandef); 1511 } 1512 } 1513 1514 /* 1515 * We just prepare to enable PS. We have to wait until our AP has 1516 * ACK'd our null data frame to disable RX otherwise we'll ignore 1517 * those ACKs and end up retransmitting the same null data frames. 1518 * IEEE80211_CONF_CHANGE_PS is only passed by mac80211 for STA mode. 
1519 */ 1520 if (changed & IEEE80211_CONF_CHANGE_PS) { 1521 unsigned long flags; 1522 spin_lock_irqsave(&sc->sc_pm_lock, flags); 1523 if (conf->flags & IEEE80211_CONF_PS) 1524 ath9k_enable_ps(sc); 1525 else 1526 ath9k_disable_ps(sc); 1527 spin_unlock_irqrestore(&sc->sc_pm_lock, flags); 1528 } 1529 1530 if (changed & IEEE80211_CONF_CHANGE_MONITOR) { 1531 if (conf->flags & IEEE80211_CONF_MONITOR) { 1532 ath_dbg(common, CONFIG, "Monitor mode is enabled\n"); 1533 sc->sc_ah->is_monitoring = true; 1534 } else { 1535 ath_dbg(common, CONFIG, "Monitor mode is disabled\n"); 1536 sc->sc_ah->is_monitoring = false; 1537 } 1538 } 1539 1540 if (!ath9k_is_chanctx_enabled() && (changed & IEEE80211_CONF_CHANGE_CHANNEL)) { 1541 ctx->offchannel = !!(conf->flags & IEEE80211_CONF_OFFCHANNEL); 1542 ath_chanctx_set_channel(sc, ctx, &hw->conf.chandef); 1543 } 1544 1545 if (changed & IEEE80211_CONF_CHANGE_POWER) 1546 ath9k_set_txpower(sc, NULL); 1547 1548 mutex_unlock(&sc->mutex); 1549 ath9k_ps_restore(sc); 1550 1551 return 0; 1552 } 1553 1554 #define SUPPORTED_FILTERS \ 1555 (FIF_ALLMULTI | \ 1556 FIF_CONTROL | \ 1557 FIF_PSPOLL | \ 1558 FIF_OTHER_BSS | \ 1559 FIF_BCN_PRBRESP_PROMISC | \ 1560 FIF_PROBE_REQ | \ 1561 FIF_MCAST_ACTION | \ 1562 FIF_FCSFAIL) 1563 1564 /* FIXME: sc->sc_full_reset ? */ 1565 static void ath9k_configure_filter(struct ieee80211_hw *hw, 1566 unsigned int changed_flags, 1567 unsigned int *total_flags, 1568 u64 multicast) 1569 { 1570 struct ath_softc *sc = hw->priv; 1571 struct ath_chanctx *ctx; 1572 u32 rfilt; 1573 1574 *total_flags &= SUPPORTED_FILTERS; 1575 1576 spin_lock_bh(&sc->chan_lock); 1577 ath_for_each_chanctx(sc, ctx) 1578 ctx->rxfilter = *total_flags; 1579 #ifdef CONFIG_ATH9K_CHANNEL_CONTEXT 1580 sc->offchannel.chan.rxfilter = *total_flags; 1581 #endif 1582 spin_unlock_bh(&sc->chan_lock); 1583 1584 ath9k_ps_wakeup(sc); 1585 rfilt = ath_calcrxfilter(sc); 1586 ath9k_hw_setrxfilter(sc->sc_ah, rfilt); 1587 ath9k_ps_restore(sc); 1588 1589 ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG, "Set HW RX filter: 0x%x\n", 1590 rfilt); 1591 } 1592 1593 static int ath9k_sta_add(struct ieee80211_hw *hw, 1594 struct ieee80211_vif *vif, 1595 struct ieee80211_sta *sta) 1596 { 1597 struct ath_softc *sc = hw->priv; 1598 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1599 struct ath_node *an = (struct ath_node *) sta->drv_priv; 1600 struct ieee80211_key_conf ps_key = { }; 1601 int key; 1602 1603 ath_node_attach(sc, sta, vif); 1604 1605 if (vif->type != NL80211_IFTYPE_AP && 1606 vif->type != NL80211_IFTYPE_AP_VLAN) 1607 return 0; 1608 1609 key = ath_key_config(common, vif, sta, &ps_key); 1610 if (key > 0) { 1611 an->ps_key = key; 1612 an->key_idx[0] = key; 1613 } 1614 1615 return 0; 1616 } 1617 1618 static void ath9k_del_ps_key(struct ath_softc *sc, 1619 struct ieee80211_vif *vif, 1620 struct ieee80211_sta *sta) 1621 { 1622 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1623 struct ath_node *an = (struct ath_node *) sta->drv_priv; 1624 1625 if (!an->ps_key) 1626 return; 1627 1628 ath_key_delete(common, an->ps_key); 1629 an->ps_key = 0; 1630 an->key_idx[0] = 0; 1631 } 1632 1633 static int ath9k_sta_remove(struct ieee80211_hw *hw, 1634 struct ieee80211_vif *vif, 1635 struct ieee80211_sta *sta) 1636 { 1637 struct ath_softc *sc = hw->priv; 1638 1639 ath9k_del_ps_key(sc, vif, sta); 1640 ath_node_detach(sc, sta); 1641 1642 return 0; 1643 } 1644 1645 static int ath9k_sta_state(struct ieee80211_hw *hw, 1646 struct ieee80211_vif *vif, 1647 struct ieee80211_sta *sta, 1648 enum ieee80211_sta_state 
old_state, 1649 enum ieee80211_sta_state new_state) 1650 { 1651 struct ath_softc *sc = hw->priv; 1652 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1653 int ret = 0; 1654 1655 if (old_state == IEEE80211_STA_NOTEXIST && 1656 new_state == IEEE80211_STA_NONE) { 1657 ret = ath9k_sta_add(hw, vif, sta); 1658 ath_dbg(common, CONFIG, 1659 "Add station: %pM\n", sta->addr); 1660 } else if (old_state == IEEE80211_STA_NONE && 1661 new_state == IEEE80211_STA_NOTEXIST) { 1662 ret = ath9k_sta_remove(hw, vif, sta); 1663 ath_dbg(common, CONFIG, 1664 "Remove station: %pM\n", sta->addr); 1665 } 1666 1667 if (ath9k_is_chanctx_enabled()) { 1668 if (vif->type == NL80211_IFTYPE_STATION) { 1669 if (old_state == IEEE80211_STA_ASSOC && 1670 new_state == IEEE80211_STA_AUTHORIZED) 1671 ath_chanctx_event(sc, vif, 1672 ATH_CHANCTX_EVENT_AUTHORIZED); 1673 } 1674 } 1675 1676 return ret; 1677 } 1678 1679 static void ath9k_sta_set_tx_filter(struct ath_hw *ah, 1680 struct ath_node *an, 1681 bool set) 1682 { 1683 int i; 1684 1685 for (i = 0; i < ARRAY_SIZE(an->key_idx); i++) { 1686 if (!an->key_idx[i]) 1687 continue; 1688 ath9k_hw_set_tx_filter(ah, an->key_idx[i], set); 1689 } 1690 } 1691 1692 static void ath9k_sta_notify(struct ieee80211_hw *hw, 1693 struct ieee80211_vif *vif, 1694 enum sta_notify_cmd cmd, 1695 struct ieee80211_sta *sta) 1696 { 1697 struct ath_softc *sc = hw->priv; 1698 struct ath_node *an = (struct ath_node *) sta->drv_priv; 1699 1700 switch (cmd) { 1701 case STA_NOTIFY_SLEEP: 1702 an->sleeping = true; 1703 ath_tx_aggr_sleep(sta, sc, an); 1704 ath9k_sta_set_tx_filter(sc->sc_ah, an, true); 1705 break; 1706 case STA_NOTIFY_AWAKE: 1707 ath9k_sta_set_tx_filter(sc->sc_ah, an, false); 1708 an->sleeping = false; 1709 ath_tx_aggr_wakeup(sc, an); 1710 break; 1711 } 1712 } 1713 1714 static int ath9k_conf_tx(struct ieee80211_hw *hw, 1715 struct ieee80211_vif *vif, 1716 unsigned int link_id, u16 queue, 1717 const struct ieee80211_tx_queue_params *params) 1718 { 1719 struct ath_softc *sc = hw->priv; 1720 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1721 struct ath_txq *txq; 1722 struct ath9k_tx_queue_info qi; 1723 int ret = 0; 1724 1725 if (queue >= IEEE80211_NUM_ACS) 1726 return 0; 1727 1728 txq = sc->tx.txq_map[queue]; 1729 1730 ath9k_ps_wakeup(sc); 1731 mutex_lock(&sc->mutex); 1732 1733 memset(&qi, 0, sizeof(struct ath9k_tx_queue_info)); 1734 1735 qi.tqi_aifs = params->aifs; 1736 qi.tqi_cwmin = params->cw_min; 1737 qi.tqi_cwmax = params->cw_max; 1738 qi.tqi_burstTime = params->txop * 32; 1739 1740 ath_dbg(common, CONFIG, 1741 "Configure tx [queue/halq] [%d/%d], aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n", 1742 queue, txq->axq_qnum, params->aifs, params->cw_min, 1743 params->cw_max, params->txop); 1744 1745 ath_update_max_aggr_framelen(sc, queue, qi.tqi_burstTime); 1746 ret = ath_txq_update(sc, txq->axq_qnum, &qi); 1747 if (ret) 1748 ath_err(common, "TXQ Update failed\n"); 1749 1750 mutex_unlock(&sc->mutex); 1751 ath9k_ps_restore(sc); 1752 1753 return ret; 1754 } 1755 1756 static int ath9k_set_key(struct ieee80211_hw *hw, 1757 enum set_key_cmd cmd, 1758 struct ieee80211_vif *vif, 1759 struct ieee80211_sta *sta, 1760 struct ieee80211_key_conf *key) 1761 { 1762 struct ath_softc *sc = hw->priv; 1763 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1764 struct ath_node *an = NULL; 1765 int ret = 0, i; 1766 1767 if (ath9k_modparam_nohwcrypt) 1768 return -ENOSPC; 1769 1770 if ((vif->type == NL80211_IFTYPE_ADHOC || 1771 vif->type == NL80211_IFTYPE_MESH_POINT) && 1772 (key->cipher == 
WLAN_CIPHER_SUITE_TKIP ||
	     key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
		/*
		 * For now, disable hw crypto for the RSN IBSS group keys. This
		 * could be optimized in the future to use a modified key cache
		 * design to support per-STA RX GTK, but until that gets
		 * implemented, use of software crypto for group addressed
		 * frames is acceptable to allow RSN IBSS to be used.
		 */
		return -EOPNOTSUPP;
	}

	/* There may be MPDUs queued for the outgoing PTK key. Flush queues to
	 * make sure these are not sent unencrypted or with a wrong (new) key
	 */
	if (cmd == DISABLE_KEY && key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
		ieee80211_stop_queues(hw);
		ath9k_flush(hw, vif, 0, true);
		ieee80211_wake_queues(hw);
	}

	mutex_lock(&sc->mutex);
	ath9k_ps_wakeup(sc);
	ath_dbg(common, CONFIG, "Set HW Key %d\n", cmd);
	if (sta)
		an = (struct ath_node *)sta->drv_priv;

	/* Delete pending key cache entries if no more frames are pointing to
	 * them in TXQs.
	 */
	for (i = 0; i < ATH_KEYMAX; i++)
		ath9k_pending_key_del(sc, i);

	switch (cmd) {
	case SET_KEY:
		if (sta)
			ath9k_del_ps_key(sc, vif, sta);

		key->hw_key_idx = 0;
		ret = ath_key_config(common, vif, sta, key);
		if (ret >= 0) {
			key->hw_key_idx = ret;
			/* push IV and Michael MIC generation to stack */
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
			if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
				key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
			if (sc->sc_ah->sw_mgmt_crypto_tx &&
			    key->cipher == WLAN_CIPHER_SUITE_CCMP)
				key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
			ret = 0;
		}
		if (an && key->hw_key_idx) {
			for (i = 0; i < ARRAY_SIZE(an->key_idx); i++) {
				if (an->key_idx[i])
					continue;
				an->key_idx[i] = key->hw_key_idx;
				break;
			}
			WARN_ON(i == ARRAY_SIZE(an->key_idx));
		}
		break;
	case DISABLE_KEY:
		if (ath9k_txq_has_key(sc, key->hw_key_idx)) {
			/* Delay key cache entry deletion until there are no
			 * remaining TXQ frames pointing to this entry.
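			 * ath9k_pending_key_del() will drop it later, once
			 * ath9k_txq_has_key() no longer finds it on any queue.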
1838 */ 1839 set_bit(key->hw_key_idx, sc->sc_ah->pending_del_keymap); 1840 ath_hw_keysetmac(common, key->hw_key_idx, NULL); 1841 } else { 1842 ath_key_delete(common, key->hw_key_idx); 1843 } 1844 if (an) { 1845 for (i = 0; i < ARRAY_SIZE(an->key_idx); i++) { 1846 if (an->key_idx[i] != key->hw_key_idx) 1847 continue; 1848 an->key_idx[i] = 0; 1849 break; 1850 } 1851 } 1852 key->hw_key_idx = 0; 1853 break; 1854 default: 1855 ret = -EINVAL; 1856 } 1857 1858 ath9k_ps_restore(sc); 1859 mutex_unlock(&sc->mutex); 1860 1861 return ret; 1862 } 1863 1864 static void ath9k_bss_info_changed(struct ieee80211_hw *hw, 1865 struct ieee80211_vif *vif, 1866 struct ieee80211_bss_conf *bss_conf, 1867 u64 changed) 1868 { 1869 #define CHECK_ANI \ 1870 (BSS_CHANGED_ASSOC | \ 1871 BSS_CHANGED_IBSS | \ 1872 BSS_CHANGED_BEACON_ENABLED) 1873 1874 struct ath_softc *sc = hw->priv; 1875 struct ath_hw *ah = sc->sc_ah; 1876 struct ath_common *common = ath9k_hw_common(ah); 1877 struct ath_vif *avp = (void *)vif->drv_priv; 1878 int slottime; 1879 1880 ath9k_ps_wakeup(sc); 1881 mutex_lock(&sc->mutex); 1882 1883 if (changed & BSS_CHANGED_ASSOC) { 1884 ath_dbg(common, CONFIG, "BSSID %pM Changed ASSOC %d\n", 1885 bss_conf->bssid, vif->cfg.assoc); 1886 1887 memcpy(avp->bssid, bss_conf->bssid, ETH_ALEN); 1888 avp->aid = vif->cfg.aid; 1889 avp->assoc = vif->cfg.assoc; 1890 1891 ath9k_calculate_summary_state(sc, avp->chanctx); 1892 } 1893 1894 if ((changed & BSS_CHANGED_IBSS) || 1895 (changed & BSS_CHANGED_OCB)) { 1896 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN); 1897 common->curaid = vif->cfg.aid; 1898 ath9k_hw_write_associd(sc->sc_ah); 1899 } 1900 1901 if ((changed & BSS_CHANGED_BEACON_ENABLED) || 1902 (changed & BSS_CHANGED_BEACON_INT) || 1903 (changed & BSS_CHANGED_BEACON_INFO)) { 1904 ath9k_calculate_summary_state(sc, avp->chanctx); 1905 } 1906 1907 if ((avp->chanctx == sc->cur_chan) && 1908 (changed & BSS_CHANGED_ERP_SLOT)) { 1909 if (bss_conf->use_short_slot) 1910 slottime = 9; 1911 else 1912 slottime = 20; 1913 1914 if (vif->type == NL80211_IFTYPE_AP) { 1915 /* 1916 * Defer update, so that connected stations can adjust 1917 * their settings at the same time. 1918 * See beacon.c for more details 1919 */ 1920 sc->beacon.slottime = slottime; 1921 sc->beacon.updateslot = UPDATE; 1922 } else { 1923 ah->slottime = slottime; 1924 ath9k_hw_init_global_settings(ah); 1925 } 1926 } 1927 1928 if (changed & BSS_CHANGED_P2P_PS) 1929 ath9k_p2p_bss_info_changed(sc, vif); 1930 1931 if (changed & CHECK_ANI) 1932 ath_check_ani(sc); 1933 1934 if (changed & BSS_CHANGED_TXPOWER) { 1935 ath_dbg(common, CONFIG, "vif %pM power %d dbm power_type %d\n", 1936 vif->addr, bss_conf->txpower, bss_conf->txpower_type); 1937 ath9k_set_txpower(sc, vif); 1938 } 1939 1940 mutex_unlock(&sc->mutex); 1941 ath9k_ps_restore(sc); 1942 1943 #undef CHECK_ANI 1944 } 1945 1946 static u64 ath9k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) 1947 { 1948 struct ath_softc *sc = hw->priv; 1949 struct ath_vif *avp = (void *)vif->drv_priv; 1950 u64 tsf; 1951 1952 mutex_lock(&sc->mutex); 1953 ath9k_ps_wakeup(sc); 1954 /* Get current TSF either from HW or kernel time. 
*/ 1955 if (sc->cur_chan == avp->chanctx) { 1956 tsf = ath9k_hw_gettsf64(sc->sc_ah); 1957 } else { 1958 tsf = sc->cur_chan->tsf_val + 1959 ath9k_hw_get_tsf_offset(&sc->cur_chan->tsf_ts, NULL); 1960 } 1961 tsf += le64_to_cpu(avp->tsf_adjust); 1962 ath9k_ps_restore(sc); 1963 mutex_unlock(&sc->mutex); 1964 1965 return tsf; 1966 } 1967 1968 static void ath9k_set_tsf(struct ieee80211_hw *hw, 1969 struct ieee80211_vif *vif, 1970 u64 tsf) 1971 { 1972 struct ath_softc *sc = hw->priv; 1973 struct ath_vif *avp = (void *)vif->drv_priv; 1974 1975 mutex_lock(&sc->mutex); 1976 ath9k_ps_wakeup(sc); 1977 tsf -= le64_to_cpu(avp->tsf_adjust); 1978 ktime_get_raw_ts64(&avp->chanctx->tsf_ts); 1979 if (sc->cur_chan == avp->chanctx) 1980 ath9k_hw_settsf64(sc->sc_ah, tsf); 1981 avp->chanctx->tsf_val = tsf; 1982 ath9k_ps_restore(sc); 1983 mutex_unlock(&sc->mutex); 1984 } 1985 1986 static void ath9k_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) 1987 { 1988 struct ath_softc *sc = hw->priv; 1989 struct ath_vif *avp = (void *)vif->drv_priv; 1990 1991 mutex_lock(&sc->mutex); 1992 1993 ath9k_ps_wakeup(sc); 1994 ktime_get_raw_ts64(&avp->chanctx->tsf_ts); 1995 if (sc->cur_chan == avp->chanctx) 1996 ath9k_hw_reset_tsf(sc->sc_ah); 1997 avp->chanctx->tsf_val = 0; 1998 ath9k_ps_restore(sc); 1999 2000 mutex_unlock(&sc->mutex); 2001 } 2002 2003 static int ath9k_ampdu_action(struct ieee80211_hw *hw, 2004 struct ieee80211_vif *vif, 2005 struct ieee80211_ampdu_params *params) 2006 { 2007 struct ath_softc *sc = hw->priv; 2008 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 2009 bool flush = false; 2010 int ret = 0; 2011 struct ieee80211_sta *sta = params->sta; 2012 struct ath_node *an = (struct ath_node *)sta->drv_priv; 2013 enum ieee80211_ampdu_mlme_action action = params->action; 2014 u16 tid = params->tid; 2015 u16 *ssn = ¶ms->ssn; 2016 struct ath_atx_tid *atid; 2017 2018 mutex_lock(&sc->mutex); 2019 2020 switch (action) { 2021 case IEEE80211_AMPDU_RX_START: 2022 break; 2023 case IEEE80211_AMPDU_RX_STOP: 2024 break; 2025 case IEEE80211_AMPDU_TX_START: 2026 if (ath9k_is_chanctx_enabled()) { 2027 if (test_bit(ATH_OP_SCANNING, &common->op_flags)) { 2028 ret = -EBUSY; 2029 break; 2030 } 2031 } 2032 ath9k_ps_wakeup(sc); 2033 ret = ath_tx_aggr_start(sc, sta, tid, ssn); 2034 if (!ret) 2035 ret = IEEE80211_AMPDU_TX_START_IMMEDIATE; 2036 ath9k_ps_restore(sc); 2037 break; 2038 case IEEE80211_AMPDU_TX_STOP_FLUSH: 2039 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: 2040 flush = true; 2041 fallthrough; 2042 case IEEE80211_AMPDU_TX_STOP_CONT: 2043 ath9k_ps_wakeup(sc); 2044 ath_tx_aggr_stop(sc, sta, tid); 2045 if (!flush) 2046 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); 2047 ath9k_ps_restore(sc); 2048 break; 2049 case IEEE80211_AMPDU_TX_OPERATIONAL: 2050 atid = ath_node_to_tid(an, tid); 2051 atid->baw_size = IEEE80211_MIN_AMPDU_BUF << 2052 sta->deflink.ht_cap.ampdu_factor; 2053 break; 2054 default: 2055 ath_err(ath9k_hw_common(sc->sc_ah), "Unknown AMPDU action\n"); 2056 } 2057 2058 mutex_unlock(&sc->mutex); 2059 2060 return ret; 2061 } 2062 2063 static int ath9k_get_survey(struct ieee80211_hw *hw, int idx, 2064 struct survey_info *survey) 2065 { 2066 struct ath_softc *sc = hw->priv; 2067 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 2068 struct ieee80211_supported_band *sband; 2069 struct ieee80211_channel *chan; 2070 unsigned long flags; 2071 int pos; 2072 2073 if (IS_ENABLED(CONFIG_ATH9K_TX99)) 2074 return -EOPNOTSUPP; 2075 2076 spin_lock_irqsave(&common->cc_lock, flags); 2077 if (idx == 0) 2078 
static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
			    struct survey_info *survey)
{
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *chan;
	unsigned long flags;
	int pos;

	if (IS_ENABLED(CONFIG_ATH9K_TX99))
		return -EOPNOTSUPP;

	spin_lock_irqsave(&common->cc_lock, flags);
	if (idx == 0)
		ath_update_survey_stats(sc);

	sband = hw->wiphy->bands[NL80211_BAND_2GHZ];
	if (sband && idx >= sband->n_channels) {
		idx -= sband->n_channels;
		sband = NULL;
	}

	if (!sband)
		sband = hw->wiphy->bands[NL80211_BAND_5GHZ];

	if (!sband || idx >= sband->n_channels) {
		spin_unlock_irqrestore(&common->cc_lock, flags);
		return -ENOENT;
	}

	chan = &sband->channels[idx];
	pos = chan->hw_value;
	memcpy(survey, &sc->survey[pos], sizeof(*survey));
	survey->channel = chan;
	spin_unlock_irqrestore(&common->cc_lock, flags);

	return 0;
}

static void ath9k_enable_dynack(struct ath_softc *sc)
{
#ifdef CONFIG_ATH9K_DYNACK
	u32 rfilt;
	struct ath_hw *ah = sc->sc_ah;

	ath_dynack_reset(ah);

	ah->dynack.enabled = true;
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);
#endif
}

static void ath9k_set_coverage_class(struct ieee80211_hw *hw,
				     s16 coverage_class)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;

	if (IS_ENABLED(CONFIG_ATH9K_TX99))
		return;

	mutex_lock(&sc->mutex);

	if (coverage_class >= 0) {
		ah->coverage_class = coverage_class;
		if (ah->dynack.enabled) {
			u32 rfilt;

			ah->dynack.enabled = false;
			rfilt = ath_calcrxfilter(sc);
			ath9k_hw_setrxfilter(ah, rfilt);
		}
		ath9k_ps_wakeup(sc);
		ath9k_hw_init_global_settings(ah);
		ath9k_ps_restore(sc);
	} else if (!ah->dynack.enabled) {
		ath9k_enable_dynack(sc);
	}

	mutex_unlock(&sc->mutex);
}

static bool ath9k_has_tx_pending(struct ath_softc *sc,
				 bool sw_pending)
{
	int i, npend = 0;

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		npend = ath9k_has_pending_frames(sc, &sc->tx.txq[i],
						 sw_pending);
		if (npend)
			break;
	}

	return !!npend;
}
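
/*
 * Note on the flush paths below: with multi-channel (MCC) operation the
 * flush runs without taking sc->mutex and with an extended timeout so
 * the channel scheduler can still switch contexts.  Otherwise the flush
 * waits under sc->mutex for pending frames and, if the wait times out
 * with drop requested, drains the tx queues and resets the chip as a
 * last resort.
 */
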
static void ath9k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			u32 queues, bool drop)
{
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	if (ath9k_is_chanctx_enabled()) {
		if (!test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags))
			goto flush;

		/*
		 * If MCC is active, extend the flush timeout
		 * and wait for the HW/SW queues to become
		 * empty. This needs to be done outside the
		 * sc->mutex lock to allow the channel scheduler
		 * to switch channel contexts.
		 *
		 * The vif queues have been stopped in mac80211,
		 * so there won't be any incoming frames.
		 */
		__ath9k_flush(hw, queues, drop, true, true);
		return;
	}
flush:
	mutex_lock(&sc->mutex);
	__ath9k_flush(hw, queues, drop, true, false);
	mutex_unlock(&sc->mutex);
}

void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop,
		   bool sw_pending, bool timeout_override)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	int timeout;
	bool drain_txq;

	cancel_delayed_work_sync(&sc->hw_check_work);

	if (ah->ah_flags & AH_UNPLUGGED) {
		ath_dbg(common, ANY, "Device has been unplugged!\n");
		return;
	}

	if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
		ath_dbg(common, ANY, "Device not present\n");
		return;
	}

	spin_lock_bh(&sc->chan_lock);
	if (timeout_override)
		timeout = HZ / 5;
	else
		timeout = sc->cur_chan->flush_timeout;
	spin_unlock_bh(&sc->chan_lock);

	ath_dbg(common, CHAN_CTX,
		"Flush timeout: %d\n", jiffies_to_msecs(timeout));

	if (wait_event_timeout(sc->tx_wait, !ath9k_has_tx_pending(sc, sw_pending),
			       timeout) > 0)
		drop = false;

	if (drop) {
		ath9k_ps_wakeup(sc);
		spin_lock_bh(&sc->sc_pcu_lock);
		drain_txq = ath_drain_all_txq(sc);
		spin_unlock_bh(&sc->sc_pcu_lock);

		if (!drain_txq)
			ath_reset(sc, NULL);

		ath9k_ps_restore(sc);
	}

	ieee80211_queue_delayed_work(hw, &sc->hw_check_work,
				     msecs_to_jiffies(ATH_HW_CHECK_POLL_INT));
}

static bool ath9k_tx_frames_pending(struct ieee80211_hw *hw)
{
	struct ath_softc *sc = hw->priv;

	return ath9k_has_tx_pending(sc, true);
}

static int ath9k_tx_last_beacon(struct ieee80211_hw *hw)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ieee80211_vif *vif;
	struct ath_vif *avp;
	struct ath_buf *bf;
	struct ath_tx_status ts;
	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	int status;

	vif = sc->beacon.bslot[0];
	if (!vif)
		return 0;

	if (!vif->bss_conf.enable_beacon)
		return 0;

	avp = (void *)vif->drv_priv;

	if (!sc->beacon.tx_processed && !edma) {
		tasklet_disable(&sc->bcon_tasklet);

		bf = avp->av_bcbuf;
		if (!bf || !bf->bf_mpdu)
			goto skip;

		status = ath9k_hw_txprocdesc(ah, bf->bf_desc, &ts);
		if (status == -EINPROGRESS)
			goto skip;

		sc->beacon.tx_processed = true;
		sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);

skip:
		tasklet_enable(&sc->bcon_tasklet);
	}

	return sc->beacon.tx_last;
}

static int ath9k_get_stats(struct ieee80211_hw *hw,
			   struct ieee80211_low_level_stats *stats)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_mib_stats *mib_stats = &ah->ah_mibStats;

	stats->dot11ACKFailureCount = mib_stats->ackrcv_bad;
	stats->dot11RTSFailureCount = mib_stats->rts_bad;
	stats->dot11FCSErrorCount = mib_stats->fcs_bad;
	stats->dot11RTSSuccessCount = mib_stats->rts_good;
	return 0;
}

static u32 fill_chainmask(u32 cap, u32 new)
{
	u32 filled = 0;
	int i;

	for (i = 0; cap && new; i++, cap >>= 1) {
		if (!(cap & BIT(0)))
			continue;

		if (new & BIT(0))
			filled |= BIT(i);

		new >>= 1;
	}

	return filled;
}
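
/*
 * Illustrative example for fill_chainmask() above: the n-th set bit of
 * the requested mask is mapped onto the n-th set bit of the capability
 * mask.  With cap = 0x5 (chains 0 and 2) and new = 0x3 (two chains
 * requested), the result is 0x5.
 */
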
static bool validate_antenna_mask(struct ath_hw *ah, u32 val)
{
	if (AR_SREV_9300_20_OR_LATER(ah))
		return true;

	switch (val & 0x7) {
	case 0x1:
	case 0x3:
	case 0x7:
		return true;
	case 0x2:
		return (ah->caps.rx_chainmask == 1);
	default:
		return false;
	}
}

static int ath9k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;

	if (ah->caps.rx_chainmask != 1)
		rx_ant |= tx_ant;

	if (!validate_antenna_mask(ah, rx_ant) || !tx_ant)
		return -EINVAL;

	sc->ant_rx = rx_ant;
	sc->ant_tx = tx_ant;

	if (ah->caps.rx_chainmask == 1)
		return 0;

	/* AR9100 runs into calibration issues if not all rx chains are enabled */
	if (AR_SREV_9100(ah))
		ah->rxchainmask = 0x7;
	else
		ah->rxchainmask = fill_chainmask(ah->caps.rx_chainmask, rx_ant);

	ah->txchainmask = fill_chainmask(ah->caps.tx_chainmask, tx_ant);
	ath9k_cmn_reload_chainmask(ah);

	return 0;
}

static int ath9k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
{
	struct ath_softc *sc = hw->priv;

	*tx_ant = sc->ant_tx;
	*rx_ant = sc->ant_rx;
	return 0;
}

static void ath9k_sw_scan_start(struct ieee80211_hw *hw,
				struct ieee80211_vif *vif,
				const u8 *mac_addr)
{
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	set_bit(ATH_OP_SCANNING, &common->op_flags);
}

static void ath9k_sw_scan_complete(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif)
{
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	clear_bit(ATH_OP_SCANNING, &common->op_flags);
}

#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT

static void ath9k_cancel_pending_offchannel(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	if (sc->offchannel.roc_vif) {
		ath_dbg(common, CHAN_CTX,
			"%s: Aborting RoC\n", __func__);

		del_timer_sync(&sc->offchannel.timer);
		if (sc->offchannel.state >= ATH_OFFCHANNEL_ROC_START)
			ath_roc_complete(sc, ATH_ROC_COMPLETE_ABORT);
	}

	if (test_bit(ATH_OP_SCANNING, &common->op_flags)) {
		ath_dbg(common, CHAN_CTX,
			"%s: Aborting HW scan\n", __func__);

		del_timer_sync(&sc->offchannel.timer);
		ath_scan_complete(sc, true);
	}
}

static int ath9k_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			 struct ieee80211_scan_request *hw_req)
{
	struct cfg80211_scan_request *req = &hw_req->req;
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int ret = 0;

	mutex_lock(&sc->mutex);

	if (WARN_ON(sc->offchannel.scan_req)) {
		ret = -EBUSY;
		goto out;
	}

	ath9k_ps_wakeup(sc);
	set_bit(ATH_OP_SCANNING, &common->op_flags);
	sc->offchannel.scan_vif = vif;
	sc->offchannel.scan_req = req;
	sc->offchannel.scan_idx = 0;

	ath_dbg(common, CHAN_CTX, "HW scan request received on vif: %pM\n",
		vif->addr);

	if (sc->offchannel.state == ATH_OFFCHANNEL_IDLE) {
		ath_dbg(common, CHAN_CTX, "Starting HW scan\n");
		ath_offchannel_next(sc);
	}

out:
	mutex_unlock(&sc->mutex);

	return ret;
}
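
/*
 * Note: both the HW scan handler above and the RoC handler below only
 * kick the offchannel state machine (ath_offchannel_next()) when it is
 * idle; otherwise the request is simply recorded in sc->offchannel and
 * picked up once the offchannel machinery gets back to it.
 */
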
static void ath9k_cancel_hw_scan(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif)
{
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	ath_dbg(common, CHAN_CTX, "Cancel HW scan on vif: %pM\n", vif->addr);

	mutex_lock(&sc->mutex);
	del_timer_sync(&sc->offchannel.timer);
	ath_scan_complete(sc, true);
	mutex_unlock(&sc->mutex);
}

static int ath9k_remain_on_channel(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   struct ieee80211_channel *chan, int duration,
				   enum ieee80211_roc_type type)
{
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int ret = 0;

	mutex_lock(&sc->mutex);

	if (WARN_ON(sc->offchannel.roc_vif)) {
		ret = -EBUSY;
		goto out;
	}

	ath9k_ps_wakeup(sc);
	sc->offchannel.roc_vif = vif;
	sc->offchannel.roc_chan = chan;
	sc->offchannel.roc_duration = duration;

	ath_dbg(common, CHAN_CTX,
		"RoC request on vif: %pM, type: %d duration: %d\n",
		vif->addr, type, duration);

	if (sc->offchannel.state == ATH_OFFCHANNEL_IDLE) {
		ath_dbg(common, CHAN_CTX, "Starting RoC period\n");
		ath_offchannel_next(sc);
	}

out:
	mutex_unlock(&sc->mutex);

	return ret;
}

static int ath9k_cancel_remain_on_channel(struct ieee80211_hw *hw,
					  struct ieee80211_vif *vif)
{
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	mutex_lock(&sc->mutex);

	ath_dbg(common, CHAN_CTX, "Cancel RoC\n");
	del_timer_sync(&sc->offchannel.timer);

	if (sc->offchannel.roc_vif) {
		if (sc->offchannel.state >= ATH_OFFCHANNEL_ROC_START)
			ath_roc_complete(sc, ATH_ROC_COMPLETE_CANCEL);
	}

	mutex_unlock(&sc->mutex);

	return 0;
}

static int ath9k_add_chanctx(struct ieee80211_hw *hw,
			     struct ieee80211_chanctx_conf *conf)
{
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_chanctx *ctx, **ptr;
	int pos;

	mutex_lock(&sc->mutex);

	ath_for_each_chanctx(sc, ctx) {
		if (ctx->assigned)
			continue;

		ptr = (void *) conf->drv_priv;
		*ptr = ctx;
		ctx->assigned = true;
		pos = ctx - &sc->chanctx[0];
		ctx->hw_queue_base = pos * IEEE80211_NUM_ACS;

		ath_dbg(common, CHAN_CTX,
			"Add channel context: %d MHz\n",
			conf->def.chan->center_freq);

		ath_chanctx_set_channel(sc, ctx, &conf->def);

		mutex_unlock(&sc->mutex);
		return 0;
	}

	mutex_unlock(&sc->mutex);
	return -ENOSPC;
}

static void ath9k_remove_chanctx(struct ieee80211_hw *hw,
				 struct ieee80211_chanctx_conf *conf)
{
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_chanctx *ctx = ath_chanctx_get(conf);

	mutex_lock(&sc->mutex);

	ath_dbg(common, CHAN_CTX,
		"Remove channel context: %d MHz\n",
		conf->def.chan->center_freq);

	ctx->assigned = false;
	ctx->hw_queue_base = 0;
	ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_UNASSIGN);

	mutex_unlock(&sc->mutex);
}
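
/*
 * Note: each channel context gets its own block of hardware queues.
 * With IEEE80211_NUM_ACS == 4, context 0 uses hw queues 0-3 and
 * context 1 uses 4-7; that is what hw_queue_base = pos * IEEE80211_NUM_ACS
 * encodes above and what assign_vif_chanctx() hands back to mac80211
 * below via vif->hw_queue[].
 */
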
static void ath9k_change_chanctx(struct ieee80211_hw *hw,
				 struct ieee80211_chanctx_conf *conf,
				 u32 changed)
{
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_chanctx *ctx = ath_chanctx_get(conf);

	mutex_lock(&sc->mutex);
	ath_dbg(common, CHAN_CTX,
		"Change channel context: %d MHz\n",
		conf->def.chan->center_freq);
	ath_chanctx_set_channel(sc, ctx, &conf->def);
	mutex_unlock(&sc->mutex);
}

static int ath9k_assign_vif_chanctx(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    struct ieee80211_bss_conf *link_conf,
				    struct ieee80211_chanctx_conf *conf)
{
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_vif *avp = (void *)vif->drv_priv;
	struct ath_chanctx *ctx = ath_chanctx_get(conf);
	int i;

	ath9k_cancel_pending_offchannel(sc);

	mutex_lock(&sc->mutex);

	ath_dbg(common, CHAN_CTX,
		"Assign VIF (addr: %pM, type: %d, p2p: %d) to channel context: %d MHz\n",
		vif->addr, vif->type, vif->p2p,
		conf->def.chan->center_freq);

	avp->chanctx = ctx;
	ctx->nvifs_assigned++;
	list_add_tail(&avp->list, &ctx->vifs);
	ath9k_calculate_summary_state(sc, ctx);
	for (i = 0; i < IEEE80211_NUM_ACS; i++)
		vif->hw_queue[i] = ctx->hw_queue_base + i;

	mutex_unlock(&sc->mutex);

	return 0;
}

static void ath9k_unassign_vif_chanctx(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_bss_conf *link_conf,
				       struct ieee80211_chanctx_conf *conf)
{
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_vif *avp = (void *)vif->drv_priv;
	struct ath_chanctx *ctx = ath_chanctx_get(conf);
	int ac;

	ath9k_cancel_pending_offchannel(sc);

	mutex_lock(&sc->mutex);

	ath_dbg(common, CHAN_CTX,
		"Remove VIF (addr: %pM, type: %d, p2p: %d) from channel context: %d MHz\n",
		vif->addr, vif->type, vif->p2p,
		conf->def.chan->center_freq);

	avp->chanctx = NULL;
	ctx->nvifs_assigned--;
	list_del(&avp->list);
	ath9k_calculate_summary_state(sc, ctx);
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
		vif->hw_queue[ac] = IEEE80211_INVAL_HW_QUEUE;

	mutex_unlock(&sc->mutex);
}
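
/*
 * ath9k_mgd_prepare_tx() below is mac80211's hook for preparing the
 * device before management frames (e.g. authentication/association) go
 * out in multi-channel operation.  If a P2P GO context is present it
 * first waits up to two of the GO's beacon intervals for a fresh NoA to
 * be announced, then switches the channel scheduler to FORCE_ACTIVE so
 * the frame is not transmitted while this context is scheduled out.
 */
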
static void ath9k_mgd_prepare_tx(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif,
				 struct ieee80211_prep_tx_info *info)
{
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_vif *avp = (struct ath_vif *) vif->drv_priv;
	struct ath_beacon_config *cur_conf;
	struct ath_chanctx *go_ctx;
	unsigned long timeout;
	bool changed = false;
	u32 beacon_int;

	if (!test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags))
		return;

	if (!avp->chanctx)
		return;

	mutex_lock(&sc->mutex);

	spin_lock_bh(&sc->chan_lock);
	if (sc->next_chan || (sc->cur_chan != avp->chanctx))
		changed = true;
	spin_unlock_bh(&sc->chan_lock);

	if (!changed)
		goto out;

	ath9k_cancel_pending_offchannel(sc);

	go_ctx = ath_is_go_chanctx_present(sc);

	if (go_ctx) {
		/*
		 * Wait till the GO interface gets a chance
		 * to send out an NoA.
		 */
		spin_lock_bh(&sc->chan_lock);
		sc->sched.mgd_prepare_tx = true;
		cur_conf = &go_ctx->beacon;
		beacon_int = TU_TO_USEC(cur_conf->beacon_interval);
		spin_unlock_bh(&sc->chan_lock);

		timeout = usecs_to_jiffies(beacon_int * 2);
		init_completion(&sc->go_beacon);

		mutex_unlock(&sc->mutex);

		if (wait_for_completion_timeout(&sc->go_beacon,
						timeout) == 0) {
			ath_dbg(common, CHAN_CTX,
				"Failed to send new NoA\n");

			spin_lock_bh(&sc->chan_lock);
			sc->sched.mgd_prepare_tx = false;
			spin_unlock_bh(&sc->chan_lock);
		}

		mutex_lock(&sc->mutex);
	}

	ath_dbg(common, CHAN_CTX,
		"%s: Set chanctx state to FORCE_ACTIVE for vif: %pM\n",
		__func__, vif->addr);

	spin_lock_bh(&sc->chan_lock);
	sc->next_chan = avp->chanctx;
	sc->sched.state = ATH_CHANCTX_STATE_FORCE_ACTIVE;
	spin_unlock_bh(&sc->chan_lock);

	ath_chanctx_set_next(sc, true);
out:
	mutex_unlock(&sc->mutex);
}

void ath9k_fill_chanctx_ops(void)
{
	if (!ath9k_is_chanctx_enabled())
		return;

	ath9k_ops.hw_scan = ath9k_hw_scan;
	ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
	ath9k_ops.remain_on_channel = ath9k_remain_on_channel;
	ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
	ath9k_ops.add_chanctx = ath9k_add_chanctx;
	ath9k_ops.remove_chanctx = ath9k_remove_chanctx;
	ath9k_ops.change_chanctx = ath9k_change_chanctx;
	ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
	ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
	ath9k_ops.mgd_prepare_tx = ath9k_mgd_prepare_tx;
}

#endif

static int ath9k_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     int *dbm)
{
	struct ath_softc *sc = hw->priv;
	struct ath_vif *avp = (void *)vif->drv_priv;

	mutex_lock(&sc->mutex);
	if (avp->chanctx)
		*dbm = avp->chanctx->cur_txpower;
	else
		*dbm = sc->cur_chan->cur_txpower;
	mutex_unlock(&sc->mutex);

	*dbm /= 2;

	return 0;
}
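
/*
 * Note on ath9k_get_txpower() above: cur_txpower is kept internally in
 * half-dBm steps, so it is divided by two before being reported to
 * mac80211 in whole dBm.
 */
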
struct ieee80211_ops ath9k_ops = {
	.tx = ath9k_tx,
	.start = ath9k_start,
	.stop = ath9k_stop,
	.add_interface = ath9k_add_interface,
	.change_interface = ath9k_change_interface,
	.remove_interface = ath9k_remove_interface,
	.config = ath9k_config,
	.configure_filter = ath9k_configure_filter,
	.sta_state = ath9k_sta_state,
	.sta_notify = ath9k_sta_notify,
	.conf_tx = ath9k_conf_tx,
	.bss_info_changed = ath9k_bss_info_changed,
	.set_key = ath9k_set_key,
	.get_tsf = ath9k_get_tsf,
	.set_tsf = ath9k_set_tsf,
	.reset_tsf = ath9k_reset_tsf,
	.ampdu_action = ath9k_ampdu_action,
	.get_survey = ath9k_get_survey,
	.rfkill_poll = ath9k_rfkill_poll_state,
	.set_coverage_class = ath9k_set_coverage_class,
	.flush = ath9k_flush,
	.tx_frames_pending = ath9k_tx_frames_pending,
	.tx_last_beacon = ath9k_tx_last_beacon,
	.release_buffered_frames = ath9k_release_buffered_frames,
	.get_stats = ath9k_get_stats,
	.set_antenna = ath9k_set_antenna,
	.get_antenna = ath9k_get_antenna,

#ifdef CONFIG_ATH9K_WOW
	.suspend = ath9k_suspend,
	.resume = ath9k_resume,
	.set_wakeup = ath9k_set_wakeup,
#endif

#ifdef CONFIG_ATH9K_DEBUGFS
	.get_et_sset_count = ath9k_get_et_sset_count,
	.get_et_stats = ath9k_get_et_stats,
	.get_et_strings = ath9k_get_et_strings,
#endif

#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_STATION_STATISTICS)
	.sta_add_debugfs = ath9k_sta_add_debugfs,
#endif
	.sw_scan_start = ath9k_sw_scan_start,
	.sw_scan_complete = ath9k_sw_scan_complete,
	.get_txpower = ath9k_get_txpower,
	.wake_tx_queue = ath9k_wake_tx_queue,
};