/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define SKB_CB_ATHBUF(__skb)	(*((struct ath_buf **)__skb->cb))

static inline bool ath_is_alt_ant_ratio_better(int alt_ratio, int maxdelta,
					       int mindelta, int main_rssi_avg,
					       int alt_rssi_avg, int pkt_count)
{
	return (((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
		 (alt_rssi_avg > main_rssi_avg + maxdelta)) ||
		(alt_rssi_avg > main_rssi_avg + mindelta)) &&
	       (pkt_count > 50);
}

static inline bool ath_ant_div_comb_alt_check(u8 div_group, int alt_ratio,
					      int curr_main_set,
					      int curr_alt_set,
					      int alt_rssi_avg,
					      int main_rssi_avg)
{
	bool result = false;

	switch (div_group) {
	case 0:
		if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
			result = true;
		break;
	case 1:
	case 2:
		if ((((curr_main_set == ATH_ANT_DIV_COMB_LNA2) &&
		      (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) &&
		      (alt_rssi_avg >= (main_rssi_avg - 5))) ||
		     ((curr_main_set == ATH_ANT_DIV_COMB_LNA1) &&
		      (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) &&
		      (alt_rssi_avg >= (main_rssi_avg - 2)))) &&
		    (alt_rssi_avg >= 4))
			result = true;
		else
			result = false;
		break;
	}

	return result;
}

static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
{
	return sc->ps_enabled &&
	       (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
}

/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct sk_buff *skb;

	ATH_RXBUF_RESET(bf);

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	BUG_ON(skb == NULL);
	ds->ds_vdata = skb->data;

	/*
	 * setup rx descriptors. The rx_bufsize here tells the hardware
	 * how much data it can DMA to us and that we are prepared
	 * to process
	 */
	ath9k_hw_setuprxdesc(ah, ds, common->rx_bufsize, 0);

	if (sc->rx.rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->rx.rxlink = bf->bf_daddr;

	sc->rx.rxlink = &ds->ds_link;
}
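
/*
 * sc->rx.rxlink is left pointing at the ds_link field of the descriptor
 * queued above, so the next ath_rx_buf_link() call can chain a further
 * buffer onto the hardware list in O(1) without walking sc->rx.rxbuf.
 */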

static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(sc->sc_ah, antenna);
	sc->rx.defant = antenna;
	sc->rx.rxotherant = 0;
}

static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	ath_hw_setbssidmask(common);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}

static bool ath_rx_edma_buf_link(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;
	struct ath_buf *bf;

	rx_edma = &sc->rx.rx_edma[qtype];
	if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
		return false;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	list_del_init(&bf->list);

	skb = bf->bf_mpdu;

	ATH_RXBUF_RESET(bf);
	memset(skb->data, 0, ah->caps.rx_status_len);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   ah->caps.rx_status_len, DMA_TO_DEVICE);

	SKB_CB_ATHBUF(skb) = bf;
	ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
	skb_queue_tail(&rx_edma->rx_fifo, skb);

	return true;
}

static void ath_rx_addbuffer_edma(struct ath_softc *sc,
				  enum ath9k_rx_qtype qtype, int size)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_buf *bf, *tbf;

	if (list_empty(&sc->rx.rxbuf)) {
		ath_dbg(common, QUEUE, "No free rx buf available\n");
		return;
	}

	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list)
		if (!ath_rx_edma_buf_link(sc, qtype))
			break;
}

static void ath_rx_remove_buffer(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_buf *bf;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;

	rx_edma = &sc->rx.rx_edma[qtype];

	while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}
}

static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf;

	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		if (bf->bf_mpdu) {
			dma_unmap_single(sc->dev, bf->bf_buf_addr,
					 common->rx_bufsize,
					 DMA_BIDIRECTIONAL);
			dev_kfree_skb_any(bf->bf_mpdu);
			bf->bf_buf_addr = 0;
			bf->bf_mpdu = NULL;
		}
	}

	INIT_LIST_HEAD(&sc->rx.rxbuf);

	kfree(sc->rx.rx_bufptr);
	sc->rx.rx_bufptr = NULL;
}

static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
{
	skb_queue_head_init(&rx_edma->rx_fifo);
	rx_edma->rx_fifo_hwsize = size;
}
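
/*
 * On EDMA chips the hardware writes a block of rx status words into the
 * first rx_status_len bytes of every buffer. That is why ath_rx_edma_init()
 * below programs the hardware buffer size as rx_bufsize minus the status
 * area, and why ath_rx_edma_buf_link() clears and syncs those bytes back
 * to the device before re-queueing a buffer.
 */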

static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0, i;
	u32 size;

	ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
				ah->caps.rx_status_len);

	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
			       ah->caps.rx_lp_qdepth);
	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
			       ah->caps.rx_hp_qdepth);

	size = sizeof(struct ath_buf) * nbufs;
	bf = kzalloc(size, GFP_KERNEL);
	if (!bf)
		return -ENOMEM;

	INIT_LIST_HEAD(&sc->rx.rxbuf);
	sc->rx.rx_bufptr = bf;

	for (i = 0; i < nbufs; i++, bf++) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
		if (!skb) {
			error = -ENOMEM;
			goto rx_init_fail;
		}

		memset(skb->data, 0, common->rx_bufsize);
		bf->bf_mpdu = skb;

		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common,
				"dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto rx_init_fail;
		}

		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}

	return 0;

rx_init_fail:
	ath_rx_edma_cleanup(sc);
	return error;
}

static void ath_edma_start_recv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxbuflock);

	ath9k_hw_rxena(sc->sc_ah);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);

	ath_opmode_init(sc);

	ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_OFFCHANNEL));

	spin_unlock_bh(&sc->rx.rxbuflock);
}

static void ath_edma_stop_recv(struct ath_softc *sc)
{
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
}

int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	spin_lock_init(&sc->sc_pcu_lock);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_lock_init(&sc->rx.rxbuflock);

	common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
			     sc->sc_ah->caps.rx_status_len;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		return ath_rx_edma_init(sc, nbufs);
	} else {
		ath_dbg(common, CONFIG, "cachelsz %u rxbufsize %u\n",
			common->cachelsz, common->rx_bufsize);

		/* Initialize rx descriptors */

		error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
					  "rx", nbufs, 1, 0);
		if (error != 0) {
			ath_err(common,
				"failed to allocate rx descriptors: %d\n",
				error);
			goto err;
		}

		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = ath_rxbuf_alloc(common, common->rx_bufsize,
					      GFP_KERNEL);
			if (skb == NULL) {
				error = -ENOMEM;
				goto err;
			}

			bf->bf_mpdu = skb;
			bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
							 common->rx_bufsize,
							 DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(sc->dev,
						       bf->bf_buf_addr))) {
				dev_kfree_skb_any(skb);
				bf->bf_mpdu = NULL;
				bf->bf_buf_addr = 0;
				ath_err(common,
					"dma_mapping_error() on RX init\n");
				error = -ENOMEM;
				goto err;
			}
		}
		sc->rx.rxlink = NULL;
	}

err:
	if (error)
		ath_rx_cleanup(sc);

	return error;
}
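
/*
 * Note the difference in mapping direction between the two DMA schemes:
 * legacy buffers are mapped DMA_FROM_DEVICE, while EDMA buffers are
 * DMA_BIDIRECTIONAL because the cleared rx status words have to reach
 * the device again each time a buffer is re-linked.
 */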

void ath_rx_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_rx_edma_cleanup(sc);
		return;
	} else {
		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = bf->bf_mpdu;
			if (skb) {
				dma_unmap_single(sc->dev, bf->bf_buf_addr,
						 common->rx_bufsize,
						 DMA_FROM_DEVICE);
				dev_kfree_skb(skb);
				bf->bf_buf_addr = 0;
				bf->bf_mpdu = NULL;
			}
		}

		if (sc->rx.rxdma.dd_desc_len != 0)
			ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
	}
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 *   - when scanning
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
	u32 rfilt;

	rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	if (sc->rx.rxfilter & FIF_PROBE_REQ)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/*
	 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
	 * mode interface or when in monitor mode. AP mode does not need this
	 * since it receives all in-BSS frames anyway.
	 */
	if (sc->sc_ah->is_monitoring)
		rfilt |= ATH9K_RX_FILTER_PROM;

	if (sc->rx.rxfilter & FIF_CONTROL)
		rfilt |= ATH9K_RX_FILTER_CONTROL;
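
	/*
	 * A single station interface can use the tighter MYBEACON filter
	 * and let the hardware drop beacons from foreign BSSes; with more
	 * than one vif, or when mac80211 asks for promiscuous beacon and
	 * probe response reception (e.g. while scanning), fall back to
	 * accepting all beacons.
	 */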
	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
	    (sc->nvifs <= 1) &&
	    !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
		rfilt |= ATH9K_RX_FILTER_MYBEACON;
	else
		rfilt |= ATH9K_RX_FILTER_BEACON;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
	    (sc->rx.rxfilter & FIF_PSPOLL))
		rfilt |= ATH9K_RX_FILTER_PSPOLL;

	if (conf_is_ht(&sc->hw->conf))
		rfilt |= ATH9K_RX_FILTER_COMP_BAR;

	if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
		/* The following may also be needed for other older chips */
		if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
			rfilt |= ATH9K_RX_FILTER_PROM;
		rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
	}

	return rfilt;
}

int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_edma_start_recv(sc);
		return 0;
	}

	spin_lock_bh(&sc->rx.rxbuflock);
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	sc->rx.rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);

start_recv:
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_OFFCHANNEL));

	spin_unlock_bh(&sc->rx.rxbuflock);

	return 0;
}

bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	bool stopped, reset = false;

	spin_lock_bh(&sc->rx.rxbuflock);
	ath9k_hw_abortpcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	stopped = ath9k_hw_stopdmarecv(ah, &reset);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_edma_stop_recv(sc);
	else
		sc->rx.rxlink = NULL;
	spin_unlock_bh(&sc->rx.rxbuflock);

	if (!(ah->ah_flags & AH_UNPLUGGED) &&
	    unlikely(!stopped)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Could not stop RX, we could be "
			"confusing the DMA engine when we start RX up\n");
		ATH_DBG_WARN_ON_ONCE(!stopped);
	}
	return stopped && !reset;
}

void ath_flushrecv(struct ath_softc *sc)
{
	sc->sc_flags |= SC_OP_RXFLUSH;
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_rx_tasklet(sc, 1, true);
	ath_rx_tasklet(sc, 1, false);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
}

static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
	/* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
	struct ieee80211_mgmt *mgmt;
	u8 *pos, *end, id, elen;
	struct ieee80211_tim_ie *tim;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	pos = mgmt->u.beacon.variable;
	end = skb->data + skb->len;

	while (pos + 2 < end) {
		id = *pos++;
		elen = *pos++;
		if (pos + elen > end)
			break;

		if (id == WLAN_EID_TIM) {
			if (elen < sizeof(*tim))
				break;
			tim = (struct ieee80211_tim_ie *) pos;
			if (tim->dtim_count != 0)
				break;
			return tim->bitmap_ctrl & 0x01;
		}

		pos += elen;
	}

	return false;
}
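
/*
 * For reference, the TIM element parsed above is laid out as
 * { u8 dtim_count; u8 dtim_period; u8 bitmap_ctrl; u8 virtual_map[]; },
 * so dtim_count == 0 means "this beacon is a DTIM" and bit 0 of
 * bitmap_ctrl is the traffic indicator for buffered multicast/broadcast.
 */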

static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	if (skb->len < 24 + 8 + 2 + 2)
		return;

	sc->ps_flags &= ~PS_WAIT_FOR_BEACON;

	if (sc->ps_flags & PS_BEACON_SYNC) {
		sc->ps_flags &= ~PS_BEACON_SYNC;
		ath_dbg(common, PS,
			"Reconfigure Beacon timers based on timestamp from the AP\n");
		ath_set_beacon(sc);
	}

	if (ath_beacon_dtim_pending_cab(skb)) {
		/*
		 * Remain awake waiting for buffered broadcast/multicast
		 * frames. If the last broadcast/multicast frame is not
		 * received properly, the next beacon frame will work as
		 * a backup trigger for returning into NETWORK SLEEP state,
		 * so we are waiting for it as well.
		 */
		ath_dbg(common, PS,
			"Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n");
		sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
		return;
	}

	if (sc->ps_flags & PS_WAIT_FOR_CAB) {
		/*
		 * This can happen if a broadcast frame is dropped or the AP
		 * fails to send a frame indicating that all CAB frames have
		 * been delivered.
		 */
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
		ath_dbg(common, PS, "PS wait for CAB frames timed out\n");
	}
}
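
/*
 * Rough lifecycle of the power-save wait flags handled here and in
 * ath_rx_ps() below: PS_WAIT_FOR_BEACON is cleared by our own beacon,
 * PS_WAIT_FOR_CAB by the last buffered multicast frame (one without
 * the MoreData bit), and PS_WAIT_FOR_PSPOLL_DATA by a unicast,
 * non-fragmented frame received in response to a PS-Poll; the chip is
 * allowed to doze again only once the relevant flags are clear.
 */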
static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon)
{
	struct ieee80211_hdr *hdr;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Process Beacon and CAB receive in PS state */
	if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
	    && mybeacon)
		ath_rx_ps_beacon(sc, skb);
	else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
		 (ieee80211_is_data(hdr->frame_control) ||
		  ieee80211_is_action(hdr->frame_control)) &&
		 is_multicast_ether_addr(hdr->addr1) &&
		 !ieee80211_has_moredata(hdr->frame_control)) {
		/*
		 * No more broadcast/multicast frames to be received at this
		 * point.
		 */
		sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
		ath_dbg(common, PS,
			"All PS CAB frames received, back to sleep\n");
	} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
		   !is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_morefrags(hdr->frame_control)) {
		sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
		ath_dbg(common, PS,
			"Going back to sleep after having received PS-Poll data (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}
}

static bool ath_edma_get_buffers(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype,
				 struct ath_rx_status *rs,
				 struct ath_buf **dest)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int ret;

	skb = skb_peek(&rx_edma->rx_fifo);
	if (!skb)
		return false;

	bf = SKB_CB_ATHBUF(skb);
	BUG_ON(!bf);

	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize, DMA_FROM_DEVICE);

	ret = ath9k_hw_process_rxdesc_edma(ah, rs, skb->data);
	if (ret == -EINPROGRESS) {
		/* let the device gain the buffer again */
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
					   common->rx_bufsize,
					   DMA_FROM_DEVICE);
		return false;
	}

	__skb_unlink(skb, &rx_edma->rx_fifo);
	if (ret == -EINVAL) {
		/* corrupt descriptor, skip this one and the following one */
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);

		skb = skb_peek(&rx_edma->rx_fifo);
		if (skb) {
			bf = SKB_CB_ATHBUF(skb);
			BUG_ON(!bf);

			__skb_unlink(skb, &rx_edma->rx_fifo);
			list_add_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_edma_buf_link(sc, qtype);
		} else {
			bf = NULL;
		}
	}

	*dest = bf;
	return true;
}

static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
						struct ath_rx_status *rs,
						enum ath9k_rx_qtype qtype)
{
	struct ath_buf *bf = NULL;

	while (ath_edma_get_buffers(sc, qtype, rs, &bf)) {
		if (!bf)
			continue;

		return bf;
	}
	return NULL;
}
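
/*
 * Note the contract between the two helpers above: ath_edma_get_buffers()
 * returns true with *dest == NULL after dropping a corrupt descriptor, so
 * the loop in ath_edma_get_next_rx_buf() keeps polling until it either
 * finds a completed frame or the FIFO has nothing more to hand back.
 */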

static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
					   struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct ath_buf *bf;
	int ret;

	if (list_empty(&sc->rx.rxbuf)) {
		sc->rx.rxlink = NULL;
		return NULL;
	}

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ds = bf->bf_desc;

	/*
	 * Must provide the virtual address of the current
	 * descriptor, the physical address, and the virtual
	 * address of the next descriptor in the h/w chain.
	 * This allows the HAL to look ahead to see if the
	 * hardware is done with a descriptor by checking the
	 * done bit in the following descriptor and the address
	 * of the current descriptor the DMA engine is working
	 * on. All this is necessary because of our use of
	 * a self-linked list to avoid rx overruns.
	 */
	ret = ath9k_hw_rxprocdesc(ah, ds, rs);
	if (ret == -EINPROGRESS) {
		struct ath_rx_status trs;
		struct ath_buf *tbf;
		struct ath_desc *tds;

		memset(&trs, 0, sizeof(trs));
		if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
			sc->rx.rxlink = NULL;
			return NULL;
		}

		tbf = list_entry(bf->list.next, struct ath_buf, list);

		/*
		 * On some hardware the descriptor status words could
		 * get corrupted, including the done bit. Because of
		 * this, check if the next descriptor's done bit is
		 * set or not.
		 *
		 * If the next descriptor's done bit is set, the current
		 * descriptor has been corrupted. Force s/w to discard
		 * this descriptor and continue...
		 */

		tds = tbf->bf_desc;
		ret = ath9k_hw_rxprocdesc(ah, tds, &trs);
		if (ret == -EINPROGRESS)
			return NULL;
	}

	if (!bf->bf_mpdu)
		return bf;

	/*
	 * Synchronize the DMA transfer with CPU before
	 * 1. accessing the frame
	 * 2. requeueing the same buffer to h/w
	 */
	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize,
				DMA_FROM_DEVICE);

	return bf;
}

/* Assumes you've already done the endian to CPU conversion */
static bool ath9k_rx_accept(struct ath_common *common,
			    struct ieee80211_hdr *hdr,
			    struct ieee80211_rx_status *rxs,
			    struct ath_rx_status *rx_stats,
			    bool *decrypt_error)
{
	struct ath_softc *sc = (struct ath_softc *) common->priv;
	bool is_mc, is_valid_tkip, strip_mic, mic_error;
	struct ath_hw *ah = common->ah;
	__le16 fc;
	u8 rx_status_len = ah->caps.rx_status_len;

	fc = hdr->frame_control;

	is_mc = !!is_multicast_ether_addr(hdr->addr1);
	is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID &&
			test_bit(rx_stats->rs_keyix, common->tkip_keymap);
	strip_mic = is_valid_tkip && ieee80211_is_data(fc) &&
		    !(rx_stats->rs_status &
		      (ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC |
		       ATH9K_RXERR_MIC | ATH9K_RXERR_KEYMISS));

	/*
	 * Key miss events are only relevant for pairwise keys where the
	 * descriptor does contain a valid key index. This has been observed
	 * mostly with CCMP encryption.
	 */
	if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID)
		rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;

	if (!rx_stats->rs_datalen)
		return false;

	/*
	 * rs_status follows rs_datalen so if rs_datalen is too large
	 * we can take a hint that hardware corrupted it, so ignore
	 * those frames.
	 */
	if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len))
		return false;

	/* Only use error bits from the last fragment */
	if (rx_stats->rs_more)
		return true;

	mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) &&
		    !ieee80211_has_morefrags(fc) &&
		    !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
		    (rx_stats->rs_status & ATH9K_RXERR_MIC);

	/*
	 * The rx_stats->rs_status will not be set until the end of the
	 * chained descriptors so it can be ignored if rs_more is set. The
	 * rs_more will be false at the last element of the chained
	 * descriptors.
	 */
	if (rx_stats->rs_status != 0) {
		u8 status_mask;

		if (rx_stats->rs_status & ATH9K_RXERR_CRC) {
			rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
			mic_error = false;
		}
		if (rx_stats->rs_status & ATH9K_RXERR_PHY)
			return false;

		if ((rx_stats->rs_status & ATH9K_RXERR_DECRYPT) ||
		    (!is_mc && (rx_stats->rs_status & ATH9K_RXERR_KEYMISS))) {
			*decrypt_error = true;
			mic_error = false;
		}

		/*
		 * Reject error frames with the exception of
		 * decryption and MIC failures. For monitor mode,
		 * we also ignore the CRC error.
		 */
		status_mask = ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
			      ATH9K_RXERR_KEYMISS;

		if (ah->is_monitoring && (sc->rx.rxfilter & FIF_FCSFAIL))
			status_mask |= ATH9K_RXERR_CRC;

		if (rx_stats->rs_status & ~status_mask)
			return false;
	}

	/*
	 * For unicast frames the MIC error bit can have false positives,
	 * so all MIC error reports need to be validated in software.
	 * False negatives are not common, so skip software verification
	 * if the hardware considers the MIC valid.
	 */
	if (strip_mic)
		rxs->flag |= RX_FLAG_MMIC_STRIPPED;
	else if (is_mc && mic_error)
		rxs->flag |= RX_FLAG_MMIC_ERROR;

	return true;
}

static int ath9k_process_rate(struct ath_common *common,
			      struct ieee80211_hw *hw,
			      struct ath_rx_status *rx_stats,
			      struct ieee80211_rx_status *rxs)
{
	struct ieee80211_supported_band *sband;
	enum ieee80211_band band;
	unsigned int i = 0;

	band = hw->conf.channel->band;
	sband = hw->wiphy->bands[band];

	if (rx_stats->rs_rate & 0x80) {
		/* HT rate */
		rxs->flag |= RX_FLAG_HT;
		if (rx_stats->rs_flags & ATH9K_RX_2040)
			rxs->flag |= RX_FLAG_40MHZ;
		if (rx_stats->rs_flags & ATH9K_RX_GI)
			rxs->flag |= RX_FLAG_SHORT_GI;
		rxs->rate_idx = rx_stats->rs_rate & 0x7f;
		return 0;
	}

	for (i = 0; i < sband->n_bitrates; i++) {
		if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
			rxs->rate_idx = i;
			return 0;
		}
		if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
			rxs->flag |= RX_FLAG_SHORTPRE;
			rxs->rate_idx = i;
			return 0;
		}
	}

	/*
	 * No valid hardware bitrate found -- we should not get here
	 * because hardware has already validated this frame as OK.
	 */
	ath_dbg(common, ANY,
		"unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
		rx_stats->rs_rate);

	return -EINVAL;
}

static void ath9k_process_rssi(struct ath_common *common,
			       struct ieee80211_hw *hw,
			       struct ieee80211_hdr *hdr,
			       struct ath_rx_status *rx_stats)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = common->ah;
	int last_rssi;
	int rssi = rx_stats->rs_rssi;

	if (!rx_stats->is_mybeacon ||
	    ((ah->opmode != NL80211_IFTYPE_STATION) &&
	     (ah->opmode != NL80211_IFTYPE_ADHOC)))
		return;

	if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr)
		ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);

	last_rssi = sc->last_rssi;
	if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
		rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
	if (rssi < 0)
		rssi = 0;

	/* Update Beacon RSSI, this is used by ANI. */
	ah->stats.avgbrssi = rssi;
}
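
/*
 * The beacon RSSI kept in ah->stats.avgbrssi above comes from
 * ATH_RSSI_LPF, a low-pass running average, so a single noisy sample
 * only nudges the value the ANI code uses for its immunity decisions.
 */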

/*
 * For Decrypt or Demic errors we only mark the packet status here and
 * always push the frame up to let mac80211 handle the actual error case,
 * be it no decryption key or a real decryption error. This lets us keep
 * statistics there.
 */
static int ath9k_rx_skb_preprocess(struct ath_common *common,
				   struct ieee80211_hw *hw,
				   struct ieee80211_hdr *hdr,
				   struct ath_rx_status *rx_stats,
				   struct ieee80211_rx_status *rx_status,
				   bool *decrypt_error)
{
	struct ath_hw *ah = common->ah;

	/*
	 * everything but the rate is checked here, the rate check is done
	 * separately to avoid doing two lookups for a rate for each frame.
	 */
	if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
		return -EINVAL;

	/* Only use status info from the last fragment */
	if (rx_stats->rs_more)
		return 0;

	ath9k_process_rssi(common, hw, hdr, rx_stats);

	if (ath9k_process_rate(common, hw, rx_stats, rx_status))
		return -EINVAL;

	rx_status->band = hw->conf.channel->band;
	rx_status->freq = hw->conf.channel->center_freq;
	rx_status->signal = ah->noise + rx_stats->rs_rssi;
	rx_status->antenna = rx_stats->rs_antenna;
	rx_status->flag |= RX_FLAG_MACTIME_MPDU;
	if (rx_stats->rs_moreaggr)
		rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;

	return 0;
}

static void ath9k_rx_skb_postprocess(struct ath_common *common,
				     struct sk_buff *skb,
				     struct ath_rx_status *rx_stats,
				     struct ieee80211_rx_status *rxs,
				     bool decrypt_error)
{
	struct ath_hw *ah = common->ah;
	struct ieee80211_hdr *hdr;
	int hdrlen, padpos, padsize;
	u8 keyix;
	__le16 fc;

	/* see if any padding is done by the hw and remove it */
	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;
	padpos = ath9k_cmn_padpos(hdr->frame_control);

	/* The MAC header is padded to have 32-bit boundary if the
	 * packet payload is non-zero. The general calculation for
	 * padsize would take into account odd header lengths:
	 * padsize = (4 - padpos % 4) % 4; However, since only
	 * even-length headers are used, padding can only be 0 or 2
	 * bytes and we can optimize this a bit. For example, a QoS
	 * data header of 26 bytes gives padpos = 26 and padsize = 2.
	 * In addition, we must not try to remove padding from short
	 * control frames that do not have a payload. */
	padsize = padpos & 3;
	if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	keyix = rx_stats->rs_keyix;

	if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
	    ieee80211_has_protected(fc)) {
		rxs->flag |= RX_FLAG_DECRYPTED;
	} else if (ieee80211_has_protected(fc)
		   && !decrypt_error && skb->len >= hdrlen + 4) {
		keyix = skb->data[hdrlen + 3] >> 6;

		if (test_bit(keyix, common->keymap))
			rxs->flag |= RX_FLAG_DECRYPTED;
	}
	if (ah->sw_mgmt_crypto &&
	    (rxs->flag & RX_FLAG_DECRYPTED) &&
	    ieee80211_is_mgmt(fc))
		/* Use software decrypt for management frames. */
		rxs->flag &= ~RX_FLAG_DECRYPTED;
}
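
/*
 * The (main_lna_conf << 4) | alt_lna_conf switch keys used below and in
 * ath_ant_div_conf_fast_divbias() pack both LNA settings into one byte;
 * going by the case comments, 0x1 is LNA2, 0x2 is LNA1, 0x3 is A+B and
 * 0x0 is A-B, so e.g. 0x21 means main = LNA1, alt = LNA2.
 */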

static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb,
				      struct ath_hw_antcomb_conf ant_conf,
				      int main_rssi_avg)
{
	antcomb->quick_scan_cnt = 0;

	if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
		antcomb->rssi_lna2 = main_rssi_avg;
	else if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA1)
		antcomb->rssi_lna1 = main_rssi_avg;

	switch ((ant_conf.main_lna_conf << 4) | ant_conf.alt_lna_conf) {
	case 0x10: /* LNA2 A-B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
		break;
	case 0x20: /* LNA1 A-B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
		break;
	case 0x21: /* LNA1 LNA2 */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		break;
	case 0x12: /* LNA2 LNA1 */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		break;
	case 0x13: /* LNA2 A+B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
		break;
	case 0x23: /* LNA1 A+B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
		break;
	default:
		break;
	}
}

static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
				struct ath_hw_antcomb_conf *div_ant_conf,
				int main_rssi_avg, int alt_rssi_avg,
				int alt_ratio)
{
	/* alt_good */
	switch (antcomb->quick_scan_cnt) {
	case 0:
		/* set alt to main, and alt to first conf */
		div_ant_conf->main_lna_conf = antcomb->main_conf;
		div_ant_conf->alt_lna_conf = antcomb->first_quick_scan_conf;
		break;
	case 1:
		/* set alt to main, and alt to second conf */
		div_ant_conf->main_lna_conf = antcomb->main_conf;
		div_ant_conf->alt_lna_conf = antcomb->second_quick_scan_conf;
		antcomb->rssi_first = main_rssi_avg;
		antcomb->rssi_second = alt_rssi_avg;

		if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
			/* main is LNA1 */
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		} else {
			if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
			      (alt_rssi_avg > main_rssi_avg +
			       ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
			     (alt_rssi_avg > main_rssi_avg)) &&
			    (antcomb->total_pkt_count > 50))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		}
		break;
	case 2:
		antcomb->alt_good = false;
		antcomb->scan_not_start = false;
		antcomb->scan = false;
		antcomb->rssi_first = main_rssi_avg;
		antcomb->rssi_third = alt_rssi_avg;

		if (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1)
			antcomb->rssi_lna1 = alt_rssi_avg;
		else if (antcomb->second_quick_scan_conf ==
			 ATH_ANT_DIV_COMB_LNA2)
			antcomb->rssi_lna2 = alt_rssi_avg;
		else if (antcomb->second_quick_scan_conf ==
			 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2) {
			if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)
				antcomb->rssi_lna2 = main_rssi_avg;
			else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1)
				antcomb->rssi_lna1 = main_rssi_avg;
		}

		if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
		    ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)
			div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
		else
			div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;

		if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		} else {
			if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
			      (alt_rssi_avg > main_rssi_avg +
			       ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
			     (alt_rssi_avg > main_rssi_avg)) &&
			    (antcomb->total_pkt_count > 50))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		}

		/* set alt to the conf with maximum ratio */
		if (antcomb->first_ratio && antcomb->second_ratio) {
			if (antcomb->rssi_second > antcomb->rssi_third) {
				/* first alt */
				if ((antcomb->first_quick_scan_conf ==
				     ATH_ANT_DIV_COMB_LNA1) ||
				    (antcomb->first_quick_scan_conf ==
				     ATH_ANT_DIV_COMB_LNA2))
					/* Set alt LNA1 or LNA2 */
					if (div_ant_conf->main_lna_conf ==
					    ATH_ANT_DIV_COMB_LNA2)
						div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA1;
					else
						div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA2;
				else
					/* Set alt to A+B or A-B */
					div_ant_conf->alt_lna_conf =
						antcomb->first_quick_scan_conf;
			} else if ((antcomb->second_quick_scan_conf ==
				    ATH_ANT_DIV_COMB_LNA1) ||
				   (antcomb->second_quick_scan_conf ==
				    ATH_ANT_DIV_COMB_LNA2)) {
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			} else {
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->second_quick_scan_conf;
			}
		} else if (antcomb->first_ratio) {
			/* first alt */
			if ((antcomb->first_quick_scan_conf ==
			     ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->first_quick_scan_conf ==
			     ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->first_quick_scan_conf;
		} else if (antcomb->second_ratio) {
			/* second alt */
			if ((antcomb->second_quick_scan_conf ==
			     ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->second_quick_scan_conf ==
			     ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->second_quick_scan_conf;
		} else {
			/* main is largest */
			if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf = antcomb->main_conf;
		}
		break;
	default:
		break;
	}
}

static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
					  struct ath_ant_comb *antcomb,
					  int alt_ratio)
{
	if (ant_conf->div_group == 0) {
		/* Adjust the fast_div_bias based on main and alt lna conf */
		switch ((ant_conf->main_lna_conf << 4) |
			ant_conf->alt_lna_conf) {
		case 0x01: /* A-B LNA2 */
			ant_conf->fast_div_bias = 0x3b;
			break;
		case 0x02: /* A-B LNA1 */
			ant_conf->fast_div_bias = 0x3d;
			break;
		case 0x03: /* A-B A+B */
			ant_conf->fast_div_bias = 0x1;
			break;
		case 0x10: /* LNA2 A-B */
			ant_conf->fast_div_bias = 0x7;
			break;
		case 0x12: /* LNA2 LNA1 */
			ant_conf->fast_div_bias = 0x2;
			break;
		case 0x13: /* LNA2 A+B */
			ant_conf->fast_div_bias = 0x7;
			break;
		case 0x20: /* LNA1 A-B */
			ant_conf->fast_div_bias = 0x6;
			break;
		case 0x21: /* LNA1 LNA2 */
			ant_conf->fast_div_bias = 0x0;
			break;
		case 0x23: /* LNA1 A+B */
			ant_conf->fast_div_bias = 0x6;
			break;
		case 0x30: /* A+B A-B */
			ant_conf->fast_div_bias = 0x1;
			break;
		case 0x31: /* A+B LNA2 */
			ant_conf->fast_div_bias = 0x3b;
			break;
		case 0x32: /* A+B LNA1 */
			ant_conf->fast_div_bias = 0x3d;
			break;
		default:
			break;
		}
	} else if (ant_conf->div_group == 1) {
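		/*
		 * For div_group 1 (and the div_group 2 branch further down)
		 * the bias values are largely fixed; the only tuning below is
		 * to relax the fast-diversity bias once the alternate chain
		 * looks clearly healthy (not scanning and alt_ratio above
		 * ATH_ANT_DIV_COMB_ALT_ANT_RATIO).
		 */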
		/* Adjust the fast_div_bias based on main and alt_lna_conf */
		switch ((ant_conf->main_lna_conf << 4) |
			ant_conf->alt_lna_conf) {
		case 0x01: /* A-B LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x02: /* A-B LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x03: /* A-B A+B */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x10: /* LNA2 A-B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x3f;
			else
				ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x12: /* LNA2 LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x13: /* LNA2 A+B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x3f;
			else
				ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x20: /* LNA1 A-B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x3f;
			else
				ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x21: /* LNA1 LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x23: /* LNA1 A+B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x3f;
			else
				ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x30: /* A+B A-B */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x31: /* A+B LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x32: /* A+B LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		default:
			break;
		}
	} else if (ant_conf->div_group == 2) {
		/* Adjust the fast_div_bias based on main and alt_lna_conf */
		switch ((ant_conf->main_lna_conf << 4) |
			ant_conf->alt_lna_conf) {
		case 0x01: /* A-B LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x02: /* A-B LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x03: /* A-B A+B */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x10: /* LNA2 A-B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x1;
			else
				ant_conf->fast_div_bias = 0x2;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x12: /* LNA2 LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x13: /* LNA2 A+B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x1;
			else
				ant_conf->fast_div_bias = 0x2;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x20: /* LNA1 A-B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x1;
			else
				ant_conf->fast_div_bias = 0x2;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x21: /* LNA1 LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x23: /* LNA1 A+B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x1;
			else
				ant_conf->fast_div_bias = 0x2;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x30: /* A+B A-B */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x31: /* A+B LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x32: /* A+B LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		default:
			break;
		}
	}
}

/* Antenna diversity and combining */
static void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
{
	struct ath_hw_antcomb_conf div_ant_conf;
	struct ath_ant_comb *antcomb = &sc->ant_comb;
	int alt_ratio = 0, alt_rssi_avg = 0, main_rssi_avg = 0, curr_alt_set;
	int curr_main_set;
	int main_rssi = rs->rs_rssi_ctl0;
	int alt_rssi = rs->rs_rssi_ctl1;
	int rx_ant_conf, main_ant_conf;
	bool short_scan = false;

	rx_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_CURRENT_SHIFT) &
		      ATH_ANT_RX_MASK;
	main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) &
			ATH_ANT_RX_MASK;

	/* Record packet only when both main_rssi and alt_rssi are positive */
	if (main_rssi > 0 && alt_rssi > 0) {
		antcomb->total_pkt_count++;
		antcomb->main_total_rssi += main_rssi;
		antcomb->alt_total_rssi += alt_rssi;
		if (main_ant_conf == rx_ant_conf)
			antcomb->main_recv_cnt++;
		else
			antcomb->alt_recv_cnt++;
	}

	/* Short scan check */
	if (antcomb->scan && antcomb->alt_good) {
		if (time_after(jiffies, antcomb->scan_start_time +
			       msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR)))
			short_scan = true;
		else if (antcomb->total_pkt_count ==
			 ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) {
			alt_ratio = ((antcomb->alt_recv_cnt * 100) /
				     antcomb->total_pkt_count);
			if (alt_ratio < ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
				short_scan = true;
		}
	}

	if (((antcomb->total_pkt_count < ATH_ANT_DIV_COMB_MAX_PKTCOUNT) ||
	     rs->rs_moreaggr) && !short_scan)
		return;

	if (antcomb->total_pkt_count) {
		alt_ratio = ((antcomb->alt_recv_cnt * 100) /
			     antcomb->total_pkt_count);
		main_rssi_avg = (antcomb->main_total_rssi /
				 antcomb->total_pkt_count);
		alt_rssi_avg = (antcomb->alt_total_rssi /
				antcomb->total_pkt_count);
	}
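
	/*
	 * Illustrative numbers: with total_pkt_count = 128 and
	 * alt_recv_cnt = 48, alt_ratio = 48 * 100 / 128 = 37 percent,
	 * and the two averages are plain integer means of the
	 * accumulated per-chain RSSI sums.
	 */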

	ath9k_hw_antdiv_comb_conf_get(sc->sc_ah, &div_ant_conf);
	curr_alt_set = div_ant_conf.alt_lna_conf;
	curr_main_set = div_ant_conf.main_lna_conf;

	antcomb->count++;

	if (antcomb->count == ATH_ANT_DIV_COMB_MAX_COUNT) {
		if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
			ath_lnaconf_alt_good_scan(antcomb, div_ant_conf,
						  main_rssi_avg);
			antcomb->alt_good = true;
		} else {
			antcomb->alt_good = false;
		}

		antcomb->count = 0;
		antcomb->scan = true;
		antcomb->scan_not_start = true;
	}

	if (!antcomb->scan) {
		if (ath_ant_div_comb_alt_check(div_ant_conf.div_group,
					       alt_ratio, curr_main_set,
					       curr_alt_set, alt_rssi_avg,
					       main_rssi_avg)) {
			if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) {
				/* Switch main and alt LNA */
				div_ant_conf.main_lna_conf =
					ATH_ANT_DIV_COMB_LNA2;
				div_ant_conf.alt_lna_conf =
					ATH_ANT_DIV_COMB_LNA1;
			} else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) {
				div_ant_conf.main_lna_conf =
					ATH_ANT_DIV_COMB_LNA1;
				div_ant_conf.alt_lna_conf =
					ATH_ANT_DIV_COMB_LNA2;
			}

			goto div_comb_done;
		} else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) &&
			   (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) {
			/* Set alt to another LNA */
			if (curr_main_set == ATH_ANT_DIV_COMB_LNA2)
				div_ant_conf.alt_lna_conf =
					ATH_ANT_DIV_COMB_LNA1;
			else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1)
				div_ant_conf.alt_lna_conf =
					ATH_ANT_DIV_COMB_LNA2;

			goto div_comb_done;
		}

		if ((alt_rssi_avg < (main_rssi_avg +
				     div_ant_conf.lna1_lna2_delta)))
			goto div_comb_done;
	}

	if (!antcomb->scan_not_start) {
		switch (curr_alt_set) {
		case ATH_ANT_DIV_COMB_LNA2:
			antcomb->rssi_lna2 = alt_rssi_avg;
			antcomb->rssi_lna1 = main_rssi_avg;
			antcomb->scan = true;
			/* set to A+B */
			div_ant_conf.main_lna_conf =
				ATH_ANT_DIV_COMB_LNA1;
			div_ant_conf.alt_lna_conf =
				ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1:
			antcomb->rssi_lna1 = alt_rssi_avg;
			antcomb->rssi_lna2 = main_rssi_avg;
			antcomb->scan = true;
			/* set to A+B */
			div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
			div_ant_conf.alt_lna_conf =
				ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
			antcomb->rssi_add = alt_rssi_avg;
			antcomb->scan = true;
			/* set to A-B */
			div_ant_conf.alt_lna_conf =
				ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2:
			antcomb->rssi_sub = alt_rssi_avg;
			antcomb->scan = false;
			if (antcomb->rssi_lna2 >
			    (antcomb->rssi_lna1 +
			     ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
				/* use LNA2 as main LNA */
				if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
				    (antcomb->rssi_add > antcomb->rssi_sub)) {
					/* set to A+B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
				} else if (antcomb->rssi_sub >
					   antcomb->rssi_lna1) {
					/* set to A-B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
				} else {
					/* set to LNA1 */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				}
			} else {
				/* use LNA1 as main LNA */
				if ((antcomb->rssi_add > antcomb->rssi_lna2) &&
				    (antcomb->rssi_add > antcomb->rssi_sub)) {
					/* set to A+B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
				} else if (antcomb->rssi_sub >
					   antcomb->rssi_lna1) {
					/* set to A-B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
				} else {
					/* set to LNA2 */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				}
			}
			break;
		default:
			break;
		}
	} else {
		if (!antcomb->alt_good) {
			antcomb->scan_not_start = false;
			/* Set alt to another LNA */
			if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) {
				div_ant_conf.main_lna_conf =
					ATH_ANT_DIV_COMB_LNA2;
				div_ant_conf.alt_lna_conf =
					ATH_ANT_DIV_COMB_LNA1;
			} else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) {
				div_ant_conf.main_lna_conf =
					ATH_ANT_DIV_COMB_LNA1;
				div_ant_conf.alt_lna_conf =
					ATH_ANT_DIV_COMB_LNA2;
			}
			goto div_comb_done;
		}
	}

	ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf,
					   main_rssi_avg, alt_rssi_avg,
					   alt_ratio);

	antcomb->quick_scan_cnt++;

div_comb_done:
	ath_ant_div_conf_fast_divbias(&div_ant_conf, antcomb, alt_ratio);
	ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf);

	antcomb->scan_start_time = jiffies;
	antcomb->total_pkt_count = 0;
	antcomb->main_total_rssi = 0;
	antcomb->alt_total_rssi = 0;
	antcomb->main_recv_cnt = 0;
	antcomb->alt_recv_cnt = 0;
}

int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
	struct ath_buf *bf;
	struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
	struct ieee80211_rx_status *rxs;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	int retval;
	bool decrypt_error = false;
	struct ath_rx_status rs;
	enum ath9k_rx_qtype qtype;
	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	int dma_type;
	u8 rx_status_len = ah->caps.rx_status_len;
	u64 tsf = 0;
	u32 tsf_lower = 0;
	unsigned long flags;

	if (edma)
		dma_type = DMA_BIDIRECTIONAL;
	else
		dma_type = DMA_FROM_DEVICE;

	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
	spin_lock_bh(&sc->rx.rxbuflock);

	tsf = ath9k_hw_gettsf64(ah);
	tsf_lower = tsf & 0xffffffff;

	do {
		/* If handling rx interrupt and flush is in progress => exit */
		if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
			break;

		memset(&rs, 0, sizeof(rs));
		if (edma)
			bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
		else
			bf = ath_get_next_rx_buf(sc, &rs);

		if (!bf)
			break;

		skb = bf->bf_mpdu;
		if (!skb)
			continue;

		/*
		 * Take frame header from the first fragment and RX status from
		 * the last one.
		 */
		if (sc->rx.frag)
			hdr_skb = sc->rx.frag;
		else
			hdr_skb = skb;

		hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
		rxs = IEEE80211_SKB_RXCB(hdr_skb);
		if (ieee80211_is_beacon(hdr->frame_control) &&
		    !is_zero_ether_addr(common->curbssid) &&
		    !compare_ether_addr(hdr->addr3, common->curbssid))
			rs.is_mybeacon = true;
		else
			rs.is_mybeacon = false;

		ath_debug_stat_rx(sc, &rs);

		/*
		 * If we're asked to flush receive queue, directly
		 * chain it back at the queue without processing it.
		 */
		if (sc->sc_flags & SC_OP_RXFLUSH)
			goto requeue_drop_frag;

		memset(rxs, 0, sizeof(struct ieee80211_rx_status));
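
		/*
		 * Reconstruct the full 64-bit receive timestamp below:
		 * rs_tstamp only carries the low 32 bits, so if it sits on
		 * the other side of a 32-bit TSF rollover from tsf_lower
		 * (detected by a gap larger than 0x10000000), shift the
		 * upper half by one full 2^32 period. E.g. tsf_lower =
		 * 0x00000010 with rs_tstamp = 0xfffffff0 means the frame
		 * arrived just before the rollover, so 2^32 is subtracted.
		 */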
		rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
		if (rs.rs_tstamp > tsf_lower &&
		    unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
			rxs->mactime -= 0x100000000ULL;

		if (rs.rs_tstamp < tsf_lower &&
		    unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
			rxs->mactime += 0x100000000ULL;

		retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
						 rxs, &decrypt_error);
		if (retval)
			goto requeue_drop_frag;

		/* Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb */
		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize,
					      GFP_ATOMIC);

		/* If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
		 * skb and put it at the tail of the sc->rx.rxbuf list for
		 * processing. */
		if (!requeue_skb)
			goto requeue_drop_frag;

		/* Unmap the frame */
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 common->rx_bufsize, dma_type);

		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
		if (ah->caps.rx_status_len)
			skb_pull(skb, ah->caps.rx_status_len);

		if (!rs.rs_more)
			ath9k_rx_skb_postprocess(common, hdr_skb, &rs,
						 rxs, decrypt_error);

		/* We will now give hardware our shiny new allocated skb */
		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
						 common->rx_bufsize, dma_type);
		if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
			dev_kfree_skb_any(requeue_skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common, "dma_mapping_error() on RX\n");
			ieee80211_rx(hw, skb);
			break;
		}

		if (rs.rs_more) {
			/*
			 * rs_more indicates chained descriptors which can be
			 * used to link buffers together for a sort of
			 * scatter-gather operation.
			 */
			if (sc->rx.frag) {
				/* too many fragments - cannot handle frame */
				dev_kfree_skb_any(sc->rx.frag);
				dev_kfree_skb_any(skb);
				skb = NULL;
			}
			sc->rx.frag = skb;
			goto requeue;
		}

		if (sc->rx.frag) {
			int space = skb->len - skb_tailroom(hdr_skb);

			if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
				dev_kfree_skb(skb);
				goto requeue_drop_frag;
			}

			sc->rx.frag = NULL;

			skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
						  skb->len);
			dev_kfree_skb_any(skb);
			skb = hdr_skb;
		}

		if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
			/*
			 * change the default rx antenna if rx diversity
			 * chooses the other antenna 3 times in a row.
			 */
			if (sc->rx.defant != rs.rs_antenna) {
				if (++sc->rx.rxotherant >= 3)
					ath_setdefantenna(sc, rs.rs_antenna);
			} else {
				sc->rx.rxotherant = 0;
			}
		}
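
		/*
		 * If the hardware stripped a TKIP Michael MIC during rx,
		 * the 8-byte MIC is still counted in the frame length, so
		 * trim it off before handing the skb to mac80211.
		 */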
		if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
			skb_trim(skb, skb->len - 8);

		spin_lock_irqsave(&sc->sc_pm_lock, flags);

		if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
				     PS_WAIT_FOR_CAB |
				     PS_WAIT_FOR_PSPOLL_DATA)) ||
		    ath9k_check_auto_sleep(sc))
			ath_rx_ps(sc, skb, rs.is_mybeacon);
		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

		if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) &&
		    sc->ant_rx == 3)
			ath_ant_comb_scan(sc, &rs);

		ieee80211_rx(hw, skb);

requeue_drop_frag:
		if (sc->rx.frag) {
			dev_kfree_skb_any(sc->rx.frag);
			sc->rx.frag = NULL;
		}
requeue:
		if (edma) {
			list_add_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_edma_buf_link(sc, qtype);
		} else {
			list_move_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_buf_link(sc, bf);
			if (!flush)
				ath9k_hw_rxena(ah);
		}
	} while (1);

	spin_unlock_bh(&sc->rx.rxbuflock);

	if (!(ah->imask & ATH9K_INT_RXEOL)) {
		ah->imask |= (ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
		ath9k_hw_set_interrupts(ah);
	}

	return 0;
}