/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define SKB_CB_ATHBUF(__skb)	(*((struct ath_buf **)__skb->cb))

static inline bool ath_is_alt_ant_ratio_better(int alt_ratio, int maxdelta,
					       int mindelta, int main_rssi_avg,
					       int alt_rssi_avg, int pkt_count)
{
	return (((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
		 (alt_rssi_avg > main_rssi_avg + maxdelta)) ||
		(alt_rssi_avg > main_rssi_avg + mindelta)) && (pkt_count > 50);
}

static inline bool ath_ant_div_comb_alt_check(u8 div_group, int alt_ratio,
					      int curr_main_set,
					      int curr_alt_set,
					      int alt_rssi_avg,
					      int main_rssi_avg)
{
	bool result = false;
	switch (div_group) {
	case 0:
		if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
			result = true;
		break;
	case 1:
	case 2:
		if ((((curr_main_set == ATH_ANT_DIV_COMB_LNA2) &&
		      (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) &&
		      (alt_rssi_avg >= (main_rssi_avg - 5))) ||
		     ((curr_main_set == ATH_ANT_DIV_COMB_LNA1) &&
		      (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) &&
		      (alt_rssi_avg >= (main_rssi_avg - 2)))) &&
		    (alt_rssi_avg >= 4))
			result = true;
		else
			result = false;
		break;
	}

	return result;
}

static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
{
	return sc->ps_enabled &&
	       (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
}
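/*
 * Note: this file implements two RX paths. Pre-EDMA chips use a
 * descriptor ring that the driver chains by hand (ath_rx_buf_link()
 * below, driven through sc->rx.rxlink), while EDMA-capable chips
 * (ATH9K_HW_CAP_EDMA) use high/low priority FIFOs into which whole
 * buffers are pushed; for those, SKB_CB_ATHBUF() above keeps the
 * owning ath_buf reachable from the skb's control block.
 */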
/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct sk_buff *skb;

	ATH_RXBUF_RESET(bf);

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	BUG_ON(skb == NULL);
	ds->ds_vdata = skb->data;

	/*
	 * setup rx descriptors. The rx_bufsize here tells the hardware
	 * how much data it can DMA to us and that we are prepared
	 * to process
	 */
	ath9k_hw_setuprxdesc(ah, ds,
			     common->rx_bufsize,
			     0);

	if (sc->rx.rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->rx.rxlink = bf->bf_daddr;

	sc->rx.rxlink = &ds->ds_link;
}

static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(sc->sc_ah, antenna);
	sc->rx.defant = antenna;
	sc->rx.rxotherant = 0;
}

static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	ath_hw_setbssidmask(common);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}
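/*
 * EDMA RX: the hardware exposes two FIFOs (high and low priority)
 * instead of a linked descriptor ring. Each buffer carries its own
 * completion status in the first rx_status_len bytes, which is why
 * the status area is zeroed and synced back to the device before the
 * buffer is handed over in ath_rx_edma_buf_link() below.
 */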
static bool ath_rx_edma_buf_link(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;
	struct ath_buf *bf;

	rx_edma = &sc->rx.rx_edma[qtype];
	if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
		return false;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	list_del_init(&bf->list);

	skb = bf->bf_mpdu;

	ATH_RXBUF_RESET(bf);
	memset(skb->data, 0, ah->caps.rx_status_len);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   ah->caps.rx_status_len, DMA_TO_DEVICE);

	SKB_CB_ATHBUF(skb) = bf;
	ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
	skb_queue_tail(&rx_edma->rx_fifo, skb);

	return true;
}

static void ath_rx_addbuffer_edma(struct ath_softc *sc,
				  enum ath9k_rx_qtype qtype, int size)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_buf *bf, *tbf;

	if (list_empty(&sc->rx.rxbuf)) {
		ath_dbg(common, QUEUE, "No free rx buf available\n");
		return;
	}

	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list)
		if (!ath_rx_edma_buf_link(sc, qtype))
			break;
}

static void ath_rx_remove_buffer(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_buf *bf;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;

	rx_edma = &sc->rx.rx_edma[qtype];

	while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}
}

static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf;

	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		if (bf->bf_mpdu) {
			dma_unmap_single(sc->dev, bf->bf_buf_addr,
					 common->rx_bufsize,
					 DMA_BIDIRECTIONAL);
			dev_kfree_skb_any(bf->bf_mpdu);
			bf->bf_buf_addr = 0;
			bf->bf_mpdu = NULL;
		}
	}

	INIT_LIST_HEAD(&sc->rx.rxbuf);

	kfree(sc->rx.rx_bufptr);
	sc->rx.rx_bufptr = NULL;
}
static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
{
	skb_queue_head_init(&rx_edma->rx_fifo);
	rx_edma->rx_fifo_hwsize = size;
}

static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0, i;
	u32 size;

	ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
				ah->caps.rx_status_len);

	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
			       ah->caps.rx_lp_qdepth);
	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
			       ah->caps.rx_hp_qdepth);

	size = sizeof(struct ath_buf) * nbufs;
	bf = kzalloc(size, GFP_KERNEL);
	if (!bf)
		return -ENOMEM;

	INIT_LIST_HEAD(&sc->rx.rxbuf);
	sc->rx.rx_bufptr = bf;

	for (i = 0; i < nbufs; i++, bf++) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
		if (!skb) {
			error = -ENOMEM;
			goto rx_init_fail;
		}

		memset(skb->data, 0, common->rx_bufsize);
		bf->bf_mpdu = skb;

		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common,
				"dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto rx_init_fail;
		}

		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}

	return 0;

rx_init_fail:
	ath_rx_edma_cleanup(sc);
	return error;
}
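/*
 * The EDMA buffers above are mapped DMA_BIDIRECTIONAL (rather than
 * DMA_FROM_DEVICE as in the legacy path) because both sides write to
 * them: the host clears the in-buffer status area before queueing and
 * the hardware writes status plus frame data on completion.
 */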
static void ath_edma_start_recv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxbuflock);

	ath9k_hw_rxena(sc->sc_ah);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);

	ath_opmode_init(sc);

	ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_OFFCHANNEL));

	spin_unlock_bh(&sc->rx.rxbuflock);
}

static void ath_edma_stop_recv(struct ath_softc *sc)
{
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
}

int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	spin_lock_init(&sc->sc_pcu_lock);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_lock_init(&sc->rx.rxbuflock);

	common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
			     sc->sc_ah->caps.rx_status_len;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		return ath_rx_edma_init(sc, nbufs);
	} else {
		ath_dbg(common, CONFIG, "cachelsz %u rxbufsize %u\n",
			common->cachelsz, common->rx_bufsize);

		/* Initialize rx descriptors */

		error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
					  "rx", nbufs, 1, 0);
		if (error != 0) {
			ath_err(common,
				"failed to allocate rx descriptors: %d\n",
				error);
			goto err;
		}

		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = ath_rxbuf_alloc(common, common->rx_bufsize,
					      GFP_KERNEL);
			if (skb == NULL) {
				error = -ENOMEM;
				goto err;
			}

			bf->bf_mpdu = skb;
			bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
							 common->rx_bufsize,
							 DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(sc->dev,
						       bf->bf_buf_addr))) {
				dev_kfree_skb_any(skb);
				bf->bf_mpdu = NULL;
				bf->bf_buf_addr = 0;
				ath_err(common,
					"dma_mapping_error() on RX init\n");
				error = -ENOMEM;
				goto err;
			}
		}
		sc->rx.rxlink = NULL;
	}

err:
	if (error)
		ath_rx_cleanup(sc);

	return error;
}

void ath_rx_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_rx_edma_cleanup(sc);
		return;
	} else {
		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = bf->bf_mpdu;
			if (skb) {
				dma_unmap_single(sc->dev, bf->bf_buf_addr,
						 common->rx_bufsize,
						 DMA_FROM_DEVICE);
				dev_kfree_skb(skb);
				bf->bf_buf_addr = 0;
				bf->bf_mpdu = NULL;
			}
		}

		if (sc->rx.rxdma.dd_desc_len != 0)
			ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
	}
}
/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 *   - when scanning
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
	u32 rfilt;

	rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	if (sc->rx.rxfilter & FIF_PROBE_REQ)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/*
	 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
	 * mode interface or when in monitor mode. AP mode does not need this
	 * since it receives all in-BSS frames anyway.
	 */
	if (sc->sc_ah->is_monitoring)
		rfilt |= ATH9K_RX_FILTER_PROM;

	if (sc->rx.rxfilter & FIF_CONTROL)
		rfilt |= ATH9K_RX_FILTER_CONTROL;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
	    (sc->nvifs <= 1) &&
	    !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
		rfilt |= ATH9K_RX_FILTER_MYBEACON;
	else
		rfilt |= ATH9K_RX_FILTER_BEACON;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
	    (sc->rx.rxfilter & FIF_PSPOLL))
		rfilt |= ATH9K_RX_FILTER_PSPOLL;

	if (conf_is_ht(&sc->hw->conf))
		rfilt |= ATH9K_RX_FILTER_COMP_BAR;

	if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
		/* The following may also be needed for other older chips */
		if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
			rfilt |= ATH9K_RX_FILTER_PROM;
		rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
	}

	return rfilt;
}
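/*
 * The filter computed above is installed from ath_opmode_init().
 * ath_startrecv() below primes the hardware before enabling the PCU:
 * all buffers are (re)linked first, the first descriptor is handed to
 * the MAC, and only then is receive started.
 */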
int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_edma_start_recv(sc);
		return 0;
	}

	spin_lock_bh(&sc->rx.rxbuflock);
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	sc->rx.rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);

start_recv:
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_OFFCHANNEL));

	spin_unlock_bh(&sc->rx.rxbuflock);

	return 0;
}

bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	bool stopped, reset = false;

	spin_lock_bh(&sc->rx.rxbuflock);
	ath9k_hw_abortpcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	stopped = ath9k_hw_stopdmarecv(ah, &reset);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_edma_stop_recv(sc);
	else
		sc->rx.rxlink = NULL;
	spin_unlock_bh(&sc->rx.rxbuflock);

	if (!(ah->ah_flags & AH_UNPLUGGED) &&
	    unlikely(!stopped)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Could not stop RX, we could be "
			"confusing the DMA engine when we start RX up\n");
		ATH_DBG_WARN_ON_ONCE(!stopped);
	}
	return stopped && !reset;
}

void ath_flushrecv(struct ath_softc *sc)
{
	sc->sc_flags |= SC_OP_RXFLUSH;
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_rx_tasklet(sc, 1, true);
	ath_rx_tasklet(sc, 1, false);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
}
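/*
 * ath_beacon_dtim_pending_cab() walks the beacon's information
 * elements (each IE is a one byte ID, one byte length, then payload)
 * looking for the TIM element; a DTIM count of zero plus bit 0 of
 * bitmap_ctrl set means the AP is about to deliver buffered
 * broadcast/multicast (CAB) traffic.
 */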
static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
	/* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
	struct ieee80211_mgmt *mgmt;
	u8 *pos, *end, id, elen;
	struct ieee80211_tim_ie *tim;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	pos = mgmt->u.beacon.variable;
	end = skb->data + skb->len;

	while (pos + 2 < end) {
		id = *pos++;
		elen = *pos++;
		if (pos + elen > end)
			break;

		if (id == WLAN_EID_TIM) {
			if (elen < sizeof(*tim))
				break;
			tim = (struct ieee80211_tim_ie *) pos;
			if (tim->dtim_count != 0)
				break;
			return tim->bitmap_ctrl & 0x01;
		}

		pos += elen;
	}

	return false;
}

static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	if (skb->len < 24 + 8 + 2 + 2)
		return;

	sc->ps_flags &= ~PS_WAIT_FOR_BEACON;

	if (sc->ps_flags & PS_BEACON_SYNC) {
		sc->ps_flags &= ~PS_BEACON_SYNC;
		ath_dbg(common, PS,
			"Reconfigure Beacon timers based on timestamp from the AP\n");
		ath_set_beacon(sc);
	}

	if (ath_beacon_dtim_pending_cab(skb)) {
		/*
		 * Remain awake waiting for buffered broadcast/multicast
		 * frames. If the last broadcast/multicast frame is not
		 * received properly, the next beacon frame will work as
		 * a backup trigger for returning into NETWORK SLEEP state,
		 * so we are waiting for it as well.
		 */
		ath_dbg(common, PS,
			"Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n");
		sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
		return;
	}

	if (sc->ps_flags & PS_WAIT_FOR_CAB) {
		/*
		 * This can happen if a broadcast frame is dropped or the AP
		 * fails to send a frame indicating that all CAB frames have
		 * been delivered.
		 */
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
		ath_dbg(common, PS, "PS wait for CAB frames timed out\n");
	}
}

static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon)
{
	struct ieee80211_hdr *hdr;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Process Beacon and CAB receive in PS state */
	if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
	    && mybeacon)
		ath_rx_ps_beacon(sc, skb);
	else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
		 (ieee80211_is_data(hdr->frame_control) ||
		  ieee80211_is_action(hdr->frame_control)) &&
		 is_multicast_ether_addr(hdr->addr1) &&
		 !ieee80211_has_moredata(hdr->frame_control)) {
		/*
		 * No more broadcast/multicast frames to be received at this
		 * point.
		 */
		sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
		ath_dbg(common, PS,
			"All PS CAB frames received, back to sleep\n");
	} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
		   !is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_morefrags(hdr->frame_control)) {
		sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
		ath_dbg(common, PS,
			"Going back to sleep after having received PS-Poll data (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}
}
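/*
 * EDMA completion handling: ath9k_hw_process_rxdesc_edma() returns
 * -EINPROGRESS while the hardware still owns the buffer (the in-buffer
 * status is not yet valid), in which case the buffer is synced back to
 * the device and left in the FIFO. -EINVAL marks a corrupt status; in
 * that case this buffer and the one after it are both recycled.
 */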
static bool ath_edma_get_buffers(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype,
				 struct ath_rx_status *rs,
				 struct ath_buf **dest)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int ret;

	skb = skb_peek(&rx_edma->rx_fifo);
	if (!skb)
		return false;

	bf = SKB_CB_ATHBUF(skb);
	BUG_ON(!bf);

	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize, DMA_FROM_DEVICE);

	ret = ath9k_hw_process_rxdesc_edma(ah, rs, skb->data);
	if (ret == -EINPROGRESS) {
		/* let the device gain the buffer again */
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
					   common->rx_bufsize,
					   DMA_FROM_DEVICE);
		return false;
	}

	__skb_unlink(skb, &rx_edma->rx_fifo);
	if (ret == -EINVAL) {
		/* corrupt descriptor, skip this one and the following one */
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);

		skb = skb_peek(&rx_edma->rx_fifo);
		if (skb) {
			bf = SKB_CB_ATHBUF(skb);
			BUG_ON(!bf);

			__skb_unlink(skb, &rx_edma->rx_fifo);
			list_add_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_edma_buf_link(sc, qtype);
		} else {
			bf = NULL;
		}
	}

	*dest = bf;
	return true;
}

static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
						struct ath_rx_status *rs,
						enum ath9k_rx_qtype qtype)
{
	struct ath_buf *bf = NULL;

	while (ath_edma_get_buffers(sc, qtype, rs, &bf)) {
		if (!bf)
			continue;

		return bf;
	}
	return NULL;
}

static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
					   struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct ath_buf *bf;
	int ret;

	if (list_empty(&sc->rx.rxbuf)) {
		sc->rx.rxlink = NULL;
		return NULL;
	}

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ds = bf->bf_desc;

	/*
	 * Must provide the virtual address of the current
	 * descriptor, the physical address, and the virtual
	 * address of the next descriptor in the h/w chain.
	 * This allows the HAL to look ahead to see if the
	 * hardware is done with a descriptor by checking the
	 * done bit in the following descriptor and the address
	 * of the current descriptor the DMA engine is working
	 * on. All this is necessary because of our use of
	 * a self-linked list to avoid rx overruns.
	 */
	ret = ath9k_hw_rxprocdesc(ah, ds, rs);
	if (ret == -EINPROGRESS) {
		struct ath_rx_status trs;
		struct ath_buf *tbf;
		struct ath_desc *tds;

		memset(&trs, 0, sizeof(trs));
		if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
			sc->rx.rxlink = NULL;
			return NULL;
		}

		tbf = list_entry(bf->list.next, struct ath_buf, list);

		/*
		 * On some hardware the descriptor status words could
		 * get corrupted, including the done bit. Because of
		 * this, check if the next descriptor's done bit is
		 * set or not.
		 *
		 * If the next descriptor's done bit is set, the current
		 * descriptor has been corrupted. Force s/w to discard
		 * this descriptor and continue...
		 */

		tds = tbf->bf_desc;
		ret = ath9k_hw_rxprocdesc(ah, tds, &trs);
		if (ret == -EINPROGRESS)
			return NULL;
	}

	if (!bf->bf_mpdu)
		return bf;

	/*
	 * Synchronize the DMA transfer with CPU before
	 * 1. accessing the frame
	 * 2. requeueing the same buffer to h/w
	 */
	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize,
				DMA_FROM_DEVICE);

	return bf;
}
/* Assumes you've already done the endian to CPU conversion */
static bool ath9k_rx_accept(struct ath_common *common,
			    struct ieee80211_hdr *hdr,
			    struct ieee80211_rx_status *rxs,
			    struct ath_rx_status *rx_stats,
			    bool *decrypt_error)
{
	struct ath_softc *sc = (struct ath_softc *) common->priv;
	bool is_mc, is_valid_tkip, strip_mic, mic_error;
	struct ath_hw *ah = common->ah;
	__le16 fc;
	u8 rx_status_len = ah->caps.rx_status_len;

	fc = hdr->frame_control;

	is_mc = !!is_multicast_ether_addr(hdr->addr1);
	is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID &&
		test_bit(rx_stats->rs_keyix, common->tkip_keymap);
	strip_mic = is_valid_tkip && ieee80211_is_data(fc) &&
		ieee80211_has_protected(fc) &&
		!(rx_stats->rs_status &
		  (ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC |
		   ATH9K_RXERR_KEYMISS));

	/*
	 * Key miss events are only relevant for pairwise keys where the
	 * descriptor does contain a valid key index. This has been observed
	 * mostly with CCMP encryption.
	 */
	if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID ||
	    !test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
		rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;

	if (!rx_stats->rs_datalen) {
		RX_STAT_INC(rx_len_err);
		return false;
	}

	/*
	 * rs_status follows rs_datalen so if rs_datalen is too large
	 * we can take a hint that hardware corrupted it, so ignore
	 * those frames.
	 */
	if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len)) {
		RX_STAT_INC(rx_len_err);
		return false;
	}

	/* Only use error bits from the last fragment */
	if (rx_stats->rs_more)
		return true;

	mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) &&
		!ieee80211_has_morefrags(fc) &&
		!(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
		(rx_stats->rs_status & ATH9K_RXERR_MIC);

	/*
	 * The rx_stats->rs_status will not be set until the end of the
	 * chained descriptors so it can be ignored if rs_more is set. The
	 * rs_more will be false at the last element of the chained
	 * descriptors.
	 */
	if (rx_stats->rs_status != 0) {
		u8 status_mask;

		if (rx_stats->rs_status & ATH9K_RXERR_CRC) {
			rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
			mic_error = false;
		}
		if (rx_stats->rs_status & ATH9K_RXERR_PHY)
			return false;

		if ((rx_stats->rs_status & ATH9K_RXERR_DECRYPT) ||
		    (!is_mc && (rx_stats->rs_status & ATH9K_RXERR_KEYMISS))) {
			*decrypt_error = true;
			mic_error = false;
		}

		/*
		 * Reject error frames with the exception of
		 * decryption and MIC failures. For monitor mode,
		 * we also ignore the CRC error.
		 */
		status_mask = ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
			      ATH9K_RXERR_KEYMISS;

		if (ah->is_monitoring && (sc->rx.rxfilter & FIF_FCSFAIL))
			status_mask |= ATH9K_RXERR_CRC;

		if (rx_stats->rs_status & ~status_mask)
			return false;
	}

	/*
	 * For unicast frames the MIC error bit can have false positives,
	 * so all MIC error reports need to be validated in software.
	 * False negatives are not common, so skip software verification
	 * if the hardware considers the MIC valid.
	 */
	if (strip_mic)
		rxs->flag |= RX_FLAG_MMIC_STRIPPED;
	else if (is_mc && mic_error)
		rxs->flag |= RX_FLAG_MMIC_ERROR;

	return true;
}
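/*
 * Rate decoding: the hardware sets bit 7 of rs_rate for HT (MCS)
 * frames, with the MCS index in the low seven bits; legacy rates are
 * matched against the current band's bitrate table, including the
 * short-preamble variants via hw_value_short.
 */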
static int ath9k_process_rate(struct ath_common *common,
			      struct ieee80211_hw *hw,
			      struct ath_rx_status *rx_stats,
			      struct ieee80211_rx_status *rxs)
{
	struct ieee80211_supported_band *sband;
	enum ieee80211_band band;
	unsigned int i = 0;
	struct ath_softc __maybe_unused *sc = common->priv;

	band = hw->conf.channel->band;
	sband = hw->wiphy->bands[band];

	if (rx_stats->rs_rate & 0x80) {
		/* HT rate */
		rxs->flag |= RX_FLAG_HT;
		if (rx_stats->rs_flags & ATH9K_RX_2040)
			rxs->flag |= RX_FLAG_40MHZ;
		if (rx_stats->rs_flags & ATH9K_RX_GI)
			rxs->flag |= RX_FLAG_SHORT_GI;
		rxs->rate_idx = rx_stats->rs_rate & 0x7f;
		return 0;
	}

	for (i = 0; i < sband->n_bitrates; i++) {
		if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
			rxs->rate_idx = i;
			return 0;
		}
		if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
			rxs->flag |= RX_FLAG_SHORTPRE;
			rxs->rate_idx = i;
			return 0;
		}
	}

	/*
	 * No valid hardware bitrate found -- we should not get here
	 * because hardware has already validated this frame as OK.
	 */
	ath_dbg(common, ANY,
		"unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
		rx_stats->rs_rate);
	RX_STAT_INC(rx_rate_err);
	return -EINVAL;
}

static void ath9k_process_rssi(struct ath_common *common,
			       struct ieee80211_hw *hw,
			       struct ieee80211_hdr *hdr,
			       struct ath_rx_status *rx_stats)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = common->ah;
	int last_rssi;
	int rssi = rx_stats->rs_rssi;

	if (!rx_stats->is_mybeacon ||
	    ((ah->opmode != NL80211_IFTYPE_STATION) &&
	     (ah->opmode != NL80211_IFTYPE_ADHOC)))
		return;

	if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr)
		ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);

	last_rssi = sc->last_rssi;
	if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
		rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
	if (rssi < 0)
		rssi = 0;

	/* Update Beacon RSSI, this is used by ANI. */
	ah->stats.avgbrssi = rssi;
}

/*
 * For Decrypt or Demic errors, we only mark packet status here and always
 * push the frame up to let mac80211 handle the actual error case, be it no
 * decryption key or real decryption error. This lets us keep statistics
 * there.
 */
static int ath9k_rx_skb_preprocess(struct ath_common *common,
				   struct ieee80211_hw *hw,
				   struct ieee80211_hdr *hdr,
				   struct ath_rx_status *rx_stats,
				   struct ieee80211_rx_status *rx_status,
				   bool *decrypt_error)
{
	struct ath_hw *ah = common->ah;

	/*
	 * everything but the rate is checked here, the rate check is done
	 * separately to avoid doing two lookups for a rate for each frame.
	 */
	if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
		return -EINVAL;

	/* Only use status info from the last fragment */
	if (rx_stats->rs_more)
		return 0;

	ath9k_process_rssi(common, hw, hdr, rx_stats);

	if (ath9k_process_rate(common, hw, rx_stats, rx_status))
		return -EINVAL;

	rx_status->band = hw->conf.channel->band;
	rx_status->freq = hw->conf.channel->center_freq;
	rx_status->signal = ah->noise + rx_stats->rs_rssi;
	rx_status->antenna = rx_stats->rs_antenna;
	rx_status->flag |= RX_FLAG_MACTIME_MPDU;
	if (rx_stats->rs_moreaggr)
		rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;

	return 0;
}
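/*
 * Post-processing below strips the 2-byte alignment padding the MAC
 * inserts after even-length 802.11 headers (e.g. for a 26-byte QoS
 * header, padsize = padpos & 3 = 2) and derives the decryption
 * status. When the descriptor's key index is invalid, the key ID is
 * recovered from the top two bits of the fourth IV byte
 * (skb->data[hdrlen + 3] >> 6) and checked against the keymap.
 */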
static void ath9k_rx_skb_postprocess(struct ath_common *common,
				     struct sk_buff *skb,
				     struct ath_rx_status *rx_stats,
				     struct ieee80211_rx_status *rxs,
				     bool decrypt_error)
{
	struct ath_hw *ah = common->ah;
	struct ieee80211_hdr *hdr;
	int hdrlen, padpos, padsize;
	u8 keyix;
	__le16 fc;

	/* see if any padding is done by the hw and remove it */
	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;
	padpos = ath9k_cmn_padpos(hdr->frame_control);

	/* The MAC header is padded to have 32-bit boundary if the
	 * packet payload is non-zero. The general calculation for
	 * padsize would take into account odd header lengths:
	 * padsize = (4 - padpos % 4) % 4; However, since only
	 * even-length headers are used, padding can only be 0 or 2
	 * bytes and we can optimize this a bit. In addition, we must
	 * not try to remove padding from short control frames that do
	 * not have payload. */
	padsize = padpos & 3;
	if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	keyix = rx_stats->rs_keyix;

	if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
	    ieee80211_has_protected(fc)) {
		rxs->flag |= RX_FLAG_DECRYPTED;
	} else if (ieee80211_has_protected(fc)
		   && !decrypt_error && skb->len >= hdrlen + 4) {
		keyix = skb->data[hdrlen + 3] >> 6;

		if (test_bit(keyix, common->keymap))
			rxs->flag |= RX_FLAG_DECRYPTED;
	}
	if (ah->sw_mgmt_crypto &&
	    (rxs->flag & RX_FLAG_DECRYPTED) &&
	    ieee80211_is_mgmt(fc))
		/* Use software decrypt for management frames. */
		rxs->flag &= ~RX_FLAG_DECRYPTED;
}
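/*
 * In the antenna diversity code below, the switch statements key on
 * (main_lna_conf << 4) | alt_lna_conf. Note that the enum values do
 * not match the LNA numbering: per the case comments, 0x21 decodes
 * to main = LNA1, alt = LNA2, and 0x13 to main = LNA2, alt = A+B.
 */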
static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb,
				      struct ath_hw_antcomb_conf ant_conf,
				      int main_rssi_avg)
{
	antcomb->quick_scan_cnt = 0;

	if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
		antcomb->rssi_lna2 = main_rssi_avg;
	else if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA1)
		antcomb->rssi_lna1 = main_rssi_avg;

	switch ((ant_conf.main_lna_conf << 4) | ant_conf.alt_lna_conf) {
	case 0x10: /* LNA2 A-B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
		break;
	case 0x20: /* LNA1 A-B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
		break;
	case 0x21: /* LNA1 LNA2 */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		break;
	case 0x12: /* LNA2 LNA1 */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		break;
	case 0x13: /* LNA2 A+B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
		break;
	case 0x23: /* LNA1 A+B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
		break;
	default:
		break;
	}
}

static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
				struct ath_hw_antcomb_conf *div_ant_conf,
				int main_rssi_avg, int alt_rssi_avg,
				int alt_ratio)
{
	/* alt_good */
	switch (antcomb->quick_scan_cnt) {
	case 0:
		/* set alt to main, and alt to first conf */
		div_ant_conf->main_lna_conf = antcomb->main_conf;
		div_ant_conf->alt_lna_conf = antcomb->first_quick_scan_conf;
		break;
	case 1:
		/* set alt to main, and alt to second conf */
		div_ant_conf->main_lna_conf = antcomb->main_conf;
		div_ant_conf->alt_lna_conf = antcomb->second_quick_scan_conf;
		antcomb->rssi_first = main_rssi_avg;
		antcomb->rssi_second = alt_rssi_avg;

		if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
			/* main is LNA1 */
			if (ath_is_alt_ant_ratio_better(alt_ratio,
					ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
					ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
					main_rssi_avg, alt_rssi_avg,
					antcomb->total_pkt_count))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
					ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
					ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
					main_rssi_avg, alt_rssi_avg,
					antcomb->total_pkt_count))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		} else {
			if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
			      (alt_rssi_avg > main_rssi_avg +
			       ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
			     (alt_rssi_avg > main_rssi_avg)) &&
			    (antcomb->total_pkt_count > 50))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		}
		break;
	case 2:
		antcomb->alt_good = false;
		antcomb->scan_not_start = false;
		antcomb->scan = false;
		antcomb->rssi_first = main_rssi_avg;
		antcomb->rssi_third = alt_rssi_avg;

		if (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1)
			antcomb->rssi_lna1 = alt_rssi_avg;
		else if (antcomb->second_quick_scan_conf ==
			 ATH_ANT_DIV_COMB_LNA2)
			antcomb->rssi_lna2 = alt_rssi_avg;
		else if (antcomb->second_quick_scan_conf ==
			 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2) {
			if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)
				antcomb->rssi_lna2 = main_rssi_avg;
			else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1)
				antcomb->rssi_lna1 = main_rssi_avg;
		}

		if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
		    ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)
			div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
		else
			div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;

		if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
					ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
					ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
					main_rssi_avg, alt_rssi_avg,
					antcomb->total_pkt_count))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
					ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
					ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
					main_rssi_avg, alt_rssi_avg,
					antcomb->total_pkt_count))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		} else {
			if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
			      (alt_rssi_avg > main_rssi_avg +
			       ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
			     (alt_rssi_avg > main_rssi_avg)) &&
			    (antcomb->total_pkt_count > 50))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		}

		/* set alt to the conf with maximum ratio */
		if (antcomb->first_ratio && antcomb->second_ratio) {
			if (antcomb->rssi_second > antcomb->rssi_third) {
				/* first alt */
				if ((antcomb->first_quick_scan_conf ==
				    ATH_ANT_DIV_COMB_LNA1) ||
				    (antcomb->first_quick_scan_conf ==
				    ATH_ANT_DIV_COMB_LNA2))
					/* Set alt LNA1 or LNA2 */
					if (div_ant_conf->main_lna_conf ==
					    ATH_ANT_DIV_COMB_LNA2)
						div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA1;
					else
						div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA2;
				else
					/* Set alt to A+B or A-B */
					div_ant_conf->alt_lna_conf =
						antcomb->first_quick_scan_conf;
			} else if ((antcomb->second_quick_scan_conf ==
				   ATH_ANT_DIV_COMB_LNA1) ||
				   (antcomb->second_quick_scan_conf ==
				   ATH_ANT_DIV_COMB_LNA2)) {
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			} else {
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->second_quick_scan_conf;
			}
		} else if (antcomb->first_ratio) {
			/* first alt */
			if ((antcomb->first_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->first_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->first_quick_scan_conf;
		} else if (antcomb->second_ratio) {
			/* second alt */
			if ((antcomb->second_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->second_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->second_quick_scan_conf;
		} else {
			/* main is largest */
			if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf = antcomb->main_conf;
		}
		break;
	default:
		break;
	}
}
static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
					  struct ath_ant_comb *antcomb,
					  int alt_ratio)
{
	if (ant_conf->div_group == 0) {
		/* Adjust the fast_div_bias based on main and alt lna conf */
		switch ((ant_conf->main_lna_conf << 4) |
			ant_conf->alt_lna_conf) {
		case 0x01: /* A-B LNA2 */
			ant_conf->fast_div_bias = 0x3b;
			break;
		case 0x02: /* A-B LNA1 */
			ant_conf->fast_div_bias = 0x3d;
			break;
		case 0x03: /* A-B A+B */
			ant_conf->fast_div_bias = 0x1;
			break;
		case 0x10: /* LNA2 A-B */
			ant_conf->fast_div_bias = 0x7;
			break;
		case 0x12: /* LNA2 LNA1 */
			ant_conf->fast_div_bias = 0x2;
			break;
		case 0x13: /* LNA2 A+B */
			ant_conf->fast_div_bias = 0x7;
			break;
		case 0x20: /* LNA1 A-B */
			ant_conf->fast_div_bias = 0x6;
			break;
		case 0x21: /* LNA1 LNA2 */
			ant_conf->fast_div_bias = 0x0;
			break;
		case 0x23: /* LNA1 A+B */
			ant_conf->fast_div_bias = 0x6;
			break;
		case 0x30: /* A+B A-B */
			ant_conf->fast_div_bias = 0x1;
			break;
		case 0x31: /* A+B LNA2 */
			ant_conf->fast_div_bias = 0x3b;
			break;
		case 0x32: /* A+B LNA1 */
			ant_conf->fast_div_bias = 0x3d;
			break;
		default:
			break;
		}
	} else if (ant_conf->div_group == 1) {
		/* Adjust the fast_div_bias based on main and alt_lna_conf */
		switch ((ant_conf->main_lna_conf << 4) |
			ant_conf->alt_lna_conf) {
		case 0x01: /* A-B LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x02: /* A-B LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x03: /* A-B A+B */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x10: /* LNA2 A-B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x3f;
			else
				ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x12: /* LNA2 LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x13: /* LNA2 A+B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x3f;
			else
				ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x20: /* LNA1 A-B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x3f;
			else
				ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x21: /* LNA1 LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x23: /* LNA1 A+B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x3f;
			else
				ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x30: /* A+B A-B */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x31: /* A+B LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x32: /* A+B LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		default:
			break;
		}
	} else if (ant_conf->div_group == 2) {
		/* Adjust the fast_div_bias based on main and alt_lna_conf */
		switch ((ant_conf->main_lna_conf << 4) |
			ant_conf->alt_lna_conf) {
		case 0x01: /* A-B LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x02: /* A-B LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x03: /* A-B A+B */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x10: /* LNA2 A-B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x1;
			else
				ant_conf->fast_div_bias = 0x2;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x12: /* LNA2 LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x13: /* LNA2 A+B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x1;
			else
				ant_conf->fast_div_bias = 0x2;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x20: /* LNA1 A-B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x1;
			else
				ant_conf->fast_div_bias = 0x2;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x21: /* LNA1 LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x23: /* LNA1 A+B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x1;
			else
				ant_conf->fast_div_bias = 0x2;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x30: /* A+B A-B */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x31: /* A+B LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x32: /* A+B LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		default:
			break;
		}
	}
}
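/*
 * ath_ant_comb_scan() below is invoked per completed RX descriptor
 * when combined fast antenna diversity is active. It accumulates
 * per-packet RSSI for the main and alt receive chains, and once
 * enough packets have been seen it either swaps the main/alt LNAs
 * directly or steps through a quick scan of the remaining LNA
 * combinations (A+B, A-B), picking the configuration with the best
 * ratio via ath_select_ant_div_from_quick_scan() above.
 */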
/* Antenna diversity and combining */
static void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
{
	struct ath_hw_antcomb_conf div_ant_conf;
	struct ath_ant_comb *antcomb = &sc->ant_comb;
	int alt_ratio = 0, alt_rssi_avg = 0, main_rssi_avg = 0, curr_alt_set;
	int curr_main_set;
	int main_rssi = rs->rs_rssi_ctl0;
	int alt_rssi = rs->rs_rssi_ctl1;
	int rx_ant_conf, main_ant_conf;
	bool short_scan = false;

	rx_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_CURRENT_SHIFT) &
		      ATH_ANT_RX_MASK;
	main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) &
			ATH_ANT_RX_MASK;

	/* Record packet only when both main_rssi and alt_rssi are positive */
	if (main_rssi > 0 && alt_rssi > 0) {
		antcomb->total_pkt_count++;
		antcomb->main_total_rssi += main_rssi;
		antcomb->alt_total_rssi += alt_rssi;
		if (main_ant_conf == rx_ant_conf)
			antcomb->main_recv_cnt++;
		else
			antcomb->alt_recv_cnt++;
	}

	/* Short scan check */
	if (antcomb->scan && antcomb->alt_good) {
		if (time_after(jiffies, antcomb->scan_start_time +
			       msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR)))
			short_scan = true;
		else
			if (antcomb->total_pkt_count ==
			    ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) {
				alt_ratio = ((antcomb->alt_recv_cnt * 100) /
					     antcomb->total_pkt_count);
				if (alt_ratio < ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
					short_scan = true;
			}
	}

	if (((antcomb->total_pkt_count < ATH_ANT_DIV_COMB_MAX_PKTCOUNT) ||
	     rs->rs_moreaggr) && !short_scan)
		return;

	if (antcomb->total_pkt_count) {
		alt_ratio = ((antcomb->alt_recv_cnt * 100) /
			     antcomb->total_pkt_count);
		main_rssi_avg = (antcomb->main_total_rssi /
				 antcomb->total_pkt_count);
		alt_rssi_avg = (antcomb->alt_total_rssi /
				antcomb->total_pkt_count);
	}

	ath9k_hw_antdiv_comb_conf_get(sc->sc_ah, &div_ant_conf);
	curr_alt_set = div_ant_conf.alt_lna_conf;
	curr_main_set = div_ant_conf.main_lna_conf;

	antcomb->count++;

	if (antcomb->count == ATH_ANT_DIV_COMB_MAX_COUNT) {
		if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
			ath_lnaconf_alt_good_scan(antcomb, div_ant_conf,
						  main_rssi_avg);
			antcomb->alt_good = true;
		} else {
			antcomb->alt_good = false;
		}

		antcomb->count = 0;
		antcomb->scan = true;
		antcomb->scan_not_start = true;
	}

	if (!antcomb->scan) {
		if (ath_ant_div_comb_alt_check(div_ant_conf.div_group,
					       alt_ratio, curr_main_set,
					       curr_alt_set, alt_rssi_avg,
					       main_rssi_avg)) {
			if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) {
				/* Switch main and alt LNA */
				div_ant_conf.main_lna_conf =
					ATH_ANT_DIV_COMB_LNA2;
				div_ant_conf.alt_lna_conf =
					ATH_ANT_DIV_COMB_LNA1;
			} else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) {
				div_ant_conf.main_lna_conf =
					ATH_ANT_DIV_COMB_LNA1;
				div_ant_conf.alt_lna_conf =
					ATH_ANT_DIV_COMB_LNA2;
			}

			goto div_comb_done;
		} else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) &&
			   (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) {
			/* Set alt to another LNA */
			if (curr_main_set == ATH_ANT_DIV_COMB_LNA2)
				div_ant_conf.alt_lna_conf =
					ATH_ANT_DIV_COMB_LNA1;
			else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1)
				div_ant_conf.alt_lna_conf =
					ATH_ANT_DIV_COMB_LNA2;

			goto div_comb_done;
		}

		if ((alt_rssi_avg < (main_rssi_avg +
				     div_ant_conf.lna1_lna2_delta)))
			goto div_comb_done;
	}

	if (!antcomb->scan_not_start) {
		switch (curr_alt_set) {
		case ATH_ANT_DIV_COMB_LNA2:
			antcomb->rssi_lna2 = alt_rssi_avg;
			antcomb->rssi_lna1 = main_rssi_avg;
			antcomb->scan = true;
			/* set to A+B */
			div_ant_conf.main_lna_conf =
				ATH_ANT_DIV_COMB_LNA1;
			div_ant_conf.alt_lna_conf =
				ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1:
			antcomb->rssi_lna1 = alt_rssi_avg;
			antcomb->rssi_lna2 = main_rssi_avg;
			antcomb->scan = true;
			/* set to A+B */
			div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
			div_ant_conf.alt_lna_conf =
				ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
			antcomb->rssi_add = alt_rssi_avg;
			antcomb->scan = true;
			/* set to A-B */
			div_ant_conf.alt_lna_conf =
				ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2:
			antcomb->rssi_sub = alt_rssi_avg;
			antcomb->scan = false;
			if (antcomb->rssi_lna2 >
			    (antcomb->rssi_lna1 +
			     ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
				/* use LNA2 as main LNA */
				if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
				    (antcomb->rssi_add > antcomb->rssi_sub)) {
					/* set to A+B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
				} else if (antcomb->rssi_sub >
					   antcomb->rssi_lna1) {
					/* set to A-B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
				} else {
					/* set to LNA1 */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				}
			} else {
				/* use LNA1 as main LNA */
				if ((antcomb->rssi_add > antcomb->rssi_lna2) &&
				    (antcomb->rssi_add > antcomb->rssi_sub)) {
					/* set to A+B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
				} else if (antcomb->rssi_sub >
					   antcomb->rssi_lna1) {
					/* set to A-B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
				} else {
					/* set to LNA2 */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				}
			}
			break;
		default:
			break;
		}
	} else {
		if (!antcomb->alt_good) {
			antcomb->scan_not_start = false;
			/* Set alt to another LNA */
			if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) {
				div_ant_conf.main_lna_conf =
					ATH_ANT_DIV_COMB_LNA2;
				div_ant_conf.alt_lna_conf =
					ATH_ANT_DIV_COMB_LNA1;
			} else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) {
				div_ant_conf.main_lna_conf =
					ATH_ANT_DIV_COMB_LNA1;
				div_ant_conf.alt_lna_conf =
					ATH_ANT_DIV_COMB_LNA2;
			}
			goto div_comb_done;
		}
	}

	ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf,
					   main_rssi_avg, alt_rssi_avg,
					   alt_ratio);

	antcomb->quick_scan_cnt++;

div_comb_done:
	ath_ant_div_conf_fast_divbias(&div_ant_conf, antcomb, alt_ratio);
	ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf);

	antcomb->scan_start_time = jiffies;
	antcomb->total_pkt_count = 0;
	antcomb->main_total_rssi = 0;
	antcomb->alt_total_rssi = 0;
	antcomb->main_recv_cnt = 0;
	antcomb->alt_recv_cnt = 0;
}
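/*
 * Main RX loop: pull completed buffers (EDMA FIFO or legacy ring),
 * validate and decorate them via ath9k_rx_skb_preprocess(), allocate
 * a replacement skb before handing each frame to mac80211, and relink
 * the buffer to the hardware. When called from ath_flushrecv() with
 * SC_OP_RXFLUSH set, completed frames are requeued without being
 * processed.
 */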
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
	struct ath_buf *bf;
	struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
	struct ieee80211_rx_status *rxs;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	int retval;
	bool decrypt_error = false;
	struct ath_rx_status rs;
	enum ath9k_rx_qtype qtype;
	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	int dma_type;
	u8 rx_status_len = ah->caps.rx_status_len;
	u64 tsf = 0;
	u32 tsf_lower = 0;
	unsigned long flags;

	if (edma)
		dma_type = DMA_BIDIRECTIONAL;
	else
		dma_type = DMA_FROM_DEVICE;

	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
	spin_lock_bh(&sc->rx.rxbuflock);

	tsf = ath9k_hw_gettsf64(ah);
	tsf_lower = tsf & 0xffffffff;

	do {
		/* If handling rx interrupt and flush is in progress => exit */
		if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
			break;

		memset(&rs, 0, sizeof(rs));
		if (edma)
			bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
		else
			bf = ath_get_next_rx_buf(sc, &rs);

		if (!bf)
			break;

		skb = bf->bf_mpdu;
		if (!skb)
			continue;

		/*
		 * Take frame header from the first fragment and RX status from
		 * the last one.
		 */
		if (sc->rx.frag)
			hdr_skb = sc->rx.frag;
		else
			hdr_skb = skb;

		hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
		rxs = IEEE80211_SKB_RXCB(hdr_skb);
		if (ieee80211_is_beacon(hdr->frame_control)) {
			RX_STAT_INC(rx_beacons);
			if (!is_zero_ether_addr(common->curbssid) &&
			    ether_addr_equal(hdr->addr3, common->curbssid))
				rs.is_mybeacon = true;
			else
				rs.is_mybeacon = false;
		} else {
			rs.is_mybeacon = false;
		}

		ath_debug_stat_rx(sc, &rs);

		/*
		 * If we're asked to flush receive queue, directly
		 * chain it back at the queue without processing it.
		 */
		if (sc->sc_flags & SC_OP_RXFLUSH) {
			RX_STAT_INC(rx_drop_rxflush);
			goto requeue_drop_frag;
		}

		memset(rxs, 0, sizeof(struct ieee80211_rx_status));
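		/*
		 * rs_tstamp holds only the low 32 bits of the TSF at RX
		 * time, so a 64-bit mactime is rebuilt from the current
		 * TSF and corrected by 2^32 when the timestamp and the
		 * TSF lie on opposite sides of a 32-bit wraparound.
		 */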
		rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
		if (rs.rs_tstamp > tsf_lower &&
		    unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
			rxs->mactime -= 0x100000000ULL;

		if (rs.rs_tstamp < tsf_lower &&
		    unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
			rxs->mactime += 0x100000000ULL;

		retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
						 rxs, &decrypt_error);
		if (retval)
			goto requeue_drop_frag;

		if (rs.is_mybeacon) {
			sc->hw_busy_count = 0;
			ath_start_rx_poll(sc, 3);
		}

		/* Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb */
		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize,
					      GFP_ATOMIC);

		/* If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
		 * skb and put it at the tail of the sc->rx.rxbuf list for
		 * processing. */
		if (!requeue_skb) {
			RX_STAT_INC(rx_oom_err);
			goto requeue_drop_frag;
		}

		/* Unmap the frame */
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 common->rx_bufsize,
				 dma_type);

		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
		if (ah->caps.rx_status_len)
			skb_pull(skb, ah->caps.rx_status_len);

		if (!rs.rs_more)
			ath9k_rx_skb_postprocess(common, hdr_skb, &rs,
						 rxs, decrypt_error);

		/* We will now give hardware our shiny new allocated skb */
		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
						 common->rx_bufsize,
						 dma_type);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(requeue_skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common, "dma_mapping_error() on RX\n");
			ieee80211_rx(hw, skb);
			break;
		}
		if (rs.rs_more) {
			RX_STAT_INC(rx_frags);
			/*
			 * rs_more indicates chained descriptors which can be
			 * used to link buffers together for a sort of
			 * scatter-gather operation.
			 */
			if (sc->rx.frag) {
				/* too many fragments - cannot handle frame */
				dev_kfree_skb_any(sc->rx.frag);
				dev_kfree_skb_any(skb);
				RX_STAT_INC(rx_too_many_frags_err);
				skb = NULL;
			}
			sc->rx.frag = skb;
			goto requeue;
		}

		if (sc->rx.frag) {
			int space = skb->len - skb_tailroom(hdr_skb);

			if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
				dev_kfree_skb(skb);
				RX_STAT_INC(rx_oom_err);
				goto requeue_drop_frag;
			}

			sc->rx.frag = NULL;

			skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
						  skb->len);
			dev_kfree_skb_any(skb);
			skb = hdr_skb;
		}

		if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
			/*
			 * change the default rx antenna if rx diversity
			 * chooses the other antenna 3 times in a row.
			 */
			if (sc->rx.defant != rs.rs_antenna) {
				if (++sc->rx.rxotherant >= 3)
					ath_setdefantenna(sc, rs.rs_antenna);
			} else {
				sc->rx.rxotherant = 0;
			}
		}

		if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
			skb_trim(skb, skb->len - 8);

		spin_lock_irqsave(&sc->sc_pm_lock, flags);

		if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
				     PS_WAIT_FOR_CAB |
				     PS_WAIT_FOR_PSPOLL_DATA)) ||
		    ath9k_check_auto_sleep(sc))
			ath_rx_ps(sc, skb, rs.is_mybeacon);
		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
1958 */ 1959 if (sc->rx.defant != rs.rs_antenna) { 1960 if (++sc->rx.rxotherant >= 3) 1961 ath_setdefantenna(sc, rs.rs_antenna); 1962 } else { 1963 sc->rx.rxotherant = 0; 1964 } 1965 1966 } 1967 1968 if (rxs->flag & RX_FLAG_MMIC_STRIPPED) 1969 skb_trim(skb, skb->len - 8); 1970 1971 spin_lock_irqsave(&sc->sc_pm_lock, flags); 1972 1973 if ((sc->ps_flags & (PS_WAIT_FOR_BEACON | 1974 PS_WAIT_FOR_CAB | 1975 PS_WAIT_FOR_PSPOLL_DATA)) || 1976 ath9k_check_auto_sleep(sc)) 1977 ath_rx_ps(sc, skb, rs.is_mybeacon); 1978 spin_unlock_irqrestore(&sc->sc_pm_lock, flags); 1979 1980 if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx == 3) 1981 ath_ant_comb_scan(sc, &rs); 1982 1983 ieee80211_rx(hw, skb); 1984 1985 requeue_drop_frag: 1986 if (sc->rx.frag) { 1987 dev_kfree_skb_any(sc->rx.frag); 1988 sc->rx.frag = NULL; 1989 } 1990 requeue: 1991 if (edma) { 1992 list_add_tail(&bf->list, &sc->rx.rxbuf); 1993 ath_rx_edma_buf_link(sc, qtype); 1994 } else { 1995 list_move_tail(&bf->list, &sc->rx.rxbuf); 1996 ath_rx_buf_link(sc, bf); 1997 if (!flush) 1998 ath9k_hw_rxena(ah); 1999 } 2000 } while (1); 2001 2002 spin_unlock_bh(&sc->rx.rxbuflock); 2003 2004 if (!(ah->imask & ATH9K_INT_RXEOL)) { 2005 ah->imask |= (ATH9K_INT_RXEOL | ATH9K_INT_RXORN); 2006 ath9k_hw_set_interrupts(ah); 2007 } 2008 2009 return 0; 2010 } 2011