/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define SKB_CB_ATHBUF(__skb) (*((struct ath_buf **)__skb->cb))

static inline bool ath_is_alt_ant_ratio_better(int alt_ratio, int maxdelta,
					       int mindelta, int main_rssi_avg,
					       int alt_rssi_avg, int pkt_count)
{
	return (((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
		 (alt_rssi_avg > main_rssi_avg + maxdelta)) ||
		(alt_rssi_avg > main_rssi_avg + mindelta)) && (pkt_count > 50);
}

static inline bool ath_ant_div_comb_alt_check(u8 div_group, int alt_ratio,
					      int curr_main_set,
					      int curr_alt_set,
					      int alt_rssi_avg,
					      int main_rssi_avg)
{
	bool result = false;

	switch (div_group) {
	case 0:
		if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
			result = true;
		break;
	case 1:
	case 2:
		if ((((curr_main_set == ATH_ANT_DIV_COMB_LNA2) &&
		      (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) &&
		      (alt_rssi_avg >= (main_rssi_avg - 5))) ||
		     ((curr_main_set == ATH_ANT_DIV_COMB_LNA1) &&
		      (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) &&
		      (alt_rssi_avg >= (main_rssi_avg - 2)))) &&
		    (alt_rssi_avg >= 4))
			result = true;
		else
			result = false;
		break;
	}

	return result;
}

static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
{
	return sc->ps_enabled &&
	       (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
}

/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct sk_buff *skb;

	ATH_RXBUF_RESET(bf);

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	BUG_ON(skb == NULL);
	ds->ds_vdata = skb->data;

	/*
	 * setup rx descriptors. The rx_bufsize here tells the hardware
	 * how much data it can DMA to us and that we are prepared
	 * to process.
	 */
	ath9k_hw_setuprxdesc(ah, ds, common->rx_bufsize, 0);

	if (sc->rx.rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->rx.rxlink = bf->bf_daddr;

	sc->rx.rxlink = &ds->ds_link;
}
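
/*
 * Editorial sketch (not part of the original driver, illustrative only):
 * the legacy DMA path keeps descriptors in a software-managed chain, and
 * relinking a recycled buffer amounts to three steps:
 *
 *	ds->ds_link = 0;		// new tail terminates the chain
 *	*sc->rx.rxlink = bf->bf_daddr;	// old tail now points at us
 *	sc->rx.rxlink = &ds->ds_link;	// we become the tail
 *
 * Leaving the tail unlinked, instead of self-linked as older drivers did,
 * is what avoids the false block-ack acknowledgements described above.
 */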

static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(sc->sc_ah, antenna);
	sc->rx.defant = antenna;
	sc->rx.rxotherant = 0;
}

static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	ath_hw_setbssidmask(common);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}

static bool ath_rx_edma_buf_link(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;
	struct ath_buf *bf;

	rx_edma = &sc->rx.rx_edma[qtype];
	if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
		return false;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	list_del_init(&bf->list);

	skb = bf->bf_mpdu;

	ATH_RXBUF_RESET(bf);
	memset(skb->data, 0, ah->caps.rx_status_len);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   ah->caps.rx_status_len, DMA_TO_DEVICE);

	SKB_CB_ATHBUF(skb) = bf;
	ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
	skb_queue_tail(&rx_edma->rx_fifo, skb);

	return true;
}
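
/*
 * Editorial note: SKB_CB_ATHBUF() (defined above) keeps the owning
 * ath_buf pointer in the skb's control buffer, so a completed skb taken
 * off the EDMA FIFO can be mapped back to its ath_buf in O(1):
 *
 *	SKB_CB_ATHBUF(skb) = bf;	// when queueing to the FIFO
 *	bf = SKB_CB_ATHBUF(skb);	// when completing or unwinding
 */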

static void ath_rx_addbuffer_edma(struct ath_softc *sc,
				  enum ath9k_rx_qtype qtype, int size)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_buf *bf, *tbf;

	if (list_empty(&sc->rx.rxbuf)) {
		ath_dbg(common, QUEUE, "No free rx buf available\n");
		return;
	}

	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list)
		if (!ath_rx_edma_buf_link(sc, qtype))
			break;
}

static void ath_rx_remove_buffer(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_buf *bf;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;

	rx_edma = &sc->rx.rx_edma[qtype];

	while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}
}

static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf;

	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		if (bf->bf_mpdu) {
			dma_unmap_single(sc->dev, bf->bf_buf_addr,
					 common->rx_bufsize,
					 DMA_BIDIRECTIONAL);
			dev_kfree_skb_any(bf->bf_mpdu);
			bf->bf_buf_addr = 0;
			bf->bf_mpdu = NULL;
		}
	}

	INIT_LIST_HEAD(&sc->rx.rxbuf);

	kfree(sc->rx.rx_bufptr);
	sc->rx.rx_bufptr = NULL;
}

static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
{
	skb_queue_head_init(&rx_edma->rx_fifo);
	rx_edma->rx_fifo_hwsize = size;
}

static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0, i;
	u32 size;

	ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
				ah->caps.rx_status_len);

	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
			       ah->caps.rx_lp_qdepth);
	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
			       ah->caps.rx_hp_qdepth);

	size = sizeof(struct ath_buf) * nbufs;
	bf = kzalloc(size, GFP_KERNEL);
	if (!bf)
		return -ENOMEM;

	INIT_LIST_HEAD(&sc->rx.rxbuf);
	sc->rx.rx_bufptr = bf;

	for (i = 0; i < nbufs; i++, bf++) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
		if (!skb) {
			error = -ENOMEM;
			goto rx_init_fail;
		}

		memset(skb->data, 0, common->rx_bufsize);
		bf->bf_mpdu = skb;

		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common,
				"dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto rx_init_fail;
		}

		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}

	return 0;

rx_init_fail:
	ath_rx_edma_cleanup(sc);
	return error;
}
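
/*
 * Editorial note on mapping directions: the EDMA path above maps buffers
 * DMA_BIDIRECTIONAL because the CPU both reads the RX status the hardware
 * writes into the buffer and zeroes/syncs that status area back to the
 * device when recycling it; the legacy path in ath_rx_init() below gets
 * away with DMA_FROM_DEVICE since its status lives in the descriptor.
 */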

static void ath_edma_start_recv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxbuflock);

	ath9k_hw_rxena(sc->sc_ah);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);

	ath_opmode_init(sc);

	ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_OFFCHANNEL));

	spin_unlock_bh(&sc->rx.rxbuflock);
}

static void ath_edma_stop_recv(struct ath_softc *sc)
{
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
}

int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	spin_lock_init(&sc->sc_pcu_lock);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_lock_init(&sc->rx.rxbuflock);

	common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
			     sc->sc_ah->caps.rx_status_len;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		return ath_rx_edma_init(sc, nbufs);
	} else {
		ath_dbg(common, CONFIG, "cachelsz %u rxbufsize %u\n",
			common->cachelsz, common->rx_bufsize);

		/* Initialize rx descriptors */

		error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
					  "rx", nbufs, 1, 0);
		if (error != 0) {
			ath_err(common,
				"failed to allocate rx descriptors: %d\n",
				error);
			goto err;
		}

		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = ath_rxbuf_alloc(common, common->rx_bufsize,
					      GFP_KERNEL);
			if (skb == NULL) {
				error = -ENOMEM;
				goto err;
			}

			bf->bf_mpdu = skb;
			bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
							 common->rx_bufsize,
							 DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(sc->dev,
						       bf->bf_buf_addr))) {
				dev_kfree_skb_any(skb);
				bf->bf_mpdu = NULL;
				bf->bf_buf_addr = 0;
				ath_err(common,
					"dma_mapping_error() on RX init\n");
				error = -ENOMEM;
				goto err;
			}
		}
		sc->rx.rxlink = NULL;
	}

err:
	if (error)
		ath_rx_cleanup(sc);

	return error;
}

void ath_rx_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_rx_edma_cleanup(sc);
		return;
	} else {
		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = bf->bf_mpdu;
			if (skb) {
				dma_unmap_single(sc->dev, bf->bf_buf_addr,
						 common->rx_bufsize,
						 DMA_FROM_DEVICE);
				dev_kfree_skb(skb);
				bf->bf_buf_addr = 0;
				bf->bf_mpdu = NULL;
			}
		}

		if (sc->rx.rxdma.dd_desc_len != 0)
			ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
	}
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 *   - when scanning
 */
u32 ath_calcrxfilter(struct ath_softc *sc)
{
	u32 rfilt;

	rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	if (sc->rx.rxfilter & FIF_PROBE_REQ)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/*
	 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
	 * mode interface or when in monitor mode. AP mode does not need this
	 * since it receives all in-BSS frames anyway.
	 */
	if (sc->sc_ah->is_monitoring)
		rfilt |= ATH9K_RX_FILTER_PROM;

	if (sc->rx.rxfilter & FIF_CONTROL)
		rfilt |= ATH9K_RX_FILTER_CONTROL;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
	    (sc->nvifs <= 1) &&
	    !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
		rfilt |= ATH9K_RX_FILTER_MYBEACON;
	else
		rfilt |= ATH9K_RX_FILTER_BEACON;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
	    (sc->rx.rxfilter & FIF_PSPOLL))
		rfilt |= ATH9K_RX_FILTER_PSPOLL;

	if (conf_is_ht(&sc->hw->conf))
		rfilt |= ATH9K_RX_FILTER_COMP_BAR;

	if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
		/* The following may also be needed for other older chips */
		if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
			rfilt |= ATH9K_RX_FILTER_PROM;
		rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
	}

	return rfilt;
}
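
/*
 * Worked example (editorial, illustrative): a single managed-mode station
 * with no special filter flags typically ends up with
 *
 *	rfilt = UCAST | BCAST | MCAST | MYBEACON
 *
 * (plus COMP_BAR on HT channels), while adding a second vif or
 * FIF_OTHER_BSS swaps MYBEACON for BEACON and adds MCAST_BCAST_ALL as
 * computed above.
 */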

int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_edma_start_recv(sc);
		return 0;
	}

	spin_lock_bh(&sc->rx.rxbuflock);
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	sc->rx.rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);

start_recv:
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_OFFCHANNEL));

	spin_unlock_bh(&sc->rx.rxbuflock);

	return 0;
}

bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	bool stopped, reset = false;

	spin_lock_bh(&sc->rx.rxbuflock);
	ath9k_hw_abortpcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	stopped = ath9k_hw_stopdmarecv(ah, &reset);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_edma_stop_recv(sc);
	else
		sc->rx.rxlink = NULL;
	spin_unlock_bh(&sc->rx.rxbuflock);

	if (!(ah->ah_flags & AH_UNPLUGGED) &&
	    unlikely(!stopped)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Could not stop RX, we could be confusing the DMA engine when we start RX up\n");
		ATH_DBG_WARN_ON_ONCE(!stopped);
	}
	return stopped && !reset;
}

void ath_flushrecv(struct ath_softc *sc)
{
	sc->sc_flags |= SC_OP_RXFLUSH;
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_rx_tasklet(sc, 1, true);
	ath_rx_tasklet(sc, 1, false);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
}

static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
	/* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
	struct ieee80211_mgmt *mgmt;
	u8 *pos, *end, id, elen;
	struct ieee80211_tim_ie *tim;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	pos = mgmt->u.beacon.variable;
	end = skb->data + skb->len;

	while (pos + 2 < end) {
		id = *pos++;
		elen = *pos++;
		if (pos + elen > end)
			break;

		if (id == WLAN_EID_TIM) {
			if (elen < sizeof(*tim))
				break;
			tim = (struct ieee80211_tim_ie *) pos;
			if (tim->dtim_count != 0)
				break;
			return tim->bitmap_ctrl & 0x01;
		}

		pos += elen;
	}

	return false;
}
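
/*
 * TIM layout reminder (editorial): the element parsed above is
 *
 *	| DTIM count | DTIM period | bitmap control | partial virtual bitmap |
 *
 * Bit 0 of the bitmap control is the traffic indicator for buffered
 * broadcast/multicast, and it is only acted upon on an actual DTIM beacon
 * (dtim_count == 0), which is exactly what the function checks.
 */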

static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	if (skb->len < 24 + 8 + 2 + 2)
		return;

	sc->ps_flags &= ~PS_WAIT_FOR_BEACON;

	if (sc->ps_flags & PS_BEACON_SYNC) {
		sc->ps_flags &= ~PS_BEACON_SYNC;
		ath_dbg(common, PS,
			"Reconfigure Beacon timers based on timestamp from the AP\n");
		ath_set_beacon(sc);
	}

	if (ath_beacon_dtim_pending_cab(skb)) {
		/*
		 * Remain awake waiting for buffered broadcast/multicast
		 * frames. If the last broadcast/multicast frame is not
		 * received properly, the next beacon frame will work as
		 * a backup trigger for returning into NETWORK SLEEP state,
		 * so we are waiting for it as well.
		 */
		ath_dbg(common, PS,
			"Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n");
		sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
		return;
	}

	if (sc->ps_flags & PS_WAIT_FOR_CAB) {
		/*
		 * This can happen if a broadcast frame is dropped or the AP
		 * fails to send a frame indicating that all CAB frames have
		 * been delivered.
		 */
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
		ath_dbg(common, PS, "PS wait for CAB frames timed out\n");
	}
}
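
/*
 * PS bookkeeping sketch (editorial): receiving our own beacon clears
 * PS_WAIT_FOR_BEACON; a DTIM indicating buffered traffic sets
 * PS_WAIT_FOR_CAB (and re-arms PS_WAIT_FOR_BEACON as a backup trigger);
 * ath_rx_ps() below clears PS_WAIT_FOR_CAB once a multicast frame without
 * the "more data" bit arrives.
 */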

static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon)
{
	struct ieee80211_hdr *hdr;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Process Beacon and CAB receive in PS state */
	if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
	    && mybeacon)
		ath_rx_ps_beacon(sc, skb);
	else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
		 (ieee80211_is_data(hdr->frame_control) ||
		  ieee80211_is_action(hdr->frame_control)) &&
		 is_multicast_ether_addr(hdr->addr1) &&
		 !ieee80211_has_moredata(hdr->frame_control)) {
		/*
		 * No more broadcast/multicast frames to be received at this
		 * point.
		 */
		sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
		ath_dbg(common, PS,
			"All PS CAB frames received, back to sleep\n");
	} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
		   !is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_morefrags(hdr->frame_control)) {
		sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
		ath_dbg(common, PS,
			"Going back to sleep after having received PS-Poll data (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}
}

static bool ath_edma_get_buffers(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype,
				 struct ath_rx_status *rs,
				 struct ath_buf **dest)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int ret;

	skb = skb_peek(&rx_edma->rx_fifo);
	if (!skb)
		return false;

	bf = SKB_CB_ATHBUF(skb);
	BUG_ON(!bf);

	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize, DMA_FROM_DEVICE);

	ret = ath9k_hw_process_rxdesc_edma(ah, rs, skb->data);
	if (ret == -EINPROGRESS) {
		/* let the device gain the buffer again */
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
					   common->rx_bufsize,
					   DMA_FROM_DEVICE);
		return false;
	}

	__skb_unlink(skb, &rx_edma->rx_fifo);
	if (ret == -EINVAL) {
		/* corrupt descriptor, skip this one and the following one */
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);

		skb = skb_peek(&rx_edma->rx_fifo);
		if (skb) {
			bf = SKB_CB_ATHBUF(skb);
			BUG_ON(!bf);

			__skb_unlink(skb, &rx_edma->rx_fifo);
			list_add_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_edma_buf_link(sc, qtype);
		} else {
			bf = NULL;
		}
	}

	*dest = bf;
	return true;
}

static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
						struct ath_rx_status *rs,
						enum ath9k_rx_qtype qtype)
{
	struct ath_buf *bf = NULL;

	while (ath_edma_get_buffers(sc, qtype, rs, &bf)) {
		if (!bf)
			continue;

		return bf;
	}
	return NULL;
}
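
/*
 * Return-code convention (editorial): ath9k_hw_process_rxdesc_edma()
 * reports -EINPROGRESS while the hardware still owns the buffer and
 * -EINVAL for a corrupt status descriptor; in the latter case the code
 * above recycles both the corrupt buffer and the one following it, since
 * the entry after a corrupt descriptor is suspect as well.
 */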

static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
					   struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct ath_buf *bf;
	int ret;

	if (list_empty(&sc->rx.rxbuf)) {
		sc->rx.rxlink = NULL;
		return NULL;
	}

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ds = bf->bf_desc;

	/*
	 * Must provide the virtual address of the current
	 * descriptor, the physical address, and the virtual
	 * address of the next descriptor in the h/w chain.
	 * This allows the HAL to look ahead to see if the
	 * hardware is done with a descriptor by checking the
	 * done bit in the following descriptor and the address
	 * of the current descriptor the DMA engine is working
	 * on. All this is necessary because of our use of
	 * a self-linked list to avoid rx overruns.
	 */
	ret = ath9k_hw_rxprocdesc(ah, ds, rs);
	if (ret == -EINPROGRESS) {
		struct ath_rx_status trs;
		struct ath_buf *tbf;
		struct ath_desc *tds;

		memset(&trs, 0, sizeof(trs));
		if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
			sc->rx.rxlink = NULL;
			return NULL;
		}

		tbf = list_entry(bf->list.next, struct ath_buf, list);

		/*
		 * On some hardware the descriptor status words could
		 * get corrupted, including the done bit. Because of
		 * this, check if the next descriptor's done bit is
		 * set or not.
		 *
		 * If the next descriptor's done bit is set, the current
		 * descriptor has been corrupted. Force s/w to discard
		 * this descriptor and continue...
		 */
		tds = tbf->bf_desc;
		ret = ath9k_hw_rxprocdesc(ah, tds, &trs);
		if (ret == -EINPROGRESS)
			return NULL;
	}

	if (!bf->bf_mpdu)
		return bf;

	/*
	 * Synchronize the DMA transfer with CPU before
	 * 1. accessing the frame
	 * 2. requeueing the same buffer to h/w
	 */
	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize,
				DMA_FROM_DEVICE);

	return bf;
}

/* Assumes you've already done the endian to CPU conversion */
static bool ath9k_rx_accept(struct ath_common *common,
			    struct ieee80211_hdr *hdr,
			    struct ieee80211_rx_status *rxs,
			    struct ath_rx_status *rx_stats,
			    bool *decrypt_error)
{
	struct ath_softc *sc = (struct ath_softc *) common->priv;
	bool is_mc, is_valid_tkip, strip_mic, mic_error;
	struct ath_hw *ah = common->ah;
	__le16 fc;
	u8 rx_status_len = ah->caps.rx_status_len;

	fc = hdr->frame_control;

	is_mc = !!is_multicast_ether_addr(hdr->addr1);
	is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID &&
		test_bit(rx_stats->rs_keyix, common->tkip_keymap);
	strip_mic = is_valid_tkip && ieee80211_is_data(fc) &&
		ieee80211_has_protected(fc) &&
		!(rx_stats->rs_status &
		  (ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC |
		   ATH9K_RXERR_KEYMISS));

	/*
	 * Key miss events are only relevant for pairwise keys where the
	 * descriptor does contain a valid key index. This has been observed
	 * mostly with CCMP encryption.
	 */
	if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID)
		rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;

	if (!rx_stats->rs_datalen) {
		RX_STAT_INC(rx_len_err);
		return false;
	}

	/*
	 * rs_status follows rs_datalen so if rs_datalen is too large
	 * we can take a hint that hardware corrupted it, so ignore
	 * those frames.
	 */
	if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len)) {
		RX_STAT_INC(rx_len_err);
		return false;
	}

	/* Only use error bits from the last fragment */
	if (rx_stats->rs_more)
		return true;

	mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) &&
		!ieee80211_has_morefrags(fc) &&
		!(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
		(rx_stats->rs_status & ATH9K_RXERR_MIC);

	/*
	 * The rx_stats->rs_status will not be set until the end of the
	 * chained descriptors so it can be ignored if rs_more is set. The
	 * rs_more will be false at the last element of the chained
	 * descriptors.
	 */
	if (rx_stats->rs_status != 0) {
		u8 status_mask;

		if (rx_stats->rs_status & ATH9K_RXERR_CRC) {
			rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
			mic_error = false;
		}
		if (rx_stats->rs_status & ATH9K_RXERR_PHY)
			return false;

		if ((rx_stats->rs_status & ATH9K_RXERR_DECRYPT) ||
		    (!is_mc && (rx_stats->rs_status & ATH9K_RXERR_KEYMISS))) {
			*decrypt_error = true;
			mic_error = false;
		}

		/*
		 * Reject error frames with the exception of
		 * decryption and MIC failures. For monitor mode,
		 * we also ignore the CRC error.
		 */
		status_mask = ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
			      ATH9K_RXERR_KEYMISS;

		if (ah->is_monitoring && (sc->rx.rxfilter & FIF_FCSFAIL))
			status_mask |= ATH9K_RXERR_CRC;

		if (rx_stats->rs_status & ~status_mask)
			return false;
	}

	/*
	 * For unicast frames the MIC error bit can have false positives,
	 * so all MIC error reports need to be validated in software.
	 * False negatives are not common, so skip software verification
	 * if the hardware considers the MIC valid.
	 */
	if (strip_mic)
		rxs->flag |= RX_FLAG_MMIC_STRIPPED;
	else if (is_mc && mic_error)
		rxs->flag |= RX_FLAG_MMIC_ERROR;

	return true;
}
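
/*
 * Illustrative example (editorial): a frame whose only error bit is
 * ATH9K_RXERR_DECRYPT passes the status_mask test above and is delivered
 * with *decrypt_error set, letting mac80211 account for the failure,
 * while a frame with ATH9K_RXERR_PHY set is dropped outright.
 */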

static int ath9k_process_rate(struct ath_common *common,
			      struct ieee80211_hw *hw,
			      struct ath_rx_status *rx_stats,
			      struct ieee80211_rx_status *rxs)
{
	struct ieee80211_supported_band *sband;
	enum ieee80211_band band;
	unsigned int i = 0;
	struct ath_softc __maybe_unused *sc = common->priv;

	band = hw->conf.channel->band;
	sband = hw->wiphy->bands[band];

	if (rx_stats->rs_rate & 0x80) {
		/* HT rate */
		rxs->flag |= RX_FLAG_HT;
		if (rx_stats->rs_flags & ATH9K_RX_2040)
			rxs->flag |= RX_FLAG_40MHZ;
		if (rx_stats->rs_flags & ATH9K_RX_GI)
			rxs->flag |= RX_FLAG_SHORT_GI;
		rxs->rate_idx = rx_stats->rs_rate & 0x7f;
		return 0;
	}

	for (i = 0; i < sband->n_bitrates; i++) {
		if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
			rxs->rate_idx = i;
			return 0;
		}
		if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
			rxs->flag |= RX_FLAG_SHORTPRE;
			rxs->rate_idx = i;
			return 0;
		}
	}

	/*
	 * No valid hardware bitrate found -- we should not get here
	 * because hardware has already validated this frame as OK.
	 */
	ath_dbg(common, ANY,
		"unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
		rx_stats->rs_rate);
	RX_STAT_INC(rx_rate_err);
	return -EINVAL;
}

static void ath9k_process_rssi(struct ath_common *common,
			       struct ieee80211_hw *hw,
			       struct ieee80211_hdr *hdr,
			       struct ath_rx_status *rx_stats)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = common->ah;
	int last_rssi;
	int rssi = rx_stats->rs_rssi;

	if (!rx_stats->is_mybeacon ||
	    ((ah->opmode != NL80211_IFTYPE_STATION) &&
	     (ah->opmode != NL80211_IFTYPE_ADHOC)))
		return;

	if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr)
		ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);

	last_rssi = sc->last_rssi;
	if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
		rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
	if (rssi < 0)
		rssi = 0;

	/* Update Beacon RSSI, this is used by ANI. */
	ah->stats.avgbrssi = rssi;
}
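
/*
 * Editorial sketch: ATH_RSSI_LPF() maintains an exponentially weighted
 * moving average of the beacon RSSI in fixed point (scaled by
 * ATH_RSSI_EP_MULTIPLIER), and ATH_EP_RND() rounds it back to plain dB
 * before it is handed to ANI through ah->stats.avgbrssi.
 */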

/*
 * For Decrypt or Demic errors, we only mark packet status here and always
 * push the frame up to let mac80211 handle the actual error case, be it no
 * decryption key or a real decryption error. This lets us keep statistics
 * there.
 */
static int ath9k_rx_skb_preprocess(struct ath_common *common,
				   struct ieee80211_hw *hw,
				   struct ieee80211_hdr *hdr,
				   struct ath_rx_status *rx_stats,
				   struct ieee80211_rx_status *rx_status,
				   bool *decrypt_error)
{
	struct ath_hw *ah = common->ah;

	/*
	 * everything but the rate is checked here, the rate check is done
	 * separately to avoid doing two lookups for a rate for each frame.
	 */
	if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
		return -EINVAL;

	/* Only use status info from the last fragment */
	if (rx_stats->rs_more)
		return 0;

	ath9k_process_rssi(common, hw, hdr, rx_stats);

	if (ath9k_process_rate(common, hw, rx_stats, rx_status))
		return -EINVAL;

	rx_status->band = hw->conf.channel->band;
	rx_status->freq = hw->conf.channel->center_freq;
	rx_status->signal = ah->noise + rx_stats->rs_rssi;
	rx_status->antenna = rx_stats->rs_antenna;
	rx_status->flag |= RX_FLAG_MACTIME_MPDU;
	if (rx_stats->rs_moreaggr)
		rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;

	return 0;
}

static void ath9k_rx_skb_postprocess(struct ath_common *common,
				     struct sk_buff *skb,
				     struct ath_rx_status *rx_stats,
				     struct ieee80211_rx_status *rxs,
				     bool decrypt_error)
{
	struct ath_hw *ah = common->ah;
	struct ieee80211_hdr *hdr;
	int hdrlen, padpos, padsize;
	u8 keyix;
	__le16 fc;

	/* see if any padding is done by the hw and remove it */
	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;
	padpos = ath9k_cmn_padpos(hdr->frame_control);

	/* The MAC header is padded to have 32-bit boundary if the
	 * packet payload is non-zero. The general calculation for
	 * padsize would take into account odd header lengths:
	 * padsize = (4 - padpos % 4) % 4; However, since only
	 * even-length headers are used, padding can only be 0 or 2
	 * bytes and we can optimize this a bit. In addition, we must
	 * not try to remove padding from short control frames that do
	 * not have payload. */
	padsize = padpos & 3;
	if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	keyix = rx_stats->rs_keyix;

	if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
	    ieee80211_has_protected(fc)) {
		rxs->flag |= RX_FLAG_DECRYPTED;
	} else if (ieee80211_has_protected(fc)
		   && !decrypt_error && skb->len >= hdrlen + 4) {
		keyix = skb->data[hdrlen + 3] >> 6;

		if (test_bit(keyix, common->keymap))
			rxs->flag |= RX_FLAG_DECRYPTED;
	}
	if (ah->sw_mgmt_crypto &&
	    (rxs->flag & RX_FLAG_DECRYPTED) &&
	    ieee80211_is_mgmt(fc))
		/* Use software decrypt for management frames. */
		rxs->flag &= ~RX_FLAG_DECRYPTED;
}
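
/*
 * Padding example (editorial, illustrative): a QoS data header has
 * padpos 26, so padsize = 26 & 3 = 2 and the two pad bytes the hardware
 * inserted after the header are removed by the memmove()/skb_pull() pair
 * above; a plain 24-byte header yields padsize 0 and is left untouched.
 */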

static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb,
				      struct ath_hw_antcomb_conf ant_conf,
				      int main_rssi_avg)
{
	antcomb->quick_scan_cnt = 0;

	if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
		antcomb->rssi_lna2 = main_rssi_avg;
	else if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA1)
		antcomb->rssi_lna1 = main_rssi_avg;

	switch ((ant_conf.main_lna_conf << 4) | ant_conf.alt_lna_conf) {
	case 0x10: /* LNA2 A-B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
		break;
	case 0x20: /* LNA1 A-B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
		break;
	case 0x21: /* LNA1 LNA2 */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		break;
	case 0x12: /* LNA2 LNA1 */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		break;
	case 0x13: /* LNA2 A+B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
		break;
	case 0x23: /* LNA1 A+B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
		break;
	default:
		break;
	}
}
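
/*
 * Encoding note (editorial): the switch keys used by the diversity code
 * pack the main LNA configuration into the high nibble and the alternate
 * one into the low nibble, so 0x21 reads as main = LNA1, alt = LNA2,
 * matching the case comments.
 */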

static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
				      struct ath_hw_antcomb_conf *div_ant_conf,
				      int main_rssi_avg, int alt_rssi_avg,
				      int alt_ratio)
{
	/* alt_good */
	switch (antcomb->quick_scan_cnt) {
	case 0:
		/* set main to the stored main conf, alt to the first
		 * quick-scan conf */
		div_ant_conf->main_lna_conf = antcomb->main_conf;
		div_ant_conf->alt_lna_conf = antcomb->first_quick_scan_conf;
		break;
	case 1:
		/* set main to the stored main conf, alt to the second
		 * quick-scan conf */
		div_ant_conf->main_lna_conf = antcomb->main_conf;
		div_ant_conf->alt_lna_conf = antcomb->second_quick_scan_conf;
		antcomb->rssi_first = main_rssi_avg;
		antcomb->rssi_second = alt_rssi_avg;

		if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
			/* main is LNA1 */
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		} else {
			if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
			      (alt_rssi_avg > main_rssi_avg +
			       ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
			     (alt_rssi_avg > main_rssi_avg)) &&
			    (antcomb->total_pkt_count > 50))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		}
		break;
	case 2:
		antcomb->alt_good = false;
		antcomb->scan_not_start = false;
		antcomb->scan = false;
		antcomb->rssi_first = main_rssi_avg;
		antcomb->rssi_third = alt_rssi_avg;

		if (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1)
			antcomb->rssi_lna1 = alt_rssi_avg;
		else if (antcomb->second_quick_scan_conf ==
			 ATH_ANT_DIV_COMB_LNA2)
			antcomb->rssi_lna2 = alt_rssi_avg;
		else if (antcomb->second_quick_scan_conf ==
			 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2) {
			if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)
				antcomb->rssi_lna2 = main_rssi_avg;
			else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1)
				antcomb->rssi_lna1 = main_rssi_avg;
		}

		if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
		    ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)
			div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
		else
			div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;

		if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		} else {
			if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
			      (alt_rssi_avg > main_rssi_avg +
			       ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
			     (alt_rssi_avg > main_rssi_avg)) &&
			    (antcomb->total_pkt_count > 50))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		}

		/* set alt to the conf with maximum ratio */
		if (antcomb->first_ratio && antcomb->second_ratio) {
			if (antcomb->rssi_second > antcomb->rssi_third) {
				/* first alt */
				if ((antcomb->first_quick_scan_conf ==
				    ATH_ANT_DIV_COMB_LNA1) ||
				    (antcomb->first_quick_scan_conf ==
				    ATH_ANT_DIV_COMB_LNA2))
					/* Set alt LNA1 or LNA2 */
					if (div_ant_conf->main_lna_conf ==
					    ATH_ANT_DIV_COMB_LNA2)
						div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA1;
					else
						div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA2;
				else
					/* Set alt to A+B or A-B */
					div_ant_conf->alt_lna_conf =
						antcomb->first_quick_scan_conf;
			} else if ((antcomb->second_quick_scan_conf ==
				   ATH_ANT_DIV_COMB_LNA1) ||
				   (antcomb->second_quick_scan_conf ==
				   ATH_ANT_DIV_COMB_LNA2)) {
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			} else {
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->second_quick_scan_conf;
			}
		} else if (antcomb->first_ratio) {
			/* first alt */
			if ((antcomb->first_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->first_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->first_quick_scan_conf;
		} else if (antcomb->second_ratio) {
			/* second alt */
			if ((antcomb->second_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->second_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->second_quick_scan_conf;
		} else {
			/* main is largest */
			if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf = antcomb->main_conf;
		}
		break;
	default:
		break;
	}
}
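
/*
 * Quick-scan recap (editorial): quick_scan_cnt steps through three
 * states; steps 0 and 1 measure the two candidate alternate
 * configurations, and step 2 commits whichever candidate won its ratio
 * test (first_ratio/second_ratio), falling back to the stronger LNA when
 * neither did.
 */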

static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
					  struct ath_ant_comb *antcomb,
					  int alt_ratio)
{
	if (ant_conf->div_group == 0) {
		/* Adjust the fast_div_bias based on main and alt lna conf */
		switch ((ant_conf->main_lna_conf << 4) |
			ant_conf->alt_lna_conf) {
		case 0x01: /* A-B LNA2 */
			ant_conf->fast_div_bias = 0x3b;
			break;
		case 0x02: /* A-B LNA1 */
			ant_conf->fast_div_bias = 0x3d;
			break;
		case 0x03: /* A-B A+B */
			ant_conf->fast_div_bias = 0x1;
			break;
		case 0x10: /* LNA2 A-B */
			ant_conf->fast_div_bias = 0x7;
			break;
		case 0x12: /* LNA2 LNA1 */
			ant_conf->fast_div_bias = 0x2;
			break;
		case 0x13: /* LNA2 A+B */
			ant_conf->fast_div_bias = 0x7;
			break;
		case 0x20: /* LNA1 A-B */
			ant_conf->fast_div_bias = 0x6;
			break;
		case 0x21: /* LNA1 LNA2 */
			ant_conf->fast_div_bias = 0x0;
			break;
		case 0x23: /* LNA1 A+B */
			ant_conf->fast_div_bias = 0x6;
			break;
		case 0x30: /* A+B A-B */
			ant_conf->fast_div_bias = 0x1;
			break;
		case 0x31: /* A+B LNA2 */
			ant_conf->fast_div_bias = 0x3b;
			break;
		case 0x32: /* A+B LNA1 */
			ant_conf->fast_div_bias = 0x3d;
			break;
		default:
			break;
		}
	} else if (ant_conf->div_group == 1) {
		/* Adjust the fast_div_bias based on main and alt_lna_conf */
		switch ((ant_conf->main_lna_conf << 4) |
			ant_conf->alt_lna_conf) {
		case 0x01: /* A-B LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x02: /* A-B LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x03: /* A-B A+B */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x10: /* LNA2 A-B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x3f;
			else
				ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x12: /* LNA2 LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x13: /* LNA2 A+B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x3f;
			else
				ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x20: /* LNA1 A-B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x3f;
			else
				ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x21: /* LNA1 LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x23: /* LNA1 A+B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x3f;
			else
				ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x30: /* A+B A-B */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x31: /* A+B LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x32: /* A+B LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		default:
			break;
		}
	} else if (ant_conf->div_group == 2) {
		/* Adjust the fast_div_bias based on main and alt_lna_conf */
		switch ((ant_conf->main_lna_conf << 4) |
			ant_conf->alt_lna_conf) {
		case 0x01: /* A-B LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x02: /* A-B LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x03: /* A-B A+B */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x10: /* LNA2 A-B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x1;
			else
				ant_conf->fast_div_bias = 0x2;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x12: /* LNA2 LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x13: /* LNA2 A+B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x1;
			else
				ant_conf->fast_div_bias = 0x2;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x20: /* LNA1 A-B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x1;
			else
				ant_conf->fast_div_bias = 0x2;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x21: /* LNA1 LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x23: /* LNA1 A+B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x1;
			else
				ant_conf->fast_div_bias = 0x2;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x30: /* A+B A-B */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x31: /* A+B LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x32: /* A+B LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		default:
			break;
		}
	}
}

/* Antenna diversity and combining */
static void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
{
	struct ath_hw_antcomb_conf div_ant_conf;
	struct ath_ant_comb *antcomb = &sc->ant_comb;
	int alt_ratio = 0, alt_rssi_avg = 0, main_rssi_avg = 0, curr_alt_set;
	int curr_main_set;
	int main_rssi = rs->rs_rssi_ctl0;
	int alt_rssi = rs->rs_rssi_ctl1;
	int rx_ant_conf, main_ant_conf;
	bool short_scan = false;

	rx_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_CURRENT_SHIFT) &
		      ATH_ANT_RX_MASK;
	main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) &
			ATH_ANT_RX_MASK;

	/* Record packet only when both main_rssi and alt_rssi are positive */
	if (main_rssi > 0 && alt_rssi > 0) {
		antcomb->total_pkt_count++;
		antcomb->main_total_rssi += main_rssi;
		antcomb->alt_total_rssi += alt_rssi;
		if (main_ant_conf == rx_ant_conf)
			antcomb->main_recv_cnt++;
		else
			antcomb->alt_recv_cnt++;
	}

	/* Short scan check */
	if (antcomb->scan && antcomb->alt_good) {
		if (time_after(jiffies, antcomb->scan_start_time +
			       msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR)))
			short_scan = true;
		else
			if (antcomb->total_pkt_count ==
			    ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) {
				alt_ratio = ((antcomb->alt_recv_cnt * 100) /
					     antcomb->total_pkt_count);
				if (alt_ratio < ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
					short_scan = true;
			}
	}

	if (((antcomb->total_pkt_count < ATH_ANT_DIV_COMB_MAX_PKTCOUNT) ||
	     rs->rs_moreaggr) && !short_scan)
		return;

	if (antcomb->total_pkt_count) {
		alt_ratio = ((antcomb->alt_recv_cnt * 100) /
			     antcomb->total_pkt_count);
		main_rssi_avg = (antcomb->main_total_rssi /
				 antcomb->total_pkt_count);
		alt_rssi_avg = (antcomb->alt_total_rssi /
				antcomb->total_pkt_count);
	}

	ath9k_hw_antdiv_comb_conf_get(sc->sc_ah, &div_ant_conf);
	curr_alt_set = div_ant_conf.alt_lna_conf;
	curr_main_set = div_ant_conf.main_lna_conf;
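
	/*
	 * Editorial note: alt_ratio above is the percentage of sampled
	 * packets received via the alternate chain (alt_recv_cnt * 100 /
	 * total_pkt_count), and the RSSI averages are plain means over the
	 * sampling window; all counters are reset at the bottom of this
	 * function once a decision has been applied.
	 */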

	antcomb->count++;

	if (antcomb->count == ATH_ANT_DIV_COMB_MAX_COUNT) {
		if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
			ath_lnaconf_alt_good_scan(antcomb, div_ant_conf,
						  main_rssi_avg);
			antcomb->alt_good = true;
		} else {
			antcomb->alt_good = false;
		}

		antcomb->count = 0;
		antcomb->scan = true;
		antcomb->scan_not_start = true;
	}

	if (!antcomb->scan) {
		if (ath_ant_div_comb_alt_check(div_ant_conf.div_group,
					       alt_ratio, curr_main_set,
					       curr_alt_set, alt_rssi_avg,
					       main_rssi_avg)) {
			if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) {
				/* Switch main and alt LNA */
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
			} else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) {
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			}

			goto div_comb_done;
		} else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) &&
			   (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) {
			/* Set alt to another LNA */
			if (curr_main_set == ATH_ANT_DIV_COMB_LNA2)
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
			else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1)
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;

			goto div_comb_done;
		}

		if ((alt_rssi_avg < (main_rssi_avg +
				     div_ant_conf.lna1_lna2_delta)))
			goto div_comb_done;
	}

	if (!antcomb->scan_not_start) {
		switch (curr_alt_set) {
		case ATH_ANT_DIV_COMB_LNA2:
			antcomb->rssi_lna2 = alt_rssi_avg;
			antcomb->rssi_lna1 = main_rssi_avg;
			antcomb->scan = true;
			/* set to A+B */
			div_ant_conf.main_lna_conf =
				ATH_ANT_DIV_COMB_LNA1;
			div_ant_conf.alt_lna_conf =
				ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1:
			antcomb->rssi_lna1 = alt_rssi_avg;
			antcomb->rssi_lna2 = main_rssi_avg;
			antcomb->scan = true;
			/* set to A+B */
			div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
			div_ant_conf.alt_lna_conf =
				ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
			antcomb->rssi_add = alt_rssi_avg;
			antcomb->scan = true;
			/* set to A-B */
			div_ant_conf.alt_lna_conf =
				ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2:
			antcomb->rssi_sub = alt_rssi_avg;
			antcomb->scan = false;
			if (antcomb->rssi_lna2 >
			    (antcomb->rssi_lna1 +
			     ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
				/* use LNA2 as main LNA */
				if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
				    (antcomb->rssi_add > antcomb->rssi_sub)) {
					/* set to A+B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
				} else if (antcomb->rssi_sub >
					   antcomb->rssi_lna1) {
					/* set to A-B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
				} else {
					/* set to LNA1 */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				}
			} else {
				/* use LNA1 as main LNA */
				if ((antcomb->rssi_add > antcomb->rssi_lna2) &&
				    (antcomb->rssi_add > antcomb->rssi_sub)) {
					/* set to A+B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
				} else if (antcomb->rssi_sub >
					   antcomb->rssi_lna1) {
					/* set to A-B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
				} else {
					/* set to LNA2 */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				}
			}
			break;
		default:
			break;
		}
	} else {
		if (!antcomb->alt_good) {
			antcomb->scan_not_start = false;
			/* Set alt to another LNA */
			if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) {
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
			} else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) {
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			}
			goto div_comb_done;
		}
	}

	ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf,
					   main_rssi_avg, alt_rssi_avg,
					   alt_ratio);

	antcomb->quick_scan_cnt++;

div_comb_done:
	ath_ant_div_conf_fast_divbias(&div_ant_conf, antcomb, alt_ratio);
	ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf);

	antcomb->scan_start_time = jiffies;
	antcomb->total_pkt_count = 0;
	antcomb->main_total_rssi = 0;
	antcomb->alt_total_rssi = 0;
	antcomb->main_recv_cnt = 0;
	antcomb->alt_recv_cnt = 0;
}

int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
	struct ath_buf *bf;
	struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
	struct ieee80211_rx_status *rxs;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	int retval;
	bool decrypt_error = false;
	struct ath_rx_status rs;
	enum ath9k_rx_qtype qtype;
	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	int dma_type;
	u8 rx_status_len = ah->caps.rx_status_len;
	u64 tsf = 0;
	u32 tsf_lower = 0;
	unsigned long flags;

	if (edma)
		dma_type = DMA_BIDIRECTIONAL;
	else
		dma_type = DMA_FROM_DEVICE;

	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
	spin_lock_bh(&sc->rx.rxbuflock);

	tsf = ath9k_hw_gettsf64(ah);
	tsf_lower = tsf & 0xffffffff;

	do {
		/* If handling rx interrupt and flush is in progress => exit */
		if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
			break;

		memset(&rs, 0, sizeof(rs));
		if (edma)
			bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
		else
			bf = ath_get_next_rx_buf(sc, &rs);

		if (!bf)
			break;

		skb = bf->bf_mpdu;
		if (!skb)
			continue;

		/*
		 * Take frame header from the first fragment and RX status from
		 * the last one.
		 */
		if (sc->rx.frag)
			hdr_skb = sc->rx.frag;
		else
			hdr_skb = skb;

		hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
		rxs = IEEE80211_SKB_RXCB(hdr_skb);
		if (ieee80211_is_beacon(hdr->frame_control)) {
			RX_STAT_INC(rx_beacons);
			if (!is_zero_ether_addr(common->curbssid) &&
			    !compare_ether_addr(hdr->addr3, common->curbssid))
				rs.is_mybeacon = true;
			else
				rs.is_mybeacon = false;
		} else {
			rs.is_mybeacon = false;
		}

		ath_debug_stat_rx(sc, &rs);

		/*
		 * If we're asked to flush receive queue, directly
		 * chain it back at the queue without processing it.
		 */
		if (sc->sc_flags & SC_OP_RXFLUSH) {
			RX_STAT_INC(rx_drop_rxflush);
			goto requeue_drop_frag;
		}

		memset(rxs, 0, sizeof(struct ieee80211_rx_status));

		rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
		if (rs.rs_tstamp > tsf_lower &&
		    unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
			rxs->mactime -= 0x100000000ULL;

		if (rs.rs_tstamp < tsf_lower &&
		    unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
			rxs->mactime += 0x100000000ULL;
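
		/*
		 * Worked example (editorial): rs_tstamp holds only the low
		 * 32 bits of the TSF at reception. If the TSF has just
		 * wrapped past a 32-bit boundary (e.g. tsf_lower = 0x10 but
		 * rs_tstamp = 0xfffffff0), the frame belongs to the previous
		 * 32-bit epoch, so 2^32 is subtracted; the mirror case adds
		 * 2^32. The 0x10000000 guard keeps ordinary small deltas
		 * from being misread as wraparound.
		 */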

		retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
						 rxs, &decrypt_error);
		if (retval)
			goto requeue_drop_frag;

		if (rs.is_mybeacon) {
			sc->hw_busy_count = 0;
			ath_start_rx_poll(sc, 3);
		}

		/* Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb */
		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize,
					      GFP_ATOMIC);

		/* If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
		 * skb and put it at the tail of the sc->rx.rxbuf list for
		 * processing. */
		if (!requeue_skb) {
			RX_STAT_INC(rx_oom_err);
			goto requeue_drop_frag;
		}

		/* Unmap the frame */
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 common->rx_bufsize, dma_type);

		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
		if (ah->caps.rx_status_len)
			skb_pull(skb, ah->caps.rx_status_len);

		if (!rs.rs_more)
			ath9k_rx_skb_postprocess(common, hdr_skb, &rs,
						 rxs, decrypt_error);

		/* We will now give hardware our shiny new allocated skb */
		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
						 common->rx_bufsize, dma_type);
		if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
			dev_kfree_skb_any(requeue_skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common, "dma_mapping_error() on RX\n");
			ieee80211_rx(hw, skb);
			break;
		}

		if (rs.rs_more) {
			RX_STAT_INC(rx_frags);
			/*
			 * rs_more indicates chained descriptors which can be
			 * used to link buffers together for a sort of
			 * scatter-gather operation.
			 */
			if (sc->rx.frag) {
				/* too many fragments - cannot handle frame */
				dev_kfree_skb_any(sc->rx.frag);
				dev_kfree_skb_any(skb);
				RX_STAT_INC(rx_too_many_frags_err);
				skb = NULL;
			}
			sc->rx.frag = skb;
			goto requeue;
		}

		if (sc->rx.frag) {
			int space = skb->len - skb_tailroom(hdr_skb);

			if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
				dev_kfree_skb(skb);
				RX_STAT_INC(rx_oom_err);
				goto requeue_drop_frag;
			}

			sc->rx.frag = NULL;

			skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
						  skb->len);
			dev_kfree_skb_any(skb);
			skb = hdr_skb;
		}
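
		/*
		 * Editorial note: sc->rx.frag holds the first fragment's
		 * skb; each follow-up fragment is appended with
		 * pskb_expand_head() plus a linear copy, so the 802.11
		 * header comes from the first fragment while the RX status
		 * (rs) comes from the last, as noted earlier in this loop.
		 */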

		if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
			/*
			 * change the default rx antenna if rx diversity
			 * chooses the other antenna 3 times in a row.
			 */
			if (sc->rx.defant != rs.rs_antenna) {
				if (++sc->rx.rxotherant >= 3)
					ath_setdefantenna(sc, rs.rs_antenna);
			} else {
				sc->rx.rxotherant = 0;
			}
		}

		if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
			skb_trim(skb, skb->len - 8);

		spin_lock_irqsave(&sc->sc_pm_lock, flags);

		if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
				     PS_WAIT_FOR_CAB |
				     PS_WAIT_FOR_PSPOLL_DATA)) ||
		    ath9k_check_auto_sleep(sc))
			ath_rx_ps(sc, skb, rs.is_mybeacon);
		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

		if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) &&
		    sc->ant_rx == 3)
			ath_ant_comb_scan(sc, &rs);

		ieee80211_rx(hw, skb);

requeue_drop_frag:
		if (sc->rx.frag) {
			dev_kfree_skb_any(sc->rx.frag);
			sc->rx.frag = NULL;
		}
requeue:
		if (edma) {
			list_add_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_edma_buf_link(sc, qtype);
		} else {
			list_move_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_buf_link(sc, bf);
			if (!flush)
				ath9k_hw_rxena(ah);
		}
	} while (1);

	spin_unlock_bh(&sc->rx.rxbuflock);

	if (!(ah->imask & ATH9K_INT_RXEOL)) {
		ah->imask |= (ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
		ath9k_hw_set_interrupts(ah);
	}

	return 0;
}