1 /* 2 * Copyright (c) 2008-2011 Atheros Communications Inc. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for any 5 * purpose with or without fee is hereby granted, provided that the above 6 * copyright notice and this permission notice appear in all copies. 7 * 8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 15 */ 16 17 #include <linux/dma-mapping.h> 18 #include "ath9k.h" 19 #include "ar9003_mac.h" 20 21 #define SKB_CB_ATHBUF(__skb) (*((struct ath_buf **)__skb->cb)) 22 23 static inline bool ath_is_alt_ant_ratio_better(int alt_ratio, int maxdelta, 24 int mindelta, int main_rssi_avg, 25 int alt_rssi_avg, int pkt_count) 26 { 27 return (((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) && 28 (alt_rssi_avg > main_rssi_avg + maxdelta)) || 29 (alt_rssi_avg > main_rssi_avg + mindelta)) && (pkt_count > 50); 30 } 31 32 static inline bool ath_ant_div_comb_alt_check(u8 div_group, int alt_ratio, 33 int curr_main_set, int curr_alt_set, 34 int alt_rssi_avg, int main_rssi_avg) 35 { 36 bool result = false; 37 switch (div_group) { 38 case 0: 39 if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) 40 result = true; 41 break; 42 case 1: 43 case 2: 44 if ((((curr_main_set == ATH_ANT_DIV_COMB_LNA2) && 45 (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) && 46 (alt_rssi_avg >= (main_rssi_avg - 5))) || 47 ((curr_main_set == ATH_ANT_DIV_COMB_LNA1) && 48 (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) && 49 (alt_rssi_avg >= (main_rssi_avg - 2)))) && 50 (alt_rssi_avg >= 4)) 51 result = true; 52 else 53 result = false; 54 break; 55 } 56 57 return result; 58 } 59 60 static inline bool ath9k_check_auto_sleep(struct ath_softc *sc) 61 { 62 return sc->ps_enabled && 63 (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP); 64 } 65 66 /* 67 * Setup and link descriptors. 68 * 69 * 11N: we can no longer afford to self link the last descriptor. 70 * MAC acknowledges BA status as long as it copies frames to host 71 * buffer (or rx fifo). This can incorrectly acknowledge packets 72 * to a sender if last desc is self-linked. 73 */ 74 static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf) 75 { 76 struct ath_hw *ah = sc->sc_ah; 77 struct ath_common *common = ath9k_hw_common(ah); 78 struct ath_desc *ds; 79 struct sk_buff *skb; 80 81 ATH_RXBUF_RESET(bf); 82 83 ds = bf->bf_desc; 84 ds->ds_link = 0; /* link to null */ 85 ds->ds_data = bf->bf_buf_addr; 86 87 /* virtual addr of the beginning of the buffer. */ 88 skb = bf->bf_mpdu; 89 BUG_ON(skb == NULL); 90 ds->ds_vdata = skb->data; 91 92 /* 93 * setup rx descriptors. The rx_bufsize here tells the hardware 94 * how much data it can DMA to us and that we are prepared 95 * to process 96 */ 97 ath9k_hw_setuprxdesc(ah, ds, 98 common->rx_bufsize, 99 0); 100 101 if (sc->rx.rxlink == NULL) 102 ath9k_hw_putrxbuf(ah, bf->bf_daddr); 103 else 104 *sc->rx.rxlink = bf->bf_daddr; 105 106 sc->rx.rxlink = &ds->ds_link; 107 } 108 109 static void ath_setdefantenna(struct ath_softc *sc, u32 antenna) 110 { 111 /* XXX block beacon interrupts */ 112 ath9k_hw_setantenna(sc->sc_ah, antenna); 113 sc->rx.defant = antenna; 114 sc->rx.rxotherant = 0; 115 } 116 117 static void ath_opmode_init(struct ath_softc *sc) 118 { 119 struct ath_hw *ah = sc->sc_ah; 120 struct ath_common *common = ath9k_hw_common(ah); 121 122 u32 rfilt, mfilt[2]; 123 124 /* configure rx filter */ 125 rfilt = ath_calcrxfilter(sc); 126 ath9k_hw_setrxfilter(ah, rfilt); 127 128 /* configure bssid mask */ 129 ath_hw_setbssidmask(common); 130 131 /* configure operational mode */ 132 ath9k_hw_setopmode(ah); 133 134 /* calculate and install multicast filter */ 135 mfilt[0] = mfilt[1] = ~0; 136 ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]); 137 } 138 139 static bool ath_rx_edma_buf_link(struct ath_softc *sc, 140 enum ath9k_rx_qtype qtype) 141 { 142 struct ath_hw *ah = sc->sc_ah; 143 struct ath_rx_edma *rx_edma; 144 struct sk_buff *skb; 145 struct ath_buf *bf; 146 147 rx_edma = &sc->rx.rx_edma[qtype]; 148 if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize) 149 return false; 150 151 bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list); 152 list_del_init(&bf->list); 153 154 skb = bf->bf_mpdu; 155 156 ATH_RXBUF_RESET(bf); 157 memset(skb->data, 0, ah->caps.rx_status_len); 158 dma_sync_single_for_device(sc->dev, bf->bf_buf_addr, 159 ah->caps.rx_status_len, DMA_TO_DEVICE); 160 161 SKB_CB_ATHBUF(skb) = bf; 162 ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype); 163 skb_queue_tail(&rx_edma->rx_fifo, skb); 164 165 return true; 166 } 167 168 static void ath_rx_addbuffer_edma(struct ath_softc *sc, 169 enum ath9k_rx_qtype qtype, int size) 170 { 171 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 172 u32 nbuf = 0; 173 174 if (list_empty(&sc->rx.rxbuf)) { 175 ath_dbg(common, QUEUE, "No free rx buf available\n"); 176 return; 177 } 178 179 while (!list_empty(&sc->rx.rxbuf)) { 180 nbuf++; 181 182 if (!ath_rx_edma_buf_link(sc, qtype)) 183 break; 184 185 if (nbuf >= size) 186 break; 187 } 188 } 189 190 static void ath_rx_remove_buffer(struct ath_softc *sc, 191 enum ath9k_rx_qtype qtype) 192 { 193 struct ath_buf *bf; 194 struct ath_rx_edma *rx_edma; 195 struct sk_buff *skb; 196 197 rx_edma = &sc->rx.rx_edma[qtype]; 198 199 while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) { 200 bf = SKB_CB_ATHBUF(skb); 201 BUG_ON(!bf); 202 list_add_tail(&bf->list, &sc->rx.rxbuf); 203 } 204 } 205 206 static void ath_rx_edma_cleanup(struct ath_softc *sc) 207 { 208 struct ath_hw *ah = sc->sc_ah; 209 struct ath_common *common = ath9k_hw_common(ah); 210 struct ath_buf *bf; 211 212 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP); 213 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP); 214 215 list_for_each_entry(bf, &sc->rx.rxbuf, list) { 216 if (bf->bf_mpdu) { 217 dma_unmap_single(sc->dev, bf->bf_buf_addr, 218 common->rx_bufsize, 219 DMA_BIDIRECTIONAL); 220 dev_kfree_skb_any(bf->bf_mpdu); 221 bf->bf_buf_addr = 0; 222 bf->bf_mpdu = NULL; 223 } 224 } 225 226 INIT_LIST_HEAD(&sc->rx.rxbuf); 227 228 kfree(sc->rx.rx_bufptr); 229 sc->rx.rx_bufptr = NULL; 230 } 231 232 static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size) 233 { 234 skb_queue_head_init(&rx_edma->rx_fifo); 235 skb_queue_head_init(&rx_edma->rx_buffers); 236 rx_edma->rx_fifo_hwsize = size; 237 } 238 239 static int ath_rx_edma_init(struct ath_softc *sc, int nbufs) 240 { 241 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 242 struct ath_hw *ah = sc->sc_ah; 243 struct sk_buff *skb; 244 struct ath_buf *bf; 245 int error = 0, i; 246 u32 size; 247 248 ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize - 249 ah->caps.rx_status_len); 250 251 ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP], 252 ah->caps.rx_lp_qdepth); 253 ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP], 254 ah->caps.rx_hp_qdepth); 255 256 size = sizeof(struct ath_buf) * nbufs; 257 bf = kzalloc(size, GFP_KERNEL); 258 if (!bf) 259 return -ENOMEM; 260 261 INIT_LIST_HEAD(&sc->rx.rxbuf); 262 sc->rx.rx_bufptr = bf; 263 264 for (i = 0; i < nbufs; i++, bf++) { 265 skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL); 266 if (!skb) { 267 error = -ENOMEM; 268 goto rx_init_fail; 269 } 270 271 memset(skb->data, 0, common->rx_bufsize); 272 bf->bf_mpdu = skb; 273 274 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data, 275 common->rx_bufsize, 276 DMA_BIDIRECTIONAL); 277 if (unlikely(dma_mapping_error(sc->dev, 278 bf->bf_buf_addr))) { 279 dev_kfree_skb_any(skb); 280 bf->bf_mpdu = NULL; 281 bf->bf_buf_addr = 0; 282 ath_err(common, 283 "dma_mapping_error() on RX init\n"); 284 error = -ENOMEM; 285 goto rx_init_fail; 286 } 287 288 list_add_tail(&bf->list, &sc->rx.rxbuf); 289 } 290 291 return 0; 292 293 rx_init_fail: 294 ath_rx_edma_cleanup(sc); 295 return error; 296 } 297 298 static void ath_edma_start_recv(struct ath_softc *sc) 299 { 300 spin_lock_bh(&sc->rx.rxbuflock); 301 302 ath9k_hw_rxena(sc->sc_ah); 303 304 ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP, 305 sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize); 306 307 ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP, 308 sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize); 309 310 ath_opmode_init(sc); 311 312 ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_OFFCHANNEL)); 313 314 spin_unlock_bh(&sc->rx.rxbuflock); 315 } 316 317 static void ath_edma_stop_recv(struct ath_softc *sc) 318 { 319 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP); 320 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP); 321 } 322 323 int ath_rx_init(struct ath_softc *sc, int nbufs) 324 { 325 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 326 struct sk_buff *skb; 327 struct ath_buf *bf; 328 int error = 0; 329 330 spin_lock_init(&sc->sc_pcu_lock); 331 sc->sc_flags &= ~SC_OP_RXFLUSH; 332 spin_lock_init(&sc->rx.rxbuflock); 333 334 common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 + 335 sc->sc_ah->caps.rx_status_len; 336 337 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { 338 return ath_rx_edma_init(sc, nbufs); 339 } else { 340 ath_dbg(common, CONFIG, "cachelsz %u rxbufsize %u\n", 341 common->cachelsz, common->rx_bufsize); 342 343 /* Initialize rx descriptors */ 344 345 error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf, 346 "rx", nbufs, 1, 0); 347 if (error != 0) { 348 ath_err(common, 349 "failed to allocate rx descriptors: %d\n", 350 error); 351 goto err; 352 } 353 354 list_for_each_entry(bf, &sc->rx.rxbuf, list) { 355 skb = ath_rxbuf_alloc(common, common->rx_bufsize, 356 GFP_KERNEL); 357 if (skb == NULL) { 358 error = -ENOMEM; 359 goto err; 360 } 361 362 bf->bf_mpdu = skb; 363 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data, 364 common->rx_bufsize, 365 DMA_FROM_DEVICE); 366 if (unlikely(dma_mapping_error(sc->dev, 367 bf->bf_buf_addr))) { 368 dev_kfree_skb_any(skb); 369 bf->bf_mpdu = NULL; 370 bf->bf_buf_addr = 0; 371 ath_err(common, 372 "dma_mapping_error() on RX init\n"); 373 error = -ENOMEM; 374 goto err; 375 } 376 } 377 sc->rx.rxlink = NULL; 378 } 379 380 err: 381 if (error) 382 ath_rx_cleanup(sc); 383 384 return error; 385 } 386 387 void ath_rx_cleanup(struct ath_softc *sc) 388 { 389 struct ath_hw *ah = sc->sc_ah; 390 struct ath_common *common = ath9k_hw_common(ah); 391 struct sk_buff *skb; 392 struct ath_buf *bf; 393 394 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { 395 ath_rx_edma_cleanup(sc); 396 return; 397 } else { 398 list_for_each_entry(bf, &sc->rx.rxbuf, list) { 399 skb = bf->bf_mpdu; 400 if (skb) { 401 dma_unmap_single(sc->dev, bf->bf_buf_addr, 402 common->rx_bufsize, 403 DMA_FROM_DEVICE); 404 dev_kfree_skb(skb); 405 bf->bf_buf_addr = 0; 406 bf->bf_mpdu = NULL; 407 } 408 } 409 410 if (sc->rx.rxdma.dd_desc_len != 0) 411 ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf); 412 } 413 } 414 415 /* 416 * Calculate the receive filter according to the 417 * operating mode and state: 418 * 419 * o always accept unicast, broadcast, and multicast traffic 420 * o maintain current state of phy error reception (the hal 421 * may enable phy error frames for noise immunity work) 422 * o probe request frames are accepted only when operating in 423 * hostap, adhoc, or monitor modes 424 * o enable promiscuous mode according to the interface state 425 * o accept beacons: 426 * - when operating in adhoc mode so the 802.11 layer creates 427 * node table entries for peers, 428 * - when operating in station mode for collecting rssi data when 429 * the station is otherwise quiet, or 430 * - when operating as a repeater so we see repeater-sta beacons 431 * - when scanning 432 */ 433 434 u32 ath_calcrxfilter(struct ath_softc *sc) 435 { 436 u32 rfilt; 437 438 rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST 439 | ATH9K_RX_FILTER_MCAST; 440 441 if (sc->rx.rxfilter & FIF_PROBE_REQ) 442 rfilt |= ATH9K_RX_FILTER_PROBEREQ; 443 444 /* 445 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station 446 * mode interface or when in monitor mode. AP mode does not need this 447 * since it receives all in-BSS frames anyway. 448 */ 449 if (sc->sc_ah->is_monitoring) 450 rfilt |= ATH9K_RX_FILTER_PROM; 451 452 if (sc->rx.rxfilter & FIF_CONTROL) 453 rfilt |= ATH9K_RX_FILTER_CONTROL; 454 455 if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) && 456 (sc->nvifs <= 1) && 457 !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC)) 458 rfilt |= ATH9K_RX_FILTER_MYBEACON; 459 else 460 rfilt |= ATH9K_RX_FILTER_BEACON; 461 462 if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) || 463 (sc->rx.rxfilter & FIF_PSPOLL)) 464 rfilt |= ATH9K_RX_FILTER_PSPOLL; 465 466 if (conf_is_ht(&sc->hw->conf)) 467 rfilt |= ATH9K_RX_FILTER_COMP_BAR; 468 469 if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) { 470 /* The following may also be needed for other older chips */ 471 if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160) 472 rfilt |= ATH9K_RX_FILTER_PROM; 473 rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL; 474 } 475 476 return rfilt; 477 478 } 479 480 int ath_startrecv(struct ath_softc *sc) 481 { 482 struct ath_hw *ah = sc->sc_ah; 483 struct ath_buf *bf, *tbf; 484 485 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { 486 ath_edma_start_recv(sc); 487 return 0; 488 } 489 490 spin_lock_bh(&sc->rx.rxbuflock); 491 if (list_empty(&sc->rx.rxbuf)) 492 goto start_recv; 493 494 sc->rx.rxlink = NULL; 495 list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) { 496 ath_rx_buf_link(sc, bf); 497 } 498 499 /* We could have deleted elements so the list may be empty now */ 500 if (list_empty(&sc->rx.rxbuf)) 501 goto start_recv; 502 503 bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list); 504 ath9k_hw_putrxbuf(ah, bf->bf_daddr); 505 ath9k_hw_rxena(ah); 506 507 start_recv: 508 ath_opmode_init(sc); 509 ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_OFFCHANNEL)); 510 511 spin_unlock_bh(&sc->rx.rxbuflock); 512 513 return 0; 514 } 515 516 bool ath_stoprecv(struct ath_softc *sc) 517 { 518 struct ath_hw *ah = sc->sc_ah; 519 bool stopped, reset = false; 520 521 spin_lock_bh(&sc->rx.rxbuflock); 522 ath9k_hw_abortpcurecv(ah); 523 ath9k_hw_setrxfilter(ah, 0); 524 stopped = ath9k_hw_stopdmarecv(ah, &reset); 525 526 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) 527 ath_edma_stop_recv(sc); 528 else 529 sc->rx.rxlink = NULL; 530 spin_unlock_bh(&sc->rx.rxbuflock); 531 532 if (!(ah->ah_flags & AH_UNPLUGGED) && 533 unlikely(!stopped)) { 534 ath_err(ath9k_hw_common(sc->sc_ah), 535 "Could not stop RX, we could be " 536 "confusing the DMA engine when we start RX up\n"); 537 ATH_DBG_WARN_ON_ONCE(!stopped); 538 } 539 return stopped && !reset; 540 } 541 542 void ath_flushrecv(struct ath_softc *sc) 543 { 544 sc->sc_flags |= SC_OP_RXFLUSH; 545 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) 546 ath_rx_tasklet(sc, 1, true); 547 ath_rx_tasklet(sc, 1, false); 548 sc->sc_flags &= ~SC_OP_RXFLUSH; 549 } 550 551 static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb) 552 { 553 /* Check whether the Beacon frame has DTIM indicating buffered bc/mc */ 554 struct ieee80211_mgmt *mgmt; 555 u8 *pos, *end, id, elen; 556 struct ieee80211_tim_ie *tim; 557 558 mgmt = (struct ieee80211_mgmt *)skb->data; 559 pos = mgmt->u.beacon.variable; 560 end = skb->data + skb->len; 561 562 while (pos + 2 < end) { 563 id = *pos++; 564 elen = *pos++; 565 if (pos + elen > end) 566 break; 567 568 if (id == WLAN_EID_TIM) { 569 if (elen < sizeof(*tim)) 570 break; 571 tim = (struct ieee80211_tim_ie *) pos; 572 if (tim->dtim_count != 0) 573 break; 574 return tim->bitmap_ctrl & 0x01; 575 } 576 577 pos += elen; 578 } 579 580 return false; 581 } 582 583 static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb) 584 { 585 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 586 587 if (skb->len < 24 + 8 + 2 + 2) 588 return; 589 590 sc->ps_flags &= ~PS_WAIT_FOR_BEACON; 591 592 if (sc->ps_flags & PS_BEACON_SYNC) { 593 sc->ps_flags &= ~PS_BEACON_SYNC; 594 ath_dbg(common, PS, 595 "Reconfigure Beacon timers based on timestamp from the AP\n"); 596 ath_set_beacon(sc); 597 } 598 599 if (ath_beacon_dtim_pending_cab(skb)) { 600 /* 601 * Remain awake waiting for buffered broadcast/multicast 602 * frames. If the last broadcast/multicast frame is not 603 * received properly, the next beacon frame will work as 604 * a backup trigger for returning into NETWORK SLEEP state, 605 * so we are waiting for it as well. 606 */ 607 ath_dbg(common, PS, 608 "Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n"); 609 sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON; 610 return; 611 } 612 613 if (sc->ps_flags & PS_WAIT_FOR_CAB) { 614 /* 615 * This can happen if a broadcast frame is dropped or the AP 616 * fails to send a frame indicating that all CAB frames have 617 * been delivered. 618 */ 619 sc->ps_flags &= ~PS_WAIT_FOR_CAB; 620 ath_dbg(common, PS, "PS wait for CAB frames timed out\n"); 621 } 622 } 623 624 static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon) 625 { 626 struct ieee80211_hdr *hdr; 627 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 628 629 hdr = (struct ieee80211_hdr *)skb->data; 630 631 /* Process Beacon and CAB receive in PS state */ 632 if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc)) 633 && mybeacon) 634 ath_rx_ps_beacon(sc, skb); 635 else if ((sc->ps_flags & PS_WAIT_FOR_CAB) && 636 (ieee80211_is_data(hdr->frame_control) || 637 ieee80211_is_action(hdr->frame_control)) && 638 is_multicast_ether_addr(hdr->addr1) && 639 !ieee80211_has_moredata(hdr->frame_control)) { 640 /* 641 * No more broadcast/multicast frames to be received at this 642 * point. 643 */ 644 sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON); 645 ath_dbg(common, PS, 646 "All PS CAB frames received, back to sleep\n"); 647 } else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) && 648 !is_multicast_ether_addr(hdr->addr1) && 649 !ieee80211_has_morefrags(hdr->frame_control)) { 650 sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA; 651 ath_dbg(common, PS, 652 "Going back to sleep after having received PS-Poll data (0x%lx)\n", 653 sc->ps_flags & (PS_WAIT_FOR_BEACON | 654 PS_WAIT_FOR_CAB | 655 PS_WAIT_FOR_PSPOLL_DATA | 656 PS_WAIT_FOR_TX_ACK)); 657 } 658 } 659 660 static bool ath_edma_get_buffers(struct ath_softc *sc, 661 enum ath9k_rx_qtype qtype) 662 { 663 struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype]; 664 struct ath_hw *ah = sc->sc_ah; 665 struct ath_common *common = ath9k_hw_common(ah); 666 struct sk_buff *skb; 667 struct ath_buf *bf; 668 int ret; 669 670 skb = skb_peek(&rx_edma->rx_fifo); 671 if (!skb) 672 return false; 673 674 bf = SKB_CB_ATHBUF(skb); 675 BUG_ON(!bf); 676 677 dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr, 678 common->rx_bufsize, DMA_FROM_DEVICE); 679 680 ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data); 681 if (ret == -EINPROGRESS) { 682 /*let device gain the buffer again*/ 683 dma_sync_single_for_device(sc->dev, bf->bf_buf_addr, 684 common->rx_bufsize, DMA_FROM_DEVICE); 685 return false; 686 } 687 688 __skb_unlink(skb, &rx_edma->rx_fifo); 689 if (ret == -EINVAL) { 690 /* corrupt descriptor, skip this one and the following one */ 691 list_add_tail(&bf->list, &sc->rx.rxbuf); 692 ath_rx_edma_buf_link(sc, qtype); 693 skb = skb_peek(&rx_edma->rx_fifo); 694 if (!skb) 695 return true; 696 697 bf = SKB_CB_ATHBUF(skb); 698 BUG_ON(!bf); 699 700 __skb_unlink(skb, &rx_edma->rx_fifo); 701 list_add_tail(&bf->list, &sc->rx.rxbuf); 702 ath_rx_edma_buf_link(sc, qtype); 703 return true; 704 } 705 skb_queue_tail(&rx_edma->rx_buffers, skb); 706 707 return true; 708 } 709 710 static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc, 711 struct ath_rx_status *rs, 712 enum ath9k_rx_qtype qtype) 713 { 714 struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype]; 715 struct sk_buff *skb; 716 struct ath_buf *bf; 717 718 while (ath_edma_get_buffers(sc, qtype)); 719 skb = __skb_dequeue(&rx_edma->rx_buffers); 720 if (!skb) 721 return NULL; 722 723 bf = SKB_CB_ATHBUF(skb); 724 ath9k_hw_process_rxdesc_edma(sc->sc_ah, rs, skb->data); 725 return bf; 726 } 727 728 static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc, 729 struct ath_rx_status *rs) 730 { 731 struct ath_hw *ah = sc->sc_ah; 732 struct ath_common *common = ath9k_hw_common(ah); 733 struct ath_desc *ds; 734 struct ath_buf *bf; 735 int ret; 736 737 if (list_empty(&sc->rx.rxbuf)) { 738 sc->rx.rxlink = NULL; 739 return NULL; 740 } 741 742 bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list); 743 ds = bf->bf_desc; 744 745 /* 746 * Must provide the virtual address of the current 747 * descriptor, the physical address, and the virtual 748 * address of the next descriptor in the h/w chain. 749 * This allows the HAL to look ahead to see if the 750 * hardware is done with a descriptor by checking the 751 * done bit in the following descriptor and the address 752 * of the current descriptor the DMA engine is working 753 * on. All this is necessary because of our use of 754 * a self-linked list to avoid rx overruns. 755 */ 756 ret = ath9k_hw_rxprocdesc(ah, ds, rs); 757 if (ret == -EINPROGRESS) { 758 struct ath_rx_status trs; 759 struct ath_buf *tbf; 760 struct ath_desc *tds; 761 762 memset(&trs, 0, sizeof(trs)); 763 if (list_is_last(&bf->list, &sc->rx.rxbuf)) { 764 sc->rx.rxlink = NULL; 765 return NULL; 766 } 767 768 tbf = list_entry(bf->list.next, struct ath_buf, list); 769 770 /* 771 * On some hardware the descriptor status words could 772 * get corrupted, including the done bit. Because of 773 * this, check if the next descriptor's done bit is 774 * set or not. 775 * 776 * If the next descriptor's done bit is set, the current 777 * descriptor has been corrupted. Force s/w to discard 778 * this descriptor and continue... 779 */ 780 781 tds = tbf->bf_desc; 782 ret = ath9k_hw_rxprocdesc(ah, tds, &trs); 783 if (ret == -EINPROGRESS) 784 return NULL; 785 } 786 787 if (!bf->bf_mpdu) 788 return bf; 789 790 /* 791 * Synchronize the DMA transfer with CPU before 792 * 1. accessing the frame 793 * 2. requeueing the same buffer to h/w 794 */ 795 dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr, 796 common->rx_bufsize, 797 DMA_FROM_DEVICE); 798 799 return bf; 800 } 801 802 /* Assumes you've already done the endian to CPU conversion */ 803 static bool ath9k_rx_accept(struct ath_common *common, 804 struct ieee80211_hdr *hdr, 805 struct ieee80211_rx_status *rxs, 806 struct ath_rx_status *rx_stats, 807 bool *decrypt_error) 808 { 809 struct ath_softc *sc = (struct ath_softc *) common->priv; 810 bool is_mc, is_valid_tkip, strip_mic, mic_error; 811 struct ath_hw *ah = common->ah; 812 __le16 fc; 813 u8 rx_status_len = ah->caps.rx_status_len; 814 815 fc = hdr->frame_control; 816 817 is_mc = !!is_multicast_ether_addr(hdr->addr1); 818 is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID && 819 test_bit(rx_stats->rs_keyix, common->tkip_keymap); 820 strip_mic = is_valid_tkip && ieee80211_is_data(fc) && 821 !(rx_stats->rs_status & 822 (ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC | 823 ATH9K_RXERR_KEYMISS)); 824 825 /* 826 * Key miss events are only relevant for pairwise keys where the 827 * descriptor does contain a valid key index. This has been observed 828 * mostly with CCMP encryption. 829 */ 830 if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID) 831 rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS; 832 833 if (!rx_stats->rs_datalen) 834 return false; 835 /* 836 * rs_status follows rs_datalen so if rs_datalen is too large 837 * we can take a hint that hardware corrupted it, so ignore 838 * those frames. 839 */ 840 if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len)) 841 return false; 842 843 /* Only use error bits from the last fragment */ 844 if (rx_stats->rs_more) 845 return true; 846 847 mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) && 848 !ieee80211_has_morefrags(fc) && 849 !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) && 850 (rx_stats->rs_status & ATH9K_RXERR_MIC); 851 852 /* 853 * The rx_stats->rs_status will not be set until the end of the 854 * chained descriptors so it can be ignored if rs_more is set. The 855 * rs_more will be false at the last element of the chained 856 * descriptors. 857 */ 858 if (rx_stats->rs_status != 0) { 859 u8 status_mask; 860 861 if (rx_stats->rs_status & ATH9K_RXERR_CRC) { 862 rxs->flag |= RX_FLAG_FAILED_FCS_CRC; 863 mic_error = false; 864 } 865 if (rx_stats->rs_status & ATH9K_RXERR_PHY) 866 return false; 867 868 if ((rx_stats->rs_status & ATH9K_RXERR_DECRYPT) || 869 (!is_mc && (rx_stats->rs_status & ATH9K_RXERR_KEYMISS))) { 870 *decrypt_error = true; 871 mic_error = false; 872 } 873 874 /* 875 * Reject error frames with the exception of 876 * decryption and MIC failures. For monitor mode, 877 * we also ignore the CRC error. 878 */ 879 status_mask = ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC | 880 ATH9K_RXERR_KEYMISS; 881 882 if (ah->is_monitoring && (sc->rx.rxfilter & FIF_FCSFAIL)) 883 status_mask |= ATH9K_RXERR_CRC; 884 885 if (rx_stats->rs_status & ~status_mask) 886 return false; 887 } 888 889 /* 890 * For unicast frames the MIC error bit can have false positives, 891 * so all MIC error reports need to be validated in software. 892 * False negatives are not common, so skip software verification 893 * if the hardware considers the MIC valid. 894 */ 895 if (strip_mic) 896 rxs->flag |= RX_FLAG_MMIC_STRIPPED; 897 else if (is_mc && mic_error) 898 rxs->flag |= RX_FLAG_MMIC_ERROR; 899 900 return true; 901 } 902 903 static int ath9k_process_rate(struct ath_common *common, 904 struct ieee80211_hw *hw, 905 struct ath_rx_status *rx_stats, 906 struct ieee80211_rx_status *rxs) 907 { 908 struct ieee80211_supported_band *sband; 909 enum ieee80211_band band; 910 unsigned int i = 0; 911 912 band = hw->conf.channel->band; 913 sband = hw->wiphy->bands[band]; 914 915 if (rx_stats->rs_rate & 0x80) { 916 /* HT rate */ 917 rxs->flag |= RX_FLAG_HT; 918 if (rx_stats->rs_flags & ATH9K_RX_2040) 919 rxs->flag |= RX_FLAG_40MHZ; 920 if (rx_stats->rs_flags & ATH9K_RX_GI) 921 rxs->flag |= RX_FLAG_SHORT_GI; 922 rxs->rate_idx = rx_stats->rs_rate & 0x7f; 923 return 0; 924 } 925 926 for (i = 0; i < sband->n_bitrates; i++) { 927 if (sband->bitrates[i].hw_value == rx_stats->rs_rate) { 928 rxs->rate_idx = i; 929 return 0; 930 } 931 if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) { 932 rxs->flag |= RX_FLAG_SHORTPRE; 933 rxs->rate_idx = i; 934 return 0; 935 } 936 } 937 938 /* 939 * No valid hardware bitrate found -- we should not get here 940 * because hardware has already validated this frame as OK. 941 */ 942 ath_dbg(common, ANY, 943 "unsupported hw bitrate detected 0x%02x using 1 Mbit\n", 944 rx_stats->rs_rate); 945 946 return -EINVAL; 947 } 948 949 static void ath9k_process_rssi(struct ath_common *common, 950 struct ieee80211_hw *hw, 951 struct ieee80211_hdr *hdr, 952 struct ath_rx_status *rx_stats) 953 { 954 struct ath_softc *sc = hw->priv; 955 struct ath_hw *ah = common->ah; 956 int last_rssi; 957 958 if (!rx_stats->is_mybeacon || 959 ((ah->opmode != NL80211_IFTYPE_STATION) && 960 (ah->opmode != NL80211_IFTYPE_ADHOC))) 961 return; 962 963 if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr) 964 ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi); 965 966 last_rssi = sc->last_rssi; 967 if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER)) 968 rx_stats->rs_rssi = ATH_EP_RND(last_rssi, 969 ATH_RSSI_EP_MULTIPLIER); 970 if (rx_stats->rs_rssi < 0) 971 rx_stats->rs_rssi = 0; 972 973 /* Update Beacon RSSI, this is used by ANI. */ 974 ah->stats.avgbrssi = rx_stats->rs_rssi; 975 } 976 977 /* 978 * For Decrypt or Demic errors, we only mark packet status here and always push 979 * up the frame up to let mac80211 handle the actual error case, be it no 980 * decryption key or real decryption error. This let us keep statistics there. 981 */ 982 static int ath9k_rx_skb_preprocess(struct ath_common *common, 983 struct ieee80211_hw *hw, 984 struct ieee80211_hdr *hdr, 985 struct ath_rx_status *rx_stats, 986 struct ieee80211_rx_status *rx_status, 987 bool *decrypt_error) 988 { 989 struct ath_hw *ah = common->ah; 990 991 memset(rx_status, 0, sizeof(struct ieee80211_rx_status)); 992 993 /* 994 * everything but the rate is checked here, the rate check is done 995 * separately to avoid doing two lookups for a rate for each frame. 996 */ 997 if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error)) 998 return -EINVAL; 999 1000 /* Only use status info from the last fragment */ 1001 if (rx_stats->rs_more) 1002 return 0; 1003 1004 ath9k_process_rssi(common, hw, hdr, rx_stats); 1005 1006 if (ath9k_process_rate(common, hw, rx_stats, rx_status)) 1007 return -EINVAL; 1008 1009 rx_status->band = hw->conf.channel->band; 1010 rx_status->freq = hw->conf.channel->center_freq; 1011 rx_status->signal = ah->noise + rx_stats->rs_rssi; 1012 rx_status->antenna = rx_stats->rs_antenna; 1013 rx_status->flag |= RX_FLAG_MACTIME_MPDU; 1014 1015 return 0; 1016 } 1017 1018 static void ath9k_rx_skb_postprocess(struct ath_common *common, 1019 struct sk_buff *skb, 1020 struct ath_rx_status *rx_stats, 1021 struct ieee80211_rx_status *rxs, 1022 bool decrypt_error) 1023 { 1024 struct ath_hw *ah = common->ah; 1025 struct ieee80211_hdr *hdr; 1026 int hdrlen, padpos, padsize; 1027 u8 keyix; 1028 __le16 fc; 1029 1030 /* see if any padding is done by the hw and remove it */ 1031 hdr = (struct ieee80211_hdr *) skb->data; 1032 hdrlen = ieee80211_get_hdrlen_from_skb(skb); 1033 fc = hdr->frame_control; 1034 padpos = ath9k_cmn_padpos(hdr->frame_control); 1035 1036 /* The MAC header is padded to have 32-bit boundary if the 1037 * packet payload is non-zero. The general calculation for 1038 * padsize would take into account odd header lengths: 1039 * padsize = (4 - padpos % 4) % 4; However, since only 1040 * even-length headers are used, padding can only be 0 or 2 1041 * bytes and we can optimize this a bit. In addition, we must 1042 * not try to remove padding from short control frames that do 1043 * not have payload. */ 1044 padsize = padpos & 3; 1045 if (padsize && skb->len>=padpos+padsize+FCS_LEN) { 1046 memmove(skb->data + padsize, skb->data, padpos); 1047 skb_pull(skb, padsize); 1048 } 1049 1050 keyix = rx_stats->rs_keyix; 1051 1052 if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error && 1053 ieee80211_has_protected(fc)) { 1054 rxs->flag |= RX_FLAG_DECRYPTED; 1055 } else if (ieee80211_has_protected(fc) 1056 && !decrypt_error && skb->len >= hdrlen + 4) { 1057 keyix = skb->data[hdrlen + 3] >> 6; 1058 1059 if (test_bit(keyix, common->keymap)) 1060 rxs->flag |= RX_FLAG_DECRYPTED; 1061 } 1062 if (ah->sw_mgmt_crypto && 1063 (rxs->flag & RX_FLAG_DECRYPTED) && 1064 ieee80211_is_mgmt(fc)) 1065 /* Use software decrypt for management frames. */ 1066 rxs->flag &= ~RX_FLAG_DECRYPTED; 1067 } 1068 1069 static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb, 1070 struct ath_hw_antcomb_conf ant_conf, 1071 int main_rssi_avg) 1072 { 1073 antcomb->quick_scan_cnt = 0; 1074 1075 if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA2) 1076 antcomb->rssi_lna2 = main_rssi_avg; 1077 else if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA1) 1078 antcomb->rssi_lna1 = main_rssi_avg; 1079 1080 switch ((ant_conf.main_lna_conf << 4) | ant_conf.alt_lna_conf) { 1081 case 0x10: /* LNA2 A-B */ 1082 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2; 1083 antcomb->first_quick_scan_conf = 1084 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2; 1085 antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1; 1086 break; 1087 case 0x20: /* LNA1 A-B */ 1088 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2; 1089 antcomb->first_quick_scan_conf = 1090 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2; 1091 antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2; 1092 break; 1093 case 0x21: /* LNA1 LNA2 */ 1094 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA2; 1095 antcomb->first_quick_scan_conf = 1096 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2; 1097 antcomb->second_quick_scan_conf = 1098 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2; 1099 break; 1100 case 0x12: /* LNA2 LNA1 */ 1101 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1; 1102 antcomb->first_quick_scan_conf = 1103 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2; 1104 antcomb->second_quick_scan_conf = 1105 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2; 1106 break; 1107 case 0x13: /* LNA2 A+B */ 1108 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2; 1109 antcomb->first_quick_scan_conf = 1110 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2; 1111 antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1; 1112 break; 1113 case 0x23: /* LNA1 A+B */ 1114 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2; 1115 antcomb->first_quick_scan_conf = 1116 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2; 1117 antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2; 1118 break; 1119 default: 1120 break; 1121 } 1122 } 1123 1124 static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb, 1125 struct ath_hw_antcomb_conf *div_ant_conf, 1126 int main_rssi_avg, int alt_rssi_avg, 1127 int alt_ratio) 1128 { 1129 /* alt_good */ 1130 switch (antcomb->quick_scan_cnt) { 1131 case 0: 1132 /* set alt to main, and alt to first conf */ 1133 div_ant_conf->main_lna_conf = antcomb->main_conf; 1134 div_ant_conf->alt_lna_conf = antcomb->first_quick_scan_conf; 1135 break; 1136 case 1: 1137 /* set alt to main, and alt to first conf */ 1138 div_ant_conf->main_lna_conf = antcomb->main_conf; 1139 div_ant_conf->alt_lna_conf = antcomb->second_quick_scan_conf; 1140 antcomb->rssi_first = main_rssi_avg; 1141 antcomb->rssi_second = alt_rssi_avg; 1142 1143 if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) { 1144 /* main is LNA1 */ 1145 if (ath_is_alt_ant_ratio_better(alt_ratio, 1146 ATH_ANT_DIV_COMB_LNA1_DELTA_HI, 1147 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW, 1148 main_rssi_avg, alt_rssi_avg, 1149 antcomb->total_pkt_count)) 1150 antcomb->first_ratio = true; 1151 else 1152 antcomb->first_ratio = false; 1153 } else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) { 1154 if (ath_is_alt_ant_ratio_better(alt_ratio, 1155 ATH_ANT_DIV_COMB_LNA1_DELTA_MID, 1156 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW, 1157 main_rssi_avg, alt_rssi_avg, 1158 antcomb->total_pkt_count)) 1159 antcomb->first_ratio = true; 1160 else 1161 antcomb->first_ratio = false; 1162 } else { 1163 if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) && 1164 (alt_rssi_avg > main_rssi_avg + 1165 ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) || 1166 (alt_rssi_avg > main_rssi_avg)) && 1167 (antcomb->total_pkt_count > 50)) 1168 antcomb->first_ratio = true; 1169 else 1170 antcomb->first_ratio = false; 1171 } 1172 break; 1173 case 2: 1174 antcomb->alt_good = false; 1175 antcomb->scan_not_start = false; 1176 antcomb->scan = false; 1177 antcomb->rssi_first = main_rssi_avg; 1178 antcomb->rssi_third = alt_rssi_avg; 1179 1180 if (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1) 1181 antcomb->rssi_lna1 = alt_rssi_avg; 1182 else if (antcomb->second_quick_scan_conf == 1183 ATH_ANT_DIV_COMB_LNA2) 1184 antcomb->rssi_lna2 = alt_rssi_avg; 1185 else if (antcomb->second_quick_scan_conf == 1186 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2) { 1187 if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) 1188 antcomb->rssi_lna2 = main_rssi_avg; 1189 else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) 1190 antcomb->rssi_lna1 = main_rssi_avg; 1191 } 1192 1193 if (antcomb->rssi_lna2 > antcomb->rssi_lna1 + 1194 ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA) 1195 div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2; 1196 else 1197 div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1; 1198 1199 if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) { 1200 if (ath_is_alt_ant_ratio_better(alt_ratio, 1201 ATH_ANT_DIV_COMB_LNA1_DELTA_HI, 1202 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW, 1203 main_rssi_avg, alt_rssi_avg, 1204 antcomb->total_pkt_count)) 1205 antcomb->second_ratio = true; 1206 else 1207 antcomb->second_ratio = false; 1208 } else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) { 1209 if (ath_is_alt_ant_ratio_better(alt_ratio, 1210 ATH_ANT_DIV_COMB_LNA1_DELTA_MID, 1211 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW, 1212 main_rssi_avg, alt_rssi_avg, 1213 antcomb->total_pkt_count)) 1214 antcomb->second_ratio = true; 1215 else 1216 antcomb->second_ratio = false; 1217 } else { 1218 if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) && 1219 (alt_rssi_avg > main_rssi_avg + 1220 ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) || 1221 (alt_rssi_avg > main_rssi_avg)) && 1222 (antcomb->total_pkt_count > 50)) 1223 antcomb->second_ratio = true; 1224 else 1225 antcomb->second_ratio = false; 1226 } 1227 1228 /* set alt to the conf with maximun ratio */ 1229 if (antcomb->first_ratio && antcomb->second_ratio) { 1230 if (antcomb->rssi_second > antcomb->rssi_third) { 1231 /* first alt*/ 1232 if ((antcomb->first_quick_scan_conf == 1233 ATH_ANT_DIV_COMB_LNA1) || 1234 (antcomb->first_quick_scan_conf == 1235 ATH_ANT_DIV_COMB_LNA2)) 1236 /* Set alt LNA1 or LNA2*/ 1237 if (div_ant_conf->main_lna_conf == 1238 ATH_ANT_DIV_COMB_LNA2) 1239 div_ant_conf->alt_lna_conf = 1240 ATH_ANT_DIV_COMB_LNA1; 1241 else 1242 div_ant_conf->alt_lna_conf = 1243 ATH_ANT_DIV_COMB_LNA2; 1244 else 1245 /* Set alt to A+B or A-B */ 1246 div_ant_conf->alt_lna_conf = 1247 antcomb->first_quick_scan_conf; 1248 } else if ((antcomb->second_quick_scan_conf == 1249 ATH_ANT_DIV_COMB_LNA1) || 1250 (antcomb->second_quick_scan_conf == 1251 ATH_ANT_DIV_COMB_LNA2)) { 1252 /* Set alt LNA1 or LNA2 */ 1253 if (div_ant_conf->main_lna_conf == 1254 ATH_ANT_DIV_COMB_LNA2) 1255 div_ant_conf->alt_lna_conf = 1256 ATH_ANT_DIV_COMB_LNA1; 1257 else 1258 div_ant_conf->alt_lna_conf = 1259 ATH_ANT_DIV_COMB_LNA2; 1260 } else { 1261 /* Set alt to A+B or A-B */ 1262 div_ant_conf->alt_lna_conf = 1263 antcomb->second_quick_scan_conf; 1264 } 1265 } else if (antcomb->first_ratio) { 1266 /* first alt */ 1267 if ((antcomb->first_quick_scan_conf == 1268 ATH_ANT_DIV_COMB_LNA1) || 1269 (antcomb->first_quick_scan_conf == 1270 ATH_ANT_DIV_COMB_LNA2)) 1271 /* Set alt LNA1 or LNA2 */ 1272 if (div_ant_conf->main_lna_conf == 1273 ATH_ANT_DIV_COMB_LNA2) 1274 div_ant_conf->alt_lna_conf = 1275 ATH_ANT_DIV_COMB_LNA1; 1276 else 1277 div_ant_conf->alt_lna_conf = 1278 ATH_ANT_DIV_COMB_LNA2; 1279 else 1280 /* Set alt to A+B or A-B */ 1281 div_ant_conf->alt_lna_conf = 1282 antcomb->first_quick_scan_conf; 1283 } else if (antcomb->second_ratio) { 1284 /* second alt */ 1285 if ((antcomb->second_quick_scan_conf == 1286 ATH_ANT_DIV_COMB_LNA1) || 1287 (antcomb->second_quick_scan_conf == 1288 ATH_ANT_DIV_COMB_LNA2)) 1289 /* Set alt LNA1 or LNA2 */ 1290 if (div_ant_conf->main_lna_conf == 1291 ATH_ANT_DIV_COMB_LNA2) 1292 div_ant_conf->alt_lna_conf = 1293 ATH_ANT_DIV_COMB_LNA1; 1294 else 1295 div_ant_conf->alt_lna_conf = 1296 ATH_ANT_DIV_COMB_LNA2; 1297 else 1298 /* Set alt to A+B or A-B */ 1299 div_ant_conf->alt_lna_conf = 1300 antcomb->second_quick_scan_conf; 1301 } else { 1302 /* main is largest */ 1303 if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) || 1304 (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)) 1305 /* Set alt LNA1 or LNA2 */ 1306 if (div_ant_conf->main_lna_conf == 1307 ATH_ANT_DIV_COMB_LNA2) 1308 div_ant_conf->alt_lna_conf = 1309 ATH_ANT_DIV_COMB_LNA1; 1310 else 1311 div_ant_conf->alt_lna_conf = 1312 ATH_ANT_DIV_COMB_LNA2; 1313 else 1314 /* Set alt to A+B or A-B */ 1315 div_ant_conf->alt_lna_conf = antcomb->main_conf; 1316 } 1317 break; 1318 default: 1319 break; 1320 } 1321 } 1322 1323 static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf, 1324 struct ath_ant_comb *antcomb, int alt_ratio) 1325 { 1326 if (ant_conf->div_group == 0) { 1327 /* Adjust the fast_div_bias based on main and alt lna conf */ 1328 switch ((ant_conf->main_lna_conf << 4) | 1329 ant_conf->alt_lna_conf) { 1330 case 0x01: /* A-B LNA2 */ 1331 ant_conf->fast_div_bias = 0x3b; 1332 break; 1333 case 0x02: /* A-B LNA1 */ 1334 ant_conf->fast_div_bias = 0x3d; 1335 break; 1336 case 0x03: /* A-B A+B */ 1337 ant_conf->fast_div_bias = 0x1; 1338 break; 1339 case 0x10: /* LNA2 A-B */ 1340 ant_conf->fast_div_bias = 0x7; 1341 break; 1342 case 0x12: /* LNA2 LNA1 */ 1343 ant_conf->fast_div_bias = 0x2; 1344 break; 1345 case 0x13: /* LNA2 A+B */ 1346 ant_conf->fast_div_bias = 0x7; 1347 break; 1348 case 0x20: /* LNA1 A-B */ 1349 ant_conf->fast_div_bias = 0x6; 1350 break; 1351 case 0x21: /* LNA1 LNA2 */ 1352 ant_conf->fast_div_bias = 0x0; 1353 break; 1354 case 0x23: /* LNA1 A+B */ 1355 ant_conf->fast_div_bias = 0x6; 1356 break; 1357 case 0x30: /* A+B A-B */ 1358 ant_conf->fast_div_bias = 0x1; 1359 break; 1360 case 0x31: /* A+B LNA2 */ 1361 ant_conf->fast_div_bias = 0x3b; 1362 break; 1363 case 0x32: /* A+B LNA1 */ 1364 ant_conf->fast_div_bias = 0x3d; 1365 break; 1366 default: 1367 break; 1368 } 1369 } else if (ant_conf->div_group == 1) { 1370 /* Adjust the fast_div_bias based on main and alt_lna_conf */ 1371 switch ((ant_conf->main_lna_conf << 4) | 1372 ant_conf->alt_lna_conf) { 1373 case 0x01: /* A-B LNA2 */ 1374 ant_conf->fast_div_bias = 0x1; 1375 ant_conf->main_gaintb = 0; 1376 ant_conf->alt_gaintb = 0; 1377 break; 1378 case 0x02: /* A-B LNA1 */ 1379 ant_conf->fast_div_bias = 0x1; 1380 ant_conf->main_gaintb = 0; 1381 ant_conf->alt_gaintb = 0; 1382 break; 1383 case 0x03: /* A-B A+B */ 1384 ant_conf->fast_div_bias = 0x1; 1385 ant_conf->main_gaintb = 0; 1386 ant_conf->alt_gaintb = 0; 1387 break; 1388 case 0x10: /* LNA2 A-B */ 1389 if (!(antcomb->scan) && 1390 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) 1391 ant_conf->fast_div_bias = 0x3f; 1392 else 1393 ant_conf->fast_div_bias = 0x1; 1394 ant_conf->main_gaintb = 0; 1395 ant_conf->alt_gaintb = 0; 1396 break; 1397 case 0x12: /* LNA2 LNA1 */ 1398 ant_conf->fast_div_bias = 0x1; 1399 ant_conf->main_gaintb = 0; 1400 ant_conf->alt_gaintb = 0; 1401 break; 1402 case 0x13: /* LNA2 A+B */ 1403 if (!(antcomb->scan) && 1404 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) 1405 ant_conf->fast_div_bias = 0x3f; 1406 else 1407 ant_conf->fast_div_bias = 0x1; 1408 ant_conf->main_gaintb = 0; 1409 ant_conf->alt_gaintb = 0; 1410 break; 1411 case 0x20: /* LNA1 A-B */ 1412 if (!(antcomb->scan) && 1413 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) 1414 ant_conf->fast_div_bias = 0x3f; 1415 else 1416 ant_conf->fast_div_bias = 0x1; 1417 ant_conf->main_gaintb = 0; 1418 ant_conf->alt_gaintb = 0; 1419 break; 1420 case 0x21: /* LNA1 LNA2 */ 1421 ant_conf->fast_div_bias = 0x1; 1422 ant_conf->main_gaintb = 0; 1423 ant_conf->alt_gaintb = 0; 1424 break; 1425 case 0x23: /* LNA1 A+B */ 1426 if (!(antcomb->scan) && 1427 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) 1428 ant_conf->fast_div_bias = 0x3f; 1429 else 1430 ant_conf->fast_div_bias = 0x1; 1431 ant_conf->main_gaintb = 0; 1432 ant_conf->alt_gaintb = 0; 1433 break; 1434 case 0x30: /* A+B A-B */ 1435 ant_conf->fast_div_bias = 0x1; 1436 ant_conf->main_gaintb = 0; 1437 ant_conf->alt_gaintb = 0; 1438 break; 1439 case 0x31: /* A+B LNA2 */ 1440 ant_conf->fast_div_bias = 0x1; 1441 ant_conf->main_gaintb = 0; 1442 ant_conf->alt_gaintb = 0; 1443 break; 1444 case 0x32: /* A+B LNA1 */ 1445 ant_conf->fast_div_bias = 0x1; 1446 ant_conf->main_gaintb = 0; 1447 ant_conf->alt_gaintb = 0; 1448 break; 1449 default: 1450 break; 1451 } 1452 } else if (ant_conf->div_group == 2) { 1453 /* Adjust the fast_div_bias based on main and alt_lna_conf */ 1454 switch ((ant_conf->main_lna_conf << 4) | 1455 ant_conf->alt_lna_conf) { 1456 case 0x01: /* A-B LNA2 */ 1457 ant_conf->fast_div_bias = 0x1; 1458 ant_conf->main_gaintb = 0; 1459 ant_conf->alt_gaintb = 0; 1460 break; 1461 case 0x02: /* A-B LNA1 */ 1462 ant_conf->fast_div_bias = 0x1; 1463 ant_conf->main_gaintb = 0; 1464 ant_conf->alt_gaintb = 0; 1465 break; 1466 case 0x03: /* A-B A+B */ 1467 ant_conf->fast_div_bias = 0x1; 1468 ant_conf->main_gaintb = 0; 1469 ant_conf->alt_gaintb = 0; 1470 break; 1471 case 0x10: /* LNA2 A-B */ 1472 if (!(antcomb->scan) && 1473 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) 1474 ant_conf->fast_div_bias = 0x1; 1475 else 1476 ant_conf->fast_div_bias = 0x2; 1477 ant_conf->main_gaintb = 0; 1478 ant_conf->alt_gaintb = 0; 1479 break; 1480 case 0x12: /* LNA2 LNA1 */ 1481 ant_conf->fast_div_bias = 0x1; 1482 ant_conf->main_gaintb = 0; 1483 ant_conf->alt_gaintb = 0; 1484 break; 1485 case 0x13: /* LNA2 A+B */ 1486 if (!(antcomb->scan) && 1487 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) 1488 ant_conf->fast_div_bias = 0x1; 1489 else 1490 ant_conf->fast_div_bias = 0x2; 1491 ant_conf->main_gaintb = 0; 1492 ant_conf->alt_gaintb = 0; 1493 break; 1494 case 0x20: /* LNA1 A-B */ 1495 if (!(antcomb->scan) && 1496 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) 1497 ant_conf->fast_div_bias = 0x1; 1498 else 1499 ant_conf->fast_div_bias = 0x2; 1500 ant_conf->main_gaintb = 0; 1501 ant_conf->alt_gaintb = 0; 1502 break; 1503 case 0x21: /* LNA1 LNA2 */ 1504 ant_conf->fast_div_bias = 0x1; 1505 ant_conf->main_gaintb = 0; 1506 ant_conf->alt_gaintb = 0; 1507 break; 1508 case 0x23: /* LNA1 A+B */ 1509 if (!(antcomb->scan) && 1510 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) 1511 ant_conf->fast_div_bias = 0x1; 1512 else 1513 ant_conf->fast_div_bias = 0x2; 1514 ant_conf->main_gaintb = 0; 1515 ant_conf->alt_gaintb = 0; 1516 break; 1517 case 0x30: /* A+B A-B */ 1518 ant_conf->fast_div_bias = 0x1; 1519 ant_conf->main_gaintb = 0; 1520 ant_conf->alt_gaintb = 0; 1521 break; 1522 case 0x31: /* A+B LNA2 */ 1523 ant_conf->fast_div_bias = 0x1; 1524 ant_conf->main_gaintb = 0; 1525 ant_conf->alt_gaintb = 0; 1526 break; 1527 case 0x32: /* A+B LNA1 */ 1528 ant_conf->fast_div_bias = 0x1; 1529 ant_conf->main_gaintb = 0; 1530 ant_conf->alt_gaintb = 0; 1531 break; 1532 default: 1533 break; 1534 } 1535 } 1536 } 1537 1538 /* Antenna diversity and combining */ 1539 static void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs) 1540 { 1541 struct ath_hw_antcomb_conf div_ant_conf; 1542 struct ath_ant_comb *antcomb = &sc->ant_comb; 1543 int alt_ratio = 0, alt_rssi_avg = 0, main_rssi_avg = 0, curr_alt_set; 1544 int curr_main_set; 1545 int main_rssi = rs->rs_rssi_ctl0; 1546 int alt_rssi = rs->rs_rssi_ctl1; 1547 int rx_ant_conf, main_ant_conf; 1548 bool short_scan = false; 1549 1550 rx_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_CURRENT_SHIFT) & 1551 ATH_ANT_RX_MASK; 1552 main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) & 1553 ATH_ANT_RX_MASK; 1554 1555 /* Record packet only when both main_rssi and alt_rssi is positive */ 1556 if (main_rssi > 0 && alt_rssi > 0) { 1557 antcomb->total_pkt_count++; 1558 antcomb->main_total_rssi += main_rssi; 1559 antcomb->alt_total_rssi += alt_rssi; 1560 if (main_ant_conf == rx_ant_conf) 1561 antcomb->main_recv_cnt++; 1562 else 1563 antcomb->alt_recv_cnt++; 1564 } 1565 1566 /* Short scan check */ 1567 if (antcomb->scan && antcomb->alt_good) { 1568 if (time_after(jiffies, antcomb->scan_start_time + 1569 msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR))) 1570 short_scan = true; 1571 else 1572 if (antcomb->total_pkt_count == 1573 ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) { 1574 alt_ratio = ((antcomb->alt_recv_cnt * 100) / 1575 antcomb->total_pkt_count); 1576 if (alt_ratio < ATH_ANT_DIV_COMB_ALT_ANT_RATIO) 1577 short_scan = true; 1578 } 1579 } 1580 1581 if (((antcomb->total_pkt_count < ATH_ANT_DIV_COMB_MAX_PKTCOUNT) || 1582 rs->rs_moreaggr) && !short_scan) 1583 return; 1584 1585 if (antcomb->total_pkt_count) { 1586 alt_ratio = ((antcomb->alt_recv_cnt * 100) / 1587 antcomb->total_pkt_count); 1588 main_rssi_avg = (antcomb->main_total_rssi / 1589 antcomb->total_pkt_count); 1590 alt_rssi_avg = (antcomb->alt_total_rssi / 1591 antcomb->total_pkt_count); 1592 } 1593 1594 1595 ath9k_hw_antdiv_comb_conf_get(sc->sc_ah, &div_ant_conf); 1596 curr_alt_set = div_ant_conf.alt_lna_conf; 1597 curr_main_set = div_ant_conf.main_lna_conf; 1598 1599 antcomb->count++; 1600 1601 if (antcomb->count == ATH_ANT_DIV_COMB_MAX_COUNT) { 1602 if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) { 1603 ath_lnaconf_alt_good_scan(antcomb, div_ant_conf, 1604 main_rssi_avg); 1605 antcomb->alt_good = true; 1606 } else { 1607 antcomb->alt_good = false; 1608 } 1609 1610 antcomb->count = 0; 1611 antcomb->scan = true; 1612 antcomb->scan_not_start = true; 1613 } 1614 1615 if (!antcomb->scan) { 1616 if (ath_ant_div_comb_alt_check(div_ant_conf.div_group, 1617 alt_ratio, curr_main_set, curr_alt_set, 1618 alt_rssi_avg, main_rssi_avg)) { 1619 if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) { 1620 /* Switch main and alt LNA */ 1621 div_ant_conf.main_lna_conf = 1622 ATH_ANT_DIV_COMB_LNA2; 1623 div_ant_conf.alt_lna_conf = 1624 ATH_ANT_DIV_COMB_LNA1; 1625 } else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) { 1626 div_ant_conf.main_lna_conf = 1627 ATH_ANT_DIV_COMB_LNA1; 1628 div_ant_conf.alt_lna_conf = 1629 ATH_ANT_DIV_COMB_LNA2; 1630 } 1631 1632 goto div_comb_done; 1633 } else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) && 1634 (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) { 1635 /* Set alt to another LNA */ 1636 if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) 1637 div_ant_conf.alt_lna_conf = 1638 ATH_ANT_DIV_COMB_LNA1; 1639 else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) 1640 div_ant_conf.alt_lna_conf = 1641 ATH_ANT_DIV_COMB_LNA2; 1642 1643 goto div_comb_done; 1644 } 1645 1646 if ((alt_rssi_avg < (main_rssi_avg + 1647 div_ant_conf.lna1_lna2_delta))) 1648 goto div_comb_done; 1649 } 1650 1651 if (!antcomb->scan_not_start) { 1652 switch (curr_alt_set) { 1653 case ATH_ANT_DIV_COMB_LNA2: 1654 antcomb->rssi_lna2 = alt_rssi_avg; 1655 antcomb->rssi_lna1 = main_rssi_avg; 1656 antcomb->scan = true; 1657 /* set to A+B */ 1658 div_ant_conf.main_lna_conf = 1659 ATH_ANT_DIV_COMB_LNA1; 1660 div_ant_conf.alt_lna_conf = 1661 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2; 1662 break; 1663 case ATH_ANT_DIV_COMB_LNA1: 1664 antcomb->rssi_lna1 = alt_rssi_avg; 1665 antcomb->rssi_lna2 = main_rssi_avg; 1666 antcomb->scan = true; 1667 /* set to A+B */ 1668 div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2; 1669 div_ant_conf.alt_lna_conf = 1670 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2; 1671 break; 1672 case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2: 1673 antcomb->rssi_add = alt_rssi_avg; 1674 antcomb->scan = true; 1675 /* set to A-B */ 1676 div_ant_conf.alt_lna_conf = 1677 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2; 1678 break; 1679 case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2: 1680 antcomb->rssi_sub = alt_rssi_avg; 1681 antcomb->scan = false; 1682 if (antcomb->rssi_lna2 > 1683 (antcomb->rssi_lna1 + 1684 ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) { 1685 /* use LNA2 as main LNA */ 1686 if ((antcomb->rssi_add > antcomb->rssi_lna1) && 1687 (antcomb->rssi_add > antcomb->rssi_sub)) { 1688 /* set to A+B */ 1689 div_ant_conf.main_lna_conf = 1690 ATH_ANT_DIV_COMB_LNA2; 1691 div_ant_conf.alt_lna_conf = 1692 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2; 1693 } else if (antcomb->rssi_sub > 1694 antcomb->rssi_lna1) { 1695 /* set to A-B */ 1696 div_ant_conf.main_lna_conf = 1697 ATH_ANT_DIV_COMB_LNA2; 1698 div_ant_conf.alt_lna_conf = 1699 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2; 1700 } else { 1701 /* set to LNA1 */ 1702 div_ant_conf.main_lna_conf = 1703 ATH_ANT_DIV_COMB_LNA2; 1704 div_ant_conf.alt_lna_conf = 1705 ATH_ANT_DIV_COMB_LNA1; 1706 } 1707 } else { 1708 /* use LNA1 as main LNA */ 1709 if ((antcomb->rssi_add > antcomb->rssi_lna2) && 1710 (antcomb->rssi_add > antcomb->rssi_sub)) { 1711 /* set to A+B */ 1712 div_ant_conf.main_lna_conf = 1713 ATH_ANT_DIV_COMB_LNA1; 1714 div_ant_conf.alt_lna_conf = 1715 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2; 1716 } else if (antcomb->rssi_sub > 1717 antcomb->rssi_lna1) { 1718 /* set to A-B */ 1719 div_ant_conf.main_lna_conf = 1720 ATH_ANT_DIV_COMB_LNA1; 1721 div_ant_conf.alt_lna_conf = 1722 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2; 1723 } else { 1724 /* set to LNA2 */ 1725 div_ant_conf.main_lna_conf = 1726 ATH_ANT_DIV_COMB_LNA1; 1727 div_ant_conf.alt_lna_conf = 1728 ATH_ANT_DIV_COMB_LNA2; 1729 } 1730 } 1731 break; 1732 default: 1733 break; 1734 } 1735 } else { 1736 if (!antcomb->alt_good) { 1737 antcomb->scan_not_start = false; 1738 /* Set alt to another LNA */ 1739 if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) { 1740 div_ant_conf.main_lna_conf = 1741 ATH_ANT_DIV_COMB_LNA2; 1742 div_ant_conf.alt_lna_conf = 1743 ATH_ANT_DIV_COMB_LNA1; 1744 } else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) { 1745 div_ant_conf.main_lna_conf = 1746 ATH_ANT_DIV_COMB_LNA1; 1747 div_ant_conf.alt_lna_conf = 1748 ATH_ANT_DIV_COMB_LNA2; 1749 } 1750 goto div_comb_done; 1751 } 1752 } 1753 1754 ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf, 1755 main_rssi_avg, alt_rssi_avg, 1756 alt_ratio); 1757 1758 antcomb->quick_scan_cnt++; 1759 1760 div_comb_done: 1761 ath_ant_div_conf_fast_divbias(&div_ant_conf, antcomb, alt_ratio); 1762 ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf); 1763 1764 antcomb->scan_start_time = jiffies; 1765 antcomb->total_pkt_count = 0; 1766 antcomb->main_total_rssi = 0; 1767 antcomb->alt_total_rssi = 0; 1768 antcomb->main_recv_cnt = 0; 1769 antcomb->alt_recv_cnt = 0; 1770 } 1771 1772 int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) 1773 { 1774 struct ath_buf *bf; 1775 struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb; 1776 struct ieee80211_rx_status *rxs; 1777 struct ath_hw *ah = sc->sc_ah; 1778 struct ath_common *common = ath9k_hw_common(ah); 1779 struct ieee80211_hw *hw = sc->hw; 1780 struct ieee80211_hdr *hdr; 1781 int retval; 1782 bool decrypt_error = false; 1783 struct ath_rx_status rs; 1784 enum ath9k_rx_qtype qtype; 1785 bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA); 1786 int dma_type; 1787 u8 rx_status_len = ah->caps.rx_status_len; 1788 u64 tsf = 0; 1789 u32 tsf_lower = 0; 1790 unsigned long flags; 1791 1792 if (edma) 1793 dma_type = DMA_BIDIRECTIONAL; 1794 else 1795 dma_type = DMA_FROM_DEVICE; 1796 1797 qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP; 1798 spin_lock_bh(&sc->rx.rxbuflock); 1799 1800 tsf = ath9k_hw_gettsf64(ah); 1801 tsf_lower = tsf & 0xffffffff; 1802 1803 do { 1804 /* If handling rx interrupt and flush is in progress => exit */ 1805 if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0)) 1806 break; 1807 1808 memset(&rs, 0, sizeof(rs)); 1809 if (edma) 1810 bf = ath_edma_get_next_rx_buf(sc, &rs, qtype); 1811 else 1812 bf = ath_get_next_rx_buf(sc, &rs); 1813 1814 if (!bf) 1815 break; 1816 1817 skb = bf->bf_mpdu; 1818 if (!skb) 1819 continue; 1820 1821 /* 1822 * Take frame header from the first fragment and RX status from 1823 * the last one. 1824 */ 1825 if (sc->rx.frag) 1826 hdr_skb = sc->rx.frag; 1827 else 1828 hdr_skb = skb; 1829 1830 hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len); 1831 rxs = IEEE80211_SKB_RXCB(hdr_skb); 1832 if (ieee80211_is_beacon(hdr->frame_control) && 1833 !is_zero_ether_addr(common->curbssid) && 1834 !compare_ether_addr(hdr->addr3, common->curbssid)) 1835 rs.is_mybeacon = true; 1836 else 1837 rs.is_mybeacon = false; 1838 1839 ath_debug_stat_rx(sc, &rs); 1840 1841 /* 1842 * If we're asked to flush receive queue, directly 1843 * chain it back at the queue without processing it. 1844 */ 1845 if (sc->sc_flags & SC_OP_RXFLUSH) 1846 goto requeue_drop_frag; 1847 1848 rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp; 1849 if (rs.rs_tstamp > tsf_lower && 1850 unlikely(rs.rs_tstamp - tsf_lower > 0x10000000)) 1851 rxs->mactime -= 0x100000000ULL; 1852 1853 if (rs.rs_tstamp < tsf_lower && 1854 unlikely(tsf_lower - rs.rs_tstamp > 0x10000000)) 1855 rxs->mactime += 0x100000000ULL; 1856 1857 retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs, 1858 rxs, &decrypt_error); 1859 if (retval) 1860 goto requeue_drop_frag; 1861 1862 /* Ensure we always have an skb to requeue once we are done 1863 * processing the current buffer's skb */ 1864 requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC); 1865 1866 /* If there is no memory we ignore the current RX'd frame, 1867 * tell hardware it can give us a new frame using the old 1868 * skb and put it at the tail of the sc->rx.rxbuf list for 1869 * processing. */ 1870 if (!requeue_skb) 1871 goto requeue_drop_frag; 1872 1873 /* Unmap the frame */ 1874 dma_unmap_single(sc->dev, bf->bf_buf_addr, 1875 common->rx_bufsize, 1876 dma_type); 1877 1878 skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len); 1879 if (ah->caps.rx_status_len) 1880 skb_pull(skb, ah->caps.rx_status_len); 1881 1882 if (!rs.rs_more) 1883 ath9k_rx_skb_postprocess(common, hdr_skb, &rs, 1884 rxs, decrypt_error); 1885 1886 /* We will now give hardware our shiny new allocated skb */ 1887 bf->bf_mpdu = requeue_skb; 1888 bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data, 1889 common->rx_bufsize, 1890 dma_type); 1891 if (unlikely(dma_mapping_error(sc->dev, 1892 bf->bf_buf_addr))) { 1893 dev_kfree_skb_any(requeue_skb); 1894 bf->bf_mpdu = NULL; 1895 bf->bf_buf_addr = 0; 1896 ath_err(common, "dma_mapping_error() on RX\n"); 1897 ieee80211_rx(hw, skb); 1898 break; 1899 } 1900 1901 if (rs.rs_more) { 1902 /* 1903 * rs_more indicates chained descriptors which can be 1904 * used to link buffers together for a sort of 1905 * scatter-gather operation. 1906 */ 1907 if (sc->rx.frag) { 1908 /* too many fragments - cannot handle frame */ 1909 dev_kfree_skb_any(sc->rx.frag); 1910 dev_kfree_skb_any(skb); 1911 skb = NULL; 1912 } 1913 sc->rx.frag = skb; 1914 goto requeue; 1915 } 1916 1917 if (sc->rx.frag) { 1918 int space = skb->len - skb_tailroom(hdr_skb); 1919 1920 sc->rx.frag = NULL; 1921 1922 if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) { 1923 dev_kfree_skb(skb); 1924 goto requeue_drop_frag; 1925 } 1926 1927 skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len), 1928 skb->len); 1929 dev_kfree_skb_any(skb); 1930 skb = hdr_skb; 1931 } 1932 1933 1934 if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) { 1935 1936 /* 1937 * change the default rx antenna if rx diversity 1938 * chooses the other antenna 3 times in a row. 1939 */ 1940 if (sc->rx.defant != rs.rs_antenna) { 1941 if (++sc->rx.rxotherant >= 3) 1942 ath_setdefantenna(sc, rs.rs_antenna); 1943 } else { 1944 sc->rx.rxotherant = 0; 1945 } 1946 1947 } 1948 1949 if (rxs->flag & RX_FLAG_MMIC_STRIPPED) 1950 skb_trim(skb, skb->len - 8); 1951 1952 spin_lock_irqsave(&sc->sc_pm_lock, flags); 1953 1954 if ((sc->ps_flags & (PS_WAIT_FOR_BEACON | 1955 PS_WAIT_FOR_CAB | 1956 PS_WAIT_FOR_PSPOLL_DATA)) || 1957 ath9k_check_auto_sleep(sc)) 1958 ath_rx_ps(sc, skb, rs.is_mybeacon); 1959 spin_unlock_irqrestore(&sc->sc_pm_lock, flags); 1960 1961 if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx == 3) 1962 ath_ant_comb_scan(sc, &rs); 1963 1964 ieee80211_rx(hw, skb); 1965 1966 requeue_drop_frag: 1967 if (sc->rx.frag) { 1968 dev_kfree_skb_any(sc->rx.frag); 1969 sc->rx.frag = NULL; 1970 } 1971 requeue: 1972 if (edma) { 1973 list_add_tail(&bf->list, &sc->rx.rxbuf); 1974 ath_rx_edma_buf_link(sc, qtype); 1975 } else { 1976 list_move_tail(&bf->list, &sc->rx.rxbuf); 1977 ath_rx_buf_link(sc, bf); 1978 if (!flush) 1979 ath9k_hw_rxena(ah); 1980 } 1981 } while (1); 1982 1983 spin_unlock_bh(&sc->rx.rxbuflock); 1984 1985 if (!(ah->imask & ATH9K_INT_RXEOL)) { 1986 ah->imask |= (ATH9K_INT_RXEOL | ATH9K_INT_RXORN); 1987 ath9k_hw_set_interrupts(ah); 1988 } 1989 1990 return 0; 1991 } 1992