/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define SKB_CB_ATHBUF(__skb)	(*((struct ath_buf **)__skb->cb))

static inline bool ath_is_alt_ant_ratio_better(int alt_ratio, int maxdelta,
					       int mindelta, int main_rssi_avg,
					       int alt_rssi_avg, int pkt_count)
{
	return (((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
		 (alt_rssi_avg > main_rssi_avg + maxdelta)) ||
		(alt_rssi_avg > main_rssi_avg + mindelta)) && (pkt_count > 50);
}

static inline bool ath_ant_div_comb_alt_check(u8 div_group, int alt_ratio,
					      int curr_main_set,
					      int curr_alt_set,
					      int alt_rssi_avg,
					      int main_rssi_avg)
{
	bool result = false;
	switch (div_group) {
	case 0:
		if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
			result = true;
		break;
	case 1:
	case 2:
		if ((((curr_main_set == ATH_ANT_DIV_COMB_LNA2) &&
		      (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) &&
		      (alt_rssi_avg >= (main_rssi_avg - 5))) ||
		     ((curr_main_set == ATH_ANT_DIV_COMB_LNA1) &&
		      (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) &&
		      (alt_rssi_avg >= (main_rssi_avg - 2)))) &&
		    (alt_rssi_avg >= 4))
			result = true;
		else
			result = false;
		break;
	}

	return result;
}

static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
{
	return sc->ps_enabled &&
	       (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
}

/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct sk_buff *skb;

	ATH_RXBUF_RESET(bf);

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	BUG_ON(skb == NULL);
	ds->ds_vdata = skb->data;

	/*
	 * setup rx descriptors. The rx_bufsize here tells the hardware
	 * how much data it can DMA to us and that we are prepared
	 * to process
	 */
	ath9k_hw_setuprxdesc(ah, ds,
			     common->rx_bufsize,
			     0);

	if (sc->rx.rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->rx.rxlink = bf->bf_daddr;

	sc->rx.rxlink = &ds->ds_link;
}

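/*
 * Note (added): a sketch of the ring that repeated calls to
 * ath_rx_buf_link() above build up, assuming buffers A, B and C are
 * linked in that order:
 *
 *   A.ds_link -> B.bf_daddr, B.ds_link -> C.bf_daddr, C.ds_link = 0
 *
 * sc->rx.rxlink always points at the ds_link word of the descriptor
 * queued last, so the next buffer can be appended in O(1) without
 * walking the list.
 */
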
static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(sc->sc_ah, antenna);
	sc->rx.defant = antenna;
	sc->rx.rxotherant = 0;
}

static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	ath_hw_setbssidmask(common);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}

static bool ath_rx_edma_buf_link(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;
	struct ath_buf *bf;

	rx_edma = &sc->rx.rx_edma[qtype];
	if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
		return false;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	list_del_init(&bf->list);

	skb = bf->bf_mpdu;

	ATH_RXBUF_RESET(bf);
	memset(skb->data, 0, ah->caps.rx_status_len);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   ah->caps.rx_status_len, DMA_TO_DEVICE);

	SKB_CB_ATHBUF(skb) = bf;
	ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
	skb_queue_tail(&rx_edma->rx_fifo, skb);

	return true;
}

static void ath_rx_addbuffer_edma(struct ath_softc *sc,
				  enum ath9k_rx_qtype qtype, int size)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u32 nbuf = 0;

	if (list_empty(&sc->rx.rxbuf)) {
		ath_dbg(common, ATH_DBG_QUEUE, "No free rx buf available\n");
		return;
	}

	while (!list_empty(&sc->rx.rxbuf)) {
		nbuf++;

		if (!ath_rx_edma_buf_link(sc, qtype))
			break;

		if (nbuf >= size)
			break;
	}
}

static void ath_rx_remove_buffer(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_buf *bf;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;

	rx_edma = &sc->rx.rx_edma[qtype];

	while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}
}

static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_buf *bf;

	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		if (bf->bf_mpdu)
			dev_kfree_skb_any(bf->bf_mpdu);
	}

	INIT_LIST_HEAD(&sc->rx.rxbuf);

	kfree(sc->rx.rx_bufptr);
	sc->rx.rx_bufptr = NULL;
}

static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
{
	skb_queue_head_init(&rx_edma->rx_fifo);
	skb_queue_head_init(&rx_edma->rx_buffers);
	rx_edma->rx_fifo_hwsize = size;
}

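/*
 * Note (added): on EDMA hardware (ATH9K_HW_CAP_EDMA, the AR9003-style
 * MACs handled via ar9003_mac.h) there is no descriptor ring. The
 * hardware writes a status block of rx_status_len bytes into the head
 * of the RX buffer itself, which is why the buffer size programmed
 * below is rx_bufsize minus rx_status_len, and why two FIFOs (high and
 * low priority) are initialized with their respective hardware depths.
 */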
static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0, i;
	u32 size;

	ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
				ah->caps.rx_status_len);

	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
			       ah->caps.rx_lp_qdepth);
	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
			       ah->caps.rx_hp_qdepth);

	size = sizeof(struct ath_buf) * nbufs;
	bf = kzalloc(size, GFP_KERNEL);
	if (!bf)
		return -ENOMEM;

	INIT_LIST_HEAD(&sc->rx.rxbuf);
	sc->rx.rx_bufptr = bf;

	for (i = 0; i < nbufs; i++, bf++) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
		if (!skb) {
			error = -ENOMEM;
			goto rx_init_fail;
		}

		memset(skb->data, 0, common->rx_bufsize);
		bf->bf_mpdu = skb;

		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common,
				"dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto rx_init_fail;
		}

		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}

	return 0;

rx_init_fail:
	ath_rx_edma_cleanup(sc);
	return error;
}

static void ath_edma_start_recv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxbuflock);

	ath9k_hw_rxena(sc->sc_ah);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);

	ath_opmode_init(sc);

	ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_OFFCHANNEL));

	spin_unlock_bh(&sc->rx.rxbuflock);
}

static void ath_edma_stop_recv(struct ath_softc *sc)
{
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
}

int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	spin_lock_init(&sc->sc_pcu_lock);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_lock_init(&sc->rx.rxbuflock);

	common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
			     sc->sc_ah->caps.rx_status_len;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		return ath_rx_edma_init(sc, nbufs);
	} else {
		ath_dbg(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
			common->cachelsz, common->rx_bufsize);

		/* Initialize rx descriptors */

		error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
					  "rx", nbufs, 1, 0);
		if (error != 0) {
			ath_err(common,
				"failed to allocate rx descriptors: %d\n",
				error);
			goto err;
		}

		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = ath_rxbuf_alloc(common, common->rx_bufsize,
					      GFP_KERNEL);
			if (skb == NULL) {
				error = -ENOMEM;
				goto err;
			}

			bf->bf_mpdu = skb;
			bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
							 common->rx_bufsize,
							 DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(sc->dev,
						       bf->bf_buf_addr))) {
				dev_kfree_skb_any(skb);
				bf->bf_mpdu = NULL;
				bf->bf_buf_addr = 0;
				ath_err(common,
					"dma_mapping_error() on RX init\n");
				error = -ENOMEM;
				goto err;
			}
		}
		sc->rx.rxlink = NULL;
	}

err:
	if (error)
		ath_rx_cleanup(sc);

	return error;
}

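/*
 * Note (added): teardown mirrors ath_rx_init(). The EDMA branch pulls
 * any buffers still sitting in the FIFOs back onto sc->rx.rxbuf, frees
 * the skbs and the ath_buf bookkeeping array; the legacy branch unmaps
 * each buffer with the direction it was mapped with, frees its skb and
 * finally releases the descriptor DMA memory.
 */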
void ath_rx_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_rx_edma_cleanup(sc);
		return;
	} else {
		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = bf->bf_mpdu;
			if (skb) {
				dma_unmap_single(sc->dev, bf->bf_buf_addr,
						 common->rx_bufsize,
						 DMA_FROM_DEVICE);
				dev_kfree_skb(skb);
				bf->bf_buf_addr = 0;
				bf->bf_mpdu = NULL;
			}
		}

		if (sc->rx.rxdma.dd_desc_len != 0)
			ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
	}
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 *   - when scanning
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
#define	RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)

	u32 rfilt;

	rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
		| ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	if (sc->rx.rxfilter & FIF_PROBE_REQ)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/*
	 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
	 * mode interface or when in monitor mode. AP mode does not need this
	 * since it receives all in-BSS frames anyway.
	 */
	if (sc->sc_ah->is_monitoring)
		rfilt |= ATH9K_RX_FILTER_PROM;

	if (sc->rx.rxfilter & FIF_CONTROL)
		rfilt |= ATH9K_RX_FILTER_CONTROL;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
	    (sc->nvifs <= 1) &&
	    !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
		rfilt |= ATH9K_RX_FILTER_MYBEACON;
	else
		rfilt |= ATH9K_RX_FILTER_BEACON;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
	    (sc->rx.rxfilter & FIF_PSPOLL))
		rfilt |= ATH9K_RX_FILTER_PSPOLL;

	if (conf_is_ht(&sc->hw->conf))
		rfilt |= ATH9K_RX_FILTER_COMP_BAR;

	if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
		/* The following may also be needed for other older chips */
		if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
			rfilt |= ATH9K_RX_FILTER_PROM;
		rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
	}

	return rfilt;

#undef RX_FILTER_PRESERVE
}

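/*
 * Illustrative example (added) for ath_calcrxfilter() above: a single
 * associated 11n station with no monitor/promiscuous flags set
 * typically ends up with
 *
 *   ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST |
 *   ATH9K_RX_FILTER_MCAST | ATH9K_RX_FILTER_MYBEACON |
 *   ATH9K_RX_FILTER_COMP_BAR
 *
 * plus whatever PHY error bits the noise-immunity code had already set.
 */
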
int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_edma_start_recv(sc);
		return 0;
	}

	spin_lock_bh(&sc->rx.rxbuflock);
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	sc->rx.rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);

start_recv:
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_OFFCHANNEL));

	spin_unlock_bh(&sc->rx.rxbuflock);

	return 0;
}

bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	bool stopped, reset = false;

	spin_lock_bh(&sc->rx.rxbuflock);
	ath9k_hw_abortpcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	stopped = ath9k_hw_stopdmarecv(ah, &reset);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_edma_stop_recv(sc);
	else
		sc->rx.rxlink = NULL;
	spin_unlock_bh(&sc->rx.rxbuflock);

	if (!(ah->ah_flags & AH_UNPLUGGED) &&
	    unlikely(!stopped)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Could not stop RX, we could be "
			"confusing the DMA engine when we start RX up\n");
		ATH_DBG_WARN_ON_ONCE(!stopped);
	}
	return stopped && !reset;
}

void ath_flushrecv(struct ath_softc *sc)
{
	sc->sc_flags |= SC_OP_RXFLUSH;
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_rx_tasklet(sc, 1, true);
	ath_rx_tasklet(sc, 1, false);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
}

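/*
 * Note (added): layout of the TIM element parsed below, per 802.11
 * (single octets before the bitmap):
 *
 *   element ID (WLAN_EID_TIM) | length | dtim_count | dtim_period |
 *   bitmap_ctrl | partial virtual bitmap ...
 *
 * Bit 0 of bitmap_ctrl is the traffic indicator for AID 0, i.e.
 * buffered broadcast/multicast frames; it is only meaningful in a DTIM
 * beacon, hence the dtim_count == 0 check.
 */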
static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
	/* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
	struct ieee80211_mgmt *mgmt;
	u8 *pos, *end, id, elen;
	struct ieee80211_tim_ie *tim;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	pos = mgmt->u.beacon.variable;
	end = skb->data + skb->len;

	while (pos + 2 < end) {
		id = *pos++;
		elen = *pos++;
		if (pos + elen > end)
			break;

		if (id == WLAN_EID_TIM) {
			if (elen < sizeof(*tim))
				break;
			tim = (struct ieee80211_tim_ie *) pos;
			if (tim->dtim_count != 0)
				break;
			return tim->bitmap_ctrl & 0x01;
		}

		pos += elen;
	}

	return false;
}

static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_mgmt *mgmt;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	if (skb->len < 24 + 8 + 2 + 2)
		return;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0) {
		/* TODO: This doesn't work well if you have stations
		 * associated to two different APs because curbssid
		 * is just the last AP that any of the stations associated
		 * with.
		 */
		return; /* not from our current AP */
	}

	sc->ps_flags &= ~PS_WAIT_FOR_BEACON;

	if (sc->ps_flags & PS_BEACON_SYNC) {
		sc->ps_flags &= ~PS_BEACON_SYNC;
		ath_dbg(common, ATH_DBG_PS,
			"Reconfigure Beacon timers based on timestamp from the AP\n");
		ath_set_beacon(sc);
		sc->ps_flags &= ~PS_TSFOOR_SYNC;
	}

	if (ath_beacon_dtim_pending_cab(skb)) {
		/*
		 * Remain awake waiting for buffered broadcast/multicast
		 * frames. If the last broadcast/multicast frame is not
		 * received properly, the next beacon frame will work as
		 * a backup trigger for returning into NETWORK SLEEP state,
		 * so we are waiting for it as well.
		 */
		ath_dbg(common, ATH_DBG_PS,
			"Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n");
		sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
		return;
	}

	if (sc->ps_flags & PS_WAIT_FOR_CAB) {
		/*
		 * This can happen if a broadcast frame is dropped or the AP
		 * fails to send a frame indicating that all CAB frames have
		 * been delivered.
		 */
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
		ath_dbg(common, ATH_DBG_PS,
			"PS wait for CAB frames timed out\n");
	}
}

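/*
 * Note (added): the PS_* flags below track what the station still has
 * to receive before it may doze again: PS_WAIT_FOR_BEACON for the next
 * beacon from our AP, PS_WAIT_FOR_CAB for the multicast burst (content
 * after beacon) that follows a DTIM beacon, and PS_WAIT_FOR_PSPOLL_DATA
 * for the response to a PS-Poll we sent.
 */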
static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Process Beacon and CAB receive in PS state */
	if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
	    && ieee80211_is_beacon(hdr->frame_control))
		ath_rx_ps_beacon(sc, skb);
	else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
		 (ieee80211_is_data(hdr->frame_control) ||
		  ieee80211_is_action(hdr->frame_control)) &&
		 is_multicast_ether_addr(hdr->addr1) &&
		 !ieee80211_has_moredata(hdr->frame_control)) {
		/*
		 * No more broadcast/multicast frames to be received at this
		 * point.
		 */
		sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
		ath_dbg(common, ATH_DBG_PS,
			"All PS CAB frames received, back to sleep\n");
	} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
		   !is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_morefrags(hdr->frame_control)) {
		sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
		ath_dbg(common, ATH_DBG_PS,
			"Going back to sleep after having received PS-Poll data (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}
}

static bool ath_edma_get_buffers(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int ret;

	skb = skb_peek(&rx_edma->rx_fifo);
	if (!skb)
		return false;

	bf = SKB_CB_ATHBUF(skb);
	BUG_ON(!bf);

	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize, DMA_FROM_DEVICE);

	ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data);
	if (ret == -EINPROGRESS) {
		/* let the device own the buffer again */
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
					   common->rx_bufsize,
					   DMA_FROM_DEVICE);
		return false;
	}

	__skb_unlink(skb, &rx_edma->rx_fifo);
	if (ret == -EINVAL) {
		/* corrupt descriptor, skip this one and the following one */
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);
		skb = skb_peek(&rx_edma->rx_fifo);
		if (!skb)
			return true;

		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);

		__skb_unlink(skb, &rx_edma->rx_fifo);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);
		return true;
	}
	skb_queue_tail(&rx_edma->rx_buffers, skb);

	return true;
}

static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
						struct ath_rx_status *rs,
						enum ath9k_rx_qtype qtype)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct sk_buff *skb;
	struct ath_buf *bf;

	while (ath_edma_get_buffers(sc, qtype));
	skb = __skb_dequeue(&rx_edma->rx_buffers);
	if (!skb)
		return NULL;

	bf = SKB_CB_ATHBUF(skb);
	ath9k_hw_process_rxdesc_edma(sc->sc_ah, rs, skb->data);
	return bf;
}

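/*
 * Note (added): the EDMA helpers above dequeue completed buffers in
 * FIFO order, since the completion status lives in the buffer itself.
 * The legacy path below must instead walk the descriptor ring, and
 * because the ring is kept self-linked it has to peek at the next
 * descriptor to decide whether the current one is really complete.
 */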
static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
					   struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct ath_buf *bf;
	int ret;

	if (list_empty(&sc->rx.rxbuf)) {
		sc->rx.rxlink = NULL;
		return NULL;
	}

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ds = bf->bf_desc;

	/*
	 * Must provide the virtual address of the current
	 * descriptor, the physical address, and the virtual
	 * address of the next descriptor in the h/w chain.
	 * This allows the HAL to look ahead to see if the
	 * hardware is done with a descriptor by checking the
	 * done bit in the following descriptor and the address
	 * of the current descriptor the DMA engine is working
	 * on. All this is necessary because of our use of
	 * a self-linked list to avoid rx overruns.
	 */
	ret = ath9k_hw_rxprocdesc(ah, ds, rs, 0);
	if (ret == -EINPROGRESS) {
		struct ath_rx_status trs;
		struct ath_buf *tbf;
		struct ath_desc *tds;

		memset(&trs, 0, sizeof(trs));
		if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
			sc->rx.rxlink = NULL;
			return NULL;
		}

		tbf = list_entry(bf->list.next, struct ath_buf, list);

		/*
		 * On some hardware the descriptor status words could
		 * get corrupted, including the done bit. Because of
		 * this, check if the next descriptor's done bit is
		 * set or not.
		 *
		 * If the next descriptor's done bit is set, the current
		 * descriptor has been corrupted. Force s/w to discard
		 * this descriptor and continue...
		 */

		tds = tbf->bf_desc;
		ret = ath9k_hw_rxprocdesc(ah, tds, &trs, 0);
		if (ret == -EINPROGRESS)
			return NULL;
	}

	if (!bf->bf_mpdu)
		return bf;

	/*
	 * Synchronize the DMA transfer with CPU before
	 * 1. accessing the frame
	 * 2. requeueing the same buffer to h/w
	 */
	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize,
				DMA_FROM_DEVICE);

	return bf;
}

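/*
 * Note (added): for chained (rs_more) descriptors only the last one
 * carries valid rs_status error bits, so intermediate fragments are
 * accepted below unconditionally and the full error checks run once
 * the final fragment arrives.
 */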
/* Assumes you've already done the endian to CPU conversion */
static bool ath9k_rx_accept(struct ath_common *common,
			    struct ieee80211_hdr *hdr,
			    struct ieee80211_rx_status *rxs,
			    struct ath_rx_status *rx_stats,
			    bool *decrypt_error)
{
	bool is_mc, is_valid_tkip, strip_mic, mic_error;
	struct ath_hw *ah = common->ah;
	__le16 fc;
	u8 rx_status_len = ah->caps.rx_status_len;

	fc = hdr->frame_control;

	is_mc = !!is_multicast_ether_addr(hdr->addr1);
	is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID &&
		test_bit(rx_stats->rs_keyix, common->tkip_keymap);
	strip_mic = is_valid_tkip && !(rx_stats->rs_status &
		(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC));

	if (!rx_stats->rs_datalen)
		return false;
	/*
	 * rs_status follows rs_datalen so if rs_datalen is too large
	 * we can take a hint that hardware corrupted it, so ignore
	 * those frames.
	 */
	if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len))
		return false;

	/* Only use error bits from the last fragment */
	if (rx_stats->rs_more)
		return true;

	mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) &&
		!ieee80211_has_morefrags(fc) &&
		!(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
		(rx_stats->rs_status & ATH9K_RXERR_MIC);

	/*
	 * The rx_stats->rs_status will not be set until the end of the
	 * chained descriptors so it can be ignored if rs_more is set. The
	 * rs_more will be false at the last element of the chained
	 * descriptors.
	 */
	if (rx_stats->rs_status != 0) {
		if (rx_stats->rs_status & ATH9K_RXERR_CRC) {
			rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
			mic_error = false;
		}
		if (rx_stats->rs_status & ATH9K_RXERR_PHY)
			return false;

		if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) {
			*decrypt_error = true;
			mic_error = false;
		}

		/*
		 * Reject error frames with the exception of
		 * decryption and MIC failures. For monitor mode,
		 * we also ignore the CRC error.
		 */
		if (ah->is_monitoring) {
			if (rx_stats->rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
			      ATH9K_RXERR_CRC))
				return false;
		} else {
			if (rx_stats->rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
				return false;
			}
		}
	}

	/*
	 * For unicast frames the MIC error bit can have false positives,
	 * so all MIC error reports need to be validated in software.
	 * False negatives are not common, so skip software verification
	 * if the hardware considers the MIC valid.
	 */
	if (strip_mic)
		rxs->flag |= RX_FLAG_MMIC_STRIPPED;
	else if (is_mc && mic_error)
		rxs->flag |= RX_FLAG_MMIC_ERROR;

	return true;
}

static int ath9k_process_rate(struct ath_common *common,
			      struct ieee80211_hw *hw,
			      struct ath_rx_status *rx_stats,
			      struct ieee80211_rx_status *rxs)
{
	struct ieee80211_supported_band *sband;
	enum ieee80211_band band;
	unsigned int i = 0;

	band = hw->conf.channel->band;
	sband = hw->wiphy->bands[band];

	if (rx_stats->rs_rate & 0x80) {
		/* HT rate */
		rxs->flag |= RX_FLAG_HT;
		if (rx_stats->rs_flags & ATH9K_RX_2040)
			rxs->flag |= RX_FLAG_40MHZ;
		if (rx_stats->rs_flags & ATH9K_RX_GI)
			rxs->flag |= RX_FLAG_SHORT_GI;
		rxs->rate_idx = rx_stats->rs_rate & 0x7f;
		return 0;
	}

	for (i = 0; i < sband->n_bitrates; i++) {
		if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
			rxs->rate_idx = i;
			return 0;
		}
		if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
			rxs->flag |= RX_FLAG_SHORTPRE;
			rxs->rate_idx = i;
			return 0;
		}
	}

	/*
	 * No valid hardware bitrate found -- we should not get here
	 * because hardware has already validated this frame as OK.
	 */
	ath_dbg(common, ATH_DBG_XMIT,
		"unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
		rx_stats->rs_rate);

	return -EINVAL;
}

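/*
 * Note (added): in ath9k_process_rssi() below the beacon RSSI is
 * smoothed with the ATH_RSSI_LPF exponential moving average kept in
 * sc->last_rssi and rounded out of its fixed-point representation with
 * ATH_EP_RND; the result feeds the ANI logic via ah->stats.avgbrssi.
 */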
static void ath9k_process_rssi(struct ath_common *common,
			       struct ieee80211_hw *hw,
			       struct ieee80211_hdr *hdr,
			       struct ath_rx_status *rx_stats)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = common->ah;
	int last_rssi;
	__le16 fc;

	if ((ah->opmode != NL80211_IFTYPE_STATION) &&
	    (ah->opmode != NL80211_IFTYPE_ADHOC))
		return;

	fc = hdr->frame_control;
	if (!ieee80211_is_beacon(fc) ||
	    compare_ether_addr(hdr->addr3, common->curbssid)) {
		/* TODO: This doesn't work well if you have stations
		 * associated to two different APs because curbssid
		 * is just the last AP that any of the stations associated
		 * with.
		 */
		return;
	}

	if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr)
		ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);

	last_rssi = sc->last_rssi;
	if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
		rx_stats->rs_rssi = ATH_EP_RND(last_rssi,
					       ATH_RSSI_EP_MULTIPLIER);
	if (rx_stats->rs_rssi < 0)
		rx_stats->rs_rssi = 0;

	/* Update Beacon RSSI, this is used by ANI. */
	ah->stats.avgbrssi = rx_stats->rs_rssi;
}

/*
 * For Decrypt or Demic errors, we only mark packet status here and always
 * push the frame up to let mac80211 handle the actual error case, be it no
 * decryption key or a real decryption error. This lets us keep statistics
 * there.
 */
static int ath9k_rx_skb_preprocess(struct ath_common *common,
				   struct ieee80211_hw *hw,
				   struct ieee80211_hdr *hdr,
				   struct ath_rx_status *rx_stats,
				   struct ieee80211_rx_status *rx_status,
				   bool *decrypt_error)
{
	memset(rx_status, 0, sizeof(struct ieee80211_rx_status));

	/*
	 * everything but the rate is checked here, the rate check is done
	 * separately to avoid doing two lookups for a rate for each frame.
	 */
	if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
		return -EINVAL;

	/* Only use status info from the last fragment */
	if (rx_stats->rs_more)
		return 0;

	ath9k_process_rssi(common, hw, hdr, rx_stats);

	if (ath9k_process_rate(common, hw, rx_stats, rx_status))
		return -EINVAL;

	rx_status->band = hw->conf.channel->band;
	rx_status->freq = hw->conf.channel->center_freq;
	rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi;
	rx_status->antenna = rx_stats->rs_antenna;
	rx_status->flag |= RX_FLAG_MACTIME_MPDU;

	return 0;
}

static void ath9k_rx_skb_postprocess(struct ath_common *common,
				     struct sk_buff *skb,
				     struct ath_rx_status *rx_stats,
				     struct ieee80211_rx_status *rxs,
				     bool decrypt_error)
{
	struct ath_hw *ah = common->ah;
	struct ieee80211_hdr *hdr;
	int hdrlen, padpos, padsize;
	u8 keyix;
	__le16 fc;

	/* see if any padding is done by the hw and remove it */
	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;
	padpos = ath9k_cmn_padpos(hdr->frame_control);

	/* The MAC header is padded to have 32-bit boundary if the
	 * packet payload is non-zero. The general calculation for
	 * padsize would take into account odd header lengths:
	 * padsize = (4 - padpos % 4) % 4; However, since only
	 * even-length headers are used, padding can only be 0 or 2
	 * bytes and we can optimize this a bit. In addition, we must
	 * not try to remove padding from short control frames that do
	 * not have payload. */
	padsize = padpos & 3;
	if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	keyix = rx_stats->rs_keyix;

	if (keyix != ATH9K_RXKEYIX_INVALID && !decrypt_error &&
	    ieee80211_has_protected(fc)) {
		rxs->flag |= RX_FLAG_DECRYPTED;
	} else if (ieee80211_has_protected(fc)
		   && !decrypt_error && skb->len >= hdrlen + 4) {
		keyix = skb->data[hdrlen + 3] >> 6;

		if (test_bit(keyix, common->keymap))
			rxs->flag |= RX_FLAG_DECRYPTED;
	}
	if (ah->sw_mgmt_crypto &&
	    (rxs->flag & RX_FLAG_DECRYPTED) &&
	    ieee80211_is_mgmt(fc))
		/* Use software decrypt for management frames. */
		rxs->flag &= ~RX_FLAG_DECRYPTED;
}

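/*
 * Note (added): the antenna diversity/combining code below switches on
 * the nibble pair (main_lna_conf << 4) | alt_lna_conf. Judging by the
 * case labels, the LNA configuration values encode as 0 = LNA1-LNA2
 * (A-B), 1 = LNA2, 2 = LNA1 and 3 = LNA1+LNA2 (A+B), so e.g. 0x21
 * means main on LNA1 with the alternative chain on LNA2.
 */
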
static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb,
				      struct ath_hw_antcomb_conf ant_conf,
				      int main_rssi_avg)
{
	antcomb->quick_scan_cnt = 0;

	if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
		antcomb->rssi_lna2 = main_rssi_avg;
	else if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA1)
		antcomb->rssi_lna1 = main_rssi_avg;

	switch ((ant_conf.main_lna_conf << 4) | ant_conf.alt_lna_conf) {
	case 0x10: /* LNA2 A-B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
		break;
	case 0x20: /* LNA1 A-B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
		break;
	case 0x21: /* LNA1 LNA2 */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		break;
	case 0x12: /* LNA2 LNA1 */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		break;
	case 0x13: /* LNA2 A+B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
		break;
	case 0x23: /* LNA1 A+B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
		break;
	default:
		break;
	}
}

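/*
 * Note (added): the quick scan invokes the function below three times.
 * Pass 0 points the alternative chain at the first candidate
 * configuration, pass 1 records its RSSI and programs the second
 * candidate, and pass 2 records the final measurement and settles on
 * the best of the sampled configurations.
 */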
static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
				struct ath_hw_antcomb_conf *div_ant_conf,
				int main_rssi_avg, int alt_rssi_avg,
				int alt_ratio)
{
	/* alt_good */
	switch (antcomb->quick_scan_cnt) {
	case 0:
		/* set alt to main, and alt to first conf */
		div_ant_conf->main_lna_conf = antcomb->main_conf;
		div_ant_conf->alt_lna_conf = antcomb->first_quick_scan_conf;
		break;
	case 1:
		/* set alt to main, and alt to second conf */
		div_ant_conf->main_lna_conf = antcomb->main_conf;
		div_ant_conf->alt_lna_conf = antcomb->second_quick_scan_conf;
		antcomb->rssi_first = main_rssi_avg;
		antcomb->rssi_second = alt_rssi_avg;

		if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
			/* main is LNA1 */
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		} else {
			if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
			      (alt_rssi_avg > main_rssi_avg +
			       ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
			     (alt_rssi_avg > main_rssi_avg)) &&
			    (antcomb->total_pkt_count > 50))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		}
		break;
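	/*
	 * Note (added): final quick-scan pass; both candidate
	 * configurations have now been sampled, so pick the main LNA from
	 * the stored LNA1/LNA2 averages and the alternative from whichever
	 * candidate measured better.
	 */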
	case 2:
		antcomb->alt_good = false;
		antcomb->scan_not_start = false;
		antcomb->scan = false;
		antcomb->rssi_first = main_rssi_avg;
		antcomb->rssi_third = alt_rssi_avg;

		if (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1)
			antcomb->rssi_lna1 = alt_rssi_avg;
		else if (antcomb->second_quick_scan_conf ==
			 ATH_ANT_DIV_COMB_LNA2)
			antcomb->rssi_lna2 = alt_rssi_avg;
		else if (antcomb->second_quick_scan_conf ==
			 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2) {
			if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)
				antcomb->rssi_lna2 = main_rssi_avg;
			else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1)
				antcomb->rssi_lna1 = main_rssi_avg;
		}

		if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
		    ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)
			div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
		else
			div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;

		if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		} else {
			if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
			      (alt_rssi_avg > main_rssi_avg +
			       ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
			     (alt_rssi_avg > main_rssi_avg)) &&
			    (antcomb->total_pkt_count > 50))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		}

		/* set alt to the conf with maximum ratio */
		if (antcomb->first_ratio && antcomb->second_ratio) {
			if (antcomb->rssi_second > antcomb->rssi_third) {
				/* first alt */
				if ((antcomb->first_quick_scan_conf ==
				    ATH_ANT_DIV_COMB_LNA1) ||
				    (antcomb->first_quick_scan_conf ==
				    ATH_ANT_DIV_COMB_LNA2))
					/* Set alt LNA1 or LNA2 */
					if (div_ant_conf->main_lna_conf ==
					    ATH_ANT_DIV_COMB_LNA2)
						div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA1;
					else
						div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA2;
				else
					/* Set alt to A+B or A-B */
					div_ant_conf->alt_lna_conf =
						antcomb->first_quick_scan_conf;
			} else if ((antcomb->second_quick_scan_conf ==
				   ATH_ANT_DIV_COMB_LNA1) ||
				   (antcomb->second_quick_scan_conf ==
				   ATH_ANT_DIV_COMB_LNA2)) {
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			} else {
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->second_quick_scan_conf;
			}
		} else if (antcomb->first_ratio) {
			/* first alt */
			if ((antcomb->first_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->first_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->first_quick_scan_conf;
		} else if (antcomb->second_ratio) {
			/* second alt */
			if ((antcomb->second_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->second_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->second_quick_scan_conf;
		} else {
			/* main is largest */
			if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf = antcomb->main_conf;
		}
		break;
	default:
		break;
	}
}

static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
					  struct ath_ant_comb *antcomb,
					  int alt_ratio)
{
	if (ant_conf->div_group == 0) {
		/* Adjust the fast_div_bias based on main and alt lna conf */
		switch ((ant_conf->main_lna_conf << 4) |
			ant_conf->alt_lna_conf) {
		case 0x01: /* A-B LNA2 */
			ant_conf->fast_div_bias = 0x3b;
			break;
		case 0x02: /* A-B LNA1 */
			ant_conf->fast_div_bias = 0x3d;
			break;
		case 0x03: /* A-B A+B */
			ant_conf->fast_div_bias = 0x1;
			break;
		case 0x10: /* LNA2 A-B */
			ant_conf->fast_div_bias = 0x7;
			break;
		case 0x12: /* LNA2 LNA1 */
			ant_conf->fast_div_bias = 0x2;
			break;
		case 0x13: /* LNA2 A+B */
			ant_conf->fast_div_bias = 0x7;
			break;
		case 0x20: /* LNA1 A-B */
			ant_conf->fast_div_bias = 0x6;
			break;
		case 0x21: /* LNA1 LNA2 */
			ant_conf->fast_div_bias = 0x0;
			break;
		case 0x23: /* LNA1 A+B */
			ant_conf->fast_div_bias = 0x6;
			break;
		case 0x30: /* A+B A-B */
			ant_conf->fast_div_bias = 0x1;
			break;
		case 0x31: /* A+B LNA2 */
			ant_conf->fast_div_bias = 0x3b;
			break;
		case 0x32: /* A+B LNA1 */
			ant_conf->fast_div_bias = 0x3d;
			break;
		default:
			break;
		}
	} else if (ant_conf->div_group == 1) {
		/* Adjust the fast_div_bias based on main and alt_lna_conf */
		switch ((ant_conf->main_lna_conf << 4) |
			ant_conf->alt_lna_conf) {
		case 0x01: /* A-B LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x02: /* A-B LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x03: /* A-B A+B */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x10: /* LNA2 A-B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x3f;
			else
				ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x12: /* LNA2 LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x13: /* LNA2 A+B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x3f;
			else
				ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x20: /* LNA1 A-B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x3f;
			else
				ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x21: /* LNA1 LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x23: /* LNA1 A+B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x3f;
			else
				ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x30: /* A+B A-B */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x31: /* A+B LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x32: /* A+B LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		default:
			break;
		}
	} else if (ant_conf->div_group == 2) {
		/* Adjust the fast_div_bias based on main and alt_lna_conf */
		switch ((ant_conf->main_lna_conf << 4) |
			ant_conf->alt_lna_conf) {
		case 0x01: /* A-B LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x02: /* A-B LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x03: /* A-B A+B */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x10: /* LNA2 A-B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x1;
			else
				ant_conf->fast_div_bias = 0x2;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x12: /* LNA2 LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x13: /* LNA2 A+B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x1;
			else
				ant_conf->fast_div_bias = 0x2;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x20: /* LNA1 A-B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x1;
			else
				ant_conf->fast_div_bias = 0x2;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x21: /* LNA1 LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x23: /* LNA1 A+B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x1;
			else
				ant_conf->fast_div_bias = 0x2;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x30: /* A+B A-B */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x31: /* A+B LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x32: /* A+B LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		default:
			break;
		}
	}
}

/* Antenna diversity and combining */
static void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
{
	struct ath_hw_antcomb_conf div_ant_conf;
	struct ath_ant_comb *antcomb = &sc->ant_comb;
	int alt_ratio = 0, alt_rssi_avg = 0, main_rssi_avg = 0, curr_alt_set;
	int curr_main_set;
	int main_rssi = rs->rs_rssi_ctl0;
	int alt_rssi = rs->rs_rssi_ctl1;
	int rx_ant_conf, main_ant_conf;
	bool short_scan = false;

	rx_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_CURRENT_SHIFT) &
		      ATH_ANT_RX_MASK;
	main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) &
			ATH_ANT_RX_MASK;

	/* Record packet only when both main_rssi and alt_rssi is positive */
	if (main_rssi > 0 && alt_rssi > 0) {
		antcomb->total_pkt_count++;
		antcomb->main_total_rssi += main_rssi;
		antcomb->alt_total_rssi += alt_rssi;
		if (main_ant_conf == rx_ant_conf)
			antcomb->main_recv_cnt++;
		else
			antcomb->alt_recv_cnt++;
	}

	/* Short scan check */
	if (antcomb->scan && antcomb->alt_good) {
		if (time_after(jiffies, antcomb->scan_start_time +
			       msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR)))
			short_scan = true;
		else
			if (antcomb->total_pkt_count ==
			    ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) {
				alt_ratio = ((antcomb->alt_recv_cnt * 100) /
					     antcomb->total_pkt_count);
				if (alt_ratio < ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
					short_scan = true;
			}
	}

	if (((antcomb->total_pkt_count < ATH_ANT_DIV_COMB_MAX_PKTCOUNT) ||
	     rs->rs_moreaggr) && !short_scan)
		return;

	if (antcomb->total_pkt_count) {
		alt_ratio = ((antcomb->alt_recv_cnt * 100) /
			     antcomb->total_pkt_count);
		main_rssi_avg = (antcomb->main_total_rssi /
				 antcomb->total_pkt_count);
		alt_rssi_avg = (antcomb->alt_total_rssi /
				antcomb->total_pkt_count);
	}

	ath9k_hw_antdiv_comb_conf_get(sc->sc_ah, &div_ant_conf);
	curr_alt_set = div_ant_conf.alt_lna_conf;
	curr_main_set = div_ant_conf.main_lna_conf;

	antcomb->count++;

	if (antcomb->count == ATH_ANT_DIV_COMB_MAX_COUNT) {
		if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
			ath_lnaconf_alt_good_scan(antcomb, div_ant_conf,
						  main_rssi_avg);
			antcomb->alt_good = true;
		} else {
			antcomb->alt_good = false;
		}

		antcomb->count = 0;
		antcomb->scan = true;
		antcomb->scan_not_start = true;
	}

	if (!antcomb->scan) {
		if (ath_ant_div_comb_alt_check(div_ant_conf.div_group,
					alt_ratio, curr_main_set, curr_alt_set,
					alt_rssi_avg, main_rssi_avg)) {
			if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) {
				/* Switch main and alt LNA */
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
			} else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) {
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			}

			goto div_comb_done;
		} else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) &&
			   (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) {
			/* Set alt to another LNA */
			if (curr_main_set == ATH_ANT_DIV_COMB_LNA2)
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
			else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1)
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;

			goto div_comb_done;
		}

		if ((alt_rssi_avg < (main_rssi_avg +
		     div_ant_conf.lna1_lna2_delta)))
			goto div_comb_done;
	}

	if (!antcomb->scan_not_start) {
		switch (curr_alt_set) {
		case ATH_ANT_DIV_COMB_LNA2:
			antcomb->rssi_lna2 = alt_rssi_avg;
			antcomb->rssi_lna1 = main_rssi_avg;
			antcomb->scan = true;
			/* set to A+B */
			div_ant_conf.main_lna_conf =
				ATH_ANT_DIV_COMB_LNA1;
			div_ant_conf.alt_lna_conf =
				ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1:
			antcomb->rssi_lna1 = alt_rssi_avg;
			antcomb->rssi_lna2 = main_rssi_avg;
			antcomb->scan = true;
			/* set to A+B */
			div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
			div_ant_conf.alt_lna_conf =
				ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
			antcomb->rssi_add = alt_rssi_avg;
			antcomb->scan = true;
			/* set to A-B */
			div_ant_conf.alt_lna_conf =
				ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2:
			antcomb->rssi_sub = alt_rssi_avg;
			antcomb->scan = false;
			if (antcomb->rssi_lna2 >
			    (antcomb->rssi_lna1 +
			     ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
				/* use LNA2 as main LNA */
				if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
				    (antcomb->rssi_add > antcomb->rssi_sub)) {
					/* set to A+B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
				} else if (antcomb->rssi_sub >
					   antcomb->rssi_lna1) {
					/* set to A-B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
				} else {
					/* set to LNA1 */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				}
			} else {
				/* use LNA1 as main LNA */
				if ((antcomb->rssi_add > antcomb->rssi_lna2) &&
				    (antcomb->rssi_add > antcomb->rssi_sub)) {
					/* set to A+B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
				} else if (antcomb->rssi_sub >
					   antcomb->rssi_lna1) {
					/* set to A-B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
				} else {
					/* set to LNA2 */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				}
			}
			break;
		default:
			break;
		}
	} else {
		if (!antcomb->alt_good) {
			antcomb->scan_not_start = false;
			/* Set alt to another LNA */
			if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) {
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
			} else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) {
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			}
			goto div_comb_done;
		}
	}

	ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf,
					   main_rssi_avg, alt_rssi_avg,
					   alt_ratio);

	antcomb->quick_scan_cnt++;

div_comb_done:
	ath_ant_div_conf_fast_divbias(&div_ant_conf, antcomb, alt_ratio);
	ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf);

	antcomb->scan_start_time = jiffies;
	antcomb->total_pkt_count = 0;
	antcomb->main_total_rssi = 0;
	antcomb->alt_total_rssi = 0;
	antcomb->main_recv_cnt = 0;
	antcomb->alt_recv_cnt = 0;
}

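/*
 * Note (added): ath_rx_tasklet() is the bottom half of the RX path.
 * For legacy chips it runs once per RX interrupt; for EDMA chips it is
 * called once per FIFO, with hp selecting the high or low priority
 * queue. When flush is set, completed frames are recycled back to the
 * hardware without being handed to mac80211.
 */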
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
	struct ath_buf *bf;
	struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
	struct ieee80211_rx_status *rxs;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	/*
	 * The hw can technically differ from common->hw when using ath9k
	 * virtual wiphy so to account for that we iterate over the active
	 * wiphys and find the appropriate wiphy and therefore hw.
	 */
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	int retval;
	bool decrypt_error = false;
	struct ath_rx_status rs;
	enum ath9k_rx_qtype qtype;
	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	int dma_type;
	u8 rx_status_len = ah->caps.rx_status_len;
	u64 tsf = 0;
	u32 tsf_lower = 0;
	unsigned long flags;

	if (edma)
		dma_type = DMA_BIDIRECTIONAL;
	else
		dma_type = DMA_FROM_DEVICE;

	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
	spin_lock_bh(&sc->rx.rxbuflock);

	tsf = ath9k_hw_gettsf64(ah);
	tsf_lower = tsf & 0xffffffff;

	do {
		/* If handling rx interrupt and flush is in progress => exit */
		if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
			break;

		memset(&rs, 0, sizeof(rs));
		if (edma)
			bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
		else
			bf = ath_get_next_rx_buf(sc, &rs);

		if (!bf)
			break;

		skb = bf->bf_mpdu;
		if (!skb)
			continue;

		/*
		 * Take frame header from the first fragment and RX status from
		 * the last one.
		 */
		if (sc->rx.frag)
			hdr_skb = sc->rx.frag;
		else
			hdr_skb = skb;

		hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
		rxs = IEEE80211_SKB_RXCB(hdr_skb);

		ath_debug_stat_rx(sc, &rs);

		/*
		 * If we're asked to flush receive queue, directly
		 * chain it back at the queue without processing it.
		 */
		if (flush)
			goto requeue_drop_frag;

		retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
						 rxs, &decrypt_error);
		if (retval)
			goto requeue_drop_frag;

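		/*
		 * Note (added): rs_tstamp is only a 32-bit hardware snapshot
		 * of the TSF at receive time. Splice it into the 64-bit TSF
		 * read at the top of this function, then correct by one 2^32
		 * period if the distance to tsf_lower shows that the counter
		 * wrapped between the frame's arrival and the TSF read.
		 */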
		rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
		if (rs.rs_tstamp > tsf_lower &&
		    unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
			rxs->mactime -= 0x100000000ULL;

		if (rs.rs_tstamp < tsf_lower &&
		    unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
			rxs->mactime += 0x100000000ULL;

		/* Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb */
		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize,
					      GFP_ATOMIC);

		/* If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
		 * skb and put it at the tail of the sc->rx.rxbuf list for
		 * processing. */
		if (!requeue_skb)
			goto requeue_drop_frag;

		/* Unmap the frame */
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 common->rx_bufsize,
				 dma_type);

		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
		if (ah->caps.rx_status_len)
			skb_pull(skb, ah->caps.rx_status_len);

		if (!rs.rs_more)
			ath9k_rx_skb_postprocess(common, hdr_skb, &rs,
						 rxs, decrypt_error);

		/* We will now give hardware our shiny new allocated skb */
		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
						 common->rx_bufsize,
						 dma_type);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(requeue_skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common, "dma_mapping_error() on RX\n");
			ieee80211_rx(hw, skb);
			break;
		}

		if (rs.rs_more) {
			/*
			 * rs_more indicates chained descriptors which can be
			 * used to link buffers together for a sort of
			 * scatter-gather operation.
			 */
			if (sc->rx.frag) {
				/* too many fragments - cannot handle frame */
				dev_kfree_skb_any(sc->rx.frag);
				dev_kfree_skb_any(skb);
				skb = NULL;
			}
			sc->rx.frag = skb;
			goto requeue;
		}

		if (sc->rx.frag) {
			int space = skb->len - skb_tailroom(hdr_skb);

			sc->rx.frag = NULL;

			if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
				dev_kfree_skb(skb);
				goto requeue_drop_frag;
			}

			skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
						  skb->len);
			dev_kfree_skb_any(skb);
			skb = hdr_skb;
		}

		/*
		 * change the default rx antenna if rx diversity chooses the
		 * other antenna 3 times in a row.
		 */
		if (sc->rx.defant != rs.rs_antenna) {
			if (++sc->rx.rxotherant >= 3)
				ath_setdefantenna(sc, rs.rs_antenna);
		} else {
			sc->rx.rxotherant = 0;
		}

		if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
			skb_trim(skb, skb->len - 8);

		spin_lock_irqsave(&sc->sc_pm_lock, flags);

		if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
				     PS_WAIT_FOR_CAB |
				     PS_WAIT_FOR_PSPOLL_DATA)) ||
		    ath9k_check_auto_sleep(sc))
			ath_rx_ps(sc, skb);
		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

		if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
			ath_ant_comb_scan(sc, &rs);

		ieee80211_rx(hw, skb);

requeue_drop_frag:
		if (sc->rx.frag) {
			dev_kfree_skb_any(sc->rx.frag);
			sc->rx.frag = NULL;
		}
requeue:
		if (edma) {
			list_add_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_edma_buf_link(sc, qtype);
		} else {
			list_move_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_buf_link(sc, bf);
			ath9k_hw_rxena(ah);
		}
	} while (1);

	spin_unlock_bh(&sc->rx.rxbuflock);

	return 0;
}