1 /* 2 * Copyright (c) 2008-2009 Atheros Communications Inc. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for any 5 * purpose with or without fee is hereby granted, provided that the above 6 * copyright notice and this permission notice appear in all copies. 7 * 8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 15 */ 16 17 #include "ath9k.h" 18 #include "ar9003_mac.h" 19 20 #define SKB_CB_ATHBUF(__skb) (*((struct ath_buf **)__skb->cb)) 21 22 static inline bool ath_is_alt_ant_ratio_better(int alt_ratio, int maxdelta, 23 int mindelta, int main_rssi_avg, 24 int alt_rssi_avg, int pkt_count) 25 { 26 return (((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) && 27 (alt_rssi_avg > main_rssi_avg + maxdelta)) || 28 (alt_rssi_avg > main_rssi_avg + mindelta)) && (pkt_count > 50); 29 } 30 31 static inline bool ath9k_check_auto_sleep(struct ath_softc *sc) 32 { 33 return sc->ps_enabled && 34 (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP); 35 } 36 37 static struct ieee80211_hw * ath_get_virt_hw(struct ath_softc *sc, 38 struct ieee80211_hdr *hdr) 39 { 40 struct ieee80211_hw *hw = sc->pri_wiphy->hw; 41 int i; 42 43 spin_lock_bh(&sc->wiphy_lock); 44 for (i = 0; i < sc->num_sec_wiphy; i++) { 45 struct ath_wiphy *aphy = sc->sec_wiphy[i]; 46 if (aphy == NULL) 47 continue; 48 if (compare_ether_addr(hdr->addr1, aphy->hw->wiphy->perm_addr) 49 == 0) { 50 hw = aphy->hw; 51 break; 52 } 53 } 54 spin_unlock_bh(&sc->wiphy_lock); 55 return hw; 56 } 57 58 /* 59 * Setup and link descriptors. 
60 * 61 * 11N: we can no longer afford to self link the last descriptor. 62 * MAC acknowledges BA status as long as it copies frames to host 63 * buffer (or rx fifo). This can incorrectly acknowledge packets 64 * to a sender if last desc is self-linked. 65 */ 66 static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf) 67 { 68 struct ath_hw *ah = sc->sc_ah; 69 struct ath_common *common = ath9k_hw_common(ah); 70 struct ath_desc *ds; 71 struct sk_buff *skb; 72 73 ATH_RXBUF_RESET(bf); 74 75 ds = bf->bf_desc; 76 ds->ds_link = 0; /* link to null */ 77 ds->ds_data = bf->bf_buf_addr; 78 79 /* virtual addr of the beginning of the buffer. */ 80 skb = bf->bf_mpdu; 81 BUG_ON(skb == NULL); 82 ds->ds_vdata = skb->data; 83 84 /* 85 * setup rx descriptors. The rx_bufsize here tells the hardware 86 * how much data it can DMA to us and that we are prepared 87 * to process 88 */ 89 ath9k_hw_setuprxdesc(ah, ds, 90 common->rx_bufsize, 91 0); 92 93 if (sc->rx.rxlink == NULL) 94 ath9k_hw_putrxbuf(ah, bf->bf_daddr); 95 else 96 *sc->rx.rxlink = bf->bf_daddr; 97 98 sc->rx.rxlink = &ds->ds_link; 99 ath9k_hw_rxena(ah); 100 } 101 102 static void ath_setdefantenna(struct ath_softc *sc, u32 antenna) 103 { 104 /* XXX block beacon interrupts */ 105 ath9k_hw_setantenna(sc->sc_ah, antenna); 106 sc->rx.defant = antenna; 107 sc->rx.rxotherant = 0; 108 } 109 110 static void ath_opmode_init(struct ath_softc *sc) 111 { 112 struct ath_hw *ah = sc->sc_ah; 113 struct ath_common *common = ath9k_hw_common(ah); 114 115 u32 rfilt, mfilt[2]; 116 117 /* configure rx filter */ 118 rfilt = ath_calcrxfilter(sc); 119 ath9k_hw_setrxfilter(ah, rfilt); 120 121 /* configure bssid mask */ 122 ath_hw_setbssidmask(common); 123 124 /* configure operational mode */ 125 ath9k_hw_setopmode(ah); 126 127 /* calculate and install multicast filter */ 128 mfilt[0] = mfilt[1] = ~0; 129 ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]); 130 } 131 132 static bool ath_rx_edma_buf_link(struct ath_softc *sc, 133 enum 
ath9k_rx_qtype qtype) 134 { 135 struct ath_hw *ah = sc->sc_ah; 136 struct ath_rx_edma *rx_edma; 137 struct sk_buff *skb; 138 struct ath_buf *bf; 139 140 rx_edma = &sc->rx.rx_edma[qtype]; 141 if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize) 142 return false; 143 144 bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list); 145 list_del_init(&bf->list); 146 147 skb = bf->bf_mpdu; 148 149 ATH_RXBUF_RESET(bf); 150 memset(skb->data, 0, ah->caps.rx_status_len); 151 dma_sync_single_for_device(sc->dev, bf->bf_buf_addr, 152 ah->caps.rx_status_len, DMA_TO_DEVICE); 153 154 SKB_CB_ATHBUF(skb) = bf; 155 ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype); 156 skb_queue_tail(&rx_edma->rx_fifo, skb); 157 158 return true; 159 } 160 161 static void ath_rx_addbuffer_edma(struct ath_softc *sc, 162 enum ath9k_rx_qtype qtype, int size) 163 { 164 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 165 u32 nbuf = 0; 166 167 if (list_empty(&sc->rx.rxbuf)) { 168 ath_print(common, ATH_DBG_QUEUE, "No free rx buf available\n"); 169 return; 170 } 171 172 while (!list_empty(&sc->rx.rxbuf)) { 173 nbuf++; 174 175 if (!ath_rx_edma_buf_link(sc, qtype)) 176 break; 177 178 if (nbuf >= size) 179 break; 180 } 181 } 182 183 static void ath_rx_remove_buffer(struct ath_softc *sc, 184 enum ath9k_rx_qtype qtype) 185 { 186 struct ath_buf *bf; 187 struct ath_rx_edma *rx_edma; 188 struct sk_buff *skb; 189 190 rx_edma = &sc->rx.rx_edma[qtype]; 191 192 while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) { 193 bf = SKB_CB_ATHBUF(skb); 194 BUG_ON(!bf); 195 list_add_tail(&bf->list, &sc->rx.rxbuf); 196 } 197 } 198 199 static void ath_rx_edma_cleanup(struct ath_softc *sc) 200 { 201 struct ath_buf *bf; 202 203 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP); 204 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP); 205 206 list_for_each_entry(bf, &sc->rx.rxbuf, list) { 207 if (bf->bf_mpdu) 208 dev_kfree_skb_any(bf->bf_mpdu); 209 } 210 211 INIT_LIST_HEAD(&sc->rx.rxbuf); 212 213 kfree(sc->rx.rx_bufptr); 214 
sc->rx.rx_bufptr = NULL; 215 } 216 217 static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size) 218 { 219 skb_queue_head_init(&rx_edma->rx_fifo); 220 skb_queue_head_init(&rx_edma->rx_buffers); 221 rx_edma->rx_fifo_hwsize = size; 222 } 223 224 static int ath_rx_edma_init(struct ath_softc *sc, int nbufs) 225 { 226 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 227 struct ath_hw *ah = sc->sc_ah; 228 struct sk_buff *skb; 229 struct ath_buf *bf; 230 int error = 0, i; 231 u32 size; 232 233 234 common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN + 235 ah->caps.rx_status_len, 236 min(common->cachelsz, (u16)64)); 237 238 ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize - 239 ah->caps.rx_status_len); 240 241 ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP], 242 ah->caps.rx_lp_qdepth); 243 ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP], 244 ah->caps.rx_hp_qdepth); 245 246 size = sizeof(struct ath_buf) * nbufs; 247 bf = kzalloc(size, GFP_KERNEL); 248 if (!bf) 249 return -ENOMEM; 250 251 INIT_LIST_HEAD(&sc->rx.rxbuf); 252 sc->rx.rx_bufptr = bf; 253 254 for (i = 0; i < nbufs; i++, bf++) { 255 skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL); 256 if (!skb) { 257 error = -ENOMEM; 258 goto rx_init_fail; 259 } 260 261 memset(skb->data, 0, common->rx_bufsize); 262 bf->bf_mpdu = skb; 263 264 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data, 265 common->rx_bufsize, 266 DMA_BIDIRECTIONAL); 267 if (unlikely(dma_mapping_error(sc->dev, 268 bf->bf_buf_addr))) { 269 dev_kfree_skb_any(skb); 270 bf->bf_mpdu = NULL; 271 bf->bf_buf_addr = 0; 272 ath_print(common, ATH_DBG_FATAL, 273 "dma_mapping_error() on RX init\n"); 274 error = -ENOMEM; 275 goto rx_init_fail; 276 } 277 278 list_add_tail(&bf->list, &sc->rx.rxbuf); 279 } 280 281 return 0; 282 283 rx_init_fail: 284 ath_rx_edma_cleanup(sc); 285 return error; 286 } 287 288 static void ath_edma_start_recv(struct ath_softc *sc) 289 { 290 spin_lock_bh(&sc->rx.rxbuflock); 291 292 
ath9k_hw_rxena(sc->sc_ah); 293 294 ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP, 295 sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize); 296 297 ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP, 298 sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize); 299 300 ath_opmode_init(sc); 301 302 ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_OFFCHANNEL)); 303 304 spin_unlock_bh(&sc->rx.rxbuflock); 305 } 306 307 static void ath_edma_stop_recv(struct ath_softc *sc) 308 { 309 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP); 310 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP); 311 } 312 313 int ath_rx_init(struct ath_softc *sc, int nbufs) 314 { 315 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 316 struct sk_buff *skb; 317 struct ath_buf *bf; 318 int error = 0; 319 320 spin_lock_init(&sc->rx.pcu_lock); 321 sc->sc_flags &= ~SC_OP_RXFLUSH; 322 spin_lock_init(&sc->rx.rxbuflock); 323 324 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { 325 return ath_rx_edma_init(sc, nbufs); 326 } else { 327 common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN, 328 min(common->cachelsz, (u16)64)); 329 330 ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n", 331 common->cachelsz, common->rx_bufsize); 332 333 /* Initialize rx descriptors */ 334 335 error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf, 336 "rx", nbufs, 1, 0); 337 if (error != 0) { 338 ath_print(common, ATH_DBG_FATAL, 339 "failed to allocate rx descriptors: %d\n", 340 error); 341 goto err; 342 } 343 344 list_for_each_entry(bf, &sc->rx.rxbuf, list) { 345 skb = ath_rxbuf_alloc(common, common->rx_bufsize, 346 GFP_KERNEL); 347 if (skb == NULL) { 348 error = -ENOMEM; 349 goto err; 350 } 351 352 bf->bf_mpdu = skb; 353 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data, 354 common->rx_bufsize, 355 DMA_FROM_DEVICE); 356 if (unlikely(dma_mapping_error(sc->dev, 357 bf->bf_buf_addr))) { 358 dev_kfree_skb_any(skb); 359 bf->bf_mpdu = NULL; 360 bf->bf_buf_addr = 0; 361 ath_print(common, ATH_DBG_FATAL, 362 "dma_mapping_error() 
on RX init\n"); 363 error = -ENOMEM; 364 goto err; 365 } 366 } 367 sc->rx.rxlink = NULL; 368 } 369 370 err: 371 if (error) 372 ath_rx_cleanup(sc); 373 374 return error; 375 } 376 377 void ath_rx_cleanup(struct ath_softc *sc) 378 { 379 struct ath_hw *ah = sc->sc_ah; 380 struct ath_common *common = ath9k_hw_common(ah); 381 struct sk_buff *skb; 382 struct ath_buf *bf; 383 384 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { 385 ath_rx_edma_cleanup(sc); 386 return; 387 } else { 388 list_for_each_entry(bf, &sc->rx.rxbuf, list) { 389 skb = bf->bf_mpdu; 390 if (skb) { 391 dma_unmap_single(sc->dev, bf->bf_buf_addr, 392 common->rx_bufsize, 393 DMA_FROM_DEVICE); 394 dev_kfree_skb(skb); 395 bf->bf_buf_addr = 0; 396 bf->bf_mpdu = NULL; 397 } 398 } 399 400 if (sc->rx.rxdma.dd_desc_len != 0) 401 ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf); 402 } 403 } 404 405 /* 406 * Calculate the receive filter according to the 407 * operating mode and state: 408 * 409 * o always accept unicast, broadcast, and multicast traffic 410 * o maintain current state of phy error reception (the hal 411 * may enable phy error frames for noise immunity work) 412 * o probe request frames are accepted only when operating in 413 * hostap, adhoc, or monitor modes 414 * o enable promiscuous mode according to the interface state 415 * o accept beacons: 416 * - when operating in adhoc mode so the 802.11 layer creates 417 * node table entries for peers, 418 * - when operating in station mode for collecting rssi data when 419 * the station is otherwise quiet, or 420 * - when operating as a repeater so we see repeater-sta beacons 421 * - when scanning 422 */ 423 424 u32 ath_calcrxfilter(struct ath_softc *sc) 425 { 426 #define RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR) 427 428 u32 rfilt; 429 430 rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE) 431 | ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST 432 | ATH9K_RX_FILTER_MCAST; 433 434 if (sc->rx.rxfilter & 
FIF_PROBE_REQ) 435 rfilt |= ATH9K_RX_FILTER_PROBEREQ; 436 437 /* 438 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station 439 * mode interface or when in monitor mode. AP mode does not need this 440 * since it receives all in-BSS frames anyway. 441 */ 442 if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) && 443 (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) || 444 (sc->sc_ah->opmode == NL80211_IFTYPE_MONITOR)) 445 rfilt |= ATH9K_RX_FILTER_PROM; 446 447 if (sc->rx.rxfilter & FIF_CONTROL) 448 rfilt |= ATH9K_RX_FILTER_CONTROL; 449 450 if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) && 451 (sc->nvifs <= 1) && 452 !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC)) 453 rfilt |= ATH9K_RX_FILTER_MYBEACON; 454 else 455 rfilt |= ATH9K_RX_FILTER_BEACON; 456 457 if ((AR_SREV_9280_20_OR_LATER(sc->sc_ah) || 458 AR_SREV_9285_12_OR_LATER(sc->sc_ah)) && 459 (sc->sc_ah->opmode == NL80211_IFTYPE_AP) && 460 (sc->rx.rxfilter & FIF_PSPOLL)) 461 rfilt |= ATH9K_RX_FILTER_PSPOLL; 462 463 if (conf_is_ht(&sc->hw->conf)) 464 rfilt |= ATH9K_RX_FILTER_COMP_BAR; 465 466 if (sc->sec_wiphy || (sc->nvifs > 1) || 467 (sc->rx.rxfilter & FIF_OTHER_BSS)) { 468 /* The following may also be needed for other older chips */ 469 if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160) 470 rfilt |= ATH9K_RX_FILTER_PROM; 471 rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL; 472 } 473 474 return rfilt; 475 476 #undef RX_FILTER_PRESERVE 477 } 478 479 int ath_startrecv(struct ath_softc *sc) 480 { 481 struct ath_hw *ah = sc->sc_ah; 482 struct ath_buf *bf, *tbf; 483 484 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { 485 ath_edma_start_recv(sc); 486 return 0; 487 } 488 489 spin_lock_bh(&sc->rx.rxbuflock); 490 if (list_empty(&sc->rx.rxbuf)) 491 goto start_recv; 492 493 sc->rx.rxlink = NULL; 494 list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) { 495 ath_rx_buf_link(sc, bf); 496 } 497 498 /* We could have deleted elements so the list may be empty now */ 499 if (list_empty(&sc->rx.rxbuf)) 500 goto start_recv; 501 
502 bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list); 503 ath9k_hw_putrxbuf(ah, bf->bf_daddr); 504 ath9k_hw_rxena(ah); 505 506 start_recv: 507 ath_opmode_init(sc); 508 ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_OFFCHANNEL)); 509 510 spin_unlock_bh(&sc->rx.rxbuflock); 511 512 return 0; 513 } 514 515 bool ath_stoprecv(struct ath_softc *sc) 516 { 517 struct ath_hw *ah = sc->sc_ah; 518 bool stopped; 519 520 spin_lock_bh(&sc->rx.rxbuflock); 521 ath9k_hw_stoppcurecv(ah); 522 ath9k_hw_setrxfilter(ah, 0); 523 stopped = ath9k_hw_stopdmarecv(ah); 524 525 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) 526 ath_edma_stop_recv(sc); 527 else 528 sc->rx.rxlink = NULL; 529 spin_unlock_bh(&sc->rx.rxbuflock); 530 531 return stopped; 532 } 533 534 void ath_flushrecv(struct ath_softc *sc) 535 { 536 sc->sc_flags |= SC_OP_RXFLUSH; 537 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) 538 ath_rx_tasklet(sc, 1, true); 539 ath_rx_tasklet(sc, 1, false); 540 sc->sc_flags &= ~SC_OP_RXFLUSH; 541 } 542 543 static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb) 544 { 545 /* Check whether the Beacon frame has DTIM indicating buffered bc/mc */ 546 struct ieee80211_mgmt *mgmt; 547 u8 *pos, *end, id, elen; 548 struct ieee80211_tim_ie *tim; 549 550 mgmt = (struct ieee80211_mgmt *)skb->data; 551 pos = mgmt->u.beacon.variable; 552 end = skb->data + skb->len; 553 554 while (pos + 2 < end) { 555 id = *pos++; 556 elen = *pos++; 557 if (pos + elen > end) 558 break; 559 560 if (id == WLAN_EID_TIM) { 561 if (elen < sizeof(*tim)) 562 break; 563 tim = (struct ieee80211_tim_ie *) pos; 564 if (tim->dtim_count != 0) 565 break; 566 return tim->bitmap_ctrl & 0x01; 567 } 568 569 pos += elen; 570 } 571 572 return false; 573 } 574 575 static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb) 576 { 577 struct ieee80211_mgmt *mgmt; 578 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 579 580 if (skb->len < 24 + 8 + 2 + 2) 581 return; 582 583 mgmt = (struct ieee80211_mgmt 
*)skb->data; 584 if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0) 585 return; /* not from our current AP */ 586 587 sc->ps_flags &= ~PS_WAIT_FOR_BEACON; 588 589 if (sc->ps_flags & PS_BEACON_SYNC) { 590 sc->ps_flags &= ~PS_BEACON_SYNC; 591 ath_print(common, ATH_DBG_PS, 592 "Reconfigure Beacon timers based on " 593 "timestamp from the AP\n"); 594 ath_beacon_config(sc, NULL); 595 } 596 597 if (ath_beacon_dtim_pending_cab(skb)) { 598 /* 599 * Remain awake waiting for buffered broadcast/multicast 600 * frames. If the last broadcast/multicast frame is not 601 * received properly, the next beacon frame will work as 602 * a backup trigger for returning into NETWORK SLEEP state, 603 * so we are waiting for it as well. 604 */ 605 ath_print(common, ATH_DBG_PS, "Received DTIM beacon indicating " 606 "buffered broadcast/multicast frame(s)\n"); 607 sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON; 608 return; 609 } 610 611 if (sc->ps_flags & PS_WAIT_FOR_CAB) { 612 /* 613 * This can happen if a broadcast frame is dropped or the AP 614 * fails to send a frame indicating that all CAB frames have 615 * been delivered. 
616 */ 617 sc->ps_flags &= ~PS_WAIT_FOR_CAB; 618 ath_print(common, ATH_DBG_PS, 619 "PS wait for CAB frames timed out\n"); 620 } 621 } 622 623 static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb) 624 { 625 struct ieee80211_hdr *hdr; 626 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 627 628 hdr = (struct ieee80211_hdr *)skb->data; 629 630 /* Process Beacon and CAB receive in PS state */ 631 if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc)) 632 && ieee80211_is_beacon(hdr->frame_control)) 633 ath_rx_ps_beacon(sc, skb); 634 else if ((sc->ps_flags & PS_WAIT_FOR_CAB) && 635 (ieee80211_is_data(hdr->frame_control) || 636 ieee80211_is_action(hdr->frame_control)) && 637 is_multicast_ether_addr(hdr->addr1) && 638 !ieee80211_has_moredata(hdr->frame_control)) { 639 /* 640 * No more broadcast/multicast frames to be received at this 641 * point. 642 */ 643 sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON); 644 ath_print(common, ATH_DBG_PS, 645 "All PS CAB frames received, back to sleep\n"); 646 } else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) && 647 !is_multicast_ether_addr(hdr->addr1) && 648 !ieee80211_has_morefrags(hdr->frame_control)) { 649 sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA; 650 ath_print(common, ATH_DBG_PS, 651 "Going back to sleep after having received " 652 "PS-Poll data (0x%lx)\n", 653 sc->ps_flags & (PS_WAIT_FOR_BEACON | 654 PS_WAIT_FOR_CAB | 655 PS_WAIT_FOR_PSPOLL_DATA | 656 PS_WAIT_FOR_TX_ACK)); 657 } 658 } 659 660 static void ath_rx_send_to_mac80211(struct ieee80211_hw *hw, 661 struct ath_softc *sc, struct sk_buff *skb, 662 struct ieee80211_rx_status *rxs) 663 { 664 struct ieee80211_hdr *hdr; 665 666 hdr = (struct ieee80211_hdr *)skb->data; 667 668 /* Send the frame to mac80211 */ 669 if (is_multicast_ether_addr(hdr->addr1)) { 670 int i; 671 /* 672 * Deliver broadcast/multicast frames to all suitable 673 * virtual wiphys. 
674 */ 675 /* TODO: filter based on channel configuration */ 676 for (i = 0; i < sc->num_sec_wiphy; i++) { 677 struct ath_wiphy *aphy = sc->sec_wiphy[i]; 678 struct sk_buff *nskb; 679 if (aphy == NULL) 680 continue; 681 nskb = skb_copy(skb, GFP_ATOMIC); 682 if (!nskb) 683 continue; 684 ieee80211_rx(aphy->hw, nskb); 685 } 686 ieee80211_rx(sc->hw, skb); 687 } else 688 /* Deliver unicast frames based on receiver address */ 689 ieee80211_rx(hw, skb); 690 } 691 692 static bool ath_edma_get_buffers(struct ath_softc *sc, 693 enum ath9k_rx_qtype qtype) 694 { 695 struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype]; 696 struct ath_hw *ah = sc->sc_ah; 697 struct ath_common *common = ath9k_hw_common(ah); 698 struct sk_buff *skb; 699 struct ath_buf *bf; 700 int ret; 701 702 skb = skb_peek(&rx_edma->rx_fifo); 703 if (!skb) 704 return false; 705 706 bf = SKB_CB_ATHBUF(skb); 707 BUG_ON(!bf); 708 709 dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr, 710 common->rx_bufsize, DMA_FROM_DEVICE); 711 712 ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data); 713 if (ret == -EINPROGRESS) { 714 /*let device gain the buffer again*/ 715 dma_sync_single_for_device(sc->dev, bf->bf_buf_addr, 716 common->rx_bufsize, DMA_FROM_DEVICE); 717 return false; 718 } 719 720 __skb_unlink(skb, &rx_edma->rx_fifo); 721 if (ret == -EINVAL) { 722 /* corrupt descriptor, skip this one and the following one */ 723 list_add_tail(&bf->list, &sc->rx.rxbuf); 724 ath_rx_edma_buf_link(sc, qtype); 725 skb = skb_peek(&rx_edma->rx_fifo); 726 if (!skb) 727 return true; 728 729 bf = SKB_CB_ATHBUF(skb); 730 BUG_ON(!bf); 731 732 __skb_unlink(skb, &rx_edma->rx_fifo); 733 list_add_tail(&bf->list, &sc->rx.rxbuf); 734 ath_rx_edma_buf_link(sc, qtype); 735 return true; 736 } 737 skb_queue_tail(&rx_edma->rx_buffers, skb); 738 739 return true; 740 } 741 742 static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc, 743 struct ath_rx_status *rs, 744 enum ath9k_rx_qtype qtype) 745 { 746 struct ath_rx_edma *rx_edma = 
&sc->rx.rx_edma[qtype]; 747 struct sk_buff *skb; 748 struct ath_buf *bf; 749 750 while (ath_edma_get_buffers(sc, qtype)); 751 skb = __skb_dequeue(&rx_edma->rx_buffers); 752 if (!skb) 753 return NULL; 754 755 bf = SKB_CB_ATHBUF(skb); 756 ath9k_hw_process_rxdesc_edma(sc->sc_ah, rs, skb->data); 757 return bf; 758 } 759 760 static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc, 761 struct ath_rx_status *rs) 762 { 763 struct ath_hw *ah = sc->sc_ah; 764 struct ath_common *common = ath9k_hw_common(ah); 765 struct ath_desc *ds; 766 struct ath_buf *bf; 767 int ret; 768 769 if (list_empty(&sc->rx.rxbuf)) { 770 sc->rx.rxlink = NULL; 771 return NULL; 772 } 773 774 bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list); 775 ds = bf->bf_desc; 776 777 /* 778 * Must provide the virtual address of the current 779 * descriptor, the physical address, and the virtual 780 * address of the next descriptor in the h/w chain. 781 * This allows the HAL to look ahead to see if the 782 * hardware is done with a descriptor by checking the 783 * done bit in the following descriptor and the address 784 * of the current descriptor the DMA engine is working 785 * on. All this is necessary because of our use of 786 * a self-linked list to avoid rx overruns. 787 */ 788 ret = ath9k_hw_rxprocdesc(ah, ds, rs, 0); 789 if (ret == -EINPROGRESS) { 790 struct ath_rx_status trs; 791 struct ath_buf *tbf; 792 struct ath_desc *tds; 793 794 memset(&trs, 0, sizeof(trs)); 795 if (list_is_last(&bf->list, &sc->rx.rxbuf)) { 796 sc->rx.rxlink = NULL; 797 return NULL; 798 } 799 800 tbf = list_entry(bf->list.next, struct ath_buf, list); 801 802 /* 803 * On some hardware the descriptor status words could 804 * get corrupted, including the done bit. Because of 805 * this, check if the next descriptor's done bit is 806 * set or not. 807 * 808 * If the next descriptor's done bit is set, the current 809 * descriptor has been corrupted. Force s/w to discard 810 * this descriptor and continue... 
811 */ 812 813 tds = tbf->bf_desc; 814 ret = ath9k_hw_rxprocdesc(ah, tds, &trs, 0); 815 if (ret == -EINPROGRESS) 816 return NULL; 817 } 818 819 if (!bf->bf_mpdu) 820 return bf; 821 822 /* 823 * Synchronize the DMA transfer with CPU before 824 * 1. accessing the frame 825 * 2. requeueing the same buffer to h/w 826 */ 827 dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr, 828 common->rx_bufsize, 829 DMA_FROM_DEVICE); 830 831 return bf; 832 } 833 834 /* Assumes you've already done the endian to CPU conversion */ 835 static bool ath9k_rx_accept(struct ath_common *common, 836 struct ieee80211_hdr *hdr, 837 struct ieee80211_rx_status *rxs, 838 struct ath_rx_status *rx_stats, 839 bool *decrypt_error) 840 { 841 struct ath_hw *ah = common->ah; 842 __le16 fc; 843 u8 rx_status_len = ah->caps.rx_status_len; 844 845 fc = hdr->frame_control; 846 847 if (!rx_stats->rs_datalen) 848 return false; 849 /* 850 * rs_status follows rs_datalen so if rs_datalen is too large 851 * we can take a hint that hardware corrupted it, so ignore 852 * those frames. 853 */ 854 if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len)) 855 return false; 856 857 /* 858 * rs_more indicates chained descriptors which can be used 859 * to link buffers together for a sort of scatter-gather 860 * operation. 861 * reject the frame, we don't support scatter-gather yet and 862 * the frame is probably corrupt anyway 863 */ 864 if (rx_stats->rs_more) 865 return false; 866 867 /* 868 * The rx_stats->rs_status will not be set until the end of the 869 * chained descriptors so it can be ignored if rs_more is set. The 870 * rs_more will be false at the last element of the chained 871 * descriptors. 
872 */ 873 if (rx_stats->rs_status != 0) { 874 if (rx_stats->rs_status & ATH9K_RXERR_CRC) 875 rxs->flag |= RX_FLAG_FAILED_FCS_CRC; 876 if (rx_stats->rs_status & ATH9K_RXERR_PHY) 877 return false; 878 879 if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) { 880 *decrypt_error = true; 881 } else if (rx_stats->rs_status & ATH9K_RXERR_MIC) { 882 /* 883 * The MIC error bit is only valid if the frame 884 * is not a control frame or fragment, and it was 885 * decrypted using a valid TKIP key. 886 */ 887 if (!ieee80211_is_ctl(fc) && 888 !ieee80211_has_morefrags(fc) && 889 !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) && 890 test_bit(rx_stats->rs_keyix, common->tkip_keymap)) 891 rxs->flag |= RX_FLAG_MMIC_ERROR; 892 else 893 rx_stats->rs_status &= ~ATH9K_RXERR_MIC; 894 } 895 /* 896 * Reject error frames with the exception of 897 * decryption and MIC failures. For monitor mode, 898 * we also ignore the CRC error. 899 */ 900 if (ah->opmode == NL80211_IFTYPE_MONITOR) { 901 if (rx_stats->rs_status & 902 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC | 903 ATH9K_RXERR_CRC)) 904 return false; 905 } else { 906 if (rx_stats->rs_status & 907 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) { 908 return false; 909 } 910 } 911 } 912 return true; 913 } 914 915 static int ath9k_process_rate(struct ath_common *common, 916 struct ieee80211_hw *hw, 917 struct ath_rx_status *rx_stats, 918 struct ieee80211_rx_status *rxs) 919 { 920 struct ieee80211_supported_band *sband; 921 enum ieee80211_band band; 922 unsigned int i = 0; 923 924 band = hw->conf.channel->band; 925 sband = hw->wiphy->bands[band]; 926 927 if (rx_stats->rs_rate & 0x80) { 928 /* HT rate */ 929 rxs->flag |= RX_FLAG_HT; 930 if (rx_stats->rs_flags & ATH9K_RX_2040) 931 rxs->flag |= RX_FLAG_40MHZ; 932 if (rx_stats->rs_flags & ATH9K_RX_GI) 933 rxs->flag |= RX_FLAG_SHORT_GI; 934 rxs->rate_idx = rx_stats->rs_rate & 0x7f; 935 return 0; 936 } 937 938 for (i = 0; i < sband->n_bitrates; i++) { 939 if (sband->bitrates[i].hw_value == rx_stats->rs_rate) 
{ 940 rxs->rate_idx = i; 941 return 0; 942 } 943 if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) { 944 rxs->flag |= RX_FLAG_SHORTPRE; 945 rxs->rate_idx = i; 946 return 0; 947 } 948 } 949 950 /* 951 * No valid hardware bitrate found -- we should not get here 952 * because hardware has already validated this frame as OK. 953 */ 954 ath_print(common, ATH_DBG_XMIT, "unsupported hw bitrate detected " 955 "0x%02x using 1 Mbit\n", rx_stats->rs_rate); 956 957 return -EINVAL; 958 } 959 960 static void ath9k_process_rssi(struct ath_common *common, 961 struct ieee80211_hw *hw, 962 struct ieee80211_hdr *hdr, 963 struct ath_rx_status *rx_stats) 964 { 965 struct ath_hw *ah = common->ah; 966 struct ieee80211_sta *sta; 967 struct ath_node *an; 968 int last_rssi = ATH_RSSI_DUMMY_MARKER; 969 __le16 fc; 970 971 fc = hdr->frame_control; 972 973 rcu_read_lock(); 974 /* 975 * XXX: use ieee80211_find_sta! This requires quite a bit of work 976 * under the current ath9k virtual wiphy implementation as we have 977 * no way of tying a vif to wiphy. Typically vifs are attached to 978 * at least one sdata of a wiphy on mac80211 but with ath9k virtual 979 * wiphy you'd have to iterate over every wiphy and each sdata. 980 */ 981 if (is_multicast_ether_addr(hdr->addr1)) 982 sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL); 983 else 984 sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, hdr->addr1); 985 986 if (sta) { 987 an = (struct ath_node *) sta->drv_priv; 988 if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && 989 !rx_stats->rs_moreaggr) 990 ATH_RSSI_LPF(an->last_rssi, rx_stats->rs_rssi); 991 last_rssi = an->last_rssi; 992 } 993 rcu_read_unlock(); 994 995 if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER)) 996 rx_stats->rs_rssi = ATH_EP_RND(last_rssi, 997 ATH_RSSI_EP_MULTIPLIER); 998 if (rx_stats->rs_rssi < 0) 999 rx_stats->rs_rssi = 0; 1000 1001 /* Update Beacon RSSI, this is used by ANI. 
*/ 1002 if (ieee80211_is_beacon(fc)) 1003 ah->stats.avgbrssi = rx_stats->rs_rssi; 1004 } 1005 1006 /* 1007 * For Decrypt or Demic errors, we only mark packet status here and always push 1008 * up the frame up to let mac80211 handle the actual error case, be it no 1009 * decryption key or real decryption error. This let us keep statistics there. 1010 */ 1011 static int ath9k_rx_skb_preprocess(struct ath_common *common, 1012 struct ieee80211_hw *hw, 1013 struct ieee80211_hdr *hdr, 1014 struct ath_rx_status *rx_stats, 1015 struct ieee80211_rx_status *rx_status, 1016 bool *decrypt_error) 1017 { 1018 memset(rx_status, 0, sizeof(struct ieee80211_rx_status)); 1019 1020 /* 1021 * everything but the rate is checked here, the rate check is done 1022 * separately to avoid doing two lookups for a rate for each frame. 1023 */ 1024 if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error)) 1025 return -EINVAL; 1026 1027 ath9k_process_rssi(common, hw, hdr, rx_stats); 1028 1029 if (ath9k_process_rate(common, hw, rx_stats, rx_status)) 1030 return -EINVAL; 1031 1032 rx_status->band = hw->conf.channel->band; 1033 rx_status->freq = hw->conf.channel->center_freq; 1034 rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi; 1035 rx_status->antenna = rx_stats->rs_antenna; 1036 rx_status->flag |= RX_FLAG_TSFT; 1037 1038 return 0; 1039 } 1040 1041 static void ath9k_rx_skb_postprocess(struct ath_common *common, 1042 struct sk_buff *skb, 1043 struct ath_rx_status *rx_stats, 1044 struct ieee80211_rx_status *rxs, 1045 bool decrypt_error) 1046 { 1047 struct ath_hw *ah = common->ah; 1048 struct ieee80211_hdr *hdr; 1049 int hdrlen, padpos, padsize; 1050 u8 keyix; 1051 __le16 fc; 1052 1053 /* see if any padding is done by the hw and remove it */ 1054 hdr = (struct ieee80211_hdr *) skb->data; 1055 hdrlen = ieee80211_get_hdrlen_from_skb(skb); 1056 fc = hdr->frame_control; 1057 padpos = ath9k_cmn_padpos(hdr->frame_control); 1058 1059 /* The MAC header is padded to have 32-bit 
boundary if the 1060 * packet payload is non-zero. The general calculation for 1061 * padsize would take into account odd header lengths: 1062 * padsize = (4 - padpos % 4) % 4; However, since only 1063 * even-length headers are used, padding can only be 0 or 2 1064 * bytes and we can optimize this a bit. In addition, we must 1065 * not try to remove padding from short control frames that do 1066 * not have payload. */ 1067 padsize = padpos & 3; 1068 if (padsize && skb->len>=padpos+padsize+FCS_LEN) { 1069 memmove(skb->data + padsize, skb->data, padpos); 1070 skb_pull(skb, padsize); 1071 } 1072 1073 keyix = rx_stats->rs_keyix; 1074 1075 if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error && 1076 ieee80211_has_protected(fc)) { 1077 rxs->flag |= RX_FLAG_DECRYPTED; 1078 } else if (ieee80211_has_protected(fc) 1079 && !decrypt_error && skb->len >= hdrlen + 4) { 1080 keyix = skb->data[hdrlen + 3] >> 6; 1081 1082 if (test_bit(keyix, common->keymap)) 1083 rxs->flag |= RX_FLAG_DECRYPTED; 1084 } 1085 if (ah->sw_mgmt_crypto && 1086 (rxs->flag & RX_FLAG_DECRYPTED) && 1087 ieee80211_is_mgmt(fc)) 1088 /* Use software decrypt for management frames. 
 */
	rxs->flag &= ~RX_FLAG_DECRYPTED;
}

/*
 * Seed the antenna-diversity quick scan after a scan period in which the
 * alternate antenna performed well: record the averaged main-antenna RSSI
 * for the LNA configuration that was active, and derive from the current
 * (main, alt) LNA pair the main config plus the two candidate configs
 * that the quick scan will try next.
 */
static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb,
				      struct ath_hw_antcomb_conf ant_conf,
				      int main_rssi_avg)
{
	antcomb->quick_scan_cnt = 0;

	/* Remember the main-antenna RSSI average for the LNA in use. */
	if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
		antcomb->rssi_lna2 = main_rssi_avg;
	else if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA1)
		antcomb->rssi_lna1 = main_rssi_avg;

	/* Key is (main_lna_conf << 4) | alt_lna_conf. */
	switch ((ant_conf.main_lna_conf << 4) | ant_conf.alt_lna_conf) {
	case (0x10): /* LNA2 A-B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
		break;
	case (0x20): /* LNA1 A-B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
		break;
	case (0x21): /* LNA1 LNA2 */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		break;
	case (0x12): /* LNA2 LNA1 */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		break;
	case (0x13): /* LNA2 A+B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
		break;
	case (0x23): /* LNA1 A+B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
		break;
	default:
		/* Unhandled combinations leave the previous scan setup. */
		break;
	}
}

/*
 * Advance the three-phase quick scan (quick_scan_cnt 0..2).
 *
 * Phase 0/1 set the next candidate alt LNA config and record the measured
 * RSSI averages; phases 1 and 2 additionally judge whether the alternate
 * antenna beat the main one (first_ratio / second_ratio) via
 * ath_is_alt_ant_ratio_better(). Phase 2 ends the scan and commits the
 * best (main, alt) LNA pair into *div_ant_conf.
 *
 * NOTE: several branches below rely on brace-less nested if/else
 * (dangling-else) structure; the nesting is intentional.
 */
static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
				struct ath_hw_antcomb_conf *div_ant_conf,
				int main_rssi_avg, int alt_rssi_avg,
				int alt_ratio)
{
	/* alt_good */
	switch (antcomb->quick_scan_cnt) {
	case 0:
		/* set alt to main, and alt to first conf */
		div_ant_conf->main_lna_conf = antcomb->main_conf;
		div_ant_conf->alt_lna_conf = antcomb->first_quick_scan_conf;
		break;
	case 1:
		/* set alt to main, and alt to second conf */
		div_ant_conf->main_lna_conf = antcomb->main_conf;
		div_ant_conf->alt_lna_conf = antcomb->second_quick_scan_conf;
		antcomb->rssi_first = main_rssi_avg;
		antcomb->rssi_second = alt_rssi_avg;

		if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
			/* main is LNA1 */
			if (ath_is_alt_ant_ratio_better(alt_ratio,
					ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
					ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
					main_rssi_avg, alt_rssi_avg,
					antcomb->total_pkt_count))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
					ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
					ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
					main_rssi_avg, alt_rssi_avg,
					antcomb->total_pkt_count))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		} else {
			/* main is a combined (A+B / A-B) config */
			if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
			    (alt_rssi_avg > main_rssi_avg +
			    ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
			    (alt_rssi_avg > main_rssi_avg)) &&
			    (antcomb->total_pkt_count > 50))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		}
		break;
	case 2:
		/* Final phase: scan is over, pick the winner. */
		antcomb->alt_good = false;
		antcomb->scan_not_start = false;
		antcomb->scan = false;
		antcomb->rssi_first = main_rssi_avg;
		antcomb->rssi_third = alt_rssi_avg;

		/* Attribute the measured alt RSSI to the config just tried. */
		if (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1)
			antcomb->rssi_lna1 = alt_rssi_avg;
		else if (antcomb->second_quick_scan_conf ==
			 ATH_ANT_DIV_COMB_LNA2)
			antcomb->rssi_lna2 = alt_rssi_avg;
		else if (antcomb->second_quick_scan_conf ==
			 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2) {
			if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)
				antcomb->rssi_lna2 = main_rssi_avg;
			else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1)
				antcomb->rssi_lna1 = main_rssi_avg;
		}

		/* Choose main LNA by which one measured stronger. */
		if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
		    ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)
			div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
		else
			div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;

		if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
					ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
					ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
					main_rssi_avg, alt_rssi_avg,
					antcomb->total_pkt_count))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
					ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
					ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
					main_rssi_avg, alt_rssi_avg,
					antcomb->total_pkt_count))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		} else {
			/* main is a combined (A+B / A-B) config */
			if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
			    (alt_rssi_avg > main_rssi_avg +
			    ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
			    (alt_rssi_avg > main_rssi_avg)) &&
			    (antcomb->total_pkt_count > 50))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		}

		/* set alt to the conf with maximum ratio */
		if (antcomb->first_ratio && antcomb->second_ratio) {
			if (antcomb->rssi_second > antcomb->rssi_third) {
				/* first alt */
				if ((antcomb->first_quick_scan_conf ==
				    ATH_ANT_DIV_COMB_LNA1) ||
				    (antcomb->first_quick_scan_conf ==
				    ATH_ANT_DIV_COMB_LNA2))
					/* Set alt LNA1 or LNA2 */
					if (div_ant_conf->main_lna_conf ==
					    ATH_ANT_DIV_COMB_LNA2)
						div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA1;
					else
						div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA2;
				else
					/* Set alt to A+B or A-B */
					div_ant_conf->alt_lna_conf =
						antcomb->first_quick_scan_conf;
			} else if ((antcomb->second_quick_scan_conf ==
				   ATH_ANT_DIV_COMB_LNA1) ||
				   (antcomb->second_quick_scan_conf ==
				   ATH_ANT_DIV_COMB_LNA2)) {
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			} else {
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->second_quick_scan_conf;
			}
		} else if (antcomb->first_ratio) {
			/* first alt */
			if ((antcomb->first_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->first_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->first_quick_scan_conf;
		} else if (antcomb->second_ratio) {
			/* second alt */
			if ((antcomb->second_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->second_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->second_quick_scan_conf;
		} else {
			/* main is largest */
			if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf = antcomb->main_conf;
		}
		break;
	default:
		break;
	}
}

/*
 * Program the hardware-specific fast-diversity bias for the selected
 * (main, alt) LNA combination. The bias values are opaque chip magic
 * keyed on (main_lna_conf << 4) | alt_lna_conf; combinations not listed
 * keep the previously configured bias.
 */
static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf)
{
	/* Adjust the fast_div_bias based on main and alt lna conf */
	switch ((ant_conf->main_lna_conf << 4) | ant_conf->alt_lna_conf) {
	case (0x01): /* A-B LNA2 */
		ant_conf->fast_div_bias = 0x3b;
		break;
	case (0x02): /* A-B LNA1 */
		ant_conf->fast_div_bias = 0x3d;
		break;
	case (0x03): /* A-B A+B */
		ant_conf->fast_div_bias = 0x1;
		break;
	case (0x10): /* LNA2 A-B */
		ant_conf->fast_div_bias = 0x7;
		break;
	case (0x12): /* LNA2 LNA1 */
		ant_conf->fast_div_bias = 0x2;
		break;
	case (0x13): /* LNA2 A+B */
		ant_conf->fast_div_bias = 0x7;
		break;
	case (0x20): /* LNA1 A-B */
		ant_conf->fast_div_bias = 0x6;
		break;
	case (0x21): /* LNA1 LNA2 */
		ant_conf->fast_div_bias = 0x0;
		break;
	case (0x23): /* LNA1 A+B */
		ant_conf->fast_div_bias = 0x6;
		break;
	case (0x30): /* A+B A-B */
		ant_conf->fast_div_bias = 0x1;
		break;
	case (0x31): /* A+B LNA2 */
		ant_conf->fast_div_bias = 0x3b;
		break;
	case (0x32): /* A+B LNA1 */
		ant_conf->fast_div_bias = 0x3d;
		break;
	default:
		break;
	}
}

/*
 * Antenna diversity and combining.
 *
 * Called per received frame (from the RX tasklet) to accumulate per-antenna
 * RSSI statistics and, once enough packets have been seen, to step the
 * LNA-combination scan state machine and reprogram the diversity hardware.
 */
static void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
{
	struct
ath_hw_antcomb_conf div_ant_conf;
	struct ath_ant_comb *antcomb = &sc->ant_comb;
	int alt_ratio = 0, alt_rssi_avg = 0, main_rssi_avg = 0, curr_alt_set;
	int curr_main_set, curr_bias;
	int main_rssi = rs->rs_rssi_ctl0;
	int alt_rssi = rs->rs_rssi_ctl1;
	int rx_ant_conf, main_ant_conf;
	bool short_scan = false;

	/* Which antenna config received this frame, and which is main. */
	rx_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_CURRENT_SHIFT) &
		      ATH_ANT_RX_MASK;
	main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) &
			ATH_ANT_RX_MASK;

	/* Record packet only when alt_rssi is positive */
	if (alt_rssi > 0) {
		antcomb->total_pkt_count++;
		antcomb->main_total_rssi += main_rssi;
		antcomb->alt_total_rssi += alt_rssi;
		if (main_ant_conf == rx_ant_conf)
			antcomb->main_recv_cnt++;
		else
			antcomb->alt_recv_cnt++;
	}

	/*
	 * Short scan check: while a scan with a good alternate antenna is in
	 * progress, abort it early when it has run too long or when the alt
	 * antenna is clearly losing after a small packet sample.
	 */
	if (antcomb->scan && antcomb->alt_good) {
		if (time_after(jiffies, antcomb->scan_start_time +
		    msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR)))
			short_scan = true;
		else
			if (antcomb->total_pkt_count ==
			    ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) {
				alt_ratio = ((antcomb->alt_recv_cnt * 100) /
					    antcomb->total_pkt_count);
				if (alt_ratio < ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
					short_scan = true;
			}
	}

	/* Wait for a full sample (and an A-MPDU boundary) unless short-scan. */
	if (((antcomb->total_pkt_count < ATH_ANT_DIV_COMB_MAX_PKTCOUNT) ||
	    rs->rs_moreaggr) && !short_scan)
		return;

	if (antcomb->total_pkt_count) {
		alt_ratio = ((antcomb->alt_recv_cnt * 100) /
			     antcomb->total_pkt_count);
		main_rssi_avg = (antcomb->main_total_rssi /
				 antcomb->total_pkt_count);
		alt_rssi_avg = (antcomb->alt_total_rssi /
				antcomb->total_pkt_count);
	}

	ath9k_hw_antdiv_comb_conf_get(sc->sc_ah, &div_ant_conf);
	curr_alt_set = div_ant_conf.alt_lna_conf;
	curr_main_set = div_ant_conf.main_lna_conf;
	curr_bias = div_ant_conf.fast_div_bias; /* read but currently unused */

	antcomb->count++;

	/* Periodically start a fresh scan, noting whether alt looked good. */
	if (antcomb->count == ATH_ANT_DIV_COMB_MAX_COUNT) {
		if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
			ath_lnaconf_alt_good_scan(antcomb, div_ant_conf,
						  main_rssi_avg);
			antcomb->alt_good = true;
		} else {
			antcomb->alt_good = false;
		}

		antcomb->count = 0;
		antcomb->scan = true;
		antcomb->scan_not_start = true;
	}

	if (!antcomb->scan) {
		if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
			if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) {
				/* Switch main and alt LNA */
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
			} else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) {
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			}

			goto div_comb_done;
		} else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) &&
			   (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) {
			/* Set alt to another LNA */
			if (curr_main_set == ATH_ANT_DIV_COMB_LNA2)
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
			else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1)
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;

			goto div_comb_done;
		}

		/* Alt not convincingly better: keep the current config. */
		if ((alt_rssi_avg < (main_rssi_avg +
		    ATH_ANT_DIV_COMB_LNA1_LNA2_DELTA)))
			goto div_comb_done;
	}

	if (!antcomb->scan_not_start) {
		/* Scan in progress: step through LNA2 -> LNA1 -> A+B -> A-B. */
		switch (curr_alt_set) {
		case ATH_ANT_DIV_COMB_LNA2:
			antcomb->rssi_lna2 = alt_rssi_avg;
			antcomb->rssi_lna1 = main_rssi_avg;
			antcomb->scan = true;
			/* set to A+B */
			div_ant_conf.main_lna_conf =
				ATH_ANT_DIV_COMB_LNA1;
			div_ant_conf.alt_lna_conf =
				ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1:
			antcomb->rssi_lna1 = alt_rssi_avg;
			antcomb->rssi_lna2 = main_rssi_avg;
			antcomb->scan = true;
			/* set to A+B */
			div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
			div_ant_conf.alt_lna_conf =
				ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
			antcomb->rssi_add = alt_rssi_avg;
			antcomb->scan = true;
			/* set to A-B */
			div_ant_conf.alt_lna_conf =
				ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2:
			/* All four configs measured: pick the best pair. */
			antcomb->rssi_sub = alt_rssi_avg;
			antcomb->scan = false;
			if (antcomb->rssi_lna2 >
			    (antcomb->rssi_lna1 +
			    ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
				/* use LNA2 as main LNA */
				if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
				    (antcomb->rssi_add > antcomb->rssi_sub)) {
					/* set to A+B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
				} else if (antcomb->rssi_sub >
					   antcomb->rssi_lna1) {
					/* set to A-B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
				} else {
					/* set to LNA1 */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				}
			} else {
				/* use LNA1 as main LNA */
				if ((antcomb->rssi_add > antcomb->rssi_lna2) &&
				    (antcomb->rssi_add > antcomb->rssi_sub)) {
					/* set to A+B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
				} else if (antcomb->rssi_sub >
					   antcomb->rssi_lna1) {
					/*
					 * set to A-B
					 * NOTE(review): compares against
					 * rssi_lna1 here as well, unlike the
					 * LNA2-main branch above — looks
					 * asymmetric; confirm intended.
					 */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
				} else {
					/* set to LNA2 */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				}
			}
			break;
		default:
			break;
		}
	} else {
		if (!antcomb->alt_good) {
			antcomb->scan_not_start = false;
			/* Set alt to another LNA */
			if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) {
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
			} else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) {
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			}
			goto div_comb_done;
		}
	}

	/* Alternate antenna was good: run the quick-scan state machine. */
	ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf,
					   main_rssi_avg, alt_rssi_avg,
					   alt_ratio);

	antcomb->quick_scan_cnt++;

div_comb_done:
	ath_ant_div_conf_fast_divbias(&div_ant_conf);

	ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf);

	/* Reset the sampling window for the next decision period. */
	antcomb->scan_start_time = jiffies;
	antcomb->total_pkt_count = 0;
	antcomb->main_total_rssi = 0;
	antcomb->alt_total_rssi = 0;
	antcomb->main_recv_cnt = 0;
	antcomb->alt_recv_cnt = 0;
}

/*
 * RX tasklet: drain completed receive buffers from the hardware.
 *
 * For each completed buffer: preprocess and validate the frame, allocate a
 * replacement skb, unmap the old DMA buffer, hand the frame to mac80211,
 * and link the (re)mapped buffer back to the hardware RX queue. Runs under
 * sc->rx.rxbuflock.
 *
 * @flush: non-zero means the RX queue is being flushed — requeue every
 *         buffer without processing it.
 * @hp:    EDMA only — service the high-priority RX queue instead of the
 *         low-priority one.
 *
 * Returns 0.
 */
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
	struct ath_buf *bf;
	struct sk_buff *skb = NULL, *requeue_skb;
	struct ieee80211_rx_status *rxs;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	/*
	 * The hw can technically differ from common->hw when using ath9k
	 * virtual wiphy so to account for that we iterate over the active
	 * wiphys and find the appropriate wiphy and therefore hw.
	 */
	struct ieee80211_hw *hw = NULL;
	struct ieee80211_hdr *hdr;
	int retval;
	bool decrypt_error = false;
	struct ath_rx_status rs;
	enum ath9k_rx_qtype qtype;
	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	int dma_type;
	u8 rx_status_len = ah->caps.rx_status_len;
	u64 tsf = 0;
	u32 tsf_lower = 0;
	unsigned long flags;

	/*
	 * EDMA buffers are mapped bidirectionally — presumably because the
	 * hardware also writes RX status into the buffer (see the
	 * rx_status_len handling below); confirm against the HW docs.
	 */
	if (edma)
		dma_type = DMA_BIDIRECTIONAL;
	else
		dma_type = DMA_FROM_DEVICE;

	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
	spin_lock_bh(&sc->rx.rxbuflock);

	tsf = ath9k_hw_gettsf64(ah);
	tsf_lower = tsf & 0xffffffff;

	do {
		/* If handling rx interrupt and flush is in progress => exit */
		if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
			break;

		memset(&rs, 0, sizeof(rs));
		if (edma)
			bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
		else
			bf = ath_get_next_rx_buf(sc, &rs);

		if (!bf)
			break;

		skb = bf->bf_mpdu;
		if (!skb)
			continue;

		/* 802.11 header follows the (possibly empty) RX status. */
		hdr = (struct ieee80211_hdr *) (skb->data + rx_status_len);
		rxs = IEEE80211_SKB_RXCB(skb);

		hw = ath_get_virt_hw(sc, hdr);

		ath_debug_stat_rx(sc, &rs);

		/*
		 * If we're asked to flush receive queue, directly
		 * chain it back at the queue without processing it.
		 */
		if (flush)
			goto requeue;

		retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
						 rxs, &decrypt_error);
		if (retval)
			goto requeue;

		/*
		 * Reconstruct a full 64-bit mactime from the 32-bit HW
		 * timestamp, compensating for TSF wrap-around in either
		 * direction relative to the snapshot taken above.
		 */
		rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
		if (rs.rs_tstamp > tsf_lower &&
		    unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
			rxs->mactime -= 0x100000000ULL;

		if (rs.rs_tstamp < tsf_lower &&
		    unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
			rxs->mactime += 0x100000000ULL;

		/* Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb */
		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);

		/* If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
		 * skb and put it at the tail of the sc->rx.rxbuf list for
		 * processing. */
		if (!requeue_skb)
			goto requeue;

		/* Unmap the frame */
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 common->rx_bufsize,
				 dma_type);

		/* Expose frame data; strip the leading HW RX status if any. */
		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
		if (ah->caps.rx_status_len)
			skb_pull(skb, ah->caps.rx_status_len);

		ath9k_rx_skb_postprocess(common, skb, &rs,
					 rxs, decrypt_error);

		/* We will now give hardware our shiny new allocated skb */
		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
						 common->rx_bufsize,
						 dma_type);
		if (unlikely(dma_mapping_error(sc->dev,
			  bf->bf_buf_addr))) {
			/* Mapping failed: drop the buffer from circulation,
			 * but still deliver the received frame upward. */
			dev_kfree_skb_any(requeue_skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_print(common, ATH_DBG_FATAL,
				  "dma_mapping_error() on RX\n");
			ath_rx_send_to_mac80211(hw, sc, skb, rxs);
			break;
		}

		/*
		 * change the default rx antenna if rx diversity chooses the
		 * other antenna 3 times in a row.
		 */
		if (sc->rx.defant != rs.rs_antenna) {
			if (++sc->rx.rxotherant >= 3)
				ath_setdefantenna(sc, rs.rs_antenna);
		} else {
			sc->rx.rxotherant = 0;
		}

		/* Let the power-save logic inspect beacons/PS frames. */
		spin_lock_irqsave(&sc->sc_pm_lock, flags);
		if (unlikely(ath9k_check_auto_sleep(sc) ||
			     (sc->ps_flags & (PS_WAIT_FOR_BEACON |
					      PS_WAIT_FOR_CAB |
					      PS_WAIT_FOR_PSPOLL_DATA))))
			ath_rx_ps(sc, skb);
		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

		if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
			ath_ant_comb_scan(sc, &rs);

		ath_rx_send_to_mac80211(hw, sc, skb, rxs);

requeue:
		if (edma) {
			list_add_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_edma_buf_link(sc, qtype);
		} else {
			list_move_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_buf_link(sc, bf);
		}
	} while (1);

	spin_unlock_bh(&sc->rx.rxbuflock);

	return 0;
}