/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

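/*
 * The EDMA receive path stashes a pointer to the owning ath_buf in the
 * skb's control buffer, so a completed skb can be mapped back to its
 * buffer (see ath_rx_edma_buf_link() and ath_edma_get_buffers()).
 */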
#define SKB_CB_ATHBUF(__skb)	(*((struct ath_buf **)__skb->cb))

static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
{
	return sc->ps_enabled &&
	       (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
}

/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct sk_buff *skb;

	ATH_RXBUF_RESET(bf);

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	BUG_ON(skb == NULL);
	ds->ds_vdata = skb->data;

	/*
	 * setup rx descriptors. The rx_bufsize here tells the hardware
	 * how much data it can DMA to us and that we are prepared
	 * to process
	 */
	ath9k_hw_setuprxdesc(ah, ds,
			     common->rx_bufsize,
			     0);

	if (sc->rx.rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->rx.rxlink = bf->bf_daddr;

	sc->rx.rxlink = &ds->ds_link;
}

static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(sc->sc_ah, antenna);
	sc->rx.defant = antenna;
	sc->rx.rxotherant = 0;
}

static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	ath_hw_setbssidmask(common);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}

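/*
 * On EDMA capable hardware (ATH9K_HW_CAP_EDMA) received frames are
 * delivered through two FIFOs, a high and a low priority queue, and the
 * hardware writes the RX status words into the first rx_status_len bytes
 * of the buffer itself. The software rx_fifo queue mirrors the buffers
 * that have been handed to the hardware, bounded by rx_fifo_hwsize.
 */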
static bool ath_rx_edma_buf_link(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;
	struct ath_buf *bf;

	rx_edma = &sc->rx.rx_edma[qtype];
	if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
		return false;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	list_del_init(&bf->list);

	skb = bf->bf_mpdu;

	ATH_RXBUF_RESET(bf);
	memset(skb->data, 0, ah->caps.rx_status_len);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   ah->caps.rx_status_len, DMA_TO_DEVICE);

	SKB_CB_ATHBUF(skb) = bf;
	ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
	skb_queue_tail(&rx_edma->rx_fifo, skb);

	return true;
}

static void ath_rx_addbuffer_edma(struct ath_softc *sc,
				  enum ath9k_rx_qtype qtype, int size)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_buf *bf, *tbf;

	if (list_empty(&sc->rx.rxbuf)) {
		ath_dbg(common, QUEUE, "No free rx buf available\n");
		return;
	}

	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list)
		if (!ath_rx_edma_buf_link(sc, qtype))
			break;
}

static void ath_rx_remove_buffer(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_buf *bf;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;

	rx_edma = &sc->rx.rx_edma[qtype];

	while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}
}

static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf;

	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		if (bf->bf_mpdu) {
			dma_unmap_single(sc->dev, bf->bf_buf_addr,
					 common->rx_bufsize,
					 DMA_BIDIRECTIONAL);
			dev_kfree_skb_any(bf->bf_mpdu);
			bf->bf_buf_addr = 0;
			bf->bf_mpdu = NULL;
		}
	}

	INIT_LIST_HEAD(&sc->rx.rxbuf);

	kfree(sc->rx.rx_bufptr);
	sc->rx.rx_bufptr = NULL;
}

static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
{
	skb_queue_head_init(&rx_edma->rx_fifo);
	rx_edma->rx_fifo_hwsize = size;
}

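/*
 * EDMA RX buffers are mapped DMA_BIDIRECTIONAL: the device writes the
 * status header and the frame into the buffer, while the driver clears
 * the status words and syncs them back to the device (see
 * ath_rx_edma_buf_link()) before a buffer is returned to the FIFO.
 */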
static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0, i;
	u32 size;

	ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
				    ah->caps.rx_status_len);

	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
			       ah->caps.rx_lp_qdepth);
	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
			       ah->caps.rx_hp_qdepth);

	size = sizeof(struct ath_buf) * nbufs;
	bf = kzalloc(size, GFP_KERNEL);
	if (!bf)
		return -ENOMEM;

	INIT_LIST_HEAD(&sc->rx.rxbuf);
	sc->rx.rx_bufptr = bf;

	for (i = 0; i < nbufs; i++, bf++) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
		if (!skb) {
			error = -ENOMEM;
			goto rx_init_fail;
		}

		memset(skb->data, 0, common->rx_bufsize);
		bf->bf_mpdu = skb;

		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common,
				"dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto rx_init_fail;
		}

		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}

	return 0;

rx_init_fail:
	ath_rx_edma_cleanup(sc);
	return error;
}

static void ath_edma_start_recv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxbuflock);

	ath9k_hw_rxena(sc->sc_ah);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);

	ath_opmode_init(sc);

	ath9k_hw_startpcureceive(sc->sc_ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL));

	spin_unlock_bh(&sc->rx.rxbuflock);
}

static void ath_edma_stop_recv(struct ath_softc *sc)
{
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
}

int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	spin_lock_init(&sc->sc_pcu_lock);
	spin_lock_init(&sc->rx.rxbuflock);
	clear_bit(SC_OP_RXFLUSH, &sc->sc_flags);

	common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
			     sc->sc_ah->caps.rx_status_len;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		return ath_rx_edma_init(sc, nbufs);
	} else {
		ath_dbg(common, CONFIG, "cachelsz %u rxbufsize %u\n",
			common->cachelsz, common->rx_bufsize);

		/* Initialize rx descriptors */

		error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
					  "rx", nbufs, 1, 0);
		if (error != 0) {
			ath_err(common,
				"failed to allocate rx descriptors: %d\n",
				error);
			goto err;
		}

		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = ath_rxbuf_alloc(common, common->rx_bufsize,
					      GFP_KERNEL);
			if (skb == NULL) {
				error = -ENOMEM;
				goto err;
			}

			bf->bf_mpdu = skb;
			bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
							 common->rx_bufsize,
							 DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(sc->dev,
						       bf->bf_buf_addr))) {
				dev_kfree_skb_any(skb);
				bf->bf_mpdu = NULL;
				bf->bf_buf_addr = 0;
				ath_err(common,
					"dma_mapping_error() on RX init\n");
				error = -ENOMEM;
				goto err;
			}
		}
		sc->rx.rxlink = NULL;
	}

err:
	if (error)
		ath_rx_cleanup(sc);

	return error;
}

void ath_rx_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_rx_edma_cleanup(sc);
		return;
	} else {
		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = bf->bf_mpdu;
			if (skb) {
				dma_unmap_single(sc->dev, bf->bf_buf_addr,
						 common->rx_bufsize,
						 DMA_FROM_DEVICE);
				dev_kfree_skb(skb);
				bf->bf_buf_addr = 0;
				bf->bf_mpdu = NULL;
			}
		}

		if (sc->rx.rxdma.dd_desc_len != 0)
			ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
	}
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 *   - when scanning
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
	u32 rfilt;

	rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	if (sc->rx.rxfilter & FIF_PROBE_REQ)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/*
	 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
	 * mode interface or when in monitor mode. AP mode does not need this
	 * since it receives all in-BSS frames anyway.
	 */
	if (sc->sc_ah->is_monitoring)
		rfilt |= ATH9K_RX_FILTER_PROM;

	if (sc->rx.rxfilter & FIF_CONTROL)
		rfilt |= ATH9K_RX_FILTER_CONTROL;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
	    (sc->nvifs <= 1) &&
	    !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
		rfilt |= ATH9K_RX_FILTER_MYBEACON;
	else
		rfilt |= ATH9K_RX_FILTER_BEACON;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
	    (sc->rx.rxfilter & FIF_PSPOLL))
		rfilt |= ATH9K_RX_FILTER_PSPOLL;

	if (conf_is_ht(&sc->hw->conf))
		rfilt |= ATH9K_RX_FILTER_COMP_BAR;

	if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
		/* This is needed for older chips */
		if (sc->sc_ah->hw_version.macVersion <= AR_SREV_VERSION_9160)
			rfilt |= ATH9K_RX_FILTER_PROM;
		rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
	}

	if (AR_SREV_9550(sc->sc_ah))
		rfilt |= ATH9K_RX_FILTER_4ADDRESS;

	return rfilt;
}

int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_edma_start_recv(sc);
		return 0;
	}

	spin_lock_bh(&sc->rx.rxbuflock);
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	sc->rx.rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);

start_recv:
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL));

	spin_unlock_bh(&sc->rx.rxbuflock);

	return 0;
}

bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	bool stopped, reset = false;

	spin_lock_bh(&sc->rx.rxbuflock);
	ath9k_hw_abortpcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	stopped = ath9k_hw_stopdmarecv(ah, &reset);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_edma_stop_recv(sc);
	else
		sc->rx.rxlink = NULL;
	spin_unlock_bh(&sc->rx.rxbuflock);

	if (!(ah->ah_flags & AH_UNPLUGGED) &&
	    unlikely(!stopped)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Could not stop RX, we could be "
			"confusing the DMA engine when we start RX up\n");
		ATH_DBG_WARN_ON_ONCE(!stopped);
	}
	return stopped && !reset;
}

void ath_flushrecv(struct ath_softc *sc)
{
	set_bit(SC_OP_RXFLUSH, &sc->sc_flags);
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_rx_tasklet(sc, 1, true);
	ath_rx_tasklet(sc, 1, false);
	clear_bit(SC_OP_RXFLUSH, &sc->sc_flags);
}

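/*
 * Returns true when the beacon carries a TIM element whose DTIM count is
 * zero and whose bitmap control field has the multicast bit set, i.e. the
 * AP has buffered broadcast/multicast frames that will follow this beacon.
 */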
static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
	/* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
	struct ieee80211_mgmt *mgmt;
	u8 *pos, *end, id, elen;
	struct ieee80211_tim_ie *tim;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	pos = mgmt->u.beacon.variable;
	end = skb->data + skb->len;

	while (pos + 2 < end) {
		id = *pos++;
		elen = *pos++;
		if (pos + elen > end)
			break;

		if (id == WLAN_EID_TIM) {
			if (elen < sizeof(*tim))
				break;
			tim = (struct ieee80211_tim_ie *) pos;
			if (tim->dtim_count != 0)
				break;
			return tim->bitmap_ctrl & 0x01;
		}

		pos += elen;
	}

	return false;
}

static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

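	/* 24-byte 802.11 management header + 8-byte timestamp +
	 * 2-byte beacon interval + 2-byte capability info; anything
	 * shorter cannot be a valid beacon. */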
	if (skb->len < 24 + 8 + 2 + 2)
		return;

	sc->ps_flags &= ~PS_WAIT_FOR_BEACON;

	if (sc->ps_flags & PS_BEACON_SYNC) {
		sc->ps_flags &= ~PS_BEACON_SYNC;
		ath_dbg(common, PS,
			"Reconfigure Beacon timers based on timestamp from the AP\n");
		ath9k_set_beacon(sc);
	}

	if (ath_beacon_dtim_pending_cab(skb)) {
		/*
		 * Remain awake waiting for buffered broadcast/multicast
		 * frames. If the last broadcast/multicast frame is not
		 * received properly, the next beacon frame will work as
		 * a backup trigger for returning to NETWORK SLEEP state,
		 * so we are waiting for it as well.
		 */
		ath_dbg(common, PS,
			"Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n");
		sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
		return;
	}

	if (sc->ps_flags & PS_WAIT_FOR_CAB) {
		/*
		 * This can happen if a broadcast frame is dropped or the AP
		 * fails to send a frame indicating that all CAB frames have
		 * been delivered.
		 */
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
		ath_dbg(common, PS, "PS wait for CAB frames timed out\n");
	}
}

static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon)
{
	struct ieee80211_hdr *hdr;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Process Beacon and CAB receive in PS state */
	if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
	    && mybeacon) {
		ath_rx_ps_beacon(sc, skb);
	} else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
		   (ieee80211_is_data(hdr->frame_control) ||
		    ieee80211_is_action(hdr->frame_control)) &&
		   is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_moredata(hdr->frame_control)) {
		/*
		 * No more broadcast/multicast frames to be received at this
		 * point.
		 */
		sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
		ath_dbg(common, PS,
			"All PS CAB frames received, back to sleep\n");
	} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
		   !is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_morefrags(hdr->frame_control)) {
		sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
		ath_dbg(common, PS,
			"Going back to sleep after having received PS-Poll data (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}
}

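/*
 * Pull the next completed buffer off an EDMA RX FIFO: peek at the head of
 * the software queue, sync the buffer for the CPU and let the hardware
 * specific code parse the in-buffer status. -EINPROGRESS means the
 * hardware has not finished with this buffer yet, so it is handed back
 * to the device and left in place.
 */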
static bool ath_edma_get_buffers(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype,
				 struct ath_rx_status *rs,
				 struct ath_buf **dest)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int ret;

	skb = skb_peek(&rx_edma->rx_fifo);
	if (!skb)
		return false;

	bf = SKB_CB_ATHBUF(skb);
	BUG_ON(!bf);

	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize, DMA_FROM_DEVICE);

	ret = ath9k_hw_process_rxdesc_edma(ah, rs, skb->data);
	if (ret == -EINPROGRESS) {
		/* let device gain the buffer again */
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
					   common->rx_bufsize, DMA_FROM_DEVICE);
		return false;
	}

	__skb_unlink(skb, &rx_edma->rx_fifo);
	if (ret == -EINVAL) {
		/* corrupt descriptor, skip this one and the following one */
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);

		skb = skb_peek(&rx_edma->rx_fifo);
		if (skb) {
			bf = SKB_CB_ATHBUF(skb);
			BUG_ON(!bf);

			__skb_unlink(skb, &rx_edma->rx_fifo);
			list_add_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_edma_buf_link(sc, qtype);
		}

		bf = NULL;
	}

	*dest = bf;
	return true;
}

static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
						struct ath_rx_status *rs,
						enum ath9k_rx_qtype qtype)
{
	struct ath_buf *bf = NULL;

	while (ath_edma_get_buffers(sc, qtype, rs, &bf)) {
		if (!bf)
			continue;

		return bf;
	}
	return NULL;
}

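/*
 * Legacy (non-EDMA) counterpart of ath_edma_get_next_rx_buf(): completion
 * status is read from the descriptor ring built by ath_rx_buf_link()
 * rather than from the head of the buffer itself.
 */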
static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
					   struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct ath_buf *bf;
	int ret;

	if (list_empty(&sc->rx.rxbuf)) {
		sc->rx.rxlink = NULL;
		return NULL;
	}

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ds = bf->bf_desc;

	/*
	 * Must provide the virtual address of the current
	 * descriptor, the physical address, and the virtual
	 * address of the next descriptor in the h/w chain.
	 * This allows the HAL to look ahead to see if the
	 * hardware is done with a descriptor by checking the
	 * done bit in the following descriptor and the address
	 * of the current descriptor the DMA engine is working
	 * on. All this is necessary because of our use of
	 * a self-linked list to avoid rx overruns.
	 */
	ret = ath9k_hw_rxprocdesc(ah, ds, rs);
	if (ret == -EINPROGRESS) {
		struct ath_rx_status trs;
		struct ath_buf *tbf;
		struct ath_desc *tds;

		memset(&trs, 0, sizeof(trs));
		if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
			sc->rx.rxlink = NULL;
			return NULL;
		}

		tbf = list_entry(bf->list.next, struct ath_buf, list);

		/*
		 * On some hardware the descriptor status words could
		 * get corrupted, including the done bit. Because of
		 * this, check if the next descriptor's done bit is
		 * set or not.
		 *
		 * If the next descriptor's done bit is set, the current
		 * descriptor has been corrupted. Force s/w to discard
		 * this descriptor and continue...
		 */

		tds = tbf->bf_desc;
		ret = ath9k_hw_rxprocdesc(ah, tds, &trs);
		if (ret == -EINPROGRESS)
			return NULL;
	}

	if (!bf->bf_mpdu)
		return bf;

	/*
	 * Synchronize the DMA transfer with CPU before
	 * 1. accessing the frame
	 * 2. requeueing the same buffer to h/w
	 */
	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize,
				DMA_FROM_DEVICE);

	return bf;
}

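/*
 * Decide whether a completed frame should be passed up to mac80211.
 * Frames with unrecoverable errors are rejected here, while decryption
 * and Michael MIC failures are only reported via rxs->flag and
 * *decrypt_error so that mac80211 can handle and account for them.
 */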
/* Assumes you've already done the endian to CPU conversion */
static bool ath9k_rx_accept(struct ath_common *common,
			    struct ieee80211_hdr *hdr,
			    struct ieee80211_rx_status *rxs,
			    struct ath_rx_status *rx_stats,
			    bool *decrypt_error)
{
	struct ath_softc *sc = (struct ath_softc *) common->priv;
	bool is_mc, is_valid_tkip, strip_mic, mic_error;
	struct ath_hw *ah = common->ah;
	__le16 fc;
	u8 rx_status_len = ah->caps.rx_status_len;

	fc = hdr->frame_control;

	is_mc = !!is_multicast_ether_addr(hdr->addr1);
	is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID &&
		test_bit(rx_stats->rs_keyix, common->tkip_keymap);
	strip_mic = is_valid_tkip && ieee80211_is_data(fc) &&
		ieee80211_has_protected(fc) &&
		!(rx_stats->rs_status &
		  (ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC |
		   ATH9K_RXERR_KEYMISS));

	/*
	 * Key miss events are only relevant for pairwise keys where the
	 * descriptor does contain a valid key index. This has been observed
	 * mostly with CCMP encryption.
	 */
	if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID ||
	    !test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
		rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;

	if (!rx_stats->rs_datalen) {
		RX_STAT_INC(rx_len_err);
		return false;
	}

	/*
	 * rs_status follows rs_datalen so if rs_datalen is too large
	 * we can take a hint that hardware corrupted it, so ignore
	 * those frames.
	 */
	if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len)) {
		RX_STAT_INC(rx_len_err);
		return false;
	}

	/* Only use error bits from the last fragment */
	if (rx_stats->rs_more)
		return true;

	mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) &&
		!ieee80211_has_morefrags(fc) &&
		!(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
		(rx_stats->rs_status & ATH9K_RXERR_MIC);

	/*
	 * The rx_stats->rs_status will not be set until the end of the
	 * chained descriptors so it can be ignored if rs_more is set. The
	 * rs_more will be false at the last element of the chained
	 * descriptors.
	 */
	if (rx_stats->rs_status != 0) {
		u8 status_mask;

		if (rx_stats->rs_status & ATH9K_RXERR_CRC) {
			rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
			mic_error = false;
		}
		if (rx_stats->rs_status & ATH9K_RXERR_PHY)
			return false;

		if ((rx_stats->rs_status & ATH9K_RXERR_DECRYPT) ||
		    (!is_mc && (rx_stats->rs_status & ATH9K_RXERR_KEYMISS))) {
			*decrypt_error = true;
			mic_error = false;
		}

		/*
		 * Reject error frames with the exception of
		 * decryption and MIC failures. For monitor mode,
		 * we also ignore the CRC error.
		 */
		status_mask = ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
			      ATH9K_RXERR_KEYMISS;

		if (ah->is_monitoring && (sc->rx.rxfilter & FIF_FCSFAIL))
			status_mask |= ATH9K_RXERR_CRC;

		if (rx_stats->rs_status & ~status_mask)
			return false;
	}

	/*
	 * For unicast frames the MIC error bit can have false positives,
	 * so all MIC error reports need to be validated in software.
	 * False negatives are not common, so skip software verification
	 * if the hardware considers the MIC valid.
	 */
	if (strip_mic)
		rxs->flag |= RX_FLAG_MMIC_STRIPPED;
	else if (is_mc && mic_error)
		rxs->flag |= RX_FLAG_MMIC_ERROR;

	return true;
}

static int ath9k_process_rate(struct ath_common *common,
			      struct ieee80211_hw *hw,
			      struct ath_rx_status *rx_stats,
			      struct ieee80211_rx_status *rxs)
{
	struct ieee80211_supported_band *sband;
	enum ieee80211_band band;
	unsigned int i = 0;
	struct ath_softc __maybe_unused *sc = common->priv;

	band = hw->conf.channel->band;
	sband = hw->wiphy->bands[band];

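	/* The hardware reports HT (MCS) rates with bit 7 set; the low
	 * seven bits carry the MCS index. */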
	if (rx_stats->rs_rate & 0x80) {
		/* HT rate */
		rxs->flag |= RX_FLAG_HT;
		if (rx_stats->rs_flags & ATH9K_RX_2040)
			rxs->flag |= RX_FLAG_40MHZ;
		if (rx_stats->rs_flags & ATH9K_RX_GI)
			rxs->flag |= RX_FLAG_SHORT_GI;
		rxs->rate_idx = rx_stats->rs_rate & 0x7f;
		return 0;
	}

	for (i = 0; i < sband->n_bitrates; i++) {
		if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
			rxs->rate_idx = i;
			return 0;
		}
		if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
			rxs->flag |= RX_FLAG_SHORTPRE;
			rxs->rate_idx = i;
			return 0;
		}
	}

	/*
	 * No valid hardware bitrate found -- we should not get here
	 * because hardware has already validated this frame as OK.
	 */
	ath_dbg(common, ANY,
		"unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
		rx_stats->rs_rate);
	RX_STAT_INC(rx_rate_err);
	return -EINVAL;
}

static void ath9k_process_rssi(struct ath_common *common,
			       struct ieee80211_hw *hw,
			       struct ieee80211_hdr *hdr,
			       struct ath_rx_status *rx_stats)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = common->ah;
	int last_rssi;
	int rssi = rx_stats->rs_rssi;

	if (!rx_stats->is_mybeacon ||
	    ((ah->opmode != NL80211_IFTYPE_STATION) &&
	     (ah->opmode != NL80211_IFTYPE_ADHOC)))
		return;

	if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr)
		ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);

	last_rssi = sc->last_rssi;
	if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
		rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
	if (rssi < 0)
		rssi = 0;

	/* Update Beacon RSSI, this is used by ANI. */
	ah->stats.avgbrssi = rssi;
}

/*
 * For Decrypt or Demic errors, we only mark packet status here and always
 * push the frame up to let mac80211 handle the actual error case, be it no
 * decryption key or a real decryption error. This lets us keep statistics
 * there.
 */
static int ath9k_rx_skb_preprocess(struct ath_common *common,
				   struct ieee80211_hw *hw,
				   struct ieee80211_hdr *hdr,
				   struct ath_rx_status *rx_stats,
				   struct ieee80211_rx_status *rx_status,
				   bool *decrypt_error)
{
	struct ath_hw *ah = common->ah;

	/*
	 * everything but the rate is checked here, the rate check is done
	 * separately to avoid doing two lookups for a rate for each frame.
	 */
	if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
		return -EINVAL;

	/* Only use status info from the last fragment */
	if (rx_stats->rs_more)
		return 0;

	ath9k_process_rssi(common, hw, hdr, rx_stats);

	if (ath9k_process_rate(common, hw, rx_stats, rx_status))
		return -EINVAL;

	rx_status->band = hw->conf.channel->band;
	rx_status->freq = hw->conf.channel->center_freq;
	rx_status->signal = ah->noise + rx_stats->rs_rssi;
	rx_status->antenna = rx_stats->rs_antenna;
	rx_status->flag |= RX_FLAG_MACTIME_MPDU;
	if (rx_stats->rs_moreaggr)
		rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;

	return 0;
}

static void ath9k_rx_skb_postprocess(struct ath_common *common,
				     struct sk_buff *skb,
				     struct ath_rx_status *rx_stats,
				     struct ieee80211_rx_status *rxs,
				     bool decrypt_error)
{
	struct ath_hw *ah = common->ah;
	struct ieee80211_hdr *hdr;
	int hdrlen, padpos, padsize;
	u8 keyix;
	__le16 fc;

	/* see if any padding is done by the hw and remove it */
	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;
	padpos = ath9k_cmn_padpos(hdr->frame_control);

	/* The MAC header is padded to have 32-bit boundary if the
	 * packet payload is non-zero. The general calculation for
	 * padsize would take into account odd header lengths:
	 * padsize = (4 - padpos % 4) % 4; However, since only
	 * even-length headers are used, padding can only be 0 or 2
	 * bytes and we can optimize this a bit. In addition, we must
	 * not try to remove padding from short control frames that do
	 * not have payload. */
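	/* For example, a QoS data frame has a 26 byte header, so
	 * padpos = 26 and padsize = 26 & 3 = 2, while a plain 24 byte
	 * header needs no padding. */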
	padsize = padpos & 3;
	if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	keyix = rx_stats->rs_keyix;

	if (keyix != ATH9K_RXKEYIX_INVALID && !decrypt_error &&
	    ieee80211_has_protected(fc)) {
		rxs->flag |= RX_FLAG_DECRYPTED;
	} else if (ieee80211_has_protected(fc)
		   && !decrypt_error && skb->len >= hdrlen + 4) {
		keyix = skb->data[hdrlen + 3] >> 6;

		if (test_bit(keyix, common->keymap))
			rxs->flag |= RX_FLAG_DECRYPTED;
	}
	if (ah->sw_mgmt_crypto &&
	    (rxs->flag & RX_FLAG_DECRYPTED) &&
	    ieee80211_is_mgmt(fc))
		/* Use software decrypt for management frames. */
		rxs->flag &= ~RX_FLAG_DECRYPTED;
}

int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
	struct ath_buf *bf;
	struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
	struct ieee80211_rx_status *rxs;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	int retval;
	struct ath_rx_status rs;
	enum ath9k_rx_qtype qtype;
	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	int dma_type;
	u8 rx_status_len = ah->caps.rx_status_len;
	u64 tsf = 0;
	u32 tsf_lower = 0;
	unsigned long flags;

	if (edma)
		dma_type = DMA_BIDIRECTIONAL;
	else
		dma_type = DMA_FROM_DEVICE;

	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
	spin_lock_bh(&sc->rx.rxbuflock);

	tsf = ath9k_hw_gettsf64(ah);
	tsf_lower = tsf & 0xffffffff;

	do {
		bool decrypt_error = false;
		/* If handling rx interrupt and flush is in progress => exit */
		if (test_bit(SC_OP_RXFLUSH, &sc->sc_flags) && (flush == 0))
			break;

		memset(&rs, 0, sizeof(rs));
		if (edma)
			bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
		else
			bf = ath_get_next_rx_buf(sc, &rs);

		if (!bf)
			break;

		skb = bf->bf_mpdu;
		if (!skb)
			continue;

		/*
		 * Take frame header from the first fragment and RX status from
		 * the last one.
		 */
		if (sc->rx.frag)
			hdr_skb = sc->rx.frag;
		else
			hdr_skb = skb;

		hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
		rxs = IEEE80211_SKB_RXCB(hdr_skb);
		if (ieee80211_is_beacon(hdr->frame_control)) {
			RX_STAT_INC(rx_beacons);
			if (!is_zero_ether_addr(common->curbssid) &&
			    ether_addr_equal(hdr->addr3, common->curbssid))
				rs.is_mybeacon = true;
			else
				rs.is_mybeacon = false;
		} else {
			rs.is_mybeacon = false;
		}

		sc->rx.num_pkts++;
		ath_debug_stat_rx(sc, &rs);

		/*
		 * If we're asked to flush receive queue, directly
		 * chain it back at the queue without processing it.
		 */
		if (test_bit(SC_OP_RXFLUSH, &sc->sc_flags)) {
			RX_STAT_INC(rx_drop_rxflush);
			goto requeue_drop_frag;
		}

		memset(rxs, 0, sizeof(struct ieee80211_rx_status));

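		/*
		 * rs_tstamp holds only the low 32 bits of the TSF at
		 * reception; splice it into the 64-bit TSF read above and
		 * correct for the cases where the 32-bit counter wrapped
		 * between the frame being timestamped and the TSF read.
		 */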
		rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
		if (rs.rs_tstamp > tsf_lower &&
		    unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
			rxs->mactime -= 0x100000000ULL;

		if (rs.rs_tstamp < tsf_lower &&
		    unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
			rxs->mactime += 0x100000000ULL;

		retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
						 rxs, &decrypt_error);
		if (retval)
			goto requeue_drop_frag;

		if (rs.is_mybeacon) {
			sc->hw_busy_count = 0;
			ath_start_rx_poll(sc, 3);
		}
		/* Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb */
		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);

		/* If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
		 * skb and put it at the tail of the sc->rx.rxbuf list for
		 * processing. */
		if (!requeue_skb) {
			RX_STAT_INC(rx_oom_err);
			goto requeue_drop_frag;
		}

		/* Unmap the frame */
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 common->rx_bufsize,
				 dma_type);

		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
		if (ah->caps.rx_status_len)
			skb_pull(skb, ah->caps.rx_status_len);

		if (!rs.rs_more)
			ath9k_rx_skb_postprocess(common, hdr_skb, &rs,
						 rxs, decrypt_error);

		/* We will now give hardware our shiny new allocated skb */
		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
						 common->rx_bufsize,
						 dma_type);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(requeue_skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common, "dma_mapping_error() on RX\n");
			ieee80211_rx(hw, skb);
			break;
		}

		if (rs.rs_more) {
			RX_STAT_INC(rx_frags);
			/*
			 * rs_more indicates chained descriptors which can be
			 * used to link buffers together for a sort of
			 * scatter-gather operation.
			 */
			if (sc->rx.frag) {
				/* too many fragments - cannot handle frame */
				dev_kfree_skb_any(sc->rx.frag);
				dev_kfree_skb_any(skb);
				RX_STAT_INC(rx_too_many_frags_err);
				skb = NULL;
			}
			sc->rx.frag = skb;
			goto requeue;
		}

		if (sc->rx.frag) {
			int space = skb->len - skb_tailroom(hdr_skb);

			if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
				dev_kfree_skb(skb);
				RX_STAT_INC(rx_oom_err);
				goto requeue_drop_frag;
			}

			sc->rx.frag = NULL;

			skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
						  skb->len);
			dev_kfree_skb_any(skb);
			skb = hdr_skb;
		}

		if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
			/*
			 * change the default rx antenna if rx diversity
			 * chooses the other antenna 3 times in a row.
			 */
			if (sc->rx.defant != rs.rs_antenna) {
				if (++sc->rx.rxotherant >= 3)
					ath_setdefantenna(sc, rs.rs_antenna);
			} else {
				sc->rx.rxotherant = 0;
			}
		}

		if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
			skb_trim(skb, skb->len - 8);

		spin_lock_irqsave(&sc->sc_pm_lock, flags);
		if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
				     PS_WAIT_FOR_CAB |
				     PS_WAIT_FOR_PSPOLL_DATA)) ||
		    ath9k_check_auto_sleep(sc))
			ath_rx_ps(sc, skb, rs.is_mybeacon);
		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

		if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx == 3)
			ath_ant_comb_scan(sc, &rs);

		ieee80211_rx(hw, skb);

requeue_drop_frag:
		if (sc->rx.frag) {
			dev_kfree_skb_any(sc->rx.frag);
			sc->rx.frag = NULL;
		}
requeue:
		if (edma) {
			list_add_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_edma_buf_link(sc, qtype);
		} else {
			list_move_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_buf_link(sc, bf);
			if (!flush)
				ath9k_hw_rxena(ah);
		}
	} while (1);

	spin_unlock_bh(&sc->rx.rxbuflock);

	if (!(ah->imask & ATH9K_INT_RXEOL)) {
		ah->imask |= (ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
		ath9k_hw_set_interrupts(ah);
	}

	return 0;
}