/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define SKB_CB_ATHBUF(__skb)	(*((struct ath_buf **)__skb->cb))

static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
{
	return sc->ps_enabled &&
	       (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
}

/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self-link the last descriptor.
 * The MAC acknowledges BA status as long as it copies frames to the
 * host buffer (or rx fifo), so a self-linked last descriptor can
 * cause packets to be acknowledged to the sender incorrectly.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct sk_buff *skb;

	ATH_RXBUF_RESET(bf);

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	BUG_ON(skb == NULL);
	ds->ds_vdata = skb->data;

	/*
	 * setup rx descriptors. The rx_bufsize here tells the hardware
	 * how much data it can DMA to us and that we are prepared
	 * to process.
	 */
	ath9k_hw_setuprxdesc(ah, ds,
			     common->rx_bufsize,
			     0);

	if (sc->rx.rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->rx.rxlink = bf->bf_daddr;

	sc->rx.rxlink = &ds->ds_link;
}

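/*
 * Program the default RX antenna and reset the counter that tracks how
 * many consecutive frames arrived on the other antenna (see the antenna
 * diversity check in ath_rx_tasklet()).
 */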
static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(sc->sc_ah, antenna);
	sc->rx.defant = antenna;
	sc->rx.rxotherant = 0;
}

static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	ath_hw_setbssidmask(common);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}

static bool ath_rx_edma_buf_link(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;
	struct ath_buf *bf;

	rx_edma = &sc->rx.rx_edma[qtype];
	if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
		return false;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	list_del_init(&bf->list);

	skb = bf->bf_mpdu;

	ATH_RXBUF_RESET(bf);
	memset(skb->data, 0, ah->caps.rx_status_len);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   ah->caps.rx_status_len, DMA_TO_DEVICE);

	SKB_CB_ATHBUF(skb) = bf;
	ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
	skb_queue_tail(&rx_edma->rx_fifo, skb);

	return true;
}

static void ath_rx_addbuffer_edma(struct ath_softc *sc,
				  enum ath9k_rx_qtype qtype, int size)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_buf *bf, *tbf;

	if (list_empty(&sc->rx.rxbuf)) {
		ath_dbg(common, QUEUE, "No free rx buf available\n");
		return;
	}

	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list)
		if (!ath_rx_edma_buf_link(sc, qtype))
			break;
}

static void ath_rx_remove_buffer(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_buf *bf;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;

	rx_edma = &sc->rx.rx_edma[qtype];

	while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}
}

static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf;

	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		if (bf->bf_mpdu) {
			dma_unmap_single(sc->dev, bf->bf_buf_addr,
					 common->rx_bufsize,
					 DMA_BIDIRECTIONAL);
			dev_kfree_skb_any(bf->bf_mpdu);
			bf->bf_buf_addr = 0;
			bf->bf_mpdu = NULL;
		}
	}

	INIT_LIST_HEAD(&sc->rx.rxbuf);

	kfree(sc->rx.rx_bufptr);
	sc->rx.rx_bufptr = NULL;
}

static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
{
	skb_queue_head_init(&rx_edma->rx_fifo);
	rx_edma->rx_fifo_hwsize = size;
}

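/*
 * Set up RX for EDMA (AR9003 family) chips: program the buffer size,
 * initialize the software view of the high/low priority RX FIFOs, and
 * allocate one DMA-mapped skb per ath_buf on sc->rx.rxbuf.
 */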
static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0, i;
	u32 size;

	ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
				ah->caps.rx_status_len);

	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
			       ah->caps.rx_lp_qdepth);
	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
			       ah->caps.rx_hp_qdepth);

	size = sizeof(struct ath_buf) * nbufs;
	bf = kzalloc(size, GFP_KERNEL);
	if (!bf)
		return -ENOMEM;

	INIT_LIST_HEAD(&sc->rx.rxbuf);
	sc->rx.rx_bufptr = bf;

	for (i = 0; i < nbufs; i++, bf++) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
		if (!skb) {
			error = -ENOMEM;
			goto rx_init_fail;
		}

		memset(skb->data, 0, common->rx_bufsize);
		bf->bf_mpdu = skb;

		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common,
				"dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto rx_init_fail;
		}

		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}

	return 0;

rx_init_fail:
	ath_rx_edma_cleanup(sc);
	return error;
}

static void ath_edma_start_recv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxbuflock);

	ath9k_hw_rxena(sc->sc_ah);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);

	ath_opmode_init(sc);

	ath9k_hw_startpcureceive(sc->sc_ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL));

	spin_unlock_bh(&sc->rx.rxbuflock);
}

static void ath_edma_stop_recv(struct ath_softc *sc)
{
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
}

int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	spin_lock_init(&sc->sc_pcu_lock);
	spin_lock_init(&sc->rx.rxbuflock);

	common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
			     sc->sc_ah->caps.rx_status_len;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		return ath_rx_edma_init(sc, nbufs);
	} else {
		ath_dbg(common, CONFIG, "cachelsz %u rxbufsize %u\n",
			common->cachelsz, common->rx_bufsize);

		/* Initialize rx descriptors */

		error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
					  "rx", nbufs, 1, 0);
		if (error != 0) {
			ath_err(common,
				"failed to allocate rx descriptors: %d\n",
				error);
			goto err;
		}

		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = ath_rxbuf_alloc(common, common->rx_bufsize,
					      GFP_KERNEL);
			if (skb == NULL) {
				error = -ENOMEM;
				goto err;
			}

			bf->bf_mpdu = skb;
			bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
							 common->rx_bufsize,
							 DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(sc->dev,
						       bf->bf_buf_addr))) {
				dev_kfree_skb_any(skb);
				bf->bf_mpdu = NULL;
				bf->bf_buf_addr = 0;
				ath_err(common,
					"dma_mapping_error() on RX init\n");
				error = -ENOMEM;
				goto err;
			}
		}
		sc->rx.rxlink = NULL;
	}

err:
	if (error)
		ath_rx_cleanup(sc);

	return error;
}

void ath_rx_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_rx_edma_cleanup(sc);
		return;
	} else {
		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = bf->bf_mpdu;
			if (skb) {
				dma_unmap_single(sc->dev, bf->bf_buf_addr,
						 common->rx_bufsize,
						 DMA_FROM_DEVICE);
				dev_kfree_skb(skb);
				bf->bf_buf_addr = 0;
				bf->bf_mpdu = NULL;
			}
		}

		if (sc->rx.rxdma.dd_desc_len != 0)
			ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
	}
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 *   - when scanning
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
	u32 rfilt;

	rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	if (sc->rx.rxfilter & FIF_PROBE_REQ)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/*
	 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
	 * mode interface or when in monitor mode. AP mode does not need this
	 * since it receives all in-BSS frames anyway.
	 */
	if (sc->sc_ah->is_monitoring)
		rfilt |= ATH9K_RX_FILTER_PROM;

	if (sc->rx.rxfilter & FIF_CONTROL)
		rfilt |= ATH9K_RX_FILTER_CONTROL;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
	    (sc->nvifs <= 1) &&
	    !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
		rfilt |= ATH9K_RX_FILTER_MYBEACON;
	else
		rfilt |= ATH9K_RX_FILTER_BEACON;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
	    (sc->rx.rxfilter & FIF_PSPOLL))
		rfilt |= ATH9K_RX_FILTER_PSPOLL;

	if (conf_is_ht(&sc->hw->conf))
		rfilt |= ATH9K_RX_FILTER_COMP_BAR;

	if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
		/* This is needed for older chips */
		if (sc->sc_ah->hw_version.macVersion <= AR_SREV_VERSION_9160)
			rfilt |= ATH9K_RX_FILTER_PROM;
		rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
	}

	if (AR_SREV_9550(sc->sc_ah))
		rfilt |= ATH9K_RX_FILTER_4ADDRESS;

	return rfilt;
}

int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_edma_start_recv(sc);
		return 0;
	}

	spin_lock_bh(&sc->rx.rxbuflock);
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	sc->rx.rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);

start_recv:
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL));

	spin_unlock_bh(&sc->rx.rxbuflock);

	return 0;
}

bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	bool stopped, reset = false;

	spin_lock_bh(&sc->rx.rxbuflock);
	ath9k_hw_abortpcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	stopped = ath9k_hw_stopdmarecv(ah, &reset);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_edma_stop_recv(sc);
	else
		sc->rx.rxlink = NULL;
	spin_unlock_bh(&sc->rx.rxbuflock);

	if (!(ah->ah_flags & AH_UNPLUGGED) &&
	    unlikely(!stopped)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Could not stop RX, we could be "
			"confusing the DMA engine when we start RX up\n");
		ATH_DBG_WARN_ON_ONCE(!stopped);
	}
	return stopped && !reset;
}

void ath_flushrecv(struct ath_softc *sc)
{
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_rx_tasklet(sc, 1, true);
	ath_rx_tasklet(sc, 1, false);
}

static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
	/* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
	struct ieee80211_mgmt *mgmt;
	u8 *pos, *end, id, elen;
	struct ieee80211_tim_ie *tim;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	pos = mgmt->u.beacon.variable;
	end = skb->data + skb->len;

	while (pos + 2 < end) {
		id = *pos++;
		elen = *pos++;
		if (pos + elen > end)
			break;

		if (id == WLAN_EID_TIM) {
			if (elen < sizeof(*tim))
				break;
			tim = (struct ieee80211_tim_ie *) pos;
			if (tim->dtim_count != 0)
				break;
			return tim->bitmap_ctrl & 0x01;
		}

		pos += elen;
	}

	return false;
}

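/*
 * Handle a beacon from our own BSS while in powersave: resync the beacon
 * timers if requested, and decide whether to stay awake for buffered
 * broadcast/multicast (CAB) traffic advertised in the DTIM.
 */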
static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	if (skb->len < 24 + 8 + 2 + 2)
		return;

	sc->ps_flags &= ~PS_WAIT_FOR_BEACON;

	if (sc->ps_flags & PS_BEACON_SYNC) {
		sc->ps_flags &= ~PS_BEACON_SYNC;
		ath_dbg(common, PS,
			"Reconfigure Beacon timers based on timestamp from the AP\n");
		ath9k_set_beacon(sc);
	}

	if (ath_beacon_dtim_pending_cab(skb)) {
		/*
		 * Remain awake waiting for buffered broadcast/multicast
		 * frames. If the last broadcast/multicast frame is not
		 * received properly, the next beacon frame will work as
		 * a backup trigger for returning into NETWORK SLEEP state,
		 * so we are waiting for it as well.
		 */
		ath_dbg(common, PS,
			"Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n");
		sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
		return;
	}

	if (sc->ps_flags & PS_WAIT_FOR_CAB) {
		/*
		 * This can happen if a broadcast frame is dropped or the AP
		 * fails to send a frame indicating that all CAB frames have
		 * been delivered.
		 */
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
		ath_dbg(common, PS, "PS wait for CAB frames timed out\n");
	}
}

static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon)
{
	struct ieee80211_hdr *hdr;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Process Beacon and CAB receive in PS state */
	if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
	    && mybeacon) {
		ath_rx_ps_beacon(sc, skb);
	} else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
		   (ieee80211_is_data(hdr->frame_control) ||
		    ieee80211_is_action(hdr->frame_control)) &&
		   is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_moredata(hdr->frame_control)) {
		/*
		 * No more broadcast/multicast frames to be received at this
		 * point.
		 */
		sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
		ath_dbg(common, PS,
			"All PS CAB frames received, back to sleep\n");
	} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
		   !is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_morefrags(hdr->frame_control)) {
		sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
		ath_dbg(common, PS,
			"Going back to sleep after having received PS-Poll data (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}
}

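/*
 * Peek at the head of an EDMA RX FIFO and hand back the corresponding
 * ath_buf once the hardware has finished writing its status. Returns
 * false when the FIFO is empty or the head buffer is still owned by the
 * hardware; on a corrupt descriptor the buffer (and the one following it)
 * is recycled and *dest is set to NULL.
 */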
static bool ath_edma_get_buffers(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype,
				 struct ath_rx_status *rs,
				 struct ath_buf **dest)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int ret;

	skb = skb_peek(&rx_edma->rx_fifo);
	if (!skb)
		return false;

	bf = SKB_CB_ATHBUF(skb);
	BUG_ON(!bf);

	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize, DMA_FROM_DEVICE);

	ret = ath9k_hw_process_rxdesc_edma(ah, rs, skb->data);
	if (ret == -EINPROGRESS) {
		/* let device gain the buffer again */
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
					   common->rx_bufsize, DMA_FROM_DEVICE);
		return false;
	}

	__skb_unlink(skb, &rx_edma->rx_fifo);
	if (ret == -EINVAL) {
		/* corrupt descriptor, skip this one and the following one */
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);

		skb = skb_peek(&rx_edma->rx_fifo);
		if (skb) {
			bf = SKB_CB_ATHBUF(skb);
			BUG_ON(!bf);

			__skb_unlink(skb, &rx_edma->rx_fifo);
			list_add_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_edma_buf_link(sc, qtype);
		}

		bf = NULL;
	}

	*dest = bf;
	return true;
}

static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
						struct ath_rx_status *rs,
						enum ath9k_rx_qtype qtype)
{
	struct ath_buf *bf = NULL;

	while (ath_edma_get_buffers(sc, qtype, rs, &bf)) {
		if (!bf)
			continue;

		return bf;
	}
	return NULL;
}

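/*
 * Legacy (non-EDMA) counterpart of the FIFO pop above: take the first
 * buffer off the descriptor list, but only once the hardware's done bit
 * (cross-checked against the next descriptor) confirms that the DMA
 * engine has finished with it.
 */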
static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
					   struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct ath_buf *bf;
	int ret;

	if (list_empty(&sc->rx.rxbuf)) {
		sc->rx.rxlink = NULL;
		return NULL;
	}

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ds = bf->bf_desc;

	/*
	 * Must provide the virtual address of the current
	 * descriptor, the physical address, and the virtual
	 * address of the next descriptor in the h/w chain.
	 * This allows the HAL to look ahead to see if the
	 * hardware is done with a descriptor by checking the
	 * done bit in the following descriptor and the address
	 * of the current descriptor the DMA engine is working
	 * on. All this is necessary because of our use of
	 * a self-linked list to avoid rx overruns.
	 */
	ret = ath9k_hw_rxprocdesc(ah, ds, rs);
	if (ret == -EINPROGRESS) {
		struct ath_rx_status trs;
		struct ath_buf *tbf;
		struct ath_desc *tds;

		memset(&trs, 0, sizeof(trs));
		if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
			sc->rx.rxlink = NULL;
			return NULL;
		}

		tbf = list_entry(bf->list.next, struct ath_buf, list);

		/*
		 * On some hardware the descriptor status words could
		 * get corrupted, including the done bit. Because of
		 * this, check if the next descriptor's done bit is
		 * set or not.
		 *
		 * If the next descriptor's done bit is set, the current
		 * descriptor has been corrupted. Force s/w to discard
		 * this descriptor and continue...
		 */

		tds = tbf->bf_desc;
		ret = ath9k_hw_rxprocdesc(ah, tds, &trs);
		if (ret == -EINPROGRESS)
			return NULL;
	}

	list_del(&bf->list);
	if (!bf->bf_mpdu)
		return bf;

	/*
	 * Synchronize the DMA transfer with CPU before
	 * 1. accessing the frame
	 * 2. requeueing the same buffer to h/w
	 */
	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize,
				DMA_FROM_DEVICE);

	return bf;
}

/* Assumes you've already done the endian to CPU conversion */
static bool ath9k_rx_accept(struct ath_common *common,
			    struct ieee80211_hdr *hdr,
			    struct ieee80211_rx_status *rxs,
			    struct ath_rx_status *rx_stats,
			    bool *decrypt_error)
{
	struct ath_softc *sc = (struct ath_softc *) common->priv;
	bool is_mc, is_valid_tkip, strip_mic, mic_error;
	struct ath_hw *ah = common->ah;
	__le16 fc;
	u8 rx_status_len = ah->caps.rx_status_len;

	fc = hdr->frame_control;

	is_mc = !!is_multicast_ether_addr(hdr->addr1);
	is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID &&
		test_bit(rx_stats->rs_keyix, common->tkip_keymap);
	strip_mic = is_valid_tkip && ieee80211_is_data(fc) &&
		ieee80211_has_protected(fc) &&
		!(rx_stats->rs_status &
		  (ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC |
		   ATH9K_RXERR_KEYMISS));

	/*
	 * Key miss events are only relevant for pairwise keys where the
	 * descriptor does contain a valid key index. This has been observed
	 * mostly with CCMP encryption.
	 */
	if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID ||
	    !test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
		rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;

	if (!rx_stats->rs_datalen) {
		RX_STAT_INC(rx_len_err);
		return false;
	}

	/*
	 * rs_status follows rs_datalen so if rs_datalen is too large
	 * we can take a hint that hardware corrupted it, so ignore
	 * those frames.
	 */
	if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len)) {
		RX_STAT_INC(rx_len_err);
		return false;
	}

	/* Only use error bits from the last fragment */
	if (rx_stats->rs_more)
		return true;

	mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) &&
		!ieee80211_has_morefrags(fc) &&
		!(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
		(rx_stats->rs_status & ATH9K_RXERR_MIC);

	/*
	 * The rx_stats->rs_status will not be set until the end of the
	 * chained descriptors so it can be ignored if rs_more is set. The
	 * rs_more will be false at the last element of the chained
	 * descriptors.
	 */
	if (rx_stats->rs_status != 0) {
		u8 status_mask;

		if (rx_stats->rs_status & ATH9K_RXERR_CRC) {
			rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
			mic_error = false;
		}
		if (rx_stats->rs_status & ATH9K_RXERR_PHY)
			return false;

		if ((rx_stats->rs_status & ATH9K_RXERR_DECRYPT) ||
		    (!is_mc && (rx_stats->rs_status & ATH9K_RXERR_KEYMISS))) {
			*decrypt_error = true;
			mic_error = false;
		}

		/*
		 * Reject error frames with the exception of
		 * decryption and MIC failures. For monitor mode,
		 * we also ignore the CRC error.
		 */
		status_mask = ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
			      ATH9K_RXERR_KEYMISS;

		if (ah->is_monitoring && (sc->rx.rxfilter & FIF_FCSFAIL))
			status_mask |= ATH9K_RXERR_CRC;

		if (rx_stats->rs_status & ~status_mask)
			return false;
	}

	/*
	 * For unicast frames the MIC error bit can have false positives,
	 * so all MIC error reports need to be validated in software.
	 * False negatives are not common, so skip software verification
	 * if the hardware considers the MIC valid.
	 */
	if (strip_mic)
		rxs->flag |= RX_FLAG_MMIC_STRIPPED;
	else if (is_mc && mic_error)
		rxs->flag |= RX_FLAG_MMIC_ERROR;

	return true;
}

static int ath9k_process_rate(struct ath_common *common,
			      struct ieee80211_hw *hw,
			      struct ath_rx_status *rx_stats,
			      struct ieee80211_rx_status *rxs)
{
	struct ieee80211_supported_band *sband;
	enum ieee80211_band band;
	unsigned int i = 0;
	struct ath_softc __maybe_unused *sc = common->priv;

	band = hw->conf.channel->band;
	sband = hw->wiphy->bands[band];

	if (rx_stats->rs_rate & 0x80) {
		/* HT rate */
		rxs->flag |= RX_FLAG_HT;
		if (rx_stats->rs_flags & ATH9K_RX_2040)
			rxs->flag |= RX_FLAG_40MHZ;
		if (rx_stats->rs_flags & ATH9K_RX_GI)
			rxs->flag |= RX_FLAG_SHORT_GI;
		rxs->rate_idx = rx_stats->rs_rate & 0x7f;
		return 0;
	}

	for (i = 0; i < sband->n_bitrates; i++) {
		if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
			rxs->rate_idx = i;
			return 0;
		}
		if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
			rxs->flag |= RX_FLAG_SHORTPRE;
			rxs->rate_idx = i;
			return 0;
		}
	}

	/*
	 * No valid hardware bitrate found -- we should not get here
	 * because hardware has already validated this frame as OK.
	 */
	ath_dbg(common, ANY,
		"unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
		rx_stats->rs_rate);
	RX_STAT_INC(rx_rate_err);
	return -EINVAL;
}

static void ath9k_process_rssi(struct ath_common *common,
			       struct ieee80211_hw *hw,
			       struct ieee80211_hdr *hdr,
			       struct ath_rx_status *rx_stats)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = common->ah;
	int last_rssi;
	int rssi = rx_stats->rs_rssi;

	if (!rx_stats->is_mybeacon ||
	    ((ah->opmode != NL80211_IFTYPE_STATION) &&
	     (ah->opmode != NL80211_IFTYPE_ADHOC)))
		return;

	if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr)
		ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);

	last_rssi = sc->last_rssi;
	if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
		rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
	if (rssi < 0)
		rssi = 0;

	/* Update Beacon RSSI, this is used by ANI. */
	ah->stats.avgbrssi = rssi;
}

/*
 * For Decrypt or Demic errors, we only mark packet status here and always
 * push the frame up to let mac80211 handle the actual error case, be it no
 * decryption key or a real decryption error. This lets us keep statistics
 * there.
 */
static int ath9k_rx_skb_preprocess(struct ath_common *common,
				   struct ieee80211_hw *hw,
				   struct ieee80211_hdr *hdr,
				   struct ath_rx_status *rx_stats,
				   struct ieee80211_rx_status *rx_status,
				   bool *decrypt_error)
{
	struct ath_hw *ah = common->ah;

	/*
	 * everything but the rate is checked here, the rate check is done
	 * separately to avoid doing two lookups for a rate for each frame.
	 */
	if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
		return -EINVAL;

	/* Only use status info from the last fragment */
	if (rx_stats->rs_more)
		return 0;

	ath9k_process_rssi(common, hw, hdr, rx_stats);

	if (ath9k_process_rate(common, hw, rx_stats, rx_status))
		return -EINVAL;

	rx_status->band = hw->conf.channel->band;
	rx_status->freq = hw->conf.channel->center_freq;
	rx_status->signal = ah->noise + rx_stats->rs_rssi;
	rx_status->antenna = rx_stats->rs_antenna;
	rx_status->flag |= RX_FLAG_MACTIME_END;
	if (rx_stats->rs_moreaggr)
		rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;

	return 0;
}

static void ath9k_rx_skb_postprocess(struct ath_common *common,
				     struct sk_buff *skb,
				     struct ath_rx_status *rx_stats,
				     struct ieee80211_rx_status *rxs,
				     bool decrypt_error)
{
	struct ath_hw *ah = common->ah;
	struct ieee80211_hdr *hdr;
	int hdrlen, padpos, padsize;
	u8 keyix;
	__le16 fc;

	/* see if any padding is done by the hw and remove it */
	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;
	padpos = ath9k_cmn_padpos(hdr->frame_control);

	/* The MAC header is padded to a 32-bit boundary if the
	 * packet payload is non-zero. The general calculation for
	 * padsize would take into account odd header lengths:
	 * padsize = (4 - padpos % 4) % 4; However, since only
	 * even-length headers are used, padding can only be 0 or 2
	 * bytes and we can optimize this a bit. In addition, we must
	 * not try to remove padding from short control frames that do
	 * not have payload. */
	padsize = padpos & 3;
	if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	keyix = rx_stats->rs_keyix;

	if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
	    ieee80211_has_protected(fc)) {
		rxs->flag |= RX_FLAG_DECRYPTED;
	} else if (ieee80211_has_protected(fc)
		   && !decrypt_error && skb->len >= hdrlen + 4) {
		keyix = skb->data[hdrlen + 3] >> 6;

		if (test_bit(keyix, common->keymap))
			rxs->flag |= RX_FLAG_DECRYPTED;
	}
	if (ah->sw_mgmt_crypto &&
	    (rxs->flag & RX_FLAG_DECRYPTED) &&
	    ieee80211_is_mgmt(fc))
		/* Use software decrypt for management frames. */
		rxs->flag &= ~RX_FLAG_DECRYPTED;
}

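/*
 * Main RX processing loop: pull completed buffers from the hardware
 * (EDMA FIFO or legacy descriptor list), validate and post-process each
 * frame, hand it to mac80211, and immediately requeue a fresh buffer so
 * the hardware does not run dry. When 'flush' is set, buffers are drained
 * and collected on sc->rx.rxbuf without being re-linked to the hardware.
 */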
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
	struct ath_buf *bf;
	struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
	struct ieee80211_rx_status *rxs;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	int retval;
	struct ath_rx_status rs;
	enum ath9k_rx_qtype qtype;
	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	int dma_type;
	u8 rx_status_len = ah->caps.rx_status_len;
	u64 tsf = 0;
	u32 tsf_lower = 0;
	unsigned long flags;

	if (edma)
		dma_type = DMA_BIDIRECTIONAL;
	else
		dma_type = DMA_FROM_DEVICE;

	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
	spin_lock_bh(&sc->rx.rxbuflock);

	tsf = ath9k_hw_gettsf64(ah);
	tsf_lower = tsf & 0xffffffff;

	do {
		bool decrypt_error = false;

		memset(&rs, 0, sizeof(rs));
		if (edma)
			bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
		else
			bf = ath_get_next_rx_buf(sc, &rs);

		if (!bf)
			break;

		skb = bf->bf_mpdu;
		if (!skb)
			continue;

		/*
		 * Take frame header from the first fragment and RX status from
		 * the last one.
		 */
		if (sc->rx.frag)
			hdr_skb = sc->rx.frag;
		else
			hdr_skb = skb;

		hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
		rxs = IEEE80211_SKB_RXCB(hdr_skb);
		if (ieee80211_is_beacon(hdr->frame_control)) {
			RX_STAT_INC(rx_beacons);
			if (!is_zero_ether_addr(common->curbssid) &&
			    ether_addr_equal(hdr->addr3, common->curbssid))
				rs.is_mybeacon = true;
			else
				rs.is_mybeacon = false;
		} else {
			rs.is_mybeacon = false;
		}

		if (ieee80211_is_data_present(hdr->frame_control) &&
		    !ieee80211_is_qos_nullfunc(hdr->frame_control))
			sc->rx.num_pkts++;

		ath_debug_stat_rx(sc, &rs);

		memset(rxs, 0, sizeof(struct ieee80211_rx_status));

		rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
		if (rs.rs_tstamp > tsf_lower &&
		    unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
			rxs->mactime -= 0x100000000ULL;

		if (rs.rs_tstamp < tsf_lower &&
		    unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
			rxs->mactime += 0x100000000ULL;

		retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
						 rxs, &decrypt_error);
		if (retval)
			goto requeue_drop_frag;

		if (rs.is_mybeacon) {
			sc->hw_busy_count = 0;
			ath_start_rx_poll(sc, 3);
		}

		/* Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb */
		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);

		/* If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
		 * skb and put it at the tail of the sc->rx.rxbuf list for
		 * processing. */
		if (!requeue_skb) {
			RX_STAT_INC(rx_oom_err);
			goto requeue_drop_frag;
		}

		/* Unmap the frame */
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 common->rx_bufsize,
				 dma_type);

		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
		if (ah->caps.rx_status_len)
			skb_pull(skb, ah->caps.rx_status_len);

		if (!rs.rs_more)
			ath9k_rx_skb_postprocess(common, hdr_skb, &rs,
						 rxs, decrypt_error);

		/* We will now give hardware our shiny new allocated skb */
		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
						 common->rx_bufsize,
						 dma_type);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(requeue_skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common, "dma_mapping_error() on RX\n");
			ieee80211_rx(hw, skb);
			break;
		}

		if (rs.rs_more) {
			RX_STAT_INC(rx_frags);
			/*
			 * rs_more indicates chained descriptors which can be
			 * used to link buffers together for a sort of
			 * scatter-gather operation.
			 */
			if (sc->rx.frag) {
				/* too many fragments - cannot handle frame */
				dev_kfree_skb_any(sc->rx.frag);
				dev_kfree_skb_any(skb);
				RX_STAT_INC(rx_too_many_frags_err);
				skb = NULL;
			}
			sc->rx.frag = skb;
			goto requeue;
		}

		if (sc->rx.frag) {
			int space = skb->len - skb_tailroom(hdr_skb);

			if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
				dev_kfree_skb(skb);
				RX_STAT_INC(rx_oom_err);
				goto requeue_drop_frag;
			}

			sc->rx.frag = NULL;

			skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
						  skb->len);
			dev_kfree_skb_any(skb);
			skb = hdr_skb;
		}

		if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
			/*
			 * change the default rx antenna if rx diversity
			 * chooses the other antenna 3 times in a row.
			 */
			if (sc->rx.defant != rs.rs_antenna) {
				if (++sc->rx.rxotherant >= 3)
					ath_setdefantenna(sc, rs.rs_antenna);
			} else {
				sc->rx.rxotherant = 0;
			}
		}

		if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
			skb_trim(skb, skb->len - 8);

		spin_lock_irqsave(&sc->sc_pm_lock, flags);
		if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
				     PS_WAIT_FOR_CAB |
				     PS_WAIT_FOR_PSPOLL_DATA)) ||
		    ath9k_check_auto_sleep(sc))
			ath_rx_ps(sc, skb, rs.is_mybeacon);
		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

		if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx == 3)
			ath_ant_comb_scan(sc, &rs);

		ieee80211_rx(hw, skb);

requeue_drop_frag:
		if (sc->rx.frag) {
			dev_kfree_skb_any(sc->rx.frag);
			sc->rx.frag = NULL;
		}
requeue:
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		if (flush)
			continue;

		if (edma) {
			ath_rx_edma_buf_link(sc, qtype);
		} else {
			ath_rx_buf_link(sc, bf);
			ath9k_hw_rxena(ah);
		}
	} while (1);

	spin_unlock_bh(&sc->rx.rxbuflock);

	if (!(ah->imask & ATH9K_INT_RXEOL)) {
		ah->imask |= (ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
		ath9k_hw_set_interrupts(ah);
	}

	return 0;
}