/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define SKB_CB_ATHBUF(__skb) (*((struct ath_rxbuf **)__skb->cb))

static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
{
	return sc->ps_enabled &&
	       (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
}

/*
 * Setup and link descriptors.
 *
 * 802.11n: we can no longer afford to self-link the last descriptor.
 * The MAC acknowledges BA status as long as it copies frames to the
 * host buffer (or rx fifo), so packets can be acknowledged to a sender
 * incorrectly if the last descriptor is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_rxbuf *bf,
			    bool flush)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct sk_buff *skb;

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	BUG_ON(skb == NULL);
	ds->ds_vdata = skb->data;

	/*
	 * Set up the rx descriptor. The rx_bufsize here tells the hardware
	 * how much data it can DMA to us and that we are prepared
	 * to process.
	 */
	ath9k_hw_setuprxdesc(ah, ds,
			     common->rx_bufsize,
			     0);

	if (sc->rx.rxlink)
		*sc->rx.rxlink = bf->bf_daddr;
	else if (!flush)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);

	sc->rx.rxlink = &ds->ds_link;
}
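/*
 * Defer linking a completed buffer back into the DMA chain: link the
 * buffer held back from the previous pass and hold the current one.
 * ath_get_next_rx_buf() skips the held buffer, so the descriptor the
 * hardware was most recently working on is never handed straight back
 * to it (see the comment above ath_rx_buf_link()).
 */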
static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_rxbuf *bf,
			      bool flush)
{
	if (sc->rx.buf_hold)
		ath_rx_buf_link(sc, sc->rx.buf_hold, flush);

	sc->rx.buf_hold = bf;
}

static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(sc->sc_ah, antenna);
	sc->rx.defant = antenna;
	sc->rx.rxotherant = 0;
}

static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	ath_hw_setbssidmask(common);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}
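/*
 * On EDMA-capable hardware, received frames are delivered through two
 * RX FIFOs (high and low priority) instead of a linked descriptor
 * chain. The driver pushes buffer addresses into a FIFO and the
 * hardware writes the RX status block into the first
 * ah->caps.rx_status_len bytes of the buffer itself, which is why that
 * area is zeroed and synced to the device before the buffer is queued.
 */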
static bool ath_rx_edma_buf_link(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;
	struct ath_rxbuf *bf;

	rx_edma = &sc->rx.rx_edma[qtype];
	if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
		return false;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
	list_del_init(&bf->list);

	skb = bf->bf_mpdu;

	memset(skb->data, 0, ah->caps.rx_status_len);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   ah->caps.rx_status_len, DMA_TO_DEVICE);

	SKB_CB_ATHBUF(skb) = bf;
	ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
	__skb_queue_tail(&rx_edma->rx_fifo, skb);

	return true;
}

static void ath_rx_addbuffer_edma(struct ath_softc *sc,
				  enum ath9k_rx_qtype qtype)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_rxbuf *bf, *tbf;

	if (list_empty(&sc->rx.rxbuf)) {
		ath_dbg(common, QUEUE, "No free rx buf available\n");
		return;
	}

	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list)
		if (!ath_rx_edma_buf_link(sc, qtype))
			break;
}

static void ath_rx_remove_buffer(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_rxbuf *bf;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;

	rx_edma = &sc->rx.rx_edma[qtype];

	while ((skb = __skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}
}

static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_rxbuf *bf;

	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		if (bf->bf_mpdu) {
			dma_unmap_single(sc->dev, bf->bf_buf_addr,
					 common->rx_bufsize,
					 DMA_BIDIRECTIONAL);
			dev_kfree_skb_any(bf->bf_mpdu);
			bf->bf_buf_addr = 0;
			bf->bf_mpdu = NULL;
		}
	}
}

static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
{
	__skb_queue_head_init(&rx_edma->rx_fifo);
	rx_edma->rx_fifo_hwsize = size;
}

static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ath_rxbuf *bf;
	int error = 0, i;
	u32 size;

	ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
				ah->caps.rx_status_len);

	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
			       ah->caps.rx_lp_qdepth);
	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
			       ah->caps.rx_hp_qdepth);

	size = sizeof(struct ath_rxbuf) * nbufs;
	bf = devm_kzalloc(sc->dev, size, GFP_KERNEL);
	if (!bf)
		return -ENOMEM;

	INIT_LIST_HEAD(&sc->rx.rxbuf);

	for (i = 0; i < nbufs; i++, bf++) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
		if (!skb) {
			error = -ENOMEM;
			goto rx_init_fail;
		}

		memset(skb->data, 0, common->rx_bufsize);
		bf->bf_mpdu = skb;

		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common,
				"dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto rx_init_fail;
		}

		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}

	return 0;

rx_init_fail:
	ath_rx_edma_cleanup(sc);
	return error;
}

static void ath_edma_start_recv(struct ath_softc *sc)
{
	ath9k_hw_rxena(sc->sc_ah);
	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP);
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(sc->sc_ah, sc->cur_chan->offchannel);
}

static void ath_edma_stop_recv(struct ath_softc *sc)
{
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
}

int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct sk_buff *skb;
	struct ath_rxbuf *bf;
	int error = 0;

	spin_lock_init(&sc->sc_pcu_lock);

	common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
			     sc->sc_ah->caps.rx_status_len;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		return ath_rx_edma_init(sc, nbufs);

	ath_dbg(common, CONFIG, "cachelsz %u rxbufsize %u\n",
		common->cachelsz, common->rx_bufsize);

	/* Initialize rx descriptors */

	error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
				  "rx", nbufs, 1, 0);
	if (error != 0) {
		ath_err(common,
			"failed to allocate rx descriptors: %d\n",
			error);
		goto err;
	}

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize,
				      GFP_KERNEL);
		if (skb == NULL) {
			error = -ENOMEM;
			goto err;
		}

		bf->bf_mpdu = skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common,
				"dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto err;
		}
	}
	sc->rx.rxlink = NULL;
err:
	if (error)
		ath_rx_cleanup(sc);

	return error;
}
void ath_rx_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_rxbuf *bf;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_rx_edma_cleanup(sc);
		return;
	}

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		skb = bf->bf_mpdu;
		if (skb) {
			dma_unmap_single(sc->dev, bf->bf_buf_addr,
					 common->rx_bufsize,
					 DMA_FROM_DEVICE);
			dev_kfree_skb(skb);
			bf->bf_buf_addr = 0;
			bf->bf_mpdu = NULL;
		}
	}
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 *   - when scanning
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u32 rfilt;

	if (IS_ENABLED(CONFIG_ATH9K_TX99))
		return 0;

	rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	/* if operating on a DFS channel, enable radar pulse detection */
	if (sc->hw->conf.radar_enabled)
		rfilt |= ATH9K_RX_FILTER_PHYRADAR | ATH9K_RX_FILTER_PHYERR;

	spin_lock_bh(&sc->chan_lock);

	if (sc->cur_chan->rxfilter & FIF_PROBE_REQ)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	if (sc->sc_ah->is_monitoring)
		rfilt |= ATH9K_RX_FILTER_PROM;

	if ((sc->cur_chan->rxfilter & FIF_CONTROL) ||
	    sc->sc_ah->dynack.enabled)
		rfilt |= ATH9K_RX_FILTER_CONTROL;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
	    (sc->cur_chan->nvifs <= 1) &&
	    !(sc->cur_chan->rxfilter & FIF_BCN_PRBRESP_PROMISC))
		rfilt |= ATH9K_RX_FILTER_MYBEACON;
	else if (sc->sc_ah->opmode != NL80211_IFTYPE_OCB)
		rfilt |= ATH9K_RX_FILTER_BEACON;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
	    (sc->cur_chan->rxfilter & FIF_PSPOLL))
		rfilt |= ATH9K_RX_FILTER_PSPOLL;

	if (sc->cur_chandef.width != NL80211_CHAN_WIDTH_20_NOHT)
		rfilt |= ATH9K_RX_FILTER_COMP_BAR;

	if (sc->cur_chan->nvifs > 1 || (sc->cur_chan->rxfilter & FIF_OTHER_BSS)) {
		/* This is needed for older chips */
		if (sc->sc_ah->hw_version.macVersion <= AR_SREV_VERSION_9160)
			rfilt |= ATH9K_RX_FILTER_PROM;
		rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
	}

	if (AR_SREV_9550(sc->sc_ah) || AR_SREV_9531(sc->sc_ah) ||
	    AR_SREV_9561(sc->sc_ah))
		rfilt |= ATH9K_RX_FILTER_4ADDRESS;

	if (AR_SREV_9462(sc->sc_ah) || AR_SREV_9565(sc->sc_ah))
		rfilt |= ATH9K_RX_FILTER_CONTROL_WRAPPER;

	if (ath9k_is_chanctx_enabled() &&
	    test_bit(ATH_OP_SCANNING, &common->op_flags))
		rfilt |= ATH9K_RX_FILTER_BEACON;

	spin_unlock_bh(&sc->chan_lock);

	return rfilt;
}
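/*
 * Enable reception: EDMA chips only need their RX FIFOs refilled, while
 * legacy chips rebuild the descriptor chain, point the hardware at the
 * first descriptor and re-enable RX DMA. Both paths finish by
 * programming the RX filter and starting the PCU receive logic.
 */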
void ath_startrecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_rxbuf *bf, *tbf;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_edma_start_recv(sc);
		return;
	}

	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	sc->rx.buf_hold = NULL;
	sc->rx.rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
		ath_rx_buf_link(sc, bf, false);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);

start_recv:
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(ah, sc->cur_chan->offchannel);
}

static void ath_flushrecv(struct ath_softc *sc)
{
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_rx_tasklet(sc, 1, true);
	ath_rx_tasklet(sc, 1, false);
}

bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	bool stopped, reset = false;

	ath9k_hw_abortpcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	stopped = ath9k_hw_stopdmarecv(ah, &reset);

	ath_flushrecv(sc);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_edma_stop_recv(sc);
	else
		sc->rx.rxlink = NULL;

	if (!(ah->ah_flags & AH_UNPLUGGED) &&
	    unlikely(!stopped)) {
		ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
			"Failed to stop Rx DMA\n");
		RESET_STAT_INC(sc, RESET_RX_DMA_ERROR);
	}
	return stopped && !reset;
}

static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
	/* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
	struct ieee80211_mgmt *mgmt;
	u8 *pos, *end, id, elen;
	struct ieee80211_tim_ie *tim;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	pos = mgmt->u.beacon.variable;
	end = skb->data + skb->len;

	while (pos + 2 < end) {
		id = *pos++;
		elen = *pos++;
		if (pos + elen > end)
			break;

		if (id == WLAN_EID_TIM) {
			if (elen < sizeof(*tim))
				break;
			tim = (struct ieee80211_tim_ie *) pos;
			if (tim->dtim_count != 0)
				break;
			return tim->bitmap_ctrl & 0x01;
		}

		pos += elen;
	}

	return false;
}
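/*
 * Handle a beacon from our AP while in powersave: clear the
 * wait-for-beacon flag, resynchronize the beacon timers if a sync was
 * pending, and decide from the TIM element whether to stay awake for
 * buffered broadcast/multicast (CAB) traffic.
 */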
static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	bool skip_beacon = false;

	if (skb->len < 24 + 8 + 2 + 2)
		return;

	sc->ps_flags &= ~PS_WAIT_FOR_BEACON;

	if (sc->ps_flags & PS_BEACON_SYNC) {
		sc->ps_flags &= ~PS_BEACON_SYNC;
		ath_dbg(common, PS,
			"Reconfigure beacon timers based on synchronized timestamp\n");

#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
		if (ath9k_is_chanctx_enabled()) {
			if (sc->cur_chan == &sc->offchannel.chan)
				skip_beacon = true;
		}
#endif

		if (!skip_beacon &&
		    !(WARN_ON_ONCE(sc->cur_chan->beacon.beacon_interval == 0)))
			ath9k_set_beacon(sc);

		ath9k_p2p_beacon_sync(sc);
	}

	if (ath_beacon_dtim_pending_cab(skb)) {
		/*
		 * Remain awake waiting for buffered broadcast/multicast
		 * frames. If the last broadcast/multicast frame is not
		 * received properly, the next beacon frame will work as
		 * a backup trigger for returning into NETWORK SLEEP state,
		 * so we are waiting for it as well.
		 */
		ath_dbg(common, PS,
			"Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n");
		sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
		return;
	}

	if (sc->ps_flags & PS_WAIT_FOR_CAB) {
		/*
		 * This can happen if a broadcast frame is dropped or the AP
		 * fails to send a frame indicating that all CAB frames have
		 * been delivered.
		 */
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
		ath_dbg(common, PS, "PS wait for CAB frames timed out\n");
	}
}

static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon)
{
	struct ieee80211_hdr *hdr;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Process Beacon and CAB receive in PS state */
	if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
	    && mybeacon) {
		ath_rx_ps_beacon(sc, skb);
	} else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
		   (ieee80211_is_data(hdr->frame_control) ||
		    ieee80211_is_action(hdr->frame_control)) &&
		   is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_moredata(hdr->frame_control)) {
		/*
		 * No more broadcast/multicast frames to be received at this
		 * point.
		 */
		sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
		ath_dbg(common, PS,
			"All PS CAB frames received, back to sleep\n");
	} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
		   !is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_morefrags(hdr->frame_control)) {
		sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
		ath_dbg(common, PS,
			"Going back to sleep after having received PS-Poll data (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}
}

static bool ath_edma_get_buffers(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype,
				 struct ath_rx_status *rs,
				 struct ath_rxbuf **dest)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_rxbuf *bf;
	int ret;

	skb = skb_peek(&rx_edma->rx_fifo);
	if (!skb)
		return false;

	bf = SKB_CB_ATHBUF(skb);
	BUG_ON(!bf);

	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize, DMA_FROM_DEVICE);

	ret = ath9k_hw_process_rxdesc_edma(ah, rs, skb->data);
	if (ret == -EINPROGRESS) {
		/* let the device own the buffer again */
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
					   common->rx_bufsize, DMA_FROM_DEVICE);
		return false;
	}

	__skb_unlink(skb, &rx_edma->rx_fifo);
	if (ret == -EINVAL) {
		/* corrupt descriptor, skip this one and the following one */
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);

		skb = skb_peek(&rx_edma->rx_fifo);
		if (skb) {
			bf = SKB_CB_ATHBUF(skb);
			BUG_ON(!bf);

			__skb_unlink(skb, &rx_edma->rx_fifo);
			list_add_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_edma_buf_link(sc, qtype);
		}

		bf = NULL;
	}

	*dest = bf;
	return true;
}

static struct ath_rxbuf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
						  struct ath_rx_status *rs,
						  enum ath9k_rx_qtype qtype)
{
	struct ath_rxbuf *bf = NULL;

	while (ath_edma_get_buffers(sc, qtype, rs, &bf)) {
		if (!bf)
			continue;

		return bf;
	}
	return NULL;
}
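/*
 * Legacy (non-EDMA) counterpart of ath_edma_get_next_rx_buf(): return
 * the buffer at the head of the descriptor list once the hardware has
 * finished with it, or NULL if it is still in flight or held back in
 * sc->rx.buf_hold.
 */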
static struct ath_rxbuf *ath_get_next_rx_buf(struct ath_softc *sc,
					     struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct ath_rxbuf *bf;
	int ret;

	if (list_empty(&sc->rx.rxbuf)) {
		sc->rx.rxlink = NULL;
		return NULL;
	}

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
	if (bf == sc->rx.buf_hold)
		return NULL;

	ds = bf->bf_desc;

	/*
	 * Must provide the virtual address of the current
	 * descriptor, the physical address, and the virtual
	 * address of the next descriptor in the h/w chain.
	 * This allows the HAL to look ahead to see if the
	 * hardware is done with a descriptor by checking the
	 * done bit in the following descriptor and the address
	 * of the current descriptor the DMA engine is working
	 * on. All this is necessary because of our use of
	 * a self-linked list to avoid rx overruns.
	 */
	ret = ath9k_hw_rxprocdesc(ah, ds, rs);
	if (ret == -EINPROGRESS) {
		struct ath_rx_status trs;
		struct ath_rxbuf *tbf;
		struct ath_desc *tds;

		memset(&trs, 0, sizeof(trs));
		if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
			sc->rx.rxlink = NULL;
			return NULL;
		}

		tbf = list_entry(bf->list.next, struct ath_rxbuf, list);

		/*
		 * On some hardware the descriptor status words could
		 * get corrupted, including the done bit. Because of
		 * this, check if the next descriptor's done bit is
		 * set or not.
		 *
		 * If the next descriptor's done bit is set, the current
		 * descriptor has been corrupted. Force s/w to discard
		 * this descriptor and continue...
		 */

		tds = tbf->bf_desc;
		ret = ath9k_hw_rxprocdesc(ah, tds, &trs);
		if (ret == -EINPROGRESS)
			return NULL;

		/*
		 * Re-check previous descriptor, in case it has been filled
		 * in the mean time.
		 */
		ret = ath9k_hw_rxprocdesc(ah, ds, rs);
		if (ret == -EINPROGRESS) {
			/*
			 * mark descriptor as zero-length and set the 'more'
			 * flag to ensure that both buffers get discarded
			 */
			rs->rs_datalen = 0;
			rs->rs_more = true;
		}
	}

	list_del(&bf->list);
	if (!bf->bf_mpdu)
		return bf;

	/*
	 * Synchronize the DMA transfer with CPU before
	 * 1. accessing the frame
	 * 2. requeueing the same buffer to h/w
	 */
	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize,
				DMA_FROM_DEVICE);

	return bf;
}
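/*
 * The hardware reports only the low 32 bits of the TSF at the time the
 * frame was received. Combine it with the 64-bit TSF read at the start
 * of the RX tasklet, correcting by 2^32 when the low word has wrapped
 * between the two samples.
 */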
static void ath9k_process_tsf(struct ath_rx_status *rs,
			      struct ieee80211_rx_status *rxs,
			      u64 tsf)
{
	u32 tsf_lower = tsf & 0xffffffff;

	rxs->mactime = (tsf & ~0xffffffffULL) | rs->rs_tstamp;
	if (rs->rs_tstamp > tsf_lower &&
	    unlikely(rs->rs_tstamp - tsf_lower > 0x10000000))
		rxs->mactime -= 0x100000000ULL;

	if (rs->rs_tstamp < tsf_lower &&
	    unlikely(tsf_lower - rs->rs_tstamp > 0x10000000))
		rxs->mactime += 0x100000000ULL;
}

/*
 * For Decrypt or Demic errors, we only mark packet status here and always
 * push the frame up to let mac80211 handle the actual error case, be it a
 * missing decryption key or a real decryption error. This lets us keep
 * statistics there.
 */
static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
				   struct sk_buff *skb,
				   struct ath_rx_status *rx_stats,
				   struct ieee80211_rx_status *rx_status,
				   bool *decrypt_error, u64 tsf)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_hdr *hdr;
	bool discard_current = sc->rx.discard_next;

	/*
	 * Discard corrupt descriptors which are marked in
	 * ath_get_next_rx_buf().
	 */
	if (discard_current)
		goto corrupt;

	sc->rx.discard_next = false;

	/*
	 * Discard zero-length packets and packets smaller than an ACK
	 */
	if (rx_stats->rs_datalen < 10) {
		RX_STAT_INC(sc, rx_len_err);
		goto corrupt;
	}

	/*
	 * rs_status follows rs_datalen, so an over-sized rs_datalen is a
	 * hint that the hardware corrupted the descriptor; ignore those
	 * frames.
	 */
	if (rx_stats->rs_datalen > (common->rx_bufsize - ah->caps.rx_status_len)) {
		RX_STAT_INC(sc, rx_len_err);
		goto corrupt;
	}

	/* Only use status info from the last fragment */
	if (rx_stats->rs_more)
		return 0;

	/*
	 * Return immediately if the RX descriptor has been marked
	 * as corrupt based on the various error bits.
	 *
	 * This is different from the other corrupt descriptor
	 * condition handled above.
	 */
	if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC)
		goto corrupt;

	hdr = (struct ieee80211_hdr *) (skb->data + ah->caps.rx_status_len);

	ath9k_process_tsf(rx_stats, rx_status, tsf);
	ath_debug_stat_rx(sc, rx_stats);

	/*
	 * Process PHY errors and return so that the packet
	 * can be dropped.
	 */
	if (rx_stats->rs_status & ATH9K_RXERR_PHY) {
		/*
		 * DFS and spectral are mutually exclusive
		 *
		 * Since some chips use PHYERR_RADAR as indication for both, we
		 * need to double check which feature is enabled to prevent
		 * feeding spectral or dfs-detector with wrong frames.
		 */
		if (hw->conf.radar_enabled) {
			ath9k_dfs_process_phyerr(sc, hdr, rx_stats,
						 rx_status->mactime);
		} else if (sc->spec_priv.spectral_mode != SPECTRAL_DISABLED &&
			   ath_cmn_process_fft(&sc->spec_priv, hdr, rx_stats,
					       rx_status->mactime)) {
			RX_STAT_INC(sc, rx_spectral);
		}
		return -EINVAL;
	}

	/*
	 * Everything but the rate is checked here; the rate check is done
	 * separately to avoid doing two lookups for a rate for each frame.
	 */
	spin_lock_bh(&sc->chan_lock);
	if (!ath9k_cmn_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error,
				 sc->cur_chan->rxfilter)) {
		spin_unlock_bh(&sc->chan_lock);
		return -EINVAL;
	}
	spin_unlock_bh(&sc->chan_lock);

	if (ath_is_mybeacon(common, hdr)) {
		RX_STAT_INC(sc, rx_beacons);
		rx_stats->is_mybeacon = true;
	}

	/*
	 * This shouldn't happen, but have a safety check anyway.
	 */
	if (WARN_ON(!ah->curchan))
		return -EINVAL;

	if (ath9k_cmn_process_rate(common, hw, rx_stats, rx_status)) {
		/*
		 * No valid hardware bitrate found -- we should not get here
		 * because hardware has already validated this frame as OK.
		 */
		ath_dbg(common, ANY, "unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
			rx_stats->rs_rate);
		RX_STAT_INC(sc, rx_rate_err);
		return -EINVAL;
	}

	if (ath9k_is_chanctx_enabled()) {
		if (rx_stats->is_mybeacon)
			ath_chanctx_beacon_recv_ev(sc,
					   ATH_CHANCTX_EVENT_BEACON_RECEIVED);
	}

	ath9k_cmn_process_rssi(common, hw, rx_stats, rx_status);

	rx_status->band = ah->curchan->chan->band;
	rx_status->freq = ah->curchan->chan->center_freq;
	rx_status->antenna = rx_stats->rs_antenna;
	rx_status->flag |= RX_FLAG_MACTIME_END;

#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
	if (ieee80211_is_data_present(hdr->frame_control) &&
	    !ieee80211_is_qos_nullfunc(hdr->frame_control))
		sc->rx.num_pkts++;
#endif

	return 0;

corrupt:
	sc->rx.discard_next = rx_stats->rs_more;
	return -EINVAL;
}
/*
 * Run the LNA combining algorithm only in these cases:
 *
 * Standalone WLAN cards with both LNA/Antenna diversity
 * enabled in the EEPROM.
 *
 * WLAN+BT cards which are in the supported card list
 * in ath_pci_id_table and the user has loaded the
 * driver with "bt_ant_diversity" set to true.
 */
static void ath9k_antenna_check(struct ath_softc *sc,
				struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_common *common = ath9k_hw_common(ah);

	if (!(ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB))
		return;

	/*
	 * Change the default rx antenna if rx diversity
	 * chooses the other antenna 3 times in a row.
	 */
	if (sc->rx.defant != rs->rs_antenna) {
		if (++sc->rx.rxotherant >= 3)
			ath_setdefantenna(sc, rs->rs_antenna);
	} else {
		sc->rx.rxotherant = 0;
	}

	if (pCap->hw_caps & ATH9K_HW_CAP_BT_ANT_DIV) {
		if (common->bt_ant_diversity)
			ath_ant_comb_scan(sc, rs);
	} else {
		ath_ant_comb_scan(sc, rs);
	}
}

static void ath9k_apply_ampdu_details(struct ath_softc *sc,
	struct ath_rx_status *rs, struct ieee80211_rx_status *rxs)
{
	if (rs->rs_isaggr) {
		rxs->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;

		rxs->ampdu_reference = sc->rx.ampdu_ref;

		if (!rs->rs_moreaggr) {
			rxs->flag |= RX_FLAG_AMPDU_IS_LAST;
			sc->rx.ampdu_ref++;
		}

		if (rs->rs_flags & ATH9K_RX_DELIM_CRC_PRE)
			rxs->flag |= RX_FLAG_AMPDU_DELIM_CRC_ERROR;
	}
}
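/*
 * Estimate the airtime consumed by a received data frame from its rate,
 * bandwidth, guard interval and length, and charge it against the
 * sending station's airtime deficit so the airtime fairness scheduler
 * can account for RX as well as TX time.
 */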
static void ath_rx_count_airtime(struct ath_softc *sc,
				 struct ath_rx_status *rs,
				 struct sk_buff *skb)
{
	struct ath_node *an;
	struct ath_acq *acq;
	struct ath_vif *avp;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_sta *sta;
	struct ieee80211_rx_status *rxs;
	const struct ieee80211_rate *rate;
	bool is_sgi, is_40, is_sp;
	int phy;
	u16 len = rs->rs_datalen;
	u32 airtime = 0;
	u8 tidno, acno;

	if (!ieee80211_is_data(hdr->frame_control))
		return;

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(sc->hw, hdr->addr2, NULL);
	if (!sta)
		goto exit;
	an = (struct ath_node *) sta->drv_priv;
	avp = (struct ath_vif *) an->vif->drv_priv;
	tidno = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	acno = TID_TO_WME_AC(tidno);
	acq = &avp->chanctx->acq[acno];

	rxs = IEEE80211_SKB_RXCB(skb);

	is_sgi = !!(rxs->enc_flags & RX_ENC_FLAG_SHORT_GI);
	is_40 = !!(rxs->bw == RATE_INFO_BW_40);
	is_sp = !!(rxs->enc_flags & RX_ENC_FLAG_SHORTPRE);

	if (!!(rxs->encoding == RX_ENC_HT)) {
		/* MCS rates */
		airtime += ath_pkt_duration(sc, rxs->rate_idx, len,
					    is_40, is_sgi, is_sp);
	} else {
		phy = IS_CCK_RATE(rs->rs_rate) ? WLAN_RC_PHY_CCK : WLAN_RC_PHY_OFDM;
		rate = &common->sbands[rxs->band].bitrates[rxs->rate_idx];
		airtime += ath9k_hw_computetxtime(ah, phy, rate->bitrate * 100,
						  len, rxs->rate_idx, is_sp);
	}

	if (!!(sc->airtime_flags & AIRTIME_USE_RX)) {
		spin_lock_bh(&acq->lock);
		an->airtime_deficit[acno] -= airtime;
		if (an->airtime_deficit[acno] <= 0)
			__ath_tx_queue_tid(sc, ATH_AN_2_TID(an, tidno));
		spin_unlock_bh(&acq->lock);
	}
	ath_debug_airtime(sc, an, airtime, 0);
exit:
	rcu_read_unlock();
}
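/*
 * Main RX processing loop, run from the rx tasklet (and from
 * ath_flushrecv() with flush set): pull completed buffers off the
 * selected queue, validate and preprocess each frame, swap in a freshly
 * allocated DMA buffer, reassemble chained (rs_more) fragments, update
 * powersave and antenna state, and hand the frame to mac80211. The
 * loop is bounded by a budget of 512 frames per invocation.
 */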
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
	struct ath_rxbuf *bf;
	struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
	struct ieee80211_rx_status *rxs;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_hw *hw = sc->hw;
	int retval;
	struct ath_rx_status rs;
	enum ath9k_rx_qtype qtype;
	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	int dma_type;
	u64 tsf = 0;
	unsigned long flags;
	dma_addr_t new_buf_addr;
	unsigned int budget = 512;
	struct ieee80211_hdr *hdr;

	if (edma)
		dma_type = DMA_BIDIRECTIONAL;
	else
		dma_type = DMA_FROM_DEVICE;

	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;

	tsf = ath9k_hw_gettsf64(ah);

	do {
		bool decrypt_error = false;

		memset(&rs, 0, sizeof(rs));
		if (edma)
			bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
		else
			bf = ath_get_next_rx_buf(sc, &rs);

		if (!bf)
			break;

		skb = bf->bf_mpdu;
		if (!skb)
			continue;

		/*
		 * Take frame header from the first fragment and RX status from
		 * the last one.
		 */
		if (sc->rx.frag)
			hdr_skb = sc->rx.frag;
		else
			hdr_skb = skb;

		rxs = IEEE80211_SKB_RXCB(hdr_skb);
		memset(rxs, 0, sizeof(struct ieee80211_rx_status));

		retval = ath9k_rx_skb_preprocess(sc, hdr_skb, &rs, rxs,
						 &decrypt_error, tsf);
		if (retval)
			goto requeue_drop_frag;

		/* Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb */
		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);

		/* If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
		 * skb and put it at the tail of the sc->rx.rxbuf list for
		 * processing. */
		if (!requeue_skb) {
			RX_STAT_INC(sc, rx_oom_err);
			goto requeue_drop_frag;
		}

		/* We will now give hardware our shiny new allocated skb */
		new_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
					      common->rx_bufsize, dma_type);
		if (unlikely(dma_mapping_error(sc->dev, new_buf_addr))) {
			dev_kfree_skb_any(requeue_skb);
			goto requeue_drop_frag;
		}

		/* Unmap the frame */
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 common->rx_bufsize, dma_type);

		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = new_buf_addr;

		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
		if (ah->caps.rx_status_len)
			skb_pull(skb, ah->caps.rx_status_len);

		if (!rs.rs_more)
			ath9k_cmn_rx_skb_postprocess(common, hdr_skb, &rs,
						     rxs, decrypt_error);

		if (rs.rs_more) {
			RX_STAT_INC(sc, rx_frags);
			/*
			 * rs_more indicates chained descriptors which can be
			 * used to link buffers together for a sort of
			 * scatter-gather operation.
			 */
			if (sc->rx.frag) {
				/* too many fragments - cannot handle frame */
				dev_kfree_skb_any(sc->rx.frag);
				dev_kfree_skb_any(skb);
				RX_STAT_INC(sc, rx_too_many_frags_err);
				skb = NULL;
			}
			sc->rx.frag = skb;
			goto requeue;
		}

		if (sc->rx.frag) {
			int space = skb->len - skb_tailroom(hdr_skb);

			if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
				dev_kfree_skb(skb);
				RX_STAT_INC(sc, rx_oom_err);
				goto requeue_drop_frag;
			}

			sc->rx.frag = NULL;

			skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
						  skb->len);
			dev_kfree_skb_any(skb);
			skb = hdr_skb;
		}

		if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
			skb_trim(skb, skb->len - 8);

		spin_lock_irqsave(&sc->sc_pm_lock, flags);
		if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
				     PS_WAIT_FOR_CAB |
				     PS_WAIT_FOR_PSPOLL_DATA)) ||
		    ath9k_check_auto_sleep(sc))
			ath_rx_ps(sc, skb, rs.is_mybeacon);
		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

		ath9k_antenna_check(sc, &rs);
		ath9k_apply_ampdu_details(sc, &rs, rxs);
		ath_debug_rate_stats(sc, &rs, skb);
		ath_rx_count_airtime(sc, &rs, skb);

		hdr = (struct ieee80211_hdr *)skb->data;
		if (ieee80211_is_ack(hdr->frame_control))
			ath_dynack_sample_ack_ts(sc->sc_ah, skb, rs.rs_tstamp);

		ieee80211_rx(hw, skb);

requeue_drop_frag:
		if (sc->rx.frag) {
			dev_kfree_skb_any(sc->rx.frag);
			sc->rx.frag = NULL;
		}
requeue:
		list_add_tail(&bf->list, &sc->rx.rxbuf);

		if (!edma) {
			ath_rx_buf_relink(sc, bf, flush);
			if (!flush)
				ath9k_hw_rxena(ah);
		} else if (!flush) {
			ath_rx_edma_buf_link(sc, qtype);
		}

		if (!budget--)
			break;
	} while (1);

	if (!(ah->imask & ATH9K_INT_RXEOL)) {
		ah->imask |= (ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
		ath9k_hw_set_interrupts(ah);
	}

	return 0;
}