/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define SKB_CB_ATHBUF(__skb)	(*((struct ath_buf **)__skb->cb))

static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
{
	return sc->ps_enabled &&
	       (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
}

static struct ieee80211_hw *ath_get_virt_hw(struct ath_softc *sc,
					    struct ieee80211_hdr *hdr)
{
	struct ieee80211_hw *hw = sc->pri_wiphy->hw;
	int i;

	spin_lock_bh(&sc->wiphy_lock);
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		struct ath_wiphy *aphy = sc->sec_wiphy[i];
		if (aphy == NULL)
			continue;
		if (compare_ether_addr(hdr->addr1, aphy->hw->wiphy->perm_addr)
		    == 0) {
			hw = aphy->hw;
			break;
		}
	}
	spin_unlock_bh(&sc->wiphy_lock);
	return hw;
}

/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct sk_buff *skb;

	ATH_RXBUF_RESET(bf);

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	BUG_ON(skb == NULL);
	ds->ds_vdata = skb->data;

	/*
	 * setup rx descriptors. The rx_bufsize here tells the hardware
	 * how much data it can DMA to us and that we are prepared
	 * to process
	 */
	ath9k_hw_setuprxdesc(ah, ds,
			     common->rx_bufsize,
			     0);

	if (sc->rx.rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->rx.rxlink = bf->bf_daddr;

	sc->rx.rxlink = &ds->ds_link;
	ath9k_hw_rxena(ah);
}

static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(sc->sc_ah, antenna);
	sc->rx.defant = antenna;
	sc->rx.rxotherant = 0;
}

static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
		ath_hw_setbssidmask(common);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* Handle any link-level address change. */
	ath9k_hw_setmac(ah, common->macaddr);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}
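/*
 * On EDMA capable hardware (ATH9K_HW_CAP_EDMA, the AR9003 family) receive
 * does not use the self-linked legacy descriptor chain. Instead, free
 * buffers are pushed to two hardware FIFOs, one high priority and one low
 * priority queue, and the hardware writes the RX status into the first
 * rx_status_len bytes of the buffer itself. While an skb sits in a FIFO,
 * the owning ath_buf is remembered in the skb's control block via
 * SKB_CB_ATHBUF().
 */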
static bool ath_rx_edma_buf_link(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;
	struct ath_buf *bf;

	rx_edma = &sc->rx.rx_edma[qtype];
	if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
		return false;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	list_del_init(&bf->list);

	skb = bf->bf_mpdu;

	ATH_RXBUF_RESET(bf);
	memset(skb->data, 0, ah->caps.rx_status_len);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   ah->caps.rx_status_len, DMA_TO_DEVICE);

	SKB_CB_ATHBUF(skb) = bf;
	ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
	skb_queue_tail(&rx_edma->rx_fifo, skb);

	return true;
}

static void ath_rx_addbuffer_edma(struct ath_softc *sc,
				  enum ath9k_rx_qtype qtype, int size)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u32 nbuf = 0;

	if (list_empty(&sc->rx.rxbuf)) {
		ath_print(common, ATH_DBG_QUEUE, "No free rx buf available\n");
		return;
	}

	while (!list_empty(&sc->rx.rxbuf)) {
		nbuf++;

		if (!ath_rx_edma_buf_link(sc, qtype))
			break;

		if (nbuf >= size)
			break;
	}
}

static void ath_rx_remove_buffer(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_buf *bf;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;

	rx_edma = &sc->rx.rx_edma[qtype];

	while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}
}

static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_buf *bf;

	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		if (bf->bf_mpdu)
			dev_kfree_skb_any(bf->bf_mpdu);
	}

	INIT_LIST_HEAD(&sc->rx.rxbuf);

	kfree(sc->rx.rx_bufptr);
	sc->rx.rx_bufptr = NULL;
}

static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
{
	skb_queue_head_init(&rx_edma->rx_fifo);
	skb_queue_head_init(&rx_edma->rx_buffers);
	rx_edma->rx_fifo_hwsize = size;
}
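/*
 * rx_bufsize is rounded up to a multiple of the cache line size (at most
 * 64 bytes) and includes the rx_status_len area that the hardware writes
 * at the start of each received frame. EDMA buffers are therefore mapped
 * DMA_BIDIRECTIONAL: the status area is cleared and synced to the device
 * before a buffer is queued (ath_rx_edma_buf_link()) and synced back to
 * the CPU before the status is parsed (ath_edma_get_buffers()).
 */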
static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0, i;
	u32 size;

	common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN +
				     ah->caps.rx_status_len,
				     min(common->cachelsz, (u16)64));

	ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
				    ah->caps.rx_status_len);

	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
			       ah->caps.rx_lp_qdepth);
	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
			       ah->caps.rx_hp_qdepth);

	size = sizeof(struct ath_buf) * nbufs;
	bf = kzalloc(size, GFP_KERNEL);
	if (!bf)
		return -ENOMEM;

	INIT_LIST_HEAD(&sc->rx.rxbuf);
	sc->rx.rx_bufptr = bf;

	for (i = 0; i < nbufs; i++, bf++) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
		if (!skb) {
			error = -ENOMEM;
			goto rx_init_fail;
		}

		memset(skb->data, 0, common->rx_bufsize);
		bf->bf_mpdu = skb;

		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			ath_print(common, ATH_DBG_FATAL,
				  "dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto rx_init_fail;
		}

		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}

	return 0;

rx_init_fail:
	ath_rx_edma_cleanup(sc);
	return error;
}

static void ath_edma_start_recv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxbuflock);

	ath9k_hw_rxena(sc->sc_ah);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);

	spin_unlock_bh(&sc->rx.rxbuflock);

	ath_opmode_init(sc);

	ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_SCANNING));
}

static void ath_edma_stop_recv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxbuflock);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
	spin_unlock_bh(&sc->rx.rxbuflock);
}

int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	spin_lock_init(&sc->rx.rxflushlock);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_lock_init(&sc->rx.rxbuflock);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		return ath_rx_edma_init(sc, nbufs);
	} else {
		common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
					     min(common->cachelsz, (u16)64));

		ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
			  common->cachelsz, common->rx_bufsize);

		/* Initialize rx descriptors */

		error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
					  "rx", nbufs, 1, 0);
		if (error != 0) {
			ath_print(common, ATH_DBG_FATAL,
				  "failed to allocate rx descriptors: %d\n",
				  error);
			goto err;
		}

		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = ath_rxbuf_alloc(common, common->rx_bufsize,
					      GFP_KERNEL);
			if (skb == NULL) {
				error = -ENOMEM;
				goto err;
			}

			bf->bf_mpdu = skb;
			bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
							 common->rx_bufsize,
							 DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(sc->dev,
						       bf->bf_buf_addr))) {
				dev_kfree_skb_any(skb);
				bf->bf_mpdu = NULL;
				ath_print(common, ATH_DBG_FATAL,
					  "dma_mapping_error() on RX init\n");
				error = -ENOMEM;
				goto err;
			}
			bf->bf_dmacontext = bf->bf_buf_addr;
		}
		sc->rx.rxlink = NULL;
	}

err:
	if (error)
		ath_rx_cleanup(sc);

	return error;
}
void ath_rx_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_rx_edma_cleanup(sc);
		return;
	} else {
		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = bf->bf_mpdu;
			if (skb) {
				dma_unmap_single(sc->dev, bf->bf_buf_addr,
						 common->rx_bufsize,
						 DMA_FROM_DEVICE);
				dev_kfree_skb(skb);
			}
		}

		if (sc->rx.rxdma.dd_desc_len != 0)
			ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
	}
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 *   - when scanning
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
#define	RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)

	u32 rfilt;

	rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
		| ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	/* If not a STA, enable processing of Probe Requests */
	if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/*
	 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
	 * mode interface or when in monitor mode. AP mode does not need this
	 * since it receives all in-BSS frames anyway.
	 */
	if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) &&
	     (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) ||
	    (sc->sc_ah->opmode == NL80211_IFTYPE_MONITOR))
		rfilt |= ATH9K_RX_FILTER_PROM;

	if (sc->rx.rxfilter & FIF_CONTROL)
		rfilt |= ATH9K_RX_FILTER_CONTROL;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
	    !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
		rfilt |= ATH9K_RX_FILTER_MYBEACON;
	else
		rfilt |= ATH9K_RX_FILTER_BEACON;

	if ((AR_SREV_9280_10_OR_LATER(sc->sc_ah) ||
	     AR_SREV_9285_10_OR_LATER(sc->sc_ah)) &&
	    (sc->sc_ah->opmode == NL80211_IFTYPE_AP) &&
	    (sc->rx.rxfilter & FIF_PSPOLL))
		rfilt |= ATH9K_RX_FILTER_PSPOLL;

	if (conf_is_ht(&sc->hw->conf))
		rfilt |= ATH9K_RX_FILTER_COMP_BAR;

	if (sc->sec_wiphy || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
		/* TODO: only needed if more than one BSSID is in use in
		 * station/adhoc mode */
		/* The following may also be needed for other older chips */
		if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
			rfilt |= ATH9K_RX_FILTER_PROM;
		rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
	}

	return rfilt;

#undef RX_FILTER_PRESERVE
}

int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_edma_start_recv(sc);
		return 0;
	}

	spin_lock_bh(&sc->rx.rxbuflock);
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	sc->rx.rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);

start_recv:
	spin_unlock_bh(&sc->rx.rxbuflock);
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_SCANNING));

	return 0;
}
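/*
 * Stopping receive is ordered: disable the PCU receive engine and clear
 * the RX filter first so no new frames are accepted, then stop the RX DMA
 * engine. The return value reports whether RX DMA actually stopped. On
 * EDMA hardware the FIFOs are drained back onto the free buffer list; on
 * legacy hardware only the software link pointer needs to be reset.
 */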
bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	bool stopped;

	ath9k_hw_stoppcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	stopped = ath9k_hw_stopdmarecv(ah);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_edma_stop_recv(sc);
	else
		sc->rx.rxlink = NULL;

	return stopped;
}

void ath_flushrecv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxflushlock);
	sc->sc_flags |= SC_OP_RXFLUSH;
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_rx_tasklet(sc, 1, true);
	ath_rx_tasklet(sc, 1, false);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_unlock_bh(&sc->rx.rxflushlock);
}
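/*
 * A TIM element with dtim_count == 0 marks this beacon as a DTIM beacon;
 * bit 0 of bitmap_ctrl is the multicast/broadcast traffic indication,
 * i.e. the AP has buffered group addressed frames (CAB) that will be
 * delivered right after this beacon.
 */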
static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
	/* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
	struct ieee80211_mgmt *mgmt;
	u8 *pos, *end, id, elen;
	struct ieee80211_tim_ie *tim;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	pos = mgmt->u.beacon.variable;
	end = skb->data + skb->len;

	while (pos + 2 < end) {
		id = *pos++;
		elen = *pos++;
		if (pos + elen > end)
			break;

		if (id == WLAN_EID_TIM) {
			if (elen < sizeof(*tim))
				break;
			tim = (struct ieee80211_tim_ie *) pos;
			if (tim->dtim_count != 0)
				break;
			return tim->bitmap_ctrl & 0x01;
		}

		pos += elen;
	}

	return false;
}

static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_mgmt *mgmt;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	if (skb->len < 24 + 8 + 2 + 2)
		return;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0)
		return; /* not from our current AP */

	sc->ps_flags &= ~PS_WAIT_FOR_BEACON;

	if (sc->ps_flags & PS_BEACON_SYNC) {
		sc->ps_flags &= ~PS_BEACON_SYNC;
		ath_print(common, ATH_DBG_PS,
			  "Reconfigure Beacon timers based on "
			  "timestamp from the AP\n");
		ath_beacon_config(sc, NULL);
	}

	if (ath_beacon_dtim_pending_cab(skb)) {
		/*
		 * Remain awake waiting for buffered broadcast/multicast
		 * frames. If the last broadcast/multicast frame is not
		 * received properly, the next beacon frame will work as
		 * a backup trigger for returning into NETWORK SLEEP state,
		 * so we are waiting for it as well.
		 */
		ath_print(common, ATH_DBG_PS, "Received DTIM beacon indicating "
			  "buffered broadcast/multicast frame(s)\n");
		sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
		return;
	}

	if (sc->ps_flags & PS_WAIT_FOR_CAB) {
		/*
		 * This can happen if a broadcast frame is dropped or the AP
		 * fails to send a frame indicating that all CAB frames have
		 * been delivered.
		 */
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
		ath_print(common, ATH_DBG_PS,
			  "PS wait for CAB frames timed out\n");
	}
}
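/*
 * While in powersave, received frames decide when the chip may go back to
 * sleep: a beacon from the current BSS clears PS_WAIT_FOR_BEACON (and
 * resyncs the beacon timers if PS_BEACON_SYNC is set), the last group
 * addressed frame without the more-data bit clears PS_WAIT_FOR_CAB, and a
 * unicast, non-fragmented frame clears PS_WAIT_FOR_PSPOLL_DATA.
 */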
static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Process Beacon and CAB receive in PS state */
	if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
	    && ieee80211_is_beacon(hdr->frame_control))
		ath_rx_ps_beacon(sc, skb);
	else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
		 (ieee80211_is_data(hdr->frame_control) ||
		  ieee80211_is_action(hdr->frame_control)) &&
		 is_multicast_ether_addr(hdr->addr1) &&
		 !ieee80211_has_moredata(hdr->frame_control)) {
		/*
		 * No more broadcast/multicast frames to be received at this
		 * point.
		 */
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
		ath_print(common, ATH_DBG_PS,
			  "All PS CAB frames received, back to sleep\n");
	} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
		   !is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_morefrags(hdr->frame_control)) {
		sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
		ath_print(common, ATH_DBG_PS,
			  "Going back to sleep after having received "
			  "PS-Poll data (0x%lx)\n",
			  sc->ps_flags & (PS_WAIT_FOR_BEACON |
					  PS_WAIT_FOR_CAB |
					  PS_WAIT_FOR_PSPOLL_DATA |
					  PS_WAIT_FOR_TX_ACK));
	}
}

static void ath_rx_send_to_mac80211(struct ieee80211_hw *hw,
				    struct ath_softc *sc, struct sk_buff *skb,
				    struct ieee80211_rx_status *rxs)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Send the frame to mac80211 */
	if (is_multicast_ether_addr(hdr->addr1)) {
		int i;
		/*
		 * Deliver broadcast/multicast frames to all suitable
		 * virtual wiphys.
		 */
		/* TODO: filter based on channel configuration */
		for (i = 0; i < sc->num_sec_wiphy; i++) {
			struct ath_wiphy *aphy = sc->sec_wiphy[i];
			struct sk_buff *nskb;
			if (aphy == NULL)
				continue;
			nskb = skb_copy(skb, GFP_ATOMIC);
			if (!nskb)
				continue;
			ieee80211_rx(aphy->hw, nskb);
		}
		ieee80211_rx(sc->hw, skb);
	} else
		/* Deliver unicast frames based on receiver address */
		ieee80211_rx(hw, skb);
}
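/*
 * ath9k_hw_process_rxdesc_edma() returns -EINPROGRESS while the hardware
 * has not yet written a complete status for the buffer at the head of the
 * FIFO; in that case the buffer is synced back to the device and left in
 * place. A return of -EINVAL means the status is corrupt: the buffer and
 * the one following it are returned to the free list and re-linked.
 */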
static bool ath_edma_get_buffers(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int ret;

	skb = skb_peek(&rx_edma->rx_fifo);
	if (!skb)
		return false;

	bf = SKB_CB_ATHBUF(skb);
	BUG_ON(!bf);

	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize, DMA_FROM_DEVICE);

	ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data);
	if (ret == -EINPROGRESS) {
		/* let device gain the buffer again */
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
					   common->rx_bufsize, DMA_FROM_DEVICE);
		return false;
	}

	__skb_unlink(skb, &rx_edma->rx_fifo);
	if (ret == -EINVAL) {
		/* corrupt descriptor, skip this one and the following one */
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);
		skb = skb_peek(&rx_edma->rx_fifo);
		if (!skb)
			return true;

		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);

		__skb_unlink(skb, &rx_edma->rx_fifo);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);
		return true;
	}
	skb_queue_tail(&rx_edma->rx_buffers, skb);

	return true;
}

static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
						struct ath_rx_status *rs,
						enum ath9k_rx_qtype qtype)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct sk_buff *skb;
	struct ath_buf *bf;

	while (ath_edma_get_buffers(sc, qtype));
	skb = __skb_dequeue(&rx_edma->rx_buffers);
	if (!skb)
		return NULL;

	bf = SKB_CB_ATHBUF(skb);
	ath9k_hw_process_rxdesc_edma(sc->sc_ah, rs, skb->data);
	return bf;
}

static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
					   struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct ath_buf *bf;
	int ret;

	if (list_empty(&sc->rx.rxbuf)) {
		sc->rx.rxlink = NULL;
		return NULL;
	}

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ds = bf->bf_desc;

	/*
	 * Must provide the virtual address of the current
	 * descriptor, the physical address, and the virtual
	 * address of the next descriptor in the h/w chain.
	 * This allows the HAL to look ahead to see if the
	 * hardware is done with a descriptor by checking the
	 * done bit in the following descriptor and the address
	 * of the current descriptor the DMA engine is working
	 * on. All this is necessary because of our use of
	 * a self-linked list to avoid rx overruns.
	 */
	ret = ath9k_hw_rxprocdesc(ah, ds, rs, 0);
	if (ret == -EINPROGRESS) {
		struct ath_rx_status trs;
		struct ath_buf *tbf;
		struct ath_desc *tds;

		memset(&trs, 0, sizeof(trs));
		if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
			sc->rx.rxlink = NULL;
			return NULL;
		}

		tbf = list_entry(bf->list.next, struct ath_buf, list);

		/*
		 * On some hardware the descriptor status words could
		 * get corrupted, including the done bit. Because of
		 * this, check if the next descriptor's done bit is
		 * set or not.
		 *
		 * If the next descriptor's done bit is set, the current
		 * descriptor has been corrupted. Force s/w to discard
		 * this descriptor and continue...
		 */

		tds = tbf->bf_desc;
		ret = ath9k_hw_rxprocdesc(ah, tds, &trs, 0);
		if (ret == -EINPROGRESS)
			return NULL;
	}

	if (!bf->bf_mpdu)
		return bf;

	/*
	 * Synchronize the DMA transfer with CPU before
	 * 1. accessing the frame
	 * 2. requeueing the same buffer to h/w
	 */
	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize,
				DMA_FROM_DEVICE);

	return bf;
}

/* Assumes you've already done the endian to CPU conversion */
static bool ath9k_rx_accept(struct ath_common *common,
			    struct ieee80211_hdr *hdr,
			    struct ieee80211_rx_status *rxs,
			    struct ath_rx_status *rx_stats,
			    bool *decrypt_error)
{
	struct ath_hw *ah = common->ah;
	__le16 fc;
	u8 rx_status_len = ah->caps.rx_status_len;

	fc = hdr->frame_control;

	if (!rx_stats->rs_datalen)
		return false;
	/*
	 * rs_status follows rs_datalen so if rs_datalen is too large
	 * we can take a hint that hardware corrupted it, so ignore
	 * those frames.
	 */
	if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len))
		return false;

	/*
	 * rs_more indicates chained descriptors which can be used
	 * to link buffers together for a sort of scatter-gather
	 * operation.
	 * reject the frame, we don't support scatter-gather yet and
	 * the frame is probably corrupt anyway
	 */
	if (rx_stats->rs_more)
		return false;

	/*
	 * The rx_stats->rs_status will not be set until the end of the
	 * chained descriptors so it can be ignored if rs_more is set. The
	 * rs_more will be false at the last element of the chained
	 * descriptors.
	 */
	if (rx_stats->rs_status != 0) {
		if (rx_stats->rs_status & ATH9K_RXERR_CRC)
			rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
		if (rx_stats->rs_status & ATH9K_RXERR_PHY)
			return false;

		if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) {
			*decrypt_error = true;
		} else if (rx_stats->rs_status & ATH9K_RXERR_MIC) {
			if (ieee80211_is_ctl(fc))
				/*
				 * Sometimes, we get invalid
				 * MIC failures on valid control frames.
				 * Remove these mic errors.
				 */
				rx_stats->rs_status &= ~ATH9K_RXERR_MIC;
			else
				rxs->flag |= RX_FLAG_MMIC_ERROR;
		}
		/*
		 * Reject error frames with the exception of
		 * decryption and MIC failures. For monitor mode,
		 * we also ignore the CRC error.
		 */
		if (ah->opmode == NL80211_IFTYPE_MONITOR) {
			if (rx_stats->rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
			      ATH9K_RXERR_CRC))
				return false;
		} else {
			if (rx_stats->rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
				return false;
			}
		}
	}
	return true;
}
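/*
 * In the RX status, bit 0x80 of rs_rate marks an HT (MCS) rate; the low
 * seven bits are then the MCS index and the 40 MHz / short-GI flags are
 * reported separately. Legacy rates are matched against the current
 * band's bitrate table by hw_value, or by hw_value_short when a short
 * preamble was used.
 */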
static int ath9k_process_rate(struct ath_common *common,
			      struct ieee80211_hw *hw,
			      struct ath_rx_status *rx_stats,
			      struct ieee80211_rx_status *rxs)
{
	struct ieee80211_supported_band *sband;
	enum ieee80211_band band;
	unsigned int i = 0;

	band = hw->conf.channel->band;
	sband = hw->wiphy->bands[band];

	if (rx_stats->rs_rate & 0x80) {
		/* HT rate */
		rxs->flag |= RX_FLAG_HT;
		if (rx_stats->rs_flags & ATH9K_RX_2040)
			rxs->flag |= RX_FLAG_40MHZ;
		if (rx_stats->rs_flags & ATH9K_RX_GI)
			rxs->flag |= RX_FLAG_SHORT_GI;
		rxs->rate_idx = rx_stats->rs_rate & 0x7f;
		return 0;
	}

	for (i = 0; i < sband->n_bitrates; i++) {
		if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
			rxs->rate_idx = i;
			return 0;
		}
		if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
			rxs->flag |= RX_FLAG_SHORTPRE;
			rxs->rate_idx = i;
			return 0;
		}
	}

	/*
	 * No valid hardware bitrate found -- we should not get here
	 * because hardware has already validated this frame as OK.
	 */
	ath_print(common, ATH_DBG_XMIT, "unsupported hw bitrate detected "
		  "0x%02x using 1 Mbit\n", rx_stats->rs_rate);

	return -EINVAL;
}

static void ath9k_process_rssi(struct ath_common *common,
			       struct ieee80211_hw *hw,
			       struct ieee80211_hdr *hdr,
			       struct ath_rx_status *rx_stats)
{
	struct ath_hw *ah = common->ah;
	struct ieee80211_sta *sta;
	struct ath_node *an;
	int last_rssi = ATH_RSSI_DUMMY_MARKER;
	__le16 fc;

	fc = hdr->frame_control;

	rcu_read_lock();
	/*
	 * XXX: use ieee80211_find_sta! This requires quite a bit of work
	 * under the current ath9k virtual wiphy implementation as we have
	 * no way of tying a vif to wiphy. Typically vifs are attached to
	 * at least one sdata of a wiphy on mac80211 but with ath9k virtual
	 * wiphy you'd have to iterate over every wiphy and each sdata.
	 */
	sta = ieee80211_find_sta_by_hw(hw, hdr->addr2);
	if (sta) {
		an = (struct ath_node *) sta->drv_priv;
		if (rx_stats->rs_rssi != ATH9K_RSSI_BAD &&
		    !rx_stats->rs_moreaggr)
			ATH_RSSI_LPF(an->last_rssi, rx_stats->rs_rssi);
		last_rssi = an->last_rssi;
	}
	rcu_read_unlock();

	if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
		rx_stats->rs_rssi = ATH_EP_RND(last_rssi,
					       ATH_RSSI_EP_MULTIPLIER);
	if (rx_stats->rs_rssi < 0)
		rx_stats->rs_rssi = 0;

	/* Update Beacon RSSI, this is used by ANI. */
	if (ieee80211_is_beacon(fc))
		ah->stats.avgbrssi = rx_stats->rs_rssi;
}
/*
 * For Decrypt or Demic errors, we only mark packet status here and always
 * push the frame up to let mac80211 handle the actual error case, be it no
 * decryption key or a real decryption error. This lets us keep statistics
 * there.
 */
static int ath9k_rx_skb_preprocess(struct ath_common *common,
				   struct ieee80211_hw *hw,
				   struct ieee80211_hdr *hdr,
				   struct ath_rx_status *rx_stats,
				   struct ieee80211_rx_status *rx_status,
				   bool *decrypt_error)
{
	struct ath_hw *ah = common->ah;

	memset(rx_status, 0, sizeof(struct ieee80211_rx_status));

	/*
	 * everything but the rate is checked here, the rate check is done
	 * separately to avoid doing two lookups for a rate for each frame.
	 */
	if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
		return -EINVAL;

	ath9k_process_rssi(common, hw, hdr, rx_stats);

	if (ath9k_process_rate(common, hw, rx_stats, rx_status))
		return -EINVAL;

	rx_status->mactime = ath9k_hw_extend_tsf(ah, rx_stats->rs_tstamp);
	rx_status->band = hw->conf.channel->band;
	rx_status->freq = hw->conf.channel->center_freq;
	rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi;
	rx_status->antenna = rx_stats->rs_antenna;
	rx_status->flag |= RX_FLAG_TSFT;

	return 0;
}

static void ath9k_rx_skb_postprocess(struct ath_common *common,
				     struct sk_buff *skb,
				     struct ath_rx_status *rx_stats,
				     struct ieee80211_rx_status *rxs,
				     bool decrypt_error)
{
	struct ath_hw *ah = common->ah;
	struct ieee80211_hdr *hdr;
	int hdrlen, padpos, padsize;
	u8 keyix;
	__le16 fc;

	/* see if any padding is done by the hw and remove it */
	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;
	padpos = ath9k_cmn_padpos(hdr->frame_control);

	/* The MAC header is padded to have 32-bit boundary if the
	 * packet payload is non-zero. The general calculation for
	 * padsize would take into account odd header lengths:
	 * padsize = (4 - padpos % 4) % 4; However, since only
	 * even-length headers are used, padding can only be 0 or 2
	 * bytes and we can optimize this a bit. In addition, we must
	 * not try to remove padding from short control frames that do
	 * not have payload. */
	padsize = padpos & 3;
	if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	keyix = rx_stats->rs_keyix;

	if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
	    ieee80211_has_protected(fc)) {
		rxs->flag |= RX_FLAG_DECRYPTED;
	} else if (ieee80211_has_protected(fc)
		   && !decrypt_error && skb->len >= hdrlen + 4) {
		keyix = skb->data[hdrlen + 3] >> 6;

		if (test_bit(keyix, common->keymap))
			rxs->flag |= RX_FLAG_DECRYPTED;
	}
	if (ah->sw_mgmt_crypto &&
	    (rxs->flag & RX_FLAG_DECRYPTED) &&
	    ieee80211_is_mgmt(fc))
		/* Use software decrypt for management frames. */
		rxs->flag &= ~RX_FLAG_DECRYPTED;
}
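/*
 * Main receive path, called from the RX tasklet and from ath_flushrecv().
 * Each iteration takes one completed buffer, validates and preprocesses
 * its status, allocates a replacement skb before the received one is
 * handed up (so the hardware never runs out of buffers), unmaps and
 * delivers the frame to mac80211, then maps the replacement and re-links
 * the buffer to the hardware (EDMA FIFO or legacy descriptor chain).
 */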
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
	struct ath_buf *bf;
	struct sk_buff *skb = NULL, *requeue_skb;
	struct ieee80211_rx_status *rxs;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	/*
	 * The hw can technically differ from common->hw when using ath9k
	 * virtual wiphy, so to account for that we iterate over the active
	 * wiphys and find the appropriate wiphy and therefore hw.
	 */
	struct ieee80211_hw *hw = NULL;
	struct ieee80211_hdr *hdr;
	int retval;
	bool decrypt_error = false;
	struct ath_rx_status rs;
	enum ath9k_rx_qtype qtype;
	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	int dma_type;
	u8 rx_status_len = ah->caps.rx_status_len;

	if (edma)
		dma_type = DMA_BIDIRECTIONAL;
	else
		dma_type = DMA_FROM_DEVICE;

	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
	spin_lock_bh(&sc->rx.rxbuflock);

	do {
		/* If handling rx interrupt and flush is in progress => exit */
		if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
			break;

		memset(&rs, 0, sizeof(rs));
		if (edma)
			bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
		else
			bf = ath_get_next_rx_buf(sc, &rs);

		if (!bf)
			break;

		skb = bf->bf_mpdu;
		if (!skb)
			continue;

		hdr = (struct ieee80211_hdr *) (skb->data + rx_status_len);
		rxs = IEEE80211_SKB_RXCB(skb);

		hw = ath_get_virt_hw(sc, hdr);

		ath_debug_stat_rx(sc, &rs);

		/*
		 * If we're asked to flush receive queue, directly
		 * chain it back at the queue without processing it.
		 */
		if (flush)
			goto requeue;

		retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
						 rxs, &decrypt_error);
		if (retval)
			goto requeue;

		/* Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb */
		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);

		/* If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
		 * skb and put it at the tail of the sc->rx.rxbuf list for
		 * processing. */
		if (!requeue_skb)
			goto requeue;

		/* Unmap the frame */
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 common->rx_bufsize,
				 dma_type);

		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
		if (ah->caps.rx_status_len)
			skb_pull(skb, ah->caps.rx_status_len);

		ath9k_rx_skb_postprocess(common, skb, &rs,
					 rxs, decrypt_error);

		/* We will now give hardware our shiny new allocated skb */
		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
						 common->rx_bufsize,
						 dma_type);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(requeue_skb);
			bf->bf_mpdu = NULL;
			ath_print(common, ATH_DBG_FATAL,
				  "dma_mapping_error() on RX\n");
			ath_rx_send_to_mac80211(hw, sc, skb, rxs);
			break;
		}
		bf->bf_dmacontext = bf->bf_buf_addr;

		/*
		 * change the default rx antenna if rx diversity chooses the
		 * other antenna 3 times in a row.
		 */
		if (sc->rx.defant != rs.rs_antenna) {
			if (++sc->rx.rxotherant >= 3)
				ath_setdefantenna(sc, rs.rs_antenna);
		} else {
			sc->rx.rxotherant = 0;
		}

		if (unlikely(ath9k_check_auto_sleep(sc) ||
			     (sc->ps_flags & (PS_WAIT_FOR_BEACON |
					      PS_WAIT_FOR_CAB |
					      PS_WAIT_FOR_PSPOLL_DATA))))
			ath_rx_ps(sc, skb);

		ath_rx_send_to_mac80211(hw, sc, skb, rxs);

requeue:
		if (edma) {
			list_add_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_edma_buf_link(sc, qtype);
		} else {
			list_move_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_buf_link(sc, bf);
		}
	} while (1);

	spin_unlock_bh(&sc->rx.rxbuflock);

	return 0;
}