1 /* 2 * Copyright (c) 2008-2009 Atheros Communications Inc. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for any 5 * purpose with or without fee is hereby granted, provided that the above 6 * copyright notice and this permission notice appear in all copies. 7 * 8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 15 */ 16 17 #include "ath9k.h" 18 19 static struct ieee80211_hw * ath_get_virt_hw(struct ath_softc *sc, 20 struct ieee80211_hdr *hdr) 21 { 22 struct ieee80211_hw *hw = sc->pri_wiphy->hw; 23 int i; 24 25 spin_lock_bh(&sc->wiphy_lock); 26 for (i = 0; i < sc->num_sec_wiphy; i++) { 27 struct ath_wiphy *aphy = sc->sec_wiphy[i]; 28 if (aphy == NULL) 29 continue; 30 if (compare_ether_addr(hdr->addr1, aphy->hw->wiphy->perm_addr) 31 == 0) { 32 hw = aphy->hw; 33 break; 34 } 35 } 36 spin_unlock_bh(&sc->wiphy_lock); 37 return hw; 38 } 39 40 /* 41 * Setup and link descriptors. 42 * 43 * 11N: we can no longer afford to self link the last descriptor. 44 * MAC acknowledges BA status as long as it copies frames to host 45 * buffer (or rx fifo). This can incorrectly acknowledge packets 46 * to a sender if last desc is self-linked. 47 */ 48 static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf) 49 { 50 struct ath_hw *ah = sc->sc_ah; 51 struct ath_desc *ds; 52 struct sk_buff *skb; 53 54 ATH_RXBUF_RESET(bf); 55 56 ds = bf->bf_desc; 57 ds->ds_link = 0; /* link to null */ 58 ds->ds_data = bf->bf_buf_addr; 59 60 /* virtual addr of the beginning of the buffer. */ 61 skb = bf->bf_mpdu; 62 ASSERT(skb != NULL); 63 ds->ds_vdata = skb->data; 64 65 /* setup rx descriptors. The rx.bufsize here tells the harware 66 * how much data it can DMA to us and that we are prepared 67 * to process */ 68 ath9k_hw_setuprxdesc(ah, ds, 69 sc->rx.bufsize, 70 0); 71 72 if (sc->rx.rxlink == NULL) 73 ath9k_hw_putrxbuf(ah, bf->bf_daddr); 74 else 75 *sc->rx.rxlink = bf->bf_daddr; 76 77 sc->rx.rxlink = &ds->ds_link; 78 ath9k_hw_rxena(ah); 79 } 80 81 static void ath_setdefantenna(struct ath_softc *sc, u32 antenna) 82 { 83 /* XXX block beacon interrupts */ 84 ath9k_hw_setantenna(sc->sc_ah, antenna); 85 sc->rx.defant = antenna; 86 sc->rx.rxotherant = 0; 87 } 88 89 /* 90 * Extend 15-bit time stamp from rx descriptor to 91 * a full 64-bit TSF using the current h/w TSF. 92 */ 93 static u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp) 94 { 95 u64 tsf; 96 97 tsf = ath9k_hw_gettsf64(sc->sc_ah); 98 if ((tsf & 0x7fff) < rstamp) 99 tsf -= 0x8000; 100 return (tsf & ~0x7fff) | rstamp; 101 } 102 103 /* 104 * For Decrypt or Demic errors, we only mark packet status here and always push 105 * up the frame up to let mac80211 handle the actual error case, be it no 106 * decryption key or real decryption error. This let us keep statistics there. 107 */ 108 static int ath_rx_prepare(struct sk_buff *skb, struct ath_desc *ds, 109 struct ieee80211_rx_status *rx_status, bool *decrypt_error, 110 struct ath_softc *sc) 111 { 112 struct ieee80211_hdr *hdr; 113 u8 ratecode; 114 __le16 fc; 115 struct ieee80211_hw *hw; 116 struct ieee80211_sta *sta; 117 struct ath_node *an; 118 int last_rssi = ATH_RSSI_DUMMY_MARKER; 119 120 121 hdr = (struct ieee80211_hdr *)skb->data; 122 fc = hdr->frame_control; 123 memset(rx_status, 0, sizeof(struct ieee80211_rx_status)); 124 hw = ath_get_virt_hw(sc, hdr); 125 126 if (ds->ds_rxstat.rs_more) { 127 /* 128 * Frame spans multiple descriptors; this cannot happen yet 129 * as we don't support jumbograms. If not in monitor mode, 130 * discard the frame. Enable this if you want to see 131 * error frames in Monitor mode. 132 */ 133 if (sc->sc_ah->opmode != NL80211_IFTYPE_MONITOR) 134 goto rx_next; 135 } else if (ds->ds_rxstat.rs_status != 0) { 136 if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC) 137 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; 138 if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY) 139 goto rx_next; 140 141 if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT) { 142 *decrypt_error = true; 143 } else if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC) { 144 if (ieee80211_is_ctl(fc)) 145 /* 146 * Sometimes, we get invalid 147 * MIC failures on valid control frames. 148 * Remove these mic errors. 149 */ 150 ds->ds_rxstat.rs_status &= ~ATH9K_RXERR_MIC; 151 else 152 rx_status->flag |= RX_FLAG_MMIC_ERROR; 153 } 154 /* 155 * Reject error frames with the exception of 156 * decryption and MIC failures. For monitor mode, 157 * we also ignore the CRC error. 158 */ 159 if (sc->sc_ah->opmode == NL80211_IFTYPE_MONITOR) { 160 if (ds->ds_rxstat.rs_status & 161 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC | 162 ATH9K_RXERR_CRC)) 163 goto rx_next; 164 } else { 165 if (ds->ds_rxstat.rs_status & 166 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) { 167 goto rx_next; 168 } 169 } 170 } 171 172 ratecode = ds->ds_rxstat.rs_rate; 173 174 if (ratecode & 0x80) { 175 /* HT rate */ 176 rx_status->flag |= RX_FLAG_HT; 177 if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040) 178 rx_status->flag |= RX_FLAG_40MHZ; 179 if (ds->ds_rxstat.rs_flags & ATH9K_RX_GI) 180 rx_status->flag |= RX_FLAG_SHORT_GI; 181 rx_status->rate_idx = ratecode & 0x7f; 182 } else { 183 int i = 0, cur_band, n_rates; 184 185 cur_band = hw->conf.channel->band; 186 n_rates = sc->sbands[cur_band].n_bitrates; 187 188 for (i = 0; i < n_rates; i++) { 189 if (sc->sbands[cur_band].bitrates[i].hw_value == 190 ratecode) { 191 rx_status->rate_idx = i; 192 break; 193 } 194 195 if (sc->sbands[cur_band].bitrates[i].hw_value_short == 196 ratecode) { 197 rx_status->rate_idx = i; 198 rx_status->flag |= RX_FLAG_SHORTPRE; 199 break; 200 } 201 } 202 } 203 204 rcu_read_lock(); 205 sta = ieee80211_find_sta(sc->hw, hdr->addr2); 206 if (sta) { 207 an = (struct ath_node *) sta->drv_priv; 208 if (ds->ds_rxstat.rs_rssi != ATH9K_RSSI_BAD && 209 !ds->ds_rxstat.rs_moreaggr) 210 ATH_RSSI_LPF(an->last_rssi, ds->ds_rxstat.rs_rssi); 211 last_rssi = an->last_rssi; 212 } 213 rcu_read_unlock(); 214 215 if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER)) 216 ds->ds_rxstat.rs_rssi = ATH_EP_RND(last_rssi, 217 ATH_RSSI_EP_MULTIPLIER); 218 if (ds->ds_rxstat.rs_rssi < 0) 219 ds->ds_rxstat.rs_rssi = 0; 220 else if (ds->ds_rxstat.rs_rssi > 127) 221 ds->ds_rxstat.rs_rssi = 127; 222 223 /* Update Beacon RSSI, this is used by ANI. */ 224 if (ieee80211_is_beacon(fc)) 225 sc->sc_ah->stats.avgbrssi = ds->ds_rxstat.rs_rssi; 226 227 rx_status->mactime = ath_extend_tsf(sc, ds->ds_rxstat.rs_tstamp); 228 rx_status->band = hw->conf.channel->band; 229 rx_status->freq = hw->conf.channel->center_freq; 230 rx_status->noise = sc->ani.noise_floor; 231 rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + ds->ds_rxstat.rs_rssi; 232 rx_status->antenna = ds->ds_rxstat.rs_antenna; 233 234 /* 235 * Theory for reporting quality: 236 * 237 * At a hardware RSSI of 45 you will be able to use MCS 7 reliably. 238 * At a hardware RSSI of 45 you will be able to use MCS 15 reliably. 239 * At a hardware RSSI of 35 you should be able use 54 Mbps reliably. 240 * 241 * MCS 7 is the highets MCS index usable by a 1-stream device. 242 * MCS 15 is the highest MCS index usable by a 2-stream device. 243 * 244 * All ath9k devices are either 1-stream or 2-stream. 245 * 246 * How many bars you see is derived from the qual reporting. 247 * 248 * A more elaborate scheme can be used here but it requires tables 249 * of SNR/throughput for each possible mode used. For the MCS table 250 * you can refer to the wireless wiki: 251 * 252 * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n 253 * 254 */ 255 if (conf_is_ht(&hw->conf)) 256 rx_status->qual = ds->ds_rxstat.rs_rssi * 100 / 45; 257 else 258 rx_status->qual = ds->ds_rxstat.rs_rssi * 100 / 35; 259 260 /* rssi can be more than 45 though, anything above that 261 * should be considered at 100% */ 262 if (rx_status->qual > 100) 263 rx_status->qual = 100; 264 265 rx_status->flag |= RX_FLAG_TSFT; 266 267 return 1; 268 rx_next: 269 return 0; 270 } 271 272 static void ath_opmode_init(struct ath_softc *sc) 273 { 274 struct ath_hw *ah = sc->sc_ah; 275 u32 rfilt, mfilt[2]; 276 277 /* configure rx filter */ 278 rfilt = ath_calcrxfilter(sc); 279 ath9k_hw_setrxfilter(ah, rfilt); 280 281 /* configure bssid mask */ 282 if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) 283 ath9k_hw_setbssidmask(sc); 284 285 /* configure operational mode */ 286 ath9k_hw_setopmode(ah); 287 288 /* Handle any link-level address change. */ 289 ath9k_hw_setmac(ah, sc->sc_ah->macaddr); 290 291 /* calculate and install multicast filter */ 292 mfilt[0] = mfilt[1] = ~0; 293 ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]); 294 } 295 296 int ath_rx_init(struct ath_softc *sc, int nbufs) 297 { 298 struct sk_buff *skb; 299 struct ath_buf *bf; 300 int error = 0; 301 302 spin_lock_init(&sc->rx.rxflushlock); 303 sc->sc_flags &= ~SC_OP_RXFLUSH; 304 spin_lock_init(&sc->rx.rxbuflock); 305 306 sc->rx.bufsize = roundup(IEEE80211_MAX_MPDU_LEN, 307 min(sc->common.cachelsz, (u16)64)); 308 309 DPRINTF(sc, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n", 310 sc->common.cachelsz, sc->rx.bufsize); 311 312 /* Initialize rx descriptors */ 313 314 error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf, 315 "rx", nbufs, 1); 316 if (error != 0) { 317 DPRINTF(sc, ATH_DBG_FATAL, 318 "failed to allocate rx descriptors: %d\n", error); 319 goto err; 320 } 321 322 list_for_each_entry(bf, &sc->rx.rxbuf, list) { 323 skb = ath_rxbuf_alloc(&sc->common, sc->rx.bufsize, GFP_KERNEL); 324 if (skb == NULL) { 325 error = -ENOMEM; 326 goto err; 327 } 328 329 bf->bf_mpdu = skb; 330 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data, 331 sc->rx.bufsize, 332 DMA_FROM_DEVICE); 333 if (unlikely(dma_mapping_error(sc->dev, 334 bf->bf_buf_addr))) { 335 dev_kfree_skb_any(skb); 336 bf->bf_mpdu = NULL; 337 DPRINTF(sc, ATH_DBG_FATAL, 338 "dma_mapping_error() on RX init\n"); 339 error = -ENOMEM; 340 goto err; 341 } 342 bf->bf_dmacontext = bf->bf_buf_addr; 343 } 344 sc->rx.rxlink = NULL; 345 346 err: 347 if (error) 348 ath_rx_cleanup(sc); 349 350 return error; 351 } 352 353 void ath_rx_cleanup(struct ath_softc *sc) 354 { 355 struct sk_buff *skb; 356 struct ath_buf *bf; 357 358 list_for_each_entry(bf, &sc->rx.rxbuf, list) { 359 skb = bf->bf_mpdu; 360 if (skb) { 361 dma_unmap_single(sc->dev, bf->bf_buf_addr, 362 sc->rx.bufsize, DMA_FROM_DEVICE); 363 dev_kfree_skb(skb); 364 } 365 } 366 367 if (sc->rx.rxdma.dd_desc_len != 0) 368 ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf); 369 } 370 371 /* 372 * Calculate the receive filter according to the 373 * operating mode and state: 374 * 375 * o always accept unicast, broadcast, and multicast traffic 376 * o maintain current state of phy error reception (the hal 377 * may enable phy error frames for noise immunity work) 378 * o probe request frames are accepted only when operating in 379 * hostap, adhoc, or monitor modes 380 * o enable promiscuous mode according to the interface state 381 * o accept beacons: 382 * - when operating in adhoc mode so the 802.11 layer creates 383 * node table entries for peers, 384 * - when operating in station mode for collecting rssi data when 385 * the station is otherwise quiet, or 386 * - when operating as a repeater so we see repeater-sta beacons 387 * - when scanning 388 */ 389 390 u32 ath_calcrxfilter(struct ath_softc *sc) 391 { 392 #define RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR) 393 394 u32 rfilt; 395 396 rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE) 397 | ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST 398 | ATH9K_RX_FILTER_MCAST; 399 400 /* If not a STA, enable processing of Probe Requests */ 401 if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION) 402 rfilt |= ATH9K_RX_FILTER_PROBEREQ; 403 404 /* 405 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station 406 * mode interface or when in monitor mode. AP mode does not need this 407 * since it receives all in-BSS frames anyway. 408 */ 409 if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) && 410 (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) || 411 (sc->sc_ah->opmode == NL80211_IFTYPE_MONITOR)) 412 rfilt |= ATH9K_RX_FILTER_PROM; 413 414 if (sc->rx.rxfilter & FIF_CONTROL) 415 rfilt |= ATH9K_RX_FILTER_CONTROL; 416 417 if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) && 418 !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC)) 419 rfilt |= ATH9K_RX_FILTER_MYBEACON; 420 else 421 rfilt |= ATH9K_RX_FILTER_BEACON; 422 423 if (sc->rx.rxfilter & FIF_PSPOLL) 424 rfilt |= ATH9K_RX_FILTER_PSPOLL; 425 426 if (conf_is_ht(&sc->hw->conf)) 427 rfilt |= ATH9K_RX_FILTER_COMP_BAR; 428 429 if (sc->sec_wiphy || (sc->rx.rxfilter & FIF_OTHER_BSS)) { 430 /* TODO: only needed if more than one BSSID is in use in 431 * station/adhoc mode */ 432 /* The following may also be needed for other older chips */ 433 if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160) 434 rfilt |= ATH9K_RX_FILTER_PROM; 435 rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL; 436 } 437 438 return rfilt; 439 440 #undef RX_FILTER_PRESERVE 441 } 442 443 int ath_startrecv(struct ath_softc *sc) 444 { 445 struct ath_hw *ah = sc->sc_ah; 446 struct ath_buf *bf, *tbf; 447 448 spin_lock_bh(&sc->rx.rxbuflock); 449 if (list_empty(&sc->rx.rxbuf)) 450 goto start_recv; 451 452 sc->rx.rxlink = NULL; 453 list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) { 454 ath_rx_buf_link(sc, bf); 455 } 456 457 /* We could have deleted elements so the list may be empty now */ 458 if (list_empty(&sc->rx.rxbuf)) 459 goto start_recv; 460 461 bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list); 462 ath9k_hw_putrxbuf(ah, bf->bf_daddr); 463 ath9k_hw_rxena(ah); 464 465 start_recv: 466 spin_unlock_bh(&sc->rx.rxbuflock); 467 ath_opmode_init(sc); 468 ath9k_hw_startpcureceive(ah); 469 470 return 0; 471 } 472 473 bool ath_stoprecv(struct ath_softc *sc) 474 { 475 struct ath_hw *ah = sc->sc_ah; 476 bool stopped; 477 478 ath9k_hw_stoppcurecv(ah); 479 ath9k_hw_setrxfilter(ah, 0); 480 stopped = ath9k_hw_stopdmarecv(ah); 481 sc->rx.rxlink = NULL; 482 483 return stopped; 484 } 485 486 void ath_flushrecv(struct ath_softc *sc) 487 { 488 spin_lock_bh(&sc->rx.rxflushlock); 489 sc->sc_flags |= SC_OP_RXFLUSH; 490 ath_rx_tasklet(sc, 1); 491 sc->sc_flags &= ~SC_OP_RXFLUSH; 492 spin_unlock_bh(&sc->rx.rxflushlock); 493 } 494 495 static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb) 496 { 497 /* Check whether the Beacon frame has DTIM indicating buffered bc/mc */ 498 struct ieee80211_mgmt *mgmt; 499 u8 *pos, *end, id, elen; 500 struct ieee80211_tim_ie *tim; 501 502 mgmt = (struct ieee80211_mgmt *)skb->data; 503 pos = mgmt->u.beacon.variable; 504 end = skb->data + skb->len; 505 506 while (pos + 2 < end) { 507 id = *pos++; 508 elen = *pos++; 509 if (pos + elen > end) 510 break; 511 512 if (id == WLAN_EID_TIM) { 513 if (elen < sizeof(*tim)) 514 break; 515 tim = (struct ieee80211_tim_ie *) pos; 516 if (tim->dtim_count != 0) 517 break; 518 return tim->bitmap_ctrl & 0x01; 519 } 520 521 pos += elen; 522 } 523 524 return false; 525 } 526 527 static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb) 528 { 529 struct ieee80211_mgmt *mgmt; 530 531 if (skb->len < 24 + 8 + 2 + 2) 532 return; 533 534 mgmt = (struct ieee80211_mgmt *)skb->data; 535 if (memcmp(sc->curbssid, mgmt->bssid, ETH_ALEN) != 0) 536 return; /* not from our current AP */ 537 538 sc->sc_flags &= ~SC_OP_WAIT_FOR_BEACON; 539 540 if (sc->sc_flags & SC_OP_BEACON_SYNC) { 541 sc->sc_flags &= ~SC_OP_BEACON_SYNC; 542 DPRINTF(sc, ATH_DBG_PS, "Reconfigure Beacon timers based on " 543 "timestamp from the AP\n"); 544 ath_beacon_config(sc, NULL); 545 } 546 547 if (ath_beacon_dtim_pending_cab(skb)) { 548 /* 549 * Remain awake waiting for buffered broadcast/multicast 550 * frames. If the last broadcast/multicast frame is not 551 * received properly, the next beacon frame will work as 552 * a backup trigger for returning into NETWORK SLEEP state, 553 * so we are waiting for it as well. 554 */ 555 DPRINTF(sc, ATH_DBG_PS, "Received DTIM beacon indicating " 556 "buffered broadcast/multicast frame(s)\n"); 557 sc->sc_flags |= SC_OP_WAIT_FOR_CAB | SC_OP_WAIT_FOR_BEACON; 558 return; 559 } 560 561 if (sc->sc_flags & SC_OP_WAIT_FOR_CAB) { 562 /* 563 * This can happen if a broadcast frame is dropped or the AP 564 * fails to send a frame indicating that all CAB frames have 565 * been delivered. 566 */ 567 sc->sc_flags &= ~SC_OP_WAIT_FOR_CAB; 568 DPRINTF(sc, ATH_DBG_PS, "PS wait for CAB frames timed out\n"); 569 } 570 } 571 572 static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb) 573 { 574 struct ieee80211_hdr *hdr; 575 576 hdr = (struct ieee80211_hdr *)skb->data; 577 578 /* Process Beacon and CAB receive in PS state */ 579 if ((sc->sc_flags & SC_OP_WAIT_FOR_BEACON) && 580 ieee80211_is_beacon(hdr->frame_control)) 581 ath_rx_ps_beacon(sc, skb); 582 else if ((sc->sc_flags & SC_OP_WAIT_FOR_CAB) && 583 (ieee80211_is_data(hdr->frame_control) || 584 ieee80211_is_action(hdr->frame_control)) && 585 is_multicast_ether_addr(hdr->addr1) && 586 !ieee80211_has_moredata(hdr->frame_control)) { 587 /* 588 * No more broadcast/multicast frames to be received at this 589 * point. 590 */ 591 sc->sc_flags &= ~SC_OP_WAIT_FOR_CAB; 592 DPRINTF(sc, ATH_DBG_PS, "All PS CAB frames received, back to " 593 "sleep\n"); 594 } else if ((sc->sc_flags & SC_OP_WAIT_FOR_PSPOLL_DATA) && 595 !is_multicast_ether_addr(hdr->addr1) && 596 !ieee80211_has_morefrags(hdr->frame_control)) { 597 sc->sc_flags &= ~SC_OP_WAIT_FOR_PSPOLL_DATA; 598 DPRINTF(sc, ATH_DBG_PS, "Going back to sleep after having " 599 "received PS-Poll data (0x%x)\n", 600 sc->sc_flags & (SC_OP_WAIT_FOR_BEACON | 601 SC_OP_WAIT_FOR_CAB | 602 SC_OP_WAIT_FOR_PSPOLL_DATA | 603 SC_OP_WAIT_FOR_TX_ACK)); 604 } 605 } 606 607 static void ath_rx_send_to_mac80211(struct ath_softc *sc, struct sk_buff *skb, 608 struct ieee80211_rx_status *rx_status) 609 { 610 struct ieee80211_hdr *hdr; 611 612 hdr = (struct ieee80211_hdr *)skb->data; 613 614 /* Send the frame to mac80211 */ 615 if (is_multicast_ether_addr(hdr->addr1)) { 616 int i; 617 /* 618 * Deliver broadcast/multicast frames to all suitable 619 * virtual wiphys. 620 */ 621 /* TODO: filter based on channel configuration */ 622 for (i = 0; i < sc->num_sec_wiphy; i++) { 623 struct ath_wiphy *aphy = sc->sec_wiphy[i]; 624 struct sk_buff *nskb; 625 if (aphy == NULL) 626 continue; 627 nskb = skb_copy(skb, GFP_ATOMIC); 628 if (nskb) { 629 memcpy(IEEE80211_SKB_RXCB(nskb), rx_status, 630 sizeof(*rx_status)); 631 ieee80211_rx(aphy->hw, nskb); 632 } 633 } 634 memcpy(IEEE80211_SKB_RXCB(skb), rx_status, sizeof(*rx_status)); 635 ieee80211_rx(sc->hw, skb); 636 } else { 637 /* Deliver unicast frames based on receiver address */ 638 memcpy(IEEE80211_SKB_RXCB(skb), rx_status, sizeof(*rx_status)); 639 ieee80211_rx(ath_get_virt_hw(sc, hdr), skb); 640 } 641 } 642 643 int ath_rx_tasklet(struct ath_softc *sc, int flush) 644 { 645 #define PA2DESC(_sc, _pa) \ 646 ((struct ath_desc *)((caddr_t)(_sc)->rx.rxdma.dd_desc + \ 647 ((_pa) - (_sc)->rx.rxdma.dd_desc_paddr))) 648 649 struct ath_buf *bf; 650 struct ath_desc *ds; 651 struct sk_buff *skb = NULL, *requeue_skb; 652 struct ieee80211_rx_status rx_status; 653 struct ath_hw *ah = sc->sc_ah; 654 struct ieee80211_hdr *hdr; 655 int hdrlen, padsize, retval; 656 bool decrypt_error = false; 657 u8 keyix; 658 __le16 fc; 659 660 spin_lock_bh(&sc->rx.rxbuflock); 661 662 do { 663 /* If handling rx interrupt and flush is in progress => exit */ 664 if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0)) 665 break; 666 667 if (list_empty(&sc->rx.rxbuf)) { 668 sc->rx.rxlink = NULL; 669 break; 670 } 671 672 bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list); 673 ds = bf->bf_desc; 674 675 /* 676 * Must provide the virtual address of the current 677 * descriptor, the physical address, and the virtual 678 * address of the next descriptor in the h/w chain. 679 * This allows the HAL to look ahead to see if the 680 * hardware is done with a descriptor by checking the 681 * done bit in the following descriptor and the address 682 * of the current descriptor the DMA engine is working 683 * on. All this is necessary because of our use of 684 * a self-linked list to avoid rx overruns. 685 */ 686 retval = ath9k_hw_rxprocdesc(ah, ds, 687 bf->bf_daddr, 688 PA2DESC(sc, ds->ds_link), 689 0); 690 if (retval == -EINPROGRESS) { 691 struct ath_buf *tbf; 692 struct ath_desc *tds; 693 694 if (list_is_last(&bf->list, &sc->rx.rxbuf)) { 695 sc->rx.rxlink = NULL; 696 break; 697 } 698 699 tbf = list_entry(bf->list.next, struct ath_buf, list); 700 701 /* 702 * On some hardware the descriptor status words could 703 * get corrupted, including the done bit. Because of 704 * this, check if the next descriptor's done bit is 705 * set or not. 706 * 707 * If the next descriptor's done bit is set, the current 708 * descriptor has been corrupted. Force s/w to discard 709 * this descriptor and continue... 710 */ 711 712 tds = tbf->bf_desc; 713 retval = ath9k_hw_rxprocdesc(ah, tds, tbf->bf_daddr, 714 PA2DESC(sc, tds->ds_link), 0); 715 if (retval == -EINPROGRESS) { 716 break; 717 } 718 } 719 720 skb = bf->bf_mpdu; 721 if (!skb) 722 continue; 723 724 /* 725 * Synchronize the DMA transfer with CPU before 726 * 1. accessing the frame 727 * 2. requeueing the same buffer to h/w 728 */ 729 dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr, 730 sc->rx.bufsize, 731 DMA_FROM_DEVICE); 732 733 /* 734 * If we're asked to flush receive queue, directly 735 * chain it back at the queue without processing it. 736 */ 737 if (flush) 738 goto requeue; 739 740 if (!ds->ds_rxstat.rs_datalen) 741 goto requeue; 742 743 /* The status portion of the descriptor could get corrupted. */ 744 if (sc->rx.bufsize < ds->ds_rxstat.rs_datalen) 745 goto requeue; 746 747 if (!ath_rx_prepare(skb, ds, &rx_status, &decrypt_error, sc)) 748 goto requeue; 749 750 /* Ensure we always have an skb to requeue once we are done 751 * processing the current buffer's skb */ 752 requeue_skb = ath_rxbuf_alloc(&sc->common, sc->rx.bufsize, GFP_ATOMIC); 753 754 /* If there is no memory we ignore the current RX'd frame, 755 * tell hardware it can give us a new frame using the old 756 * skb and put it at the tail of the sc->rx.rxbuf list for 757 * processing. */ 758 if (!requeue_skb) 759 goto requeue; 760 761 /* Unmap the frame */ 762 dma_unmap_single(sc->dev, bf->bf_buf_addr, 763 sc->rx.bufsize, 764 DMA_FROM_DEVICE); 765 766 skb_put(skb, ds->ds_rxstat.rs_datalen); 767 768 /* see if any padding is done by the hw and remove it */ 769 hdr = (struct ieee80211_hdr *)skb->data; 770 hdrlen = ieee80211_get_hdrlen_from_skb(skb); 771 fc = hdr->frame_control; 772 773 /* The MAC header is padded to have 32-bit boundary if the 774 * packet payload is non-zero. The general calculation for 775 * padsize would take into account odd header lengths: 776 * padsize = (4 - hdrlen % 4) % 4; However, since only 777 * even-length headers are used, padding can only be 0 or 2 778 * bytes and we can optimize this a bit. In addition, we must 779 * not try to remove padding from short control frames that do 780 * not have payload. */ 781 padsize = hdrlen & 3; 782 if (padsize && hdrlen >= 24) { 783 memmove(skb->data + padsize, skb->data, hdrlen); 784 skb_pull(skb, padsize); 785 } 786 787 keyix = ds->ds_rxstat.rs_keyix; 788 789 if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error) { 790 rx_status.flag |= RX_FLAG_DECRYPTED; 791 } else if (ieee80211_has_protected(fc) 792 && !decrypt_error && skb->len >= hdrlen + 4) { 793 keyix = skb->data[hdrlen + 3] >> 6; 794 795 if (test_bit(keyix, sc->keymap)) 796 rx_status.flag |= RX_FLAG_DECRYPTED; 797 } 798 if (ah->sw_mgmt_crypto && 799 (rx_status.flag & RX_FLAG_DECRYPTED) && 800 ieee80211_is_mgmt(fc)) { 801 /* Use software decrypt for management frames. */ 802 rx_status.flag &= ~RX_FLAG_DECRYPTED; 803 } 804 805 /* We will now give hardware our shiny new allocated skb */ 806 bf->bf_mpdu = requeue_skb; 807 bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data, 808 sc->rx.bufsize, 809 DMA_FROM_DEVICE); 810 if (unlikely(dma_mapping_error(sc->dev, 811 bf->bf_buf_addr))) { 812 dev_kfree_skb_any(requeue_skb); 813 bf->bf_mpdu = NULL; 814 DPRINTF(sc, ATH_DBG_FATAL, 815 "dma_mapping_error() on RX\n"); 816 ath_rx_send_to_mac80211(sc, skb, &rx_status); 817 break; 818 } 819 bf->bf_dmacontext = bf->bf_buf_addr; 820 821 /* 822 * change the default rx antenna if rx diversity chooses the 823 * other antenna 3 times in a row. 824 */ 825 if (sc->rx.defant != ds->ds_rxstat.rs_antenna) { 826 if (++sc->rx.rxotherant >= 3) 827 ath_setdefantenna(sc, ds->ds_rxstat.rs_antenna); 828 } else { 829 sc->rx.rxotherant = 0; 830 } 831 832 if (unlikely(sc->sc_flags & (SC_OP_WAIT_FOR_BEACON | 833 SC_OP_WAIT_FOR_CAB | 834 SC_OP_WAIT_FOR_PSPOLL_DATA))) 835 ath_rx_ps(sc, skb); 836 837 ath_rx_send_to_mac80211(sc, skb, &rx_status); 838 839 requeue: 840 list_move_tail(&bf->list, &sc->rx.rxbuf); 841 ath_rx_buf_link(sc, bf); 842 } while (1); 843 844 spin_unlock_bh(&sc->rx.rxbuflock); 845 846 return 0; 847 #undef PA2DESC 848 } 849