/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE		8
#define OFDM_PLCP_BITS		22
#define HT_RC_2_STREAMS(_rc)	((((_rc) & 0x78) >> 3) + 1)
#define L_STF			8
#define L_LTF			8
#define L_SIG			4
#define HT_SIG			8
#define HT_STF			4
#define HT_LTF(_ns)		(4 * (_ns))
#define SYMBOL_TIME(_ns)	((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns)	(((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) ((_usec) >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) ((((_usec) * 5) - 4) / 18)

static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid,
			       struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok, bool update_rc);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648,  12864,  19300,  25736,  28952,  32172,
		6424,  12852, 19280, 25708,  38568,  51424,  57852,  64280,
		9628,  19260, 28896, 38528,  57792,  65532,  65532,  65532,
		12828, 25656, 38488, 51320,  65532,  65532,  65532,  65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144,  10720, 14296,  21444,  28596,  32172,  35744,
		7140,  14284, 21428, 28568,  42856,  57144,  64288,  65532,
		10700, 21408, 32112, 42816,  64228,  65532,  65532,  65532,
		14256, 28516, 42780, 57040,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40] = {
		6680,  13360, 20044, 26724,  40092,  53456,  60140,  65532,
		13348, 26700, 40052, 53400,  65532,  65532,  65532,  65532,
		20004, 40008, 60016, 65532,  65532,  65532,  65532,  65532,
		26644, 53292, 65532, 65532,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40_SGI] = {
		7420,  14844, 22272, 29696,  44544,  59396,  65532,  65532,
		14832, 29668, 44504, 59340,  65532,  65532,  65532,  65532,
		22232, 44464, 65532, 65532,  65532,  65532,  65532,  65532,
		29616, 59232, 65532, 65532,  65532,  65532,  65532,  65532,
	}
};
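/*
 * Illustrative sanity check (editor's example, not from the original
 * source): for MCS0/HT20/long GI, bits_per_symbol[] gives 26 data bits
 * per 4 us symbol, so a 4 ms burst carries roughly
 * NUM_SYMBOLS_PER_USEC(4000) * 26 / BITS_PER_BYTE = 1000 * 26 / 8
 * = 3250 bytes; the table entry of 3212 is slightly lower, which
 * appears to leave headroom for PLCP/training overhead. Each block of
 * eight entries covers one spatial-stream group (MCS 0-7, 8-15, 16-23,
 * 24-31), and entries saturate at 65532 to stay below the 16-bit
 * aggregate length limit enforced by the hardware.
 */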
/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (list_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		spin_unlock_bh(&txq->axq_lock);
		fi = get_frame_info(bf->bf_mpdu);
		if (fi->retries) {
			ath_tx_update_baw(sc, tid, fi->seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
		} else {
			ath_tx_send_normal(sc, txq, NULL, &bf_head);
		}
		spin_lock_bh(&txq->axq_lock);
	}

	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}
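/*
 * Worked example of the block-ack window bookkeeping above (editor's
 * sketch; assumes ATH_BA_INDEX() returns the offset of seqno from
 * seq_start in 12-bit sequence space): with tid->seq_start = 10 and a
 * subframe seqno of 13, index = 3 and the bit at
 * cindex = (tid->baw_head + 3) & (ATH_TID_MAX_BUFS - 1) is set in
 * tid->tx_buf. When that frame later completes, ath_tx_update_baw()
 * clears the bit and slides seq_start/baw_head forward past any
 * already-completed slots.
 */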
/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;

		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		fi = get_frame_info(bf->bf_mpdu);
		if (fi->retries)
			ath_tx_update_baw(sc, tid, fi->seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ieee80211_hdr *hdr;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	if (fi->retries++ > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf *ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_tx_status *ts, int txok,
				int *nframes, int *nbad)
{
	struct ath_frame_info *fi;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		fi = get_frame_info(bf->bf_mpdu);
		ba_index = ATH_BA_INDEX(seq_st, fi->seqno);

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}
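/*
 * Example of how nframes/nbad are derived above (editor's note): for
 * an aggregate whose block-ack status starts at seq_st = ts->ts_seqnum,
 * each subframe maps to bit ATH_BA_INDEX(seq_st, fi->seqno) of the
 * ba[] bitmap copied from the hardware status. A 10-subframe A-MPDU
 * in which bits 0-7 are set yields *nframes = 10 and *nbad = 2; the
 * two unacked subframes become candidates for software retry.
 */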
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;
	bool clear_filter;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			bf->bf_state.bf_type |= BUF_XRETRY;
			if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
			    !bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_rc_status(sc, bf, ts, 1, 1, 0, false);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 */
	if (tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when BA
			 * issue happens. Chip needs to be reset.
			 * But AP code may have synchronization issues
			 * when performing an internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, fi->seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if ((tid->state & AGGR_CLEANUP) || !retry) {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			} else if (fi->retries < ATH_MAX_SW_RETRIES) {
				if (!(ts->ts_status & ATH9K_TXERR_FILT) ||
				    !an->sleeping)
					ath_tx_set_retry(sc, txq, bf->bf_mpdu);

				clear_filter = true;
				txpending = 1;
			} else {
				bf->bf_state.bf_type |= BUF_XRETRY;
				txfail = 1;
				sendbar = 1;
				txfail_cnt++;
			}
		}
		if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
		    bf_next == NULL) {
			/*
			 * Make sure the last desc is reclaimed if it
			 * is not a holding desc.
			 */
			if (!bf_last->bf_stale)
				list_move_tail(&bf->list, &bf_head);
			else
				INIT_LIST_HEAD(&bf_head);
		} else {
			BUG_ON(list_empty(bf_q));
			list_move_tail(&bf->list, &bf_head);
		}

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, fi->seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, false);
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid, fi->seqno);
						spin_unlock_bh(&txq->axq_lock);

						bf->bf_state.bf_type |=
							BUF_XRETRY;
						ath_tx_rc_status(sc, bf, ts, nframes,
								 nbad, 0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 0);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					list_add_tail(&tbf->list, &bf_head);
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!list_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_tim(sta);

		spin_lock_bh(&txq->axq_lock);
		if (clear_filter)
			tid->ac->clear_ps_filter = true;
		list_splice(&bf_pending, &tid->buf_q);
		ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		ath_tx_flush_tid(sc, tid);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;
		}
	}

	rcu_read_unlock();

	if (needreset) {
		spin_unlock_bh(&sc->sc_pcu_lock);
		ath_reset(sc, false);
		spin_lock_bh(&sc->sc_pcu_lock);
	}
}
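/*
 * Summary of the completion paths above (editor's note): each subframe
 * of the aggregate takes one of three exits - (1) acked (by a block-ack
 * bit or plain tx success) or failed during AGGR_CLEANUP: the BAW is
 * updated and the buffer completed; (2) unacked with retries left: the
 * frame is marked for software retry and spliced back onto tid->buf_q
 * via bf_pending to preserve ordering; (3) retry budget exhausted: the
 * frame is completed as an excessive retry (BUF_XRETRY) and a BAR is
 * requested via sendbar to move the receiver's window forward.
 */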
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * limit aggregate size by the minimum rate if rate selected is
	 * not a probe rate, if rate selected is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}
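/*
 * Worked example for the limit above (editor's sketch; assumes
 * ATH_AMPDU_LIMIT_MAX is 65535): if the slowest rate in the series
 * allows 32172 bytes in 4 ms and BT priority traffic was detected,
 * aggr_limit = (32172 * 3) / 8 = 12064 bytes; without BT coexistence
 * pressure it would remain 32172. The peer's ADDBA-advertised
 * tid->an->maxampdu can only lower this further.
 */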
/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, hardware requires some more padding between
	 * subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *	  The hardware can keep up at lower rates, but not higher rates
	 */
	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		fi = get_frame_info(bf->bf_mpdu);

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    (aggr_limit < (al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
				!(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}
		nframes++;

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, fi->seqno);
		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
		list_move_tail(&bf->list, bf_q);
		if (bf_prev) {
			bf_prev->bf_next = bf;
			ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
					       bf->bf_daddr);
		}
		bf_prev = bf;

	} while (!list_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}
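/*
 * Delimiter/padding example for ath_tx_form_aggr() (editor's sketch;
 * assumes ATH_AGGR_DELIM_SZ is the 4-byte MPDU delimiter): a subframe
 * with fi->framelen = 1537 contributes al_delta = 4 + 1537 = 1541
 * bytes; PADBYTES(1541) = 3 rounds it up to a 4-byte boundary, and
 * ndelim = 2 adds ndelim << 2 = 8 delimiter bytes, so the next
 * subframe is charged bpad = 11 bytes of padding on top of its own
 * length.
 */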
static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct ath_frame_info *fi;
	struct list_head bf_q;
	int aggr_len;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

		if (tid->ac->clear_ps_filter) {
			tid->ac->clear_ps_filter = false;
			ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, true);
		}

		/* if only one frame, send as non-aggregate */
		if (bf == bf->bf_lastbf) {
			fi = get_frame_info(bf->bf_mpdu);

			bf->bf_state.bf_type &= ~BUF_AGGR;
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
			ath_buf_set_rate(sc, bf, fi->framelen);
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}

		/* setup first desc of aggregate */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf, aggr_len);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, aggr_len);

		/* anchor last desc of aggregate */
		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

		ath_tx_txqaddbuf(sc, txq, &bf_q);
		TX_STAT_INC(txq->axq_qnum, a_aggr);

	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start = txtid->seq_next;

	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

	return 0;
}
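/*
 * Typical call sequence (editor's note, following the usual mac80211
 * ADDBA flow of this era): mac80211 invokes the driver's ampdu_action
 * hook with IEEE80211_AMPDU_TX_START, which lands in
 * ath_tx_aggr_start() above and pauses the TID while the ADDBA
 * handshake runs; once the session becomes operational
 * (IEEE80211_AMPDU_TX_OPERATIONAL), ath_tx_aggr_resume() below
 * unpauses the TID and reschedules any frames queued in the meantime.
 */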
void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}

bool ath_tx_aggr_sleep(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	bool buffered = false;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		if (!tid->sched)
			continue;

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);

		if (!list_empty(&tid->buf_q))
			buffered = true;

		tid->sched = false;
		list_del(&tid->list);

		if (ac->sched) {
			ac->sched = false;
			list_del(&ac->list);
		}

		spin_unlock_bh(&txq->axq_lock);
	}

	return buffered;
}

void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);
		ac->clear_ps_filter = true;

		if (!list_empty(&tid->buf_q) && !tid->paused) {
			ath_tx_queue_tid(txq, tid);
			ath_txq_schedule(sc, txq);
		}

		spin_unlock_bh(&txq->axq_lock);
	}
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	static const int subtype_txq_to_hwq[] = {
		[WME_AC_BE] = ATH_TXQ_AC_BE,
		[WME_AC_BK] = ATH_TXQ_AC_BK,
		[WME_AC_VI] = ATH_TXQ_AC_VI,
		[WME_AC_VO] = ATH_TXQ_AC_VO,
	};
	int axq_qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (axq_qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
		ath_err(common, "qnum %u out of range, max %zu!\n",
			axq_qnum, ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, axq_qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
		struct ath_txq *txq = &sc->tx.txq[axq_qnum];

		txq->axq_qnum = axq_qnum;
		txq->mac80211_qnum = -1;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_ampdu_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<axq_qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
		INIT_LIST_HEAD(&txq->txq_fifo_pending);
	}
	return &sc->tx.txq[axq_qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}
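/*
 * Usage sketch for ath_txq_update() (editor's example with
 * hypothetical WMM-style values, not from the original source):
 *
 *	struct ath9k_tx_queue_info qi = {
 *		.tqi_aifs = 2,
 *		.tqi_cwmin = 7,
 *		.tqi_cwmax = 15,
 *		.tqi_burstTime = 0,
 *		.tqi_readyTime = 0,
 *	};
 *	ath_txq_update(sc, qnum, &qi);
 *
 * Existing queue properties are re-read with ath9k_hw_get_txq_props()
 * first, so only the EDCA fields copied above are actually overridden.
 */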
int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (cur_conf->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
	return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
}

/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
				txq->txq_headidx = txq->txq_tailidx = 0;
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
						      struct ath_buf, list);
			}
		} else {
			if (list_empty(&txq->axq_q)) {
				txq->axq_link = NULL;
				spin_unlock_bh(&txq->axq_lock);
				break;
			}
			bf = list_first_entry(&txq->axq_q, struct ath_buf,
					      list);

			if (bf->bf_stale) {
				list_del(&bf->list);
				spin_unlock_bh(&txq->axq_lock);

				ath_tx_return_buffer(sc, bf);
				continue;
			}
		}

		lastbf = bf->bf_lastbf;

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			list_cut_position(&bf_head,
					  &txq->txq_fifo[txq->txq_tailidx],
					  &lastbf->list);
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		} else {
			/* remove ath_buf's of the same mpdu from txq */
			list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
		}

		txq->axq_depth--;
		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth--;
		spin_unlock_bh(&txq->axq_lock);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
					     retry_tx);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}

	spin_lock_bh(&txq->axq_lock);
	txq->axq_tx_inprogress = false;
	spin_unlock_bh(&txq->axq_lock);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		spin_lock_bh(&txq->axq_lock);
		while (!list_empty(&txq->txq_fifo_pending)) {
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head,
					  &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			spin_unlock_bh(&txq->axq_lock);

			if (bf_isampdu(bf))
				ath_tx_complete_aggr(sc, txq, bf, &bf_head,
						     &ts, 0, retry_tx);
			else
				ath_tx_complete_buf(sc, bf, txq, &bf_head,
						    &ts, 0, 0);
			spin_lock_bh(&txq->axq_lock);
		}
		spin_unlock_bh(&txq->axq_lock);
	}

	/* flush any pending frames if aggregation is enabled */
	if (sc->sc_flags & SC_OP_TXAGGR) {
		if (!retry_tx) {
			spin_lock_bh(&txq->axq_lock);
			ath_txq_drain_pending_buffers(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
		}
	}
}
bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i, npend = 0;

	if (sc->sc_flags & SC_OP_INVALID)
		return true;

	ath9k_hw_abort_tx_dma(ah);

	/* Check if any queue remains active */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		npend += ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum);
	}

	if (npend)
		ath_err(common, "Failed to stop TX DMA!\n");

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		/*
		 * The caller will resume queues with ieee80211_wake_queues.
		 * Mark the queue as not stopped to prevent ath_tx_complete
		 * from waking the queue too early.
		 */
		txq = &sc->tx.txq[i];
		txq->stopped = false;
		ath_draintxq(sc, txq, retry_tx);
	}

	return !npend;
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}

/* For each axq_acq entry, for each tid, try to schedule packets
 * for transmit until ampdu_depth has reached min Q depth.
 */
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp, *last_ac;
	struct ath_atx_tid *tid, *last_tid;

	if (list_empty(&txq->axq_acq) ||
	    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
		return;

	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
		list_del(&ac->list);
		ac->sched = false;

		while (!list_empty(&ac->tid_q)) {
			tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
					       list);
			list_del(&tid->list);
			tid->sched = false;

			if (tid->paused)
				continue;

			ath_tx_sched_aggr(sc, txq, tid);

			/*
			 * add tid to round-robin queue if more frames
			 * are pending for the tid
			 */
			if (!list_empty(&tid->buf_q))
				ath_tx_queue_tid(txq, tid);

			if (tid == last_tid ||
			    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
				break;
		}

		if (!list_empty(&ac->tid_q)) {
			if (!ac->sched) {
				ac->sched = true;
				list_add_tail(&ac->list, &txq->axq_acq);
			}
		}

		if (ac == last_ac ||
		    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
			return;
	}
}

/***********/
/* TX, DMA */
/***********/
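/*
 * Editor's note on the two DMA models handled below: on EDMA hardware
 * (ATH9K_HW_CAP_EDMA) ath_tx_txqaddbuf() pushes the frame chain into
 * one of ATH_TXFIFO_DEPTH FIFO slots and hands the head descriptor to
 * ath9k_hw_puttxbuf(); on legacy hardware it appends to axq_q and
 * either programs TXDP directly (first frame on an idle queue) or
 * patches the previous chain's link pointer through txq->axq_link
 * before kicking ath9k_hw_txstart().
 */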
/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	bf = list_first_entry(head, struct ath_buf, list);

	ath_dbg(common, ATH_DBG_QUEUE,
		"qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
			list_splice_tail_init(head, &txq->txq_fifo_pending);
			return;
		}
		if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
			ath_dbg(common, ATH_DBG_XMIT,
				"Initializing tx fifo %d which is non-empty\n",
				txq->txq_headidx);
		INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
		list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		TX_STAT_INC(txq->axq_qnum, puttxbuf);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
			txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	} else {
		list_splice_tail_init(head, &txq->axq_q);

		if (txq->axq_link == NULL) {
			TX_STAT_INC(txq->axq_qnum, puttxbuf);
			ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
			ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
				txq->axq_qnum, ito64(bf->bf_daddr),
				bf->bf_desc);
		} else {
			*txq->axq_link = bf->bf_daddr;
			ath_dbg(common, ATH_DBG_XMIT,
				"link[%u] (%p)=%llx (%p)\n",
				txq->axq_qnum, txq->axq_link,
				ito64(bf->bf_daddr), bf->bf_desc);
		}
		ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
				       &txq->axq_link);
		TX_STAT_INC(txq->axq_qnum, txstart);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}
	txq->axq_depth++;
	if (bf_is_ampdu_not_probing(bf))
		txq->axq_ampdu_depth++;
}
static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
			      struct ath_buf *bf, struct ath_tx_control *txctl)
{
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
	struct list_head bf_head;

	bf->bf_state.bf_type |= BUF_AMPDU;

	/*
	 * Do not queue to h/w when any of the following conditions is true:
	 * - there are pending frames in software queue
	 * - the TID is currently paused for ADDBA/BAR request
	 * - seqno is not within block-ack window
	 * - h/w queue depth exceeds low water mark
	 */
	if (!list_empty(&tid->buf_q) || tid->paused ||
	    !BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno) ||
	    txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
		list_add_tail(&bf->list, &tid->buf_q);
		ath_tx_queue_tid(txctl->txq, tid);
		return;
	}

	INIT_LIST_HEAD(&bf_head);
	list_add(&bf->list, &bf_head);

	/* Add sub-frame to BAW */
	if (!fi->retries)
		ath_tx_addto_baw(sc, tid, fi->seqno);

	/* Queue to h/w without aggregation */
	TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf, fi->framelen);
	ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
}

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid,
			       struct list_head *bf_head)
{
	struct ath_frame_info *fi;
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type &= ~BUF_AMPDU;

	/* update starting sequence number for subsequent ADDBA request */
	if (tid)
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);

	bf->bf_lastbf = bf;
	fi = get_frame_info(bf->bf_mpdu);
	ath_buf_set_rate(sc, bf, fi->framelen);
	ath_tx_txqaddbuf(sc, txq, bf_head);
	TX_STAT_INC(txq->axq_qnum, queued);
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}
static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
			     int framelen)
{
	struct ath_softc *sc = hw->priv;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = tx_info->control.sta;
	struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
	struct ieee80211_hdr *hdr;
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_node *an = NULL;
	struct ath_atx_tid *tid;
	enum ath9k_key_type keytype;
	u16 seqno = 0;
	u8 tidno;

	keytype = ath9k_cmn_get_hw_crypto_keytype(skb);

	if (sta)
		an = (struct ath_node *) sta->drv_priv;

	hdr = (struct ieee80211_hdr *)skb->data;
	if (an && ieee80211_is_data_qos(hdr->frame_control) &&
	    conf_is_ht(&hw->conf) && (sc->sc_flags & SC_OP_TXAGGR)) {

		tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;

		/*
		 * Override seqno set by upper layer with the one
		 * in tx aggregation state.
		 */
		tid = ATH_AN_2_TID(an, tidno);
		seqno = tid->seq_next;
		hdr->seq_ctrl = cpu_to_le16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
		INCR(tid->seq_next, IEEE80211_SEQ_MAX);
	}

	memset(fi, 0, sizeof(*fi));
	if (hw_key)
		fi->keyix = hw_key->hw_key_idx;
	else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
		fi->keyix = an->ps_key;
	else
		fi->keyix = ATH9K_TXKEYIX_INVALID;
	fi->keytype = keytype;
	fi->framelen = framelen;
	fi->seqno = seqno;
}

static int setup_tx_flags(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	int flags = 0;

	flags |= ATH9K_TXDESC_INTREQ;

	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= ATH9K_TXDESC_NOACK;

	if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
		flags |= ATH9K_TXDESC_LDPC;

	return flags;
}

/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4 us vs 3.6 us for symbol time
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
			    int width, int half_gi, bool shortPreamble)
{
	u32 nbits, nsymbits, duration, nsymbols;
	int streams;

	/* find number of symbols: PLCP + data */
	streams = HT_RC_2_STREAMS(rix);
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* add up duration for legacy/ht training and signal fields */
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}
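/*
 * Worked example (editor's arithmetic, not from the original source):
 * rix = 7 (MCS7, one stream), HT20, long GI, pktlen = 1500 bytes:
 * nbits = 1500 * 8 + 22 = 12022, nsymbits = 260, so
 * nsymbols = ceil(12022 / 260) = 47 and SYMBOL_TIME(47) = 188 us;
 * adding L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(1) = 36 us
 * of preamble/training gives a total duration of 224 us.
 */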
u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_channel *curchan = ah->curchan;

	if ((sc->sc_flags & SC_OP_ENABLE_APM) &&
	    (curchan->channelFlags & CHANNEL_5GHZ) &&
	    (chainmask == 0x7) && (rate < 0x90))
		return 0x3;
	else
		return chainmask;
}

static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath9k_11n_rate_series series[4];
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	int i, flags = 0;
	u8 rix = 0, ctsrate = 0;
	bool is_pspoll;

	memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;
	hdr = (struct ieee80211_hdr *)skb->data;
	is_pspoll = ieee80211_is_pspoll(hdr->frame_control);

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
	ctsrate = rate->hw_value;
	if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
		ctsrate |= rate->hw_value_short;

	for (i = 0; i < 4; i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		series[i].Tries = rates[i].count;

		if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			flags |= ATH9K_TXDESC_CTSENA;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			series[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates */
			series[i].Rate = rix | 0x80;
			series[i].ChSel = ath_txchainmask_reduction(sc,
					common->tx_chainmask, series[i].Rate);
			series[i].PktDuration = ath_pkt_duration(sc, rix, len,
					is_40, is_sgi, is_sp);
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
				series[i].RateFlags |= ATH9K_RATESERIES_STBC;
			continue;
		}

		/* legacy rates */
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
		series[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				series[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

		if (bf->bf_state.bfs_paprd)
			series[i].ChSel = common->tx_chainmask;
		else
			series[i].ChSel = ath_txchainmask_reduction(sc,
					common->tx_chainmask, series[i].Rate);

		series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
				phy, rate->bitrate * 100, len, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
		flags &= ~ATH9K_TXDESC_RTSENA;
	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (flags & ATH9K_TXDESC_RTSENA)
		flags &= ~ATH9K_TXDESC_CTSENA;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
				     bf->bf_lastbf->bf_desc,
				     !is_pspoll, ctsrate,
				     0, series, 4, flags);
}

static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
					   struct ath_txq *txq,
					   struct sk_buff *skb)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_buf *bf;
	struct ath_desc *ds;
	int frm_type;

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
		ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
		return NULL;
	}

	ATH_TXBUF_RESET(bf);

	bf->bf_flags = setup_tx_flags(skb);
	bf->bf_mpdu = skb;

	bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
					 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
		bf->bf_mpdu = NULL;
		bf->bf_buf_addr = 0;
		ath_err(ath9k_hw_common(sc->sc_ah),
			"dma_mapping_error() on TX\n");
		ath_tx_return_buffer(sc, bf);
		return NULL;
	}

	frm_type = get_hw_packet_type(skb);

	ds = bf->bf_desc;
	ath9k_hw_set_desc_link(ah, ds, 0);

	ath9k_hw_set11n_txdesc(ah, ds, fi->framelen, frm_type, MAX_RATE_POWER,
			       fi->keyix, fi->keytype, bf->bf_flags);

	ath9k_hw_filltxdesc(ah, ds,
			    skb->len,	/* segment length */
			    true,	/* first segment */
			    true,	/* last segment */
			    ds,		/* first descriptor */
			    bf->bf_buf_addr,
			    txq->axq_qnum);

	return bf;
}
/* FIXME: tx power */
static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_control *txctl)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct list_head bf_head;
	struct ath_atx_tid *tid = NULL;
	u8 tidno;

	spin_lock_bh(&txctl->txq->axq_lock);
	if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
	    ieee80211_is_data_qos(hdr->frame_control)) {
		tidno = ieee80211_get_qos_ctl(hdr)[0] &
			IEEE80211_QOS_CTL_TID_MASK;
		tid = ATH_AN_2_TID(txctl->an, tidno);

		WARN_ON(tid->ac->txq != txctl->txq);
	}

	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
		/*
		 * Try aggregation if it's a unicast data frame
		 * and the destination is HT capable.
		 */
		ath_tx_send_ampdu(sc, tid, bf, txctl);
	} else {
		INIT_LIST_HEAD(&bf_head);
		list_add_tail(&bf->list, &bf_head);

		bf->bf_state.bfs_ftype = txctl->frame_type;
		bf->bf_state.bfs_paprd = txctl->paprd;

		if (bf->bf_state.bfs_paprd)
			ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
						   bf->bf_state.bfs_paprd);

		if (txctl->paprd)
			bf->bf_state.bfs_paprd_timestamp = jiffies;

		if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
			ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, true);

		ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
	}

	spin_unlock_bh(&txctl->txq->axq_lock);
}
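/*
 * Padding example for ath_tx_start() below (editor's note): a QoS data
 * header is 26 bytes, so ath9k_cmn_padpos() returns 26 and
 * padsize = 26 & 3 = 2; two bytes are pushed in front of the skb and
 * the header is moved forward so the payload starts 4-byte aligned.
 * ath_tx_complete() reverses this before the skb is handed back to
 * mac80211.
 */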
/* Upon failure caller should free skb */
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
		 struct ath_tx_control *txctl)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = info->control.sta;
	struct ieee80211_vif *vif = info->control.vif;
	struct ath_softc *sc = hw->priv;
	struct ath_txq *txq = txctl->txq;
	struct ath_buf *bf;
	int padpos, padsize;
	int frmlen = skb->len + FCS_LEN;
	int q;

	/* NOTE: sta can be NULL according to net/mac80211.h */
	if (sta)
		txctl->an = (struct ath_node *)sta->drv_priv;

	if (info->control.hw_key)
		frmlen += info->control.hw_key->icv_len;

	/*
	 * As a temporary workaround, assign seq# here; this will likely need
	 * to be cleaned up to work better with Beacon transmission and virtual
	 * BSSes.
	 */
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
			sc->tx.seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
	}

	/* Add the padding after the header if this is not already done */
	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize)
			return -ENOMEM;

		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
	}

	if ((vif && vif->type != NL80211_IFTYPE_AP &&
	     vif->type != NL80211_IFTYPE_AP_VLAN) ||
	    !ieee80211_is_data(hdr->frame_control))
		info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;

	setup_frame_info(hw, skb, frmlen);

	/*
	 * At this point, the vif, hw_key and sta pointers in the tx control
	 * info are no longer valid (overwritten by the ath_frame_info data).
	 */

	bf = ath_tx_setup_buffer(hw, txctl->txq, skb);
	if (unlikely(!bf))
		return -ENOMEM;

	q = skb_get_queue_mapping(skb);
	spin_lock_bh(&txq->axq_lock);
	if (txq == sc->tx.txq_map[q] &&
	    ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
		ieee80211_stop_queue(sc->hw, q);
		txq->stopped = 1;
	}
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_start_dma(sc, bf, txctl);

	return 0;
}

/*****************/
/* TX Completion */
/*****************/

static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, int ftype, struct ath_txq *txq)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int q, padpos, padsize;

	ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);

	if (tx_flags & ATH_TX_BAR)
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

	if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
		/* Frame was ACKed */
		tx_info->flags |= IEEE80211_TX_STAT_ACK;
	}

	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos + padsize) {
		/*
		 * Remove MAC header padding before giving the frame back to
		 * mac80211.
		 */
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
		sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
		ath_dbg(common, ATH_DBG_PS,
			"Going back to sleep after having received TX status (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}

	q = skb_get_queue_mapping(skb);
	if (txq == sc->tx.txq_map[q]) {
		spin_lock_bh(&txq->axq_lock);
		if (WARN_ON(--txq->pending_frames < 0))
			txq->pending_frames = 0;

		if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
			ieee80211_wake_queue(sc->hw, q);
			txq->stopped = 0;
		}
		spin_unlock_bh(&txq->axq_lock);
	}

	ieee80211_tx_status(hw, skb);
}

static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok, bool update_rc)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = sc->hw;
	struct ath_hw *ah = sc->sc_ah;
	u8 i, tx_rateindex;

	if (txok)
		tx_info->status.ack_signal = ts->ts_rssi;

	tx_rateindex = ts->ts_rateindex;
	WARN_ON(tx_rateindex >= hw->max_rates);

	if (ts->ts_status & ATH9K_TXERR_FILT)
		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;

		BUG_ON(nbad > nframes);

		tx_info->status.ampdu_len = nframes;
		tx_info->status.ampdu_ack_len = nframes - nbad;
	}

	if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
	    (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
		/*
		 * If an underrun error is seen, treat it as an excessive
		 * retry only if the max frame trigger level has been reached
		 * (2 KB for single stream, and 4 KB for dual stream).
		 * Adjust the long retry as if the frame was tried
		 * hw->max_rate_tries times to affect how rate control updates
		 * PER for the failed rate.
		 * In case of congestion on the bus, penalizing this type of
		 * underrun should help the hardware actually transmit new
		 * frames successfully by eventually preferring slower rates.
		 * This itself should also alleviate congestion on the bus.
		 */
		if (ieee80211_is_data(hdr->frame_control) &&
		    (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
				     ATH9K_TX_DELIM_UNDERRUN)) &&
		    ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
			tx_info->status.rates[tx_rateindex].count =
				hw->max_rate_tries;
	}

	for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
		tx_info->status.rates[i].count = 0;
		tx_info->status.rates[i].idx = -1;
	}

	tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
}
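
/*
 * For example (illustrative numbers): if the multi-rate series was
 * { MCS7 x 2, MCS4 x 2, MCS1 x 4 } and the frame was delivered at the
 * second rate after one retry there, ts_rateindex is 1 and ts_longretry
 * is 1, so rates[1].count becomes 2 and rates[2] is marked idx = -1;
 * rate control then only accounts for the attempts actually made.
 */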

static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_tx_status ts;
	int txok;
	int status;

	ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
		txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		txq->axq_link);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);
		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			if (sc->sc_flags & SC_OP_TXAGGR)
				ath_txq_schedule(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition where a BH gets scheduled
		 * after sw writes TxE and before hw re-loads the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_stale) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &txq->axq_q)) {
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_entry(bf_held->list.next,
						struct ath_buf, list);
			}
		}

		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc;

		memset(&ts, 0, sizeof(ts));
		status = ath9k_hw_txprocdesc(ah, ds, &ts);
		if (status == -EINPROGRESS) {
			spin_unlock_bh(&txq->axq_lock);
			break;
		}
		TX_STAT_INC(txq->axq_qnum, txprocdesc);

		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
		lastbf->bf_stale = true;
		INIT_LIST_HEAD(&bf_head);
		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
					  &txq->axq_q, lastbf->list.prev);

		txq->axq_depth--;
		txok = !(ts.ts_status & ATH9K_TXERR_MASK);
		txq->axq_tx_inprogress = false;
		if (bf_held)
			list_del(&bf_held->list);

		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth--;

		spin_unlock_bh(&txq->axq_lock);

		if (bf_held)
			ath_tx_return_buffer(sc, bf_held);

		if (!bf_isampdu(bf)) {
			/*
			 * This frame is sent out as a single frame.
			 * Use hardware retry status for this frame.
			 */
			if (ts.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_state.bf_type |= BUF_XRETRY;
			ath_tx_rc_status(sc, bf, &ts, 1, txok ? 0 : 1, txok, true);
		}

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok,
					     true);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);

		spin_lock_bh(&txq->axq_lock);

		if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
}

static void ath_tx_complete_poll_work(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc,
					    tx_complete_work.work);
	struct ath_txq *txq;
	int i;
	bool needreset = false;
#ifdef CONFIG_ATH9K_DEBUGFS
	sc->tx_complete_poll_work_seen++;
#endif

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			spin_lock_bh(&txq->axq_lock);
			if (txq->axq_depth) {
				if (txq->axq_tx_inprogress) {
					needreset = true;
					spin_unlock_bh(&txq->axq_lock);
					break;
				} else {
					txq->axq_tx_inprogress = true;
				}
			}
			spin_unlock_bh(&txq->axq_lock);
		}

	if (needreset) {
		ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
			"tx hung, resetting the chip\n");
		ath_reset(sc, true);
	}

	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
				     msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
}

void ath_tx_tasklet(struct ath_softc *sc)
{
	int i;
	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);

	ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
			ath_tx_processq(sc, &sc->tx.txq[i]);
	}
}

void ath_tx_edma_tasklet(struct ath_softc *sc)
{
	struct ath_tx_status txs;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct ath_txq *txq;
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	int status;
	int txok;

	for (;;) {
		status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
		if (status == -EINPROGRESS)
			break;
		if (status == -EIO) {
			ath_dbg(common, ATH_DBG_XMIT,
				"Error processing tx status\n");
			break;
		}

		/* Skip beacon completions */
		if (txs.qid == sc->beacon.beaconq)
			continue;

		txq = &sc->tx.txq[txs.qid];

		spin_lock_bh(&txq->axq_lock);
		if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
			spin_unlock_bh(&txq->axq_lock);
			return;
		}

		bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
				      struct ath_buf, list);
		lastbf = bf->bf_lastbf;

		INIT_LIST_HEAD(&bf_head);
		list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
				  &lastbf->list);
		INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		txq->axq_depth--;
		txq->axq_tx_inprogress = false;
		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth--;
		spin_unlock_bh(&txq->axq_lock);

		txok = !(txs.ts_status & ATH9K_TXERR_MASK);

		if (!bf_isampdu(bf)) {
			if (txs.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_state.bf_type |= BUF_XRETRY;
			ath_tx_rc_status(sc, bf, &txs, 1, txok ? 0 : 1, txok, true);
		}

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs,
					     txok, true);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head,
					    &txs, txok, 0);

		spin_lock_bh(&txq->axq_lock);

		if (!list_empty(&txq->txq_fifo_pending)) {
			INIT_LIST_HEAD(&bf_head);
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head,
					  &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			ath_tx_txqaddbuf(sc, txq, &bf_head);
		} else if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);

		spin_unlock_bh(&txq->axq_lock);
	}
}

/*****************/
/* Init, Cleanup */
/*****************/

static int ath_txstatus_setup(struct ath_softc *sc, int size)
{
	struct ath_descdma *dd = &sc->txsdma;
	u8 txs_len = sc->sc_ah->caps.txs_len;

	dd->dd_desc_len = size * txs_len;
	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
					 &dd->dd_desc_paddr, GFP_KERNEL);
	if (!dd->dd_desc)
		return -ENOMEM;

	return 0;
}

static int ath_tx_edma_init(struct ath_softc *sc)
{
	int err;

	err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
	if (!err)
		ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
					  sc->txsdma.dd_desc_paddr,
					  ATH_TXSTATUS_RING_SIZE);

	return err;
}

static void ath_tx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_descdma *dd = &sc->txsdma;

	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);
}
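
/*
 * On EDMA (AR9003 family) hardware the tx status is not read back from
 * the frame's own descriptor: the hardware DMA-writes completion entries
 * into a dedicated status ring (set up in ath_tx_edma_init() below).
 * That is why ath9k_hw_txprocdesc() is called in the EDMA tasklet with a
 * NULL descriptor pointer and buffers are tracked per queue in txq_fifo[].
 */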
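
/*
 * The status ring allocated above holds ATH_TXSTATUS_RING_SIZE entries of
 * caps.txs_len bytes each, in coherent DMA memory, so the hardware can
 * write completed tx status words straight into host memory for
 * ath_tx_edma_tasklet() to consume in order.
 */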

int ath_tx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int error = 0;

	spin_lock_init(&sc->tx.txbuflock);

	error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
				  "tx", nbufs, 1, 1);
	if (error != 0) {
		ath_err(common,
			"Failed to allocate tx descriptors: %d\n", error);
		goto err;
	}

	error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
				  "beacon", ATH_BCBUF, 1, 1);
	if (error != 0) {
		ath_err(common,
			"Failed to allocate beacon descriptors: %d\n", error);
		goto err;
	}

	INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		error = ath_tx_edma_init(sc);
		if (error)
			goto err;
	}

err:
	if (error != 0)
		ath_tx_cleanup(sc);

	return error;
}

void ath_tx_cleanup(struct ath_softc *sc)
{
	if (sc->beacon.bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);

	if (sc->tx.txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_tx_edma_cleanup(sc);
}

void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID;
	     tidno++, tid++) {
		tid->an = an;
		tid->tidno = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size = WME_MAX_BA;
		tid->baw_head = tid->baw_tail = 0;
		tid->sched = false;
		tid->paused = false;
		tid->state &= ~AGGR_CLEANUP;
		INIT_LIST_HEAD(&tid->buf_q);
		acno = TID_TO_WME_AC(tidno);
		tid->ac = &an->ac[acno];
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_ADDBA_PROGRESS;
	}

	for (acno = 0, ac = &an->ac[acno];
	     acno < WME_NUM_AC; acno++, ac++) {
		ac->sched = false;
		ac->txq = sc->tx.txq_map[acno];
		INIT_LIST_HEAD(&ac->tid_q);
	}
}

void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);

		if (tid->sched) {
			list_del(&tid->list);
			tid->sched = false;
		}

		if (ac->sched) {
			list_del(&ac->list);
			ac->sched = false;
		}

		ath_tid_drain(sc, txq, tid);
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_CLEANUP;

		spin_unlock_bh(&txq->axq_lock);
	}
}