/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_MCS(_rc)        ((_rc) & 0x1f)
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

#define OFDM_SIFS_TIME          16

static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_atx_tid *tid,
				  struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf);
static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
			      struct ath_tx_status *ts, int txok);
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
			     int nbad, int txok, bool update_rc);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};
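/*
 * Illustrative note: each entry below approximates the largest frame
 * that still fits into a 4 ms transmit duration at the given MCS.
 * For example, MCS 0 at HT20 carries 26 bits per 4 us symbol, i.e.
 * 1000 symbols in 4 ms = 26000 bits = 3250 bytes; after accounting
 * for PLCP/signal-field overhead this roughly matches the 3212 listed
 * first in the MCS_HT20 row. Entries are clamped to 65532.
 */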
static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648,  12864,  19300,  25736,  28952,  32172,
		6424,  12852, 19280, 25708,  38568,  51424,  57852,  64280,
		9628,  19260, 28896, 38528,  57792,  65532,  65532,  65532,
		12828, 25656, 38488, 51320,  65532,  65532,  65532,  65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144,  10720, 14296,  21444,  28596,  32172,  35744,
		7140,  14284, 21428, 28568,  42856,  57144,  64288,  65532,
		10700, 21408, 32112, 42816,  64228,  65532,  65532,  65532,
		14256, 28516, 42780, 57040,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40] = {
		6680,  13360, 20044, 26724,  40092,  53456,  60140,  65532,
		13348, 26700, 40052, 53400,  65532,  65532,  65532,  65532,
		20004, 40008, 60016, 65532,  65532,  65532,  65532,  65532,
		26644, 53292, 65532, 65532,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40_SGI] = {
		7420,  14844, 22272, 29696,  44544,  59396,  65532,  65532,
		14832, 29668, 44504, 59340,  65532,  65532,  65532,  65532,
		22232, 44464, 65532, 65532,  65532,  65532,  65532,  65532,
		29616, 59232, 65532, 65532,  65532,  65532,  65532,  65532,
	}
};

/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (list_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		if (bf_isretried(bf)) {
			ath_tx_update_baw(sc, tid, bf->bf_seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		} else {
			ath_tx_send_ht_normal(sc, txq, tid, &bf_head);
		}
	}

	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     struct ath_buf *bf)
{
	int index, cindex;

	if (bf_isretried(bf))
		return;

	index  = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
		(ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}
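/*
 * Illustrative example of the block-ack window bookkeeping above
 * (ATH_TID_MAX_BUFS is a power of two): with seq_start = 100 and
 * baw_head = 5, a subframe with seqno 103 maps to index 3 and to slot
 * cindex = (5 + 3) & (ATH_TID_MAX_BUFS - 1) = 8 in tid->tx_buf. When
 * that subframe completes, its bit is cleared and seq_start/baw_head
 * slide forward over any leading completed slots.
 */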
/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)

{
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;

		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		if (bf_isretried(bf))
			ath_tx_update_baw(sc, tid, bf->bf_seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;

	bf->bf_state.bf_type |= BUF_RETRY;
	bf->bf_retries++;
	TX_STAT_INC(txq->axq_qnum, a_retries);

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->aphy = bf->aphy;
	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}
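/*
 * ath_tx_complete_aggr() walks the subframes of a completed A-MPDU,
 * compares each sequence number against the block-ack bitmap reported
 * by the hardware, completes the acked or permanently failed subframes,
 * and re-queues the rest on the TID's software queue for another
 * transmission attempt.
 */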
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	int nframes;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);
	hw = bf->aphy->hw;

	memcpy(rates, tx_info->control.rates, sizeof(rates));
	nframes = bf->bf_nframes;

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			bf->bf_state.bf_type |= BUF_XRETRY;
			if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
			    !bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_rc_status(bf, ts, 1, 0, false);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tid = ATH_AN_2_TID(an, bf->bf_tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 */
	if (bf->bf_tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when BA
			 * issue happens. Chip needs to be reset.
			 * But AP code may have synchronization issues
			 * when performing an internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);

	nbad = ath_tx_num_badfrms(sc, bf, ts, txok);
	while (bf) {
		txfail = txpending = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if (!(tid->state & AGGR_CLEANUP) &&
			    !bf_last->bf_tx_aborted) {
				if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
					ath_tx_set_retry(sc, txq, bf);
					txpending = 1;
				} else {
					bf->bf_state.bf_type |= BUF_XRETRY;
					txfail = 1;
					sendbar = 1;
					txfail_cnt++;
				}
			} else {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			}
		}

		if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
		    bf_next == NULL) {
			/*
			 * Make sure the last desc is reclaimed if it
			 * is not a holding desc.
			 */
			if (!bf_last->bf_stale)
				list_move_tail(&bf->list, &bf_head);
			else
				INIT_LIST_HEAD(&bf_head);
		} else {
			BUG_ON(list_empty(bf_q));
			list_move_tail(&bf->list, &bf_head);
		}

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, bf->bf_seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				bf->bf_nframes = nframes;
				ath_tx_rc_status(bf, ts, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(bf, ts, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid,
								bf->bf_seqno);
						spin_unlock_bh(&txq->axq_lock);

						bf->bf_state.bf_type |=
							BUF_XRETRY;
						ath_tx_rc_status(bf, ts, nbad,
								0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 0);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					list_add_tail(&tbf->list, &bf_head);
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!list_empty(&bf_pending)) {
		spin_lock_bh(&txq->axq_lock);
		list_splice(&bf_pending, &tid->buf_q);
		ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		ath_tx_flush_tid(sc, tid);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;
		}
	}

	rcu_read_unlock();

	if (needreset)
		ath_reset(sc, false);
}
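/*
 * For example, if the slowest rate in the series allows 19300 bytes in
 * 4 ms and Bluetooth coexistence is active, the aggregate is further
 * scaled to 3/8 of that (19300 * 3 / 8 = 7237 bytes) before being
 * clamped to ATH_AMPDU_LIMIT_MAX and the peer's advertised A-MPDU limit.
 */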
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * limit aggregate size by the minimum rate if rate selected is
	 * not a probe rate, if rate selected is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16-bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}
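/*
 * Worked example for the density calculation below: with an MPDU
 * density of 8 us and MCS 7 at 20 MHz (260 bits per 4 us symbol, one
 * stream), the peer needs 2 symbols * 260 bits / 8 = 65 bytes between
 * subframe starts; a shorter subframe is padded out with extra
 * delimiters (ATH_AGGR_DELIM_SZ bytes each).
 */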
/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, hardware requires some more padding between
	 * subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *        The hardware can keep up at lower rates, but not higher rates
	 */
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}
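/*
 * Aggregate formation: subframes are pulled from the TID's software
 * queue in order and chained until one of the stop conditions hits --
 * the block-ack window would be overstepped (ATH_AGGR_BAW_CLOSED), the
 * rate-derived byte limit or the subframe count limit is reached
 * (ATH_AGGR_LIMITED), or the queue runs empty (ATH_AGGR_DONE).
 */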
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;

		if (nframes &&
		    (aggr_limit < (al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
			!(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}
		nframes++;

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

		/* link buffers of this frame to the aggregate */
		ath_tx_addto_baw(sc, tid, bf);
		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
		list_move_tail(&bf->list, bf_q);
		if (bf_prev) {
			bf_prev->bf_next = bf;
			ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
					       bf->bf_daddr);
		}
		bf_prev = bf;

	} while (!list_empty(&tid->buf_q));

	bf_first->bf_al = al;
	bf_first->bf_nframes = nframes;

	return status;
#undef PADBYTES
}

static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct list_head bf_q;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

		/* if only one frame, send as non-aggregate */
		if (bf->bf_nframes == 1) {
			bf->bf_state.bf_type &= ~BUF_AGGR;
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
			ath_buf_set_rate(sc, bf);
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}

		/* setup first desc of aggregate */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);

		/* anchor last desc of aggregate */
		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

		ath_tx_txqaddbuf(sc, txq, &bf_q);
		TX_STAT_INC(txq->axq_qnum, a_aggr);

	} while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start;

	return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}
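/*
 * Aggregation session lifecycle: ath_tx_aggr_start() marks the TID
 * AGGR_ADDBA_PROGRESS and pauses it while mac80211 completes the ADDBA
 * handshake; ath_tx_aggr_resume() then sets AGGR_ADDBA_COMPLETE and
 * unpauses the TID; ath_tx_aggr_stop() either tears the state down
 * immediately or defers it via AGGR_CLEANUP until the BAW drains.
 */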
void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	int qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype;
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up, in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
		ath_print(common, ATH_DBG_FATAL,
			  "qnum %u out of range, max %u!\n",
			  qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, qnum)) {
		struct ath_txq *txq = &sc->tx.txq[qnum];

		txq->axq_class = subtype;
		txq->axq_qnum = qnum;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
		INIT_LIST_HEAD(&txq->txq_fifo_pending);
	}
	return &sc->tx.txq[qnum];
}
int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (sc->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}
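/*
 * For instance, with cabqReadytime at 10 (percent), the CAB queue ready
 * time computed above works out to one tenth of the beacon interval.
 */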
/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
				txq->txq_headidx = txq->txq_tailidx = 0;
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
						      struct ath_buf, list);
			}
		} else {
			if (list_empty(&txq->axq_q)) {
				txq->axq_link = NULL;
				spin_unlock_bh(&txq->axq_lock);
				break;
			}
			bf = list_first_entry(&txq->axq_q, struct ath_buf,
					      list);

			if (bf->bf_stale) {
				list_del(&bf->list);
				spin_unlock_bh(&txq->axq_lock);

				ath_tx_return_buffer(sc, bf);
				continue;
			}
		}

		lastbf = bf->bf_lastbf;
		if (!retry_tx)
			lastbf->bf_tx_aborted = true;

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			list_cut_position(&bf_head,
					  &txq->txq_fifo[txq->txq_tailidx],
					  &lastbf->list);
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		} else {
			/* remove ath_buf's of the same mpdu from txq */
			list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
		}

		txq->axq_depth--;

		spin_unlock_bh(&txq->axq_lock);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}

	spin_lock_bh(&txq->axq_lock);
	txq->axq_tx_inprogress = false;
	spin_unlock_bh(&txq->axq_lock);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		spin_lock_bh(&txq->axq_lock);
		while (!list_empty(&txq->txq_fifo_pending)) {
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head,
					  &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			spin_unlock_bh(&txq->axq_lock);

			if (bf_isampdu(bf))
				ath_tx_complete_aggr(sc, txq, bf, &bf_head,
						     &ts, 0);
			else
				ath_tx_complete_buf(sc, bf, txq, &bf_head,
						    &ts, 0, 0);
			spin_lock_bh(&txq->axq_lock);
		}
		spin_unlock_bh(&txq->axq_lock);
	}

	/* flush any pending frames if aggregation is enabled */
	if (sc->sc_flags & SC_OP_TXAGGR) {
		if (!retry_tx) {
			spin_lock_bh(&txq->axq_lock);
			ath_txq_drain_pending_buffers(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
		}
	}
}
void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i, npend = 0;

	if (sc->sc_flags & SC_OP_INVALID)
		return;

	/* Stop beacon queue */
	ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);

	/* Stop data queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			ath9k_hw_stoptxdma(ah, txq->axq_qnum);
			npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
		}
	}

	if (npend) {
		int r;

		ath_print(common, ATH_DBG_FATAL,
			  "Failed to stop TX DMA. Resetting hardware!\n");

		spin_lock_bh(&sc->sc_resetlock);
		r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false);
		if (r)
			ath_print(common, ATH_DBG_FATAL,
				  "Unable to reset hardware; reset status %d\n",
				  r);
		spin_unlock_bh(&sc->sc_resetlock);
	}

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i))
			ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
	}
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}

void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;

	if (list_empty(&txq->axq_acq))
		return;

	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	list_del(&ac->list);
	ac->sched = false;

	do {
		if (list_empty(&ac->tid_q))
			return;

		tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
		list_del(&tid->list);
		tid->sched = false;

		if (tid->paused)
			continue;

		ath_tx_sched_aggr(sc, txq, tid);

		/*
		 * add tid to round-robin queue if more frames
		 * are pending for the tid
		 */
		if (!list_empty(&tid->buf_q))
			ath_tx_queue_tid(txq, tid);

		break;
	} while (!list_empty(&ac->tid_q));

	if (!list_empty(&ac->tid_q)) {
		if (!ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, &txq->axq_acq);
		}
	}
}

int ath_tx_setup(struct ath_softc *sc, int haltype)
{
	struct ath_txq *txq;

	if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "HAL AC %u out of range, max %zu!\n",
			  haltype, ARRAY_SIZE(sc->tx.hwq_map));
		return 0;
	}
	txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
	if (txq != NULL) {
		sc->tx.hwq_map[haltype] = txq->axq_qnum;
		return 1;
	} else
		return 0;
}

/***********/
/* TX, DMA */
/***********/

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	bf = list_first_entry(head, struct ath_buf, list);

	ath_print(common, ATH_DBG_QUEUE,
		  "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
			list_splice_tail_init(head, &txq->txq_fifo_pending);
			return;
		}
		if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
			ath_print(common, ATH_DBG_XMIT,
				  "Initializing tx fifo %d which "
				  "is non-empty\n",
				  txq->txq_headidx);
		INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
		list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_print(common, ATH_DBG_XMIT,
			  "TXDP[%u] = %llx (%p)\n",
			  txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	} else {
		list_splice_tail_init(head, &txq->axq_q);

		if (txq->axq_link == NULL) {
			ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
			ath_print(common, ATH_DBG_XMIT,
				  "TXDP[%u] = %llx (%p)\n",
				  txq->axq_qnum, ito64(bf->bf_daddr),
				  bf->bf_desc);
		} else {
			*txq->axq_link = bf->bf_daddr;
			ath_print(common, ATH_DBG_XMIT,
				  "link[%u] (%p)=%llx (%p)\n",
				  txq->axq_qnum, txq->axq_link,
				  ito64(bf->bf_daddr), bf->bf_desc);
		}
		ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
				       &txq->axq_link);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}
	txq->axq_depth++;
}
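/*
 * Design note: on EDMA hardware the queue above is an
 * up-to-ATH_TXFIFO_DEPTH-deep FIFO of descriptor chains pushed with
 * ath9k_hw_puttxbuf(), while on older MACs a single DMA list is grown
 * by patching the previous chain's link pointer (txq->axq_link) and
 * kicking the queue with ath9k_hw_txstart().
 */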
static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
			      struct list_head *bf_head,
			      struct ath_tx_control *txctl)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type |= BUF_AMPDU;
	TX_STAT_INC(txctl->txq->axq_qnum, a_queued);

	/*
	 * Do not queue to h/w when any of the following conditions is true:
	 * - there are pending frames in software queue
	 * - the TID is currently paused for ADDBA/BAR request
	 * - seqno is not within block-ack window
	 * - h/w queue depth exceeds low water mark
	 */
	if (!list_empty(&tid->buf_q) || tid->paused ||
	    !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
	    txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		list_move_tail(&bf->list, &tid->buf_q);
		ath_tx_queue_tid(txctl->txq, tid);
		return;
	}

	/* Add sub-frame to BAW */
	ath_tx_addto_baw(sc, tid, bf);

	/* Queue to h/w without aggregation */
	bf->bf_nframes = 1;
	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txctl->txq, bf_head);
}
static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_atx_tid *tid,
				  struct list_head *bf_head)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type &= ~BUF_AMPDU;

	/* update starting sequence number for subsequent ADDBA request */
	INCR(tid->seq_start, IEEE80211_SEQ_MAX);

	bf->bf_nframes = 1;
	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txq, bf_head);
	TX_STAT_INC(txq->axq_qnum, queued);
}

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct list_head *bf_head)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);

	bf->bf_lastbf = bf;
	bf->bf_nframes = 1;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txq, bf_head);
	TX_STAT_INC(txq->axq_qnum, queued);
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}

static void assign_aggr_tid_seqno(struct sk_buff *skb,
				  struct ath_buf *bf)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr;
	struct ath_node *an;
	struct ath_atx_tid *tid;
	__le16 fc;
	u8 *qc;

	if (!tx_info->control.sta)
		return;

	an = (struct ath_node *)tx_info->control.sta->drv_priv;
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		bf->bf_tidno = qc[0] & 0xf;
	}

	/*
	 * For HT capable stations, we save tidno for later use.
	 * We also override seqno set by upper layer with the one
	 * in tx aggregation state.
	 */
	tid = ATH_AN_2_TID(an, bf->bf_tidno);
	hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
	bf->bf_seqno = tid->seq_next;
	INCR(tid->seq_next, IEEE80211_SEQ_MAX);
}
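/*
 * Example of the sequence-control override above: with seq_next = 100,
 * the header's seq_ctrl becomes 100 << IEEE80211_SEQ_SEQ_SHIFT (the low
 * fragment bits stay zero), bf_seqno records 100 for block-ack window
 * tracking, and seq_next advances to 101 modulo IEEE80211_SEQ_MAX.
 */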
static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	int flags = 0;

	flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
	flags |= ATH9K_TXDESC_INTREQ;

	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= ATH9K_TXDESC_NOACK;

	if (use_ldpc)
		flags |= ATH9K_TXDESC_LDPC;

	return flags;
}

/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4 us vs. 3.6 us for symbol time
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
			    int width, int half_gi, bool shortPreamble)
{
	u32 nbits, nsymbits, duration, nsymbols;
	int streams, pktlen;

	pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;

	/* find number of symbols: PLCP + data */
	streams = HT_RC_2_STREAMS(rix);
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* add up duration for legacy/ht training and signal fields */
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}
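/*
 * Worked example for ath_pkt_duration() above: a 1500-byte frame at
 * MCS 7 (260 bits per symbol, one stream), 20 MHz, full GI needs
 * ceil((1500*8 + 22) / 260) = 47 symbols, i.e. 188 us of data, plus
 * 36 us of training/signal fields (8 + 8 + 4 + 8 + 4 + 4), for a
 * total of 224 us.
 */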
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath9k_11n_rate_series series[4];
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	int i, flags = 0;
	u8 rix = 0, ctsrate = 0;
	bool is_pspoll;

	memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;
	hdr = (struct ieee80211_hdr *)skb->data;
	is_pspoll = ieee80211_is_pspoll(hdr->frame_control);

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
	ctsrate = rate->hw_value;
	if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
		ctsrate |= rate->hw_value_short;

	for (i = 0; i < 4; i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		series[i].Tries = rates[i].count;
		series[i].ChSel = common->tx_chainmask;

		if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
		    (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			flags |= ATH9K_TXDESC_CTSENA;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			series[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates */
			series[i].Rate = rix | 0x80;
			series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
					is_40, is_sgi, is_sp);
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
				series[i].RateFlags |= ATH9K_RATESERIES_STBC;
			continue;
		}

		/* legacy rates */
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
		series[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				series[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

		series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
				phy, rate->bitrate * 100, bf->bf_frmlen, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit))
		flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (flags & ATH9K_TXDESC_RTSENA)
		flags &= ~ATH9K_TXDESC_CTSENA;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
				     bf->bf_lastbf->bf_desc,
				     !is_pspoll, ctsrate,
				     0, series, 4, flags);

	if (sc->config.ath_aggr_prot && flags)
		ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
}
static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
			       struct sk_buff *skb,
			       struct ath_tx_control *txctl)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int hdrlen;
	__le16 fc;
	int padpos, padsize;
	bool use_ldpc = false;

	tx_info->pad[0] = 0;
	switch (txctl->frame_type) {
	case ATH9K_IFT_NOT_INTERNAL:
		break;
	case ATH9K_IFT_PAUSE:
		tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_PAUSE;
		/* fall through */
	case ATH9K_IFT_UNPAUSE:
		tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_INTERNAL;
		break;
	}
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;

	ATH_TXBUF_RESET(bf);

	bf->aphy = aphy;
	bf->bf_frmlen = skb->len + FCS_LEN;
	/* Remove the padding size from bf_frmlen, if any */
	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos + padsize)
		bf->bf_frmlen -= padsize;

	if (!txctl->paprd && conf_is_ht(&hw->conf)) {
		bf->bf_state.bf_type |= BUF_HT;
		if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
			use_ldpc = true;
	}

	bf->bf_state.bfs_paprd = txctl->paprd;
	if (txctl->paprd)
		bf->bf_state.bfs_paprd_timestamp = jiffies;
	bf->bf_flags = setup_tx_flags(skb, use_ldpc);

	bf->bf_keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
		bf->bf_frmlen += tx_info->control.hw_key->icv_len;
		bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
	} else {
		bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
	}

	if (ieee80211_is_data_qos(fc) && bf_isht(bf) &&
	    (sc->sc_flags & SC_OP_TXAGGR))
		assign_aggr_tid_seqno(skb, bf);

	bf->bf_mpdu = skb;

	bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
					 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
		bf->bf_mpdu = NULL;
		bf->bf_buf_addr = 0;
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "dma_mapping_error() on TX\n");
		return -ENOMEM;
	}

	bf->bf_tx_aborted = false;

	return 0;
}
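/*
 * Note on the padding math above: ath9k_cmn_padpos() returns the offset
 * at which pad bytes are inserted to 4-byte-align the frame body. For a
 * QoS data header of 26 bytes, padpos = 26 and padsize = 26 & 3 = 2, so
 * two pad bytes sit between header and payload and are excluded from
 * the frame length reported to the hardware.
 */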
/* FIXME: tx power */
static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_control *txctl)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ath_node *an = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_atx_tid *tid;
	struct ath_hw *ah = sc->sc_ah;
	int frm_type;
	__le16 fc;

	frm_type = get_hw_packet_type(skb);
	fc = hdr->frame_control;

	INIT_LIST_HEAD(&bf_head);
	list_add_tail(&bf->list, &bf_head);

	ds = bf->bf_desc;
	ath9k_hw_set_desc_link(ah, ds, 0);

	ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
			       bf->bf_keyix, bf->bf_keytype, bf->bf_flags);

	ath9k_hw_filltxdesc(ah, ds,
			    skb->len,	/* segment length */
			    true,	/* first segment */
			    true,	/* last segment */
			    ds,		/* first descriptor */
			    bf->bf_buf_addr,
			    txctl->txq->axq_qnum);

	if (bf->bf_state.bfs_paprd)
		ar9003_hw_set_paprd_txdesc(ah, ds, bf->bf_state.bfs_paprd);

	spin_lock_bh(&txctl->txq->axq_lock);

	if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
	    tx_info->control.sta) {
		an = (struct ath_node *)tx_info->control.sta->drv_priv;
		tid = ATH_AN_2_TID(an, bf->bf_tidno);

		if (!ieee80211_is_data_qos(fc)) {
			ath_tx_send_normal(sc, txctl->txq, &bf_head);
			goto tx_done;
		}

		if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
			/*
			 * Try aggregation if it's a unicast data frame
			 * and the destination is HT capable.
			 */
			ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
		} else {
			/*
			 * Send this frame as regular when ADDBA
			 * exchange is neither complete nor pending.
			 */
			ath_tx_send_ht_normal(sc, txctl->txq,
					      tid, &bf_head);
		}
	} else {
		ath_tx_send_normal(sc, txctl->txq, &bf_head);
	}

tx_done:
	spin_unlock_bh(&txctl->txq->axq_lock);
}

/* Upon failure caller should free skb */
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
		 struct ath_tx_control *txctl)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq = txctl->txq;
	struct ath_buf *bf;
	int q, r;

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
		ath_print(common, ATH_DBG_XMIT, "TX buffers are full\n");
		return -1;
	}

	r = ath_tx_setup_buffer(hw, bf, skb, txctl);
	if (unlikely(r)) {
		ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");

		/* upon ath_tx_processq() this TX queue will be resumed, we
		 * guarantee this will happen by knowing beforehand that
		 * we will at least have to run TX completion on one buffer
		 * on the queue */
		spin_lock_bh(&txq->axq_lock);
		if (!txq->stopped && txq->axq_depth > 1) {
			ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
			txq->stopped = 1;
		}
		spin_unlock_bh(&txq->axq_lock);

		ath_tx_return_buffer(sc, bf);

		return r;
	}

	q = skb_get_queue_mapping(skb);
	if (q >= 4)
		q = 0;

	spin_lock_bh(&txq->axq_lock);
	if (++sc->tx.pending_frames[q] > ATH_MAX_QDEPTH && !txq->stopped) {
		ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
		txq->stopped = 1;
	}
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_start_dma(sc, bf, txctl);

	return 0;
}
void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	int padpos, padsize;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath_tx_control txctl;

	memset(&txctl, 0, sizeof(struct ath_tx_control));

	/*
	 * As a temporary workaround, assign seq# here; this will likely need
	 * to be cleaned up to work better with Beacon transmission and virtual
	 * BSSes.
	 */
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
			sc->tx.seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
	}

	/* Add the padding after the header if this is not already done */
	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize) {
			ath_print(common, ATH_DBG_XMIT,
				  "TX CABQ padding failed\n");
			dev_kfree_skb_any(skb);
			return;
		}
		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
	}

	txctl.txq = sc->beacon.cabq;

	ath_print(common, ATH_DBG_XMIT,
		  "transmitting CABQ packet, skb: %p\n", skb);

	if (ath_tx_start(hw, skb, &txctl) != 0) {
		ath_print(common, ATH_DBG_XMIT, "CABQ TX failed\n");
		goto exit;
	}

	return;
exit:
	dev_kfree_skb_any(skb);
}

/*****************/
/* TX Completion */
/*****************/

static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    struct ath_wiphy *aphy, int tx_flags)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
	int q, padpos, padsize;

	ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);

	if (aphy)
		hw = aphy->hw;

	if (tx_flags & ATH_TX_BAR)
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

	if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
		/* Frame was ACKed */
		tx_info->flags |= IEEE80211_TX_STAT_ACK;
	}

	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos + padsize) {
		/*
		 * Remove MAC header padding before giving the frame back to
		 * mac80211.
		 */
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
		sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
		ath_print(common, ATH_DBG_PS,
			  "Going back to sleep after having "
			  "received TX status (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}

	if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL))
		ath9k_tx_status(hw, skb);
	else {
		q = skb_get_queue_mapping(skb);
		if (q >= 4)
			q = 0;

		if (--sc->tx.pending_frames[q] < 0)
			sc->tx.pending_frames[q] = 0;

		ieee80211_tx_status(hw, skb);
	}
}
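/*
 * Note: the memmove()/skb_pull() above is the inverse of the padding
 * added on the transmit path (see ath_tx_cabq()); the header is shifted
 * back over the pad bytes so mac80211 sees the frame exactly as it
 * handed it down.
 */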
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar)
{
	struct sk_buff *skb = bf->bf_mpdu;
	unsigned long flags;
	int tx_flags = 0;

	if (sendbar)
		tx_flags = ATH_TX_BAR;

	if (!txok) {
		tx_flags |= ATH_TX_ERROR;

		if (bf_isxretried(bf))
			tx_flags |= ATH_TX_XRETRY;
	}

	dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
	bf->bf_buf_addr = 0;

	if (bf->bf_state.bfs_paprd) {
		if (time_after(jiffies,
			       bf->bf_state.bfs_paprd_timestamp +
			       msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
			dev_kfree_skb_any(skb);
		else
			complete(&sc->paprd_complete);
	} else {
		ath_debug_stat_tx(sc, txq, bf, ts);
		ath_tx_complete(sc, skb, bf->aphy, tx_flags);
	}
	/* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
	 * accidentally reference it later.
	 */
	bf->bf_mpdu = NULL;

	/*
	 * Return the list of ath_buf of this mpdu to free queue
	 */
	spin_lock_irqsave(&sc->tx.txbuflock, flags);
	list_splice_tail_init(bf_q, &sc->tx.txbuf);
	spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}

static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
			      struct ath_tx_status *ts, int txok)
{
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int nbad = 0;
	int isaggr = 0;

	if (bf->bf_lastbf->bf_tx_aborted)
		return 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			nbad++;

		bf = bf->bf_next;
	}

	return nbad;
}
static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
			      struct ath_tx_status *ts, int txok)
{
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int nbad = 0;
	int isaggr = 0;

	if (bf->bf_lastbf->bf_tx_aborted)
		return 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			nbad++;

		bf = bf->bf_next;
	}

	return nbad;
}

static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
			     int nbad, int txok, bool update_rc)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = bf->aphy->hw;
	u8 i, tx_rateindex;

	if (txok)
		tx_info->status.ack_signal = ts->ts_rssi;

	tx_rateindex = ts->ts_rateindex;
	WARN_ON(tx_rateindex >= hw->max_rates);

	if (ts->ts_status & ATH9K_TXERR_FILT)
		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;

		BUG_ON(nbad > bf->bf_nframes);

		tx_info->status.ampdu_len = bf->bf_nframes;
		tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
	}

	if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
	    (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
		if (ieee80211_is_data(hdr->frame_control)) {
			if (ts->ts_flags &
			    (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN))
				tx_info->pad[0] |= ATH_TX_INFO_UNDERRUN;
			if ((ts->ts_status & ATH9K_TXERR_XRETRY) ||
			    (ts->ts_status & ATH9K_TXERR_FIFO))
				tx_info->pad[0] |= ATH_TX_INFO_XRETRY;
		}
	}

	for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
		tx_info->status.rates[i].count = 0;
		tx_info->status.rates[i].idx = -1;
	}

	tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
}

static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
{
	int qnum;

	qnum = ath_get_mac80211_qnum(txq->axq_class, sc);
	if (qnum == -1)
		return;

	spin_lock_bh(&txq->axq_lock);
	if (txq->stopped && sc->tx.pending_frames[qnum] < ATH_MAX_QDEPTH) {
		if (ath_mac80211_start_queue(sc, qnum))
			txq->stopped = 0;
	}
	spin_unlock_bh(&txq->axq_lock);
}
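/*
 * Reap completed frames from one legacy (pre-EDMA) hardware queue.  The
 * last DONE descriptor of each burst is deliberately left on axq_q as a
 * "holding" descriptor, flagged bf_stale, so the hardware can still
 * re-read it while new frames are being chained in; it is unlinked and
 * returned to the free pool on a later pass, once it is no longer the
 * list tail.
 */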
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_tx_status ts;
	int txok;
	int status;

	ath_print(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
		  txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		  txq->axq_link);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);
		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			spin_unlock_bh(&txq->axq_lock);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition in which a BH gets scheduled
		 * after sw writes TxE and before hw re-loads the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_stale) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &txq->axq_q)) {
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_entry(bf_held->list.next,
						struct ath_buf, list);
			}
		}

		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc;

		memset(&ts, 0, sizeof(ts));
		status = ath9k_hw_txprocdesc(ah, ds, &ts);
		if (status == -EINPROGRESS) {
			spin_unlock_bh(&txq->axq_lock);
			break;
		}

		/*
		 * Remove the ath_buf's of the same transmit unit from txq,
		 * but leave the last descriptor behind as the holding
		 * descriptor for hw.
		 */
		lastbf->bf_stale = true;
		INIT_LIST_HEAD(&bf_head);
		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
					  &txq->axq_q, lastbf->list.prev);

		txq->axq_depth--;
		txok = !(ts.ts_status & ATH9K_TXERR_MASK);
		txq->axq_tx_inprogress = false;
		if (bf_held)
			list_del(&bf_held->list);
		spin_unlock_bh(&txq->axq_lock);

		if (bf_held)
			ath_tx_return_buffer(sc, bf_held);

		if (!bf_isampdu(bf)) {
			/*
			 * This frame was sent out as a single frame.
			 * Use the hardware retry status for this frame.
			 */
			if (ts.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_state.bf_type |= BUF_XRETRY;
			ath_tx_rc_status(bf, &ts, txok ? 0 : 1, txok, true);
		}

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);

		ath_wake_mac80211_queue(sc, txq);

		spin_lock_bh(&txq->axq_lock);
		if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
}

static void ath_tx_complete_poll_work(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc,
					    tx_complete_work.work);
	struct ath_txq *txq;
	int i;
	bool needreset = false;

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			spin_lock_bh(&txq->axq_lock);
			if (txq->axq_depth) {
				if (txq->axq_tx_inprogress) {
					needreset = true;
					spin_unlock_bh(&txq->axq_lock);
					break;
				} else {
					txq->axq_tx_inprogress = true;
				}
			}
			spin_unlock_bh(&txq->axq_lock);
		}

	if (needreset) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
			  "tx hung, resetting the chip\n");
		ath9k_ps_wakeup(sc);
		ath_reset(sc, true);
		ath9k_ps_restore(sc);
	}

	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
				     msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
}

void ath_tx_tasklet(struct ath_softc *sc)
{
	int i;
	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);

	ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
			ath_tx_processq(sc, &sc->tx.txq[i]);
	}
}
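/*
 * AR9003-family (EDMA) hardware reports TX completions through a single
 * status ring rather than through per-descriptor status words, so the
 * handler below pops ath_tx_status entries in order and matches each one
 * against the head of the FIFO of the queue named by txs.qid.
 */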
void ath_tx_edma_tasklet(struct ath_softc *sc)
{
	struct ath_tx_status txs;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct ath_txq *txq;
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	int status;
	int txok;

	for (;;) {
		status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
		if (status == -EINPROGRESS)
			break;
		if (status == -EIO) {
			ath_print(common, ATH_DBG_XMIT,
				  "Error processing tx status\n");
			break;
		}

		/* Skip beacon completions */
		if (txs.qid == sc->beacon.beaconq)
			continue;

		txq = &sc->tx.txq[txs.qid];

		spin_lock_bh(&txq->axq_lock);
		if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
			spin_unlock_bh(&txq->axq_lock);
			return;
		}

		bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
				      struct ath_buf, list);
		lastbf = bf->bf_lastbf;

		INIT_LIST_HEAD(&bf_head);
		list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
				  &lastbf->list);
		INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		txq->axq_depth--;
		txq->axq_tx_inprogress = false;
		spin_unlock_bh(&txq->axq_lock);

		txok = !(txs.ts_status & ATH9K_TXERR_MASK);

		if (!bf_isampdu(bf)) {
			if (txs.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_state.bf_type |= BUF_XRETRY;
			ath_tx_rc_status(bf, &txs, txok ? 0 : 1, txok, true);
		}

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs, txok);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head,
					    &txs, txok, 0);

		ath_wake_mac80211_queue(sc, txq);

		spin_lock_bh(&txq->axq_lock);
		if (!list_empty(&txq->txq_fifo_pending)) {
			INIT_LIST_HEAD(&bf_head);
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head, &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			ath_tx_txqaddbuf(sc, txq, &bf_head);
		} else if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
}

/*****************/
/* Init, Cleanup */
/*****************/

static int ath_txstatus_setup(struct ath_softc *sc, int size)
{
	struct ath_descdma *dd = &sc->txsdma;
	u8 txs_len = sc->sc_ah->caps.txs_len;

	dd->dd_desc_len = size * txs_len;
	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
					 &dd->dd_desc_paddr, GFP_KERNEL);
	if (!dd->dd_desc)
		return -ENOMEM;

	return 0;
}

static int ath_tx_edma_init(struct ath_softc *sc)
{
	int err;

	err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
	if (!err)
		ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
					  sc->txsdma.dd_desc_paddr,
					  ATH_TXSTATUS_RING_SIZE);

	return err;
}

static void ath_tx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_descdma *dd = &sc->txsdma;

	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);
}

int ath_tx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int error = 0;

	spin_lock_init(&sc->tx.txbuflock);

	error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
				  "tx", nbufs, 1, 1);
	if (error != 0) {
		ath_print(common, ATH_DBG_FATAL,
			  "Failed to allocate tx descriptors: %d\n", error);
		goto err;
	}

	error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
				  "beacon", ATH_BCBUF, 1, 1);
	if (error != 0) {
		ath_print(common, ATH_DBG_FATAL,
			  "Failed to allocate beacon descriptors: %d\n", error);
		goto err;
	}

	INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		error = ath_tx_edma_init(sc);
		if (error)
			goto err;
	}

err:
	if (error != 0)
		ath_tx_cleanup(sc);

	return error;
}

void ath_tx_cleanup(struct ath_softc *sc)
{
	if (sc->beacon.bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);

	if (sc->tx.txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_tx_edma_cleanup(sc);
}
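/*
 * ath_tx_node_init() below wires each of the eight 802.11 TIDs to one of
 * the four WMM access categories via TID_TO_WME_AC().  The standard WMM
 * up-mapping applies, so several TIDs share a single ath_atx_ac (and
 * thus a single hardware queue):
 *
 *	TID 1, 2	-> WME_AC_BK (background)
 *	TID 0, 3	-> WME_AC_BE (best effort)
 *	TID 4, 5	-> WME_AC_VI (video)
 *	TID 6, 7	-> WME_AC_VO (voice)
 */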
void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID;
	     tidno++, tid++) {
		tid->an = an;
		tid->tidno = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size = WME_MAX_BA;
		tid->baw_head = tid->baw_tail = 0;
		tid->sched = false;
		tid->paused = false;
		tid->state &= ~AGGR_CLEANUP;
		INIT_LIST_HEAD(&tid->buf_q);
		acno = TID_TO_WME_AC(tidno);
		tid->ac = &an->ac[acno];
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_ADDBA_PROGRESS;
	}

	for (acno = 0, ac = &an->ac[acno];
	     acno < WME_NUM_AC; acno++, ac++) {
		ac->sched = false;
		ac->qnum = sc->tx.hwq_map[acno];
		INIT_LIST_HEAD(&ac->tid_q);
	}
}

void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;
	struct ath_txq *txq;
	int i, tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {
		i = tid->ac->qnum;

		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		txq = &sc->tx.txq[i];
		ac = tid->ac;

		spin_lock_bh(&txq->axq_lock);

		if (tid->sched) {
			list_del(&tid->list);
			tid->sched = false;
		}

		if (ac->sched) {
			list_del(&ac->list);
			tid->ac->sched = false;
		}

		ath_tid_drain(sc, txq, tid);
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_CLEANUP;

		spin_unlock_bh(&txq->axq_lock);
	}
}