1 /* 2 * Copyright (c) 2008-2011 Atheros Communications Inc. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for any 5 * purpose with or without fee is hereby granted, provided that the above 6 * copyright notice and this permission notice appear in all copies. 7 * 8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 15 */ 16 17 #include <linux/dma-mapping.h> 18 #include "ath9k.h" 19 #include "ar9003_mac.h" 20 21 #define BITS_PER_BYTE 8 22 #define OFDM_PLCP_BITS 22 23 #define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1) 24 #define L_STF 8 25 #define L_LTF 8 26 #define L_SIG 4 27 #define HT_SIG 8 28 #define HT_STF 4 29 #define HT_LTF(_ns) (4 * (_ns)) 30 #define SYMBOL_TIME(_ns) ((_ns) << 2) /* ns * 4 us */ 31 #define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) /* ns * 3.6 us */ 32 #define TIME_SYMBOLS(t) ((t) >> 2) 33 #define TIME_SYMBOLS_HALFGI(t) (((t) * 5 - 4) / 18) 34 #define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2) 35 #define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18) 36 37 38 static u16 bits_per_symbol[][2] = { 39 /* 20MHz 40MHz */ 40 { 26, 54 }, /* 0: BPSK */ 41 { 52, 108 }, /* 1: QPSK 1/2 */ 42 { 78, 162 }, /* 2: QPSK 3/4 */ 43 { 104, 216 }, /* 3: 16-QAM 1/2 */ 44 { 156, 324 }, /* 4: 16-QAM 3/4 */ 45 { 208, 432 }, /* 5: 64-QAM 2/3 */ 46 { 234, 486 }, /* 6: 64-QAM 3/4 */ 47 { 260, 540 }, /* 7: 64-QAM 5/6 */ 48 }; 49 50 #define IS_HT_RATE(_rate) ((_rate) & 0x80) 51 52 static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq, 53 struct ath_atx_tid *tid, struct sk_buff *skb); 54 static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, 55 int tx_flags, struct ath_txq *txq); 56 static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, 57 struct ath_txq *txq, struct list_head *bf_q, 58 struct ath_tx_status *ts, int txok); 59 static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, 60 struct list_head *head, bool internal); 61 static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, 62 struct ath_tx_status *ts, int nframes, int nbad, 63 int txok); 64 static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid, 65 int seqno); 66 static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, 67 struct ath_txq *txq, 68 struct ath_atx_tid *tid, 69 struct sk_buff *skb); 70 71 enum { 72 MCS_HT20, 73 MCS_HT20_SGI, 74 MCS_HT40, 75 MCS_HT40_SGI, 76 }; 77 78 /*********************/ 79 /* Aggregation logic */ 80 /*********************/ 81 82 void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq) 83 __acquires(&txq->axq_lock) 84 { 85 spin_lock_bh(&txq->axq_lock); 86 } 87 88 void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq) 89 __releases(&txq->axq_lock) 90 { 91 spin_unlock_bh(&txq->axq_lock); 92 } 93 94 void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq) 95 __releases(&txq->axq_lock) 96 { 97 struct sk_buff_head q; 98 struct sk_buff *skb; 99 100 __skb_queue_head_init(&q); 101 skb_queue_splice_init(&txq->complete_q, &q); 102 spin_unlock_bh(&txq->axq_lock); 103 104 while ((skb = __skb_dequeue(&q))) 105 ieee80211_tx_status(sc->hw, skb); 106 } 107 108 static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid) 109 { 110 struct ath_atx_ac *ac = tid->ac; 111 112 if (tid->paused) 113 return; 114 115 if (tid->sched) 116 return; 117 118 tid->sched = true; 119 list_add_tail(&tid->list, &ac->tid_q); 120 121 if (ac->sched) 122 return; 123 124 ac->sched = true; 125 list_add_tail(&ac->list, &txq->axq_acq); 126 } 127 128 static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid) 129 { 130 struct ath_txq *txq = tid->ac->txq; 131 132 WARN_ON(!tid->paused); 133 134 ath_txq_lock(sc, txq); 135 tid->paused = false; 136 137 if (skb_queue_empty(&tid->buf_q)) 138 goto unlock; 139 140 ath_tx_queue_tid(txq, tid); 141 ath_txq_schedule(sc, txq); 142 unlock: 143 ath_txq_unlock_complete(sc, txq); 144 } 145 146 static struct ath_frame_info *get_frame_info(struct sk_buff *skb) 147 { 148 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 149 BUILD_BUG_ON(sizeof(struct ath_frame_info) > 150 sizeof(tx_info->rate_driver_data)); 151 return (struct ath_frame_info *) &tx_info->rate_driver_data[0]; 152 } 153 154 static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno) 155 { 156 ieee80211_send_bar(tid->an->vif, tid->an->sta->addr, tid->tidno, 157 seqno << IEEE80211_SEQ_SEQ_SHIFT); 158 } 159 160 static void ath_set_rates(struct ieee80211_vif *vif, struct ieee80211_sta *sta, 161 struct ath_buf *bf) 162 { 163 ieee80211_get_tx_rates(vif, sta, bf->bf_mpdu, bf->rates, 164 ARRAY_SIZE(bf->rates)); 165 } 166 167 static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) 168 { 169 struct ath_txq *txq = tid->ac->txq; 170 struct sk_buff *skb; 171 struct ath_buf *bf; 172 struct list_head bf_head; 173 struct ath_tx_status ts; 174 struct ath_frame_info *fi; 175 bool sendbar = false; 176 177 INIT_LIST_HEAD(&bf_head); 178 179 memset(&ts, 0, sizeof(ts)); 180 181 while ((skb = __skb_dequeue(&tid->buf_q))) { 182 fi = get_frame_info(skb); 183 bf = fi->bf; 184 185 if (!bf) { 186 bf = ath_tx_setup_buffer(sc, txq, tid, skb); 187 if (!bf) { 188 ieee80211_free_txskb(sc->hw, skb); 189 continue; 190 } 191 } 192 193 if (fi->retries) { 194 list_add_tail(&bf->list, &bf_head); 195 ath_tx_update_baw(sc, tid, bf->bf_state.seqno); 196 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0); 197 sendbar = true; 198 } else { 199 ath_set_rates(tid->an->vif, tid->an->sta, bf); 200 ath_tx_send_normal(sc, txq, NULL, skb); 201 } 202 } 203 204 if (tid->baw_head == tid->baw_tail) { 205 tid->state &= ~AGGR_ADDBA_COMPLETE; 206 tid->state &= ~AGGR_CLEANUP; 207 } 208 209 if (sendbar) { 210 ath_txq_unlock(sc, txq); 211 ath_send_bar(tid, tid->seq_start); 212 ath_txq_lock(sc, txq); 213 } 214 } 215 216 static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid, 217 int seqno) 218 { 219 int index, cindex; 220 221 index = ATH_BA_INDEX(tid->seq_start, seqno); 222 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 223 224 __clear_bit(cindex, tid->tx_buf); 225 226 while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) { 227 INCR(tid->seq_start, IEEE80211_SEQ_MAX); 228 INCR(tid->baw_head, ATH_TID_MAX_BUFS); 229 if (tid->bar_index >= 0) 230 tid->bar_index--; 231 } 232 } 233 234 static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid, 235 u16 seqno) 236 { 237 int index, cindex; 238 239 index = ATH_BA_INDEX(tid->seq_start, seqno); 240 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 241 __set_bit(cindex, tid->tx_buf); 242 243 if (index >= ((tid->baw_tail - tid->baw_head) & 244 (ATH_TID_MAX_BUFS - 1))) { 245 tid->baw_tail = cindex; 246 INCR(tid->baw_tail, ATH_TID_MAX_BUFS); 247 } 248 } 249 250 /* 251 * TODO: For frame(s) that are in the retry state, we will reuse the 252 * sequence number(s) without setting the retry bit. The 253 * alternative is to give up on these and BAR the receiver's window 254 * forward. 255 */ 256 static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq, 257 struct ath_atx_tid *tid) 258 259 { 260 struct sk_buff *skb; 261 struct ath_buf *bf; 262 struct list_head bf_head; 263 struct ath_tx_status ts; 264 struct ath_frame_info *fi; 265 266 memset(&ts, 0, sizeof(ts)); 267 INIT_LIST_HEAD(&bf_head); 268 269 while ((skb = __skb_dequeue(&tid->buf_q))) { 270 fi = get_frame_info(skb); 271 bf = fi->bf; 272 273 if (!bf) { 274 ath_tx_complete(sc, skb, ATH_TX_ERROR, txq); 275 continue; 276 } 277 278 list_add_tail(&bf->list, &bf_head); 279 280 if (fi->retries) 281 ath_tx_update_baw(sc, tid, bf->bf_state.seqno); 282 283 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0); 284 } 285 286 tid->seq_next = tid->seq_start; 287 tid->baw_tail = tid->baw_head; 288 tid->bar_index = -1; 289 } 290 291 static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq, 292 struct sk_buff *skb, int count) 293 { 294 struct ath_frame_info *fi = get_frame_info(skb); 295 struct ath_buf *bf = fi->bf; 296 struct ieee80211_hdr *hdr; 297 int prev = fi->retries; 298 299 TX_STAT_INC(txq->axq_qnum, a_retries); 300 fi->retries += count; 301 302 if (prev > 0) 303 return; 304 305 hdr = (struct ieee80211_hdr *)skb->data; 306 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY); 307 dma_sync_single_for_device(sc->dev, bf->bf_buf_addr, 308 sizeof(*hdr), DMA_TO_DEVICE); 309 } 310 311 static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc) 312 { 313 struct ath_buf *bf = NULL; 314 315 spin_lock_bh(&sc->tx.txbuflock); 316 317 if (unlikely(list_empty(&sc->tx.txbuf))) { 318 spin_unlock_bh(&sc->tx.txbuflock); 319 return NULL; 320 } 321 322 bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list); 323 list_del(&bf->list); 324 325 spin_unlock_bh(&sc->tx.txbuflock); 326 327 return bf; 328 } 329 330 static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf) 331 { 332 spin_lock_bh(&sc->tx.txbuflock); 333 list_add_tail(&bf->list, &sc->tx.txbuf); 334 spin_unlock_bh(&sc->tx.txbuflock); 335 } 336 337 static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf) 338 { 339 struct ath_buf *tbf; 340 341 tbf = ath_tx_get_buffer(sc); 342 if (WARN_ON(!tbf)) 343 return NULL; 344 345 ATH_TXBUF_RESET(tbf); 346 347 tbf->bf_mpdu = bf->bf_mpdu; 348 tbf->bf_buf_addr = bf->bf_buf_addr; 349 memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len); 350 tbf->bf_state = bf->bf_state; 351 352 return tbf; 353 } 354 355 static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf, 356 struct ath_tx_status *ts, int txok, 357 int *nframes, int *nbad) 358 { 359 struct ath_frame_info *fi; 360 u16 seq_st = 0; 361 u32 ba[WME_BA_BMP_SIZE >> 5]; 362 int ba_index; 363 int isaggr = 0; 364 365 *nbad = 0; 366 *nframes = 0; 367 368 isaggr = bf_isaggr(bf); 369 if (isaggr) { 370 seq_st = ts->ts_seqnum; 371 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3); 372 } 373 374 while (bf) { 375 fi = get_frame_info(bf->bf_mpdu); 376 ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno); 377 378 (*nframes)++; 379 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index))) 380 (*nbad)++; 381 382 bf = bf->bf_next; 383 } 384 } 385 386 387 static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, 388 struct ath_buf *bf, struct list_head *bf_q, 389 struct ath_tx_status *ts, int txok) 390 { 391 struct ath_node *an = NULL; 392 struct sk_buff *skb; 393 struct ieee80211_sta *sta; 394 struct ieee80211_hw *hw = sc->hw; 395 struct ieee80211_hdr *hdr; 396 struct ieee80211_tx_info *tx_info; 397 struct ath_atx_tid *tid = NULL; 398 struct ath_buf *bf_next, *bf_last = bf->bf_lastbf; 399 struct list_head bf_head; 400 struct sk_buff_head bf_pending; 401 u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first; 402 u32 ba[WME_BA_BMP_SIZE >> 5]; 403 int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0; 404 bool rc_update = true, isba; 405 struct ieee80211_tx_rate rates[4]; 406 struct ath_frame_info *fi; 407 int nframes; 408 u8 tidno; 409 bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH); 410 int i, retries; 411 int bar_index = -1; 412 413 skb = bf->bf_mpdu; 414 hdr = (struct ieee80211_hdr *)skb->data; 415 416 tx_info = IEEE80211_SKB_CB(skb); 417 418 memcpy(rates, bf->rates, sizeof(rates)); 419 420 retries = ts->ts_longretry + 1; 421 for (i = 0; i < ts->ts_rateindex; i++) 422 retries += rates[i].count; 423 424 rcu_read_lock(); 425 426 sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2); 427 if (!sta) { 428 rcu_read_unlock(); 429 430 INIT_LIST_HEAD(&bf_head); 431 while (bf) { 432 bf_next = bf->bf_next; 433 434 if (!bf->bf_stale || bf_next != NULL) 435 list_move_tail(&bf->list, &bf_head); 436 437 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0); 438 439 bf = bf_next; 440 } 441 return; 442 } 443 444 an = (struct ath_node *)sta->drv_priv; 445 tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK; 446 tid = ATH_AN_2_TID(an, tidno); 447 seq_first = tid->seq_start; 448 isba = ts->ts_flags & ATH9K_TX_BA; 449 450 /* 451 * The hardware occasionally sends a tx status for the wrong TID. 452 * In this case, the BA status cannot be considered valid and all 453 * subframes need to be retransmitted 454 * 455 * Only BlockAcks have a TID and therefore normal Acks cannot be 456 * checked 457 */ 458 if (isba && tidno != ts->tid) 459 txok = false; 460 461 isaggr = bf_isaggr(bf); 462 memset(ba, 0, WME_BA_BMP_SIZE >> 3); 463 464 if (isaggr && txok) { 465 if (ts->ts_flags & ATH9K_TX_BA) { 466 seq_st = ts->ts_seqnum; 467 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3); 468 } else { 469 /* 470 * AR5416 can become deaf/mute when BA 471 * issue happens. Chip needs to be reset. 472 * But AP code may have sychronization issues 473 * when perform internal reset in this routine. 474 * Only enable reset in STA mode for now. 475 */ 476 if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION) 477 needreset = 1; 478 } 479 } 480 481 __skb_queue_head_init(&bf_pending); 482 483 ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad); 484 while (bf) { 485 u16 seqno = bf->bf_state.seqno; 486 487 txfail = txpending = sendbar = 0; 488 bf_next = bf->bf_next; 489 490 skb = bf->bf_mpdu; 491 tx_info = IEEE80211_SKB_CB(skb); 492 fi = get_frame_info(skb); 493 494 if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) { 495 /* transmit completion, subframe is 496 * acked by block ack */ 497 acked_cnt++; 498 } else if (!isaggr && txok) { 499 /* transmit completion */ 500 acked_cnt++; 501 } else if (tid->state & AGGR_CLEANUP) { 502 /* 503 * cleanup in progress, just fail 504 * the un-acked sub-frames 505 */ 506 txfail = 1; 507 } else if (flush) { 508 txpending = 1; 509 } else if (fi->retries < ATH_MAX_SW_RETRIES) { 510 if (txok || !an->sleeping) 511 ath_tx_set_retry(sc, txq, bf->bf_mpdu, 512 retries); 513 514 txpending = 1; 515 } else { 516 txfail = 1; 517 txfail_cnt++; 518 bar_index = max_t(int, bar_index, 519 ATH_BA_INDEX(seq_first, seqno)); 520 } 521 522 /* 523 * Make sure the last desc is reclaimed if it 524 * not a holding desc. 525 */ 526 INIT_LIST_HEAD(&bf_head); 527 if (bf_next != NULL || !bf_last->bf_stale) 528 list_move_tail(&bf->list, &bf_head); 529 530 if (!txpending || (tid->state & AGGR_CLEANUP)) { 531 /* 532 * complete the acked-ones/xretried ones; update 533 * block-ack window 534 */ 535 ath_tx_update_baw(sc, tid, seqno); 536 537 if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) { 538 memcpy(tx_info->control.rates, rates, sizeof(rates)); 539 ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok); 540 rc_update = false; 541 } 542 543 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 544 !txfail); 545 } else { 546 /* retry the un-acked ones */ 547 if (bf->bf_next == NULL && bf_last->bf_stale) { 548 struct ath_buf *tbf; 549 550 tbf = ath_clone_txbuf(sc, bf_last); 551 /* 552 * Update tx baw and complete the 553 * frame with failed status if we 554 * run out of tx buf. 555 */ 556 if (!tbf) { 557 ath_tx_update_baw(sc, tid, seqno); 558 559 ath_tx_complete_buf(sc, bf, txq, 560 &bf_head, ts, 0); 561 bar_index = max_t(int, bar_index, 562 ATH_BA_INDEX(seq_first, seqno)); 563 break; 564 } 565 566 fi->bf = tbf; 567 } 568 569 /* 570 * Put this buffer to the temporary pending 571 * queue to retain ordering 572 */ 573 __skb_queue_tail(&bf_pending, skb); 574 } 575 576 bf = bf_next; 577 } 578 579 /* prepend un-acked frames to the beginning of the pending frame queue */ 580 if (!skb_queue_empty(&bf_pending)) { 581 if (an->sleeping) 582 ieee80211_sta_set_buffered(sta, tid->tidno, true); 583 584 skb_queue_splice(&bf_pending, &tid->buf_q); 585 if (!an->sleeping) { 586 ath_tx_queue_tid(txq, tid); 587 588 if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY)) 589 tid->ac->clear_ps_filter = true; 590 } 591 } 592 593 if (bar_index >= 0) { 594 u16 bar_seq = ATH_BA_INDEX2SEQ(seq_first, bar_index); 595 596 if (BAW_WITHIN(tid->seq_start, tid->baw_size, bar_seq)) 597 tid->bar_index = ATH_BA_INDEX(tid->seq_start, bar_seq); 598 599 ath_txq_unlock(sc, txq); 600 ath_send_bar(tid, ATH_BA_INDEX2SEQ(seq_first, bar_index + 1)); 601 ath_txq_lock(sc, txq); 602 } 603 604 if (tid->state & AGGR_CLEANUP) 605 ath_tx_flush_tid(sc, tid); 606 607 rcu_read_unlock(); 608 609 if (needreset) 610 ath9k_queue_reset(sc, RESET_TYPE_TX_ERROR); 611 } 612 613 static bool bf_is_ampdu_not_probing(struct ath_buf *bf) 614 { 615 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu); 616 return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE); 617 } 618 619 static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq, 620 struct ath_tx_status *ts, struct ath_buf *bf, 621 struct list_head *bf_head) 622 { 623 bool txok, flush; 624 625 txok = !(ts->ts_status & ATH9K_TXERR_MASK); 626 flush = !!(ts->ts_status & ATH9K_TX_FLUSH); 627 txq->axq_tx_inprogress = false; 628 629 txq->axq_depth--; 630 if (bf_is_ampdu_not_probing(bf)) 631 txq->axq_ampdu_depth--; 632 633 if (!bf_isampdu(bf)) { 634 if (!flush) 635 ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok); 636 ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok); 637 } else 638 ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok); 639 640 if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && !flush) 641 ath_txq_schedule(sc, txq); 642 } 643 644 static bool ath_lookup_legacy(struct ath_buf *bf) 645 { 646 struct sk_buff *skb; 647 struct ieee80211_tx_info *tx_info; 648 struct ieee80211_tx_rate *rates; 649 int i; 650 651 skb = bf->bf_mpdu; 652 tx_info = IEEE80211_SKB_CB(skb); 653 rates = tx_info->control.rates; 654 655 for (i = 0; i < 4; i++) { 656 if (!rates[i].count || rates[i].idx < 0) 657 break; 658 659 if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) 660 return true; 661 } 662 663 return false; 664 } 665 666 static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf, 667 struct ath_atx_tid *tid) 668 { 669 struct sk_buff *skb; 670 struct ieee80211_tx_info *tx_info; 671 struct ieee80211_tx_rate *rates; 672 u32 max_4ms_framelen, frmlen; 673 u16 aggr_limit, bt_aggr_limit, legacy = 0; 674 int q = tid->ac->txq->mac80211_qnum; 675 int i; 676 677 skb = bf->bf_mpdu; 678 tx_info = IEEE80211_SKB_CB(skb); 679 rates = tx_info->control.rates; 680 681 /* 682 * Find the lowest frame length among the rate series that will have a 683 * 4ms (or TXOP limited) transmit duration. 684 */ 685 max_4ms_framelen = ATH_AMPDU_LIMIT_MAX; 686 687 for (i = 0; i < 4; i++) { 688 int modeidx; 689 690 if (!rates[i].count) 691 continue; 692 693 if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) { 694 legacy = 1; 695 break; 696 } 697 698 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) 699 modeidx = MCS_HT40; 700 else 701 modeidx = MCS_HT20; 702 703 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI) 704 modeidx++; 705 706 frmlen = sc->tx.max_aggr_framelen[q][modeidx][rates[i].idx]; 707 max_4ms_framelen = min(max_4ms_framelen, frmlen); 708 } 709 710 /* 711 * limit aggregate size by the minimum rate if rate selected is 712 * not a probe rate, if rate selected is a probe rate then 713 * avoid aggregation of this packet. 714 */ 715 if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy) 716 return 0; 717 718 aggr_limit = min(max_4ms_framelen, (u32)ATH_AMPDU_LIMIT_MAX); 719 720 /* 721 * Override the default aggregation limit for BTCOEX. 722 */ 723 bt_aggr_limit = ath9k_btcoex_aggr_limit(sc, max_4ms_framelen); 724 if (bt_aggr_limit) 725 aggr_limit = bt_aggr_limit; 726 727 /* 728 * h/w can accept aggregates up to 16 bit lengths (65535). 729 * The IE, however can hold up to 65536, which shows up here 730 * as zero. Ignore 65536 since we are constrained by hw. 731 */ 732 if (tid->an->maxampdu) 733 aggr_limit = min(aggr_limit, tid->an->maxampdu); 734 735 return aggr_limit; 736 } 737 738 /* 739 * Returns the number of delimiters to be added to 740 * meet the minimum required mpdudensity. 741 */ 742 static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid, 743 struct ath_buf *bf, u16 frmlen, 744 bool first_subfrm) 745 { 746 #define FIRST_DESC_NDELIMS 60 747 u32 nsymbits, nsymbols; 748 u16 minlen; 749 u8 flags, rix; 750 int width, streams, half_gi, ndelim, mindelim; 751 struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu); 752 753 /* Select standard number of delimiters based on frame length alone */ 754 ndelim = ATH_AGGR_GET_NDELIM(frmlen); 755 756 /* 757 * If encryption enabled, hardware requires some more padding between 758 * subframes. 759 * TODO - this could be improved to be dependent on the rate. 760 * The hardware can keep up at lower rates, but not higher rates 761 */ 762 if ((fi->keyix != ATH9K_TXKEYIX_INVALID) && 763 !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) 764 ndelim += ATH_AGGR_ENCRYPTDELIM; 765 766 /* 767 * Add delimiter when using RTS/CTS with aggregation 768 * and non enterprise AR9003 card 769 */ 770 if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) && 771 (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE)) 772 ndelim = max(ndelim, FIRST_DESC_NDELIMS); 773 774 /* 775 * Convert desired mpdu density from microeconds to bytes based 776 * on highest rate in rate series (i.e. first rate) to determine 777 * required minimum length for subframe. Take into account 778 * whether high rate is 20 or 40Mhz and half or full GI. 779 * 780 * If there is no mpdu density restriction, no further calculation 781 * is needed. 782 */ 783 784 if (tid->an->mpdudensity == 0) 785 return ndelim; 786 787 rix = bf->rates[0].idx; 788 flags = bf->rates[0].flags; 789 width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0; 790 half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0; 791 792 if (half_gi) 793 nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity); 794 else 795 nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity); 796 797 if (nsymbols == 0) 798 nsymbols = 1; 799 800 streams = HT_RC_2_STREAMS(rix); 801 nsymbits = bits_per_symbol[rix % 8][width] * streams; 802 minlen = (nsymbols * nsymbits) / BITS_PER_BYTE; 803 804 if (frmlen < minlen) { 805 mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ; 806 ndelim = max(mindelim, ndelim); 807 } 808 809 return ndelim; 810 } 811 812 static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, 813 struct ath_txq *txq, 814 struct ath_atx_tid *tid, 815 struct list_head *bf_q, 816 int *aggr_len) 817 { 818 #define PADBYTES(_len) ((4 - ((_len) % 4)) % 4) 819 struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL; 820 int rl = 0, nframes = 0, ndelim, prev_al = 0; 821 u16 aggr_limit = 0, al = 0, bpad = 0, 822 al_delta, h_baw = tid->baw_size / 2; 823 enum ATH_AGGR_STATUS status = ATH_AGGR_DONE; 824 struct ieee80211_tx_info *tx_info; 825 struct ath_frame_info *fi; 826 struct sk_buff *skb; 827 u16 seqno; 828 829 do { 830 skb = skb_peek(&tid->buf_q); 831 fi = get_frame_info(skb); 832 bf = fi->bf; 833 if (!fi->bf) 834 bf = ath_tx_setup_buffer(sc, txq, tid, skb); 835 836 if (!bf) { 837 __skb_unlink(skb, &tid->buf_q); 838 ieee80211_free_txskb(sc->hw, skb); 839 continue; 840 } 841 842 bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR; 843 seqno = bf->bf_state.seqno; 844 845 /* do not step over block-ack window */ 846 if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) { 847 status = ATH_AGGR_BAW_CLOSED; 848 break; 849 } 850 851 if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) { 852 struct ath_tx_status ts = {}; 853 struct list_head bf_head; 854 855 INIT_LIST_HEAD(&bf_head); 856 list_add(&bf->list, &bf_head); 857 __skb_unlink(skb, &tid->buf_q); 858 ath_tx_update_baw(sc, tid, seqno); 859 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0); 860 continue; 861 } 862 863 if (!bf_first) 864 bf_first = bf; 865 866 if (!rl) { 867 ath_set_rates(tid->an->vif, tid->an->sta, bf); 868 aggr_limit = ath_lookup_rate(sc, bf, tid); 869 rl = 1; 870 } 871 872 /* do not exceed aggregation limit */ 873 al_delta = ATH_AGGR_DELIM_SZ + fi->framelen; 874 875 if (nframes && 876 ((aggr_limit < (al + bpad + al_delta + prev_al)) || 877 ath_lookup_legacy(bf))) { 878 status = ATH_AGGR_LIMITED; 879 break; 880 } 881 882 tx_info = IEEE80211_SKB_CB(bf->bf_mpdu); 883 if (nframes && (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) 884 break; 885 886 /* do not exceed subframe limit */ 887 if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) { 888 status = ATH_AGGR_LIMITED; 889 break; 890 } 891 892 /* add padding for previous frame to aggregation length */ 893 al += bpad + al_delta; 894 895 /* 896 * Get the delimiters needed to meet the MPDU 897 * density for this node. 898 */ 899 ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen, 900 !nframes); 901 bpad = PADBYTES(al_delta) + (ndelim << 2); 902 903 nframes++; 904 bf->bf_next = NULL; 905 906 /* link buffers of this frame to the aggregate */ 907 if (!fi->retries) 908 ath_tx_addto_baw(sc, tid, seqno); 909 bf->bf_state.ndelim = ndelim; 910 911 __skb_unlink(skb, &tid->buf_q); 912 list_add_tail(&bf->list, bf_q); 913 if (bf_prev) 914 bf_prev->bf_next = bf; 915 916 bf_prev = bf; 917 918 } while (!skb_queue_empty(&tid->buf_q)); 919 920 *aggr_len = al; 921 922 return status; 923 #undef PADBYTES 924 } 925 926 /* 927 * rix - rate index 928 * pktlen - total bytes (delims + data + fcs + pads + pad delims) 929 * width - 0 for 20 MHz, 1 for 40 MHz 930 * half_gi - to use 4us v/s 3.6 us for symbol time 931 */ 932 static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen, 933 int width, int half_gi, bool shortPreamble) 934 { 935 u32 nbits, nsymbits, duration, nsymbols; 936 int streams; 937 938 /* find number of symbols: PLCP + data */ 939 streams = HT_RC_2_STREAMS(rix); 940 nbits = (pktlen << 3) + OFDM_PLCP_BITS; 941 nsymbits = bits_per_symbol[rix % 8][width] * streams; 942 nsymbols = (nbits + nsymbits - 1) / nsymbits; 943 944 if (!half_gi) 945 duration = SYMBOL_TIME(nsymbols); 946 else 947 duration = SYMBOL_TIME_HALFGI(nsymbols); 948 949 /* addup duration for legacy/ht training and signal fields */ 950 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams); 951 952 return duration; 953 } 954 955 static int ath_max_framelen(int usec, int mcs, bool ht40, bool sgi) 956 { 957 int streams = HT_RC_2_STREAMS(mcs); 958 int symbols, bits; 959 int bytes = 0; 960 961 symbols = sgi ? TIME_SYMBOLS_HALFGI(usec) : TIME_SYMBOLS(usec); 962 bits = symbols * bits_per_symbol[mcs % 8][ht40] * streams; 963 bits -= OFDM_PLCP_BITS; 964 bytes = bits / 8; 965 bytes -= L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams); 966 if (bytes > 65532) 967 bytes = 65532; 968 969 return bytes; 970 } 971 972 void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop) 973 { 974 u16 *cur_ht20, *cur_ht20_sgi, *cur_ht40, *cur_ht40_sgi; 975 int mcs; 976 977 /* 4ms is the default (and maximum) duration */ 978 if (!txop || txop > 4096) 979 txop = 4096; 980 981 cur_ht20 = sc->tx.max_aggr_framelen[queue][MCS_HT20]; 982 cur_ht20_sgi = sc->tx.max_aggr_framelen[queue][MCS_HT20_SGI]; 983 cur_ht40 = sc->tx.max_aggr_framelen[queue][MCS_HT40]; 984 cur_ht40_sgi = sc->tx.max_aggr_framelen[queue][MCS_HT40_SGI]; 985 for (mcs = 0; mcs < 32; mcs++) { 986 cur_ht20[mcs] = ath_max_framelen(txop, mcs, false, false); 987 cur_ht20_sgi[mcs] = ath_max_framelen(txop, mcs, false, true); 988 cur_ht40[mcs] = ath_max_framelen(txop, mcs, true, false); 989 cur_ht40_sgi[mcs] = ath_max_framelen(txop, mcs, true, true); 990 } 991 } 992 993 static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, 994 struct ath_tx_info *info, int len) 995 { 996 struct ath_hw *ah = sc->sc_ah; 997 struct sk_buff *skb; 998 struct ieee80211_tx_info *tx_info; 999 struct ieee80211_tx_rate *rates; 1000 const struct ieee80211_rate *rate; 1001 struct ieee80211_hdr *hdr; 1002 struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu); 1003 int i; 1004 u8 rix = 0; 1005 1006 skb = bf->bf_mpdu; 1007 tx_info = IEEE80211_SKB_CB(skb); 1008 rates = bf->rates; 1009 hdr = (struct ieee80211_hdr *)skb->data; 1010 1011 /* set dur_update_en for l-sig computation except for PS-Poll frames */ 1012 info->dur_update = !ieee80211_is_pspoll(hdr->frame_control); 1013 info->rtscts_rate = fi->rtscts_rate; 1014 1015 for (i = 0; i < ARRAY_SIZE(bf->rates); i++) { 1016 bool is_40, is_sgi, is_sp; 1017 int phy; 1018 1019 if (!rates[i].count || (rates[i].idx < 0)) 1020 continue; 1021 1022 rix = rates[i].idx; 1023 info->rates[i].Tries = rates[i].count; 1024 1025 if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) { 1026 info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS; 1027 info->flags |= ATH9K_TXDESC_RTSENA; 1028 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { 1029 info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS; 1030 info->flags |= ATH9K_TXDESC_CTSENA; 1031 } 1032 1033 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) 1034 info->rates[i].RateFlags |= ATH9K_RATESERIES_2040; 1035 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI) 1036 info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI; 1037 1038 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI); 1039 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH); 1040 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE); 1041 1042 if (rates[i].flags & IEEE80211_TX_RC_MCS) { 1043 /* MCS rates */ 1044 info->rates[i].Rate = rix | 0x80; 1045 info->rates[i].ChSel = ath_txchainmask_reduction(sc, 1046 ah->txchainmask, info->rates[i].Rate); 1047 info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len, 1048 is_40, is_sgi, is_sp); 1049 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC)) 1050 info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC; 1051 continue; 1052 } 1053 1054 /* legacy rates */ 1055 rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx]; 1056 if ((tx_info->band == IEEE80211_BAND_2GHZ) && 1057 !(rate->flags & IEEE80211_RATE_ERP_G)) 1058 phy = WLAN_RC_PHY_CCK; 1059 else 1060 phy = WLAN_RC_PHY_OFDM; 1061 1062 info->rates[i].Rate = rate->hw_value; 1063 if (rate->hw_value_short) { 1064 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) 1065 info->rates[i].Rate |= rate->hw_value_short; 1066 } else { 1067 is_sp = false; 1068 } 1069 1070 if (bf->bf_state.bfs_paprd) 1071 info->rates[i].ChSel = ah->txchainmask; 1072 else 1073 info->rates[i].ChSel = ath_txchainmask_reduction(sc, 1074 ah->txchainmask, info->rates[i].Rate); 1075 1076 info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah, 1077 phy, rate->bitrate * 100, len, rix, is_sp); 1078 } 1079 1080 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */ 1081 if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit)) 1082 info->flags &= ~ATH9K_TXDESC_RTSENA; 1083 1084 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */ 1085 if (info->flags & ATH9K_TXDESC_RTSENA) 1086 info->flags &= ~ATH9K_TXDESC_CTSENA; 1087 } 1088 1089 static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb) 1090 { 1091 struct ieee80211_hdr *hdr; 1092 enum ath9k_pkt_type htype; 1093 __le16 fc; 1094 1095 hdr = (struct ieee80211_hdr *)skb->data; 1096 fc = hdr->frame_control; 1097 1098 if (ieee80211_is_beacon(fc)) 1099 htype = ATH9K_PKT_TYPE_BEACON; 1100 else if (ieee80211_is_probe_resp(fc)) 1101 htype = ATH9K_PKT_TYPE_PROBE_RESP; 1102 else if (ieee80211_is_atim(fc)) 1103 htype = ATH9K_PKT_TYPE_ATIM; 1104 else if (ieee80211_is_pspoll(fc)) 1105 htype = ATH9K_PKT_TYPE_PSPOLL; 1106 else 1107 htype = ATH9K_PKT_TYPE_NORMAL; 1108 1109 return htype; 1110 } 1111 1112 static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf, 1113 struct ath_txq *txq, int len) 1114 { 1115 struct ath_hw *ah = sc->sc_ah; 1116 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(bf->bf_mpdu); 1117 struct ath_buf *bf_first = bf; 1118 struct ath_tx_info info; 1119 bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR); 1120 1121 memset(&info, 0, sizeof(info)); 1122 info.is_first = true; 1123 info.is_last = true; 1124 info.txpower = MAX_RATE_POWER; 1125 info.qcu = txq->axq_qnum; 1126 1127 info.flags = ATH9K_TXDESC_INTREQ; 1128 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) 1129 info.flags |= ATH9K_TXDESC_NOACK; 1130 if (tx_info->flags & IEEE80211_TX_CTL_LDPC) 1131 info.flags |= ATH9K_TXDESC_LDPC; 1132 1133 ath_buf_set_rate(sc, bf, &info, len); 1134 1135 if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) 1136 info.flags |= ATH9K_TXDESC_CLRDMASK; 1137 1138 if (bf->bf_state.bfs_paprd) 1139 info.flags |= (u32) bf->bf_state.bfs_paprd << ATH9K_TXDESC_PAPRD_S; 1140 1141 1142 while (bf) { 1143 struct sk_buff *skb = bf->bf_mpdu; 1144 struct ath_frame_info *fi = get_frame_info(skb); 1145 1146 info.type = get_hw_packet_type(skb); 1147 if (bf->bf_next) 1148 info.link = bf->bf_next->bf_daddr; 1149 else 1150 info.link = 0; 1151 1152 info.buf_addr[0] = bf->bf_buf_addr; 1153 info.buf_len[0] = skb->len; 1154 info.pkt_len = fi->framelen; 1155 info.keyix = fi->keyix; 1156 info.keytype = fi->keytype; 1157 1158 if (aggr) { 1159 if (bf == bf_first) 1160 info.aggr = AGGR_BUF_FIRST; 1161 else if (!bf->bf_next) 1162 info.aggr = AGGR_BUF_LAST; 1163 else 1164 info.aggr = AGGR_BUF_MIDDLE; 1165 1166 info.ndelim = bf->bf_state.ndelim; 1167 info.aggr_len = len; 1168 } 1169 1170 ath9k_hw_set_txdesc(ah, bf->bf_desc, &info); 1171 bf = bf->bf_next; 1172 } 1173 } 1174 1175 static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq, 1176 struct ath_atx_tid *tid) 1177 { 1178 struct ath_buf *bf; 1179 enum ATH_AGGR_STATUS status; 1180 struct ieee80211_tx_info *tx_info; 1181 struct list_head bf_q; 1182 int aggr_len; 1183 1184 do { 1185 if (skb_queue_empty(&tid->buf_q)) 1186 return; 1187 1188 INIT_LIST_HEAD(&bf_q); 1189 1190 status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len); 1191 1192 /* 1193 * no frames picked up to be aggregated; 1194 * block-ack window is not open. 1195 */ 1196 if (list_empty(&bf_q)) 1197 break; 1198 1199 bf = list_first_entry(&bf_q, struct ath_buf, list); 1200 bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list); 1201 tx_info = IEEE80211_SKB_CB(bf->bf_mpdu); 1202 1203 if (tid->ac->clear_ps_filter) { 1204 tid->ac->clear_ps_filter = false; 1205 tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; 1206 } else { 1207 tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT; 1208 } 1209 1210 /* if only one frame, send as non-aggregate */ 1211 if (bf == bf->bf_lastbf) { 1212 aggr_len = get_frame_info(bf->bf_mpdu)->framelen; 1213 bf->bf_state.bf_type = BUF_AMPDU; 1214 } else { 1215 TX_STAT_INC(txq->axq_qnum, a_aggr); 1216 } 1217 1218 ath_tx_fill_desc(sc, bf, txq, aggr_len); 1219 ath_tx_txqaddbuf(sc, txq, &bf_q, false); 1220 } while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH && 1221 status != ATH_AGGR_BAW_CLOSED); 1222 } 1223 1224 int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, 1225 u16 tid, u16 *ssn) 1226 { 1227 struct ath_atx_tid *txtid; 1228 struct ath_node *an; 1229 u8 density; 1230 1231 an = (struct ath_node *)sta->drv_priv; 1232 txtid = ATH_AN_2_TID(an, tid); 1233 1234 if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE)) 1235 return -EAGAIN; 1236 1237 /* update ampdu factor/density, they may have changed. This may happen 1238 * in HT IBSS when a beacon with HT-info is received after the station 1239 * has already been added. 1240 */ 1241 if (sta->ht_cap.ht_supported) { 1242 an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR + 1243 sta->ht_cap.ampdu_factor); 1244 density = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density); 1245 an->mpdudensity = density; 1246 } 1247 1248 txtid->state |= AGGR_ADDBA_PROGRESS; 1249 txtid->paused = true; 1250 *ssn = txtid->seq_start = txtid->seq_next; 1251 txtid->bar_index = -1; 1252 1253 memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf)); 1254 txtid->baw_head = txtid->baw_tail = 0; 1255 1256 return 0; 1257 } 1258 1259 void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) 1260 { 1261 struct ath_node *an = (struct ath_node *)sta->drv_priv; 1262 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid); 1263 struct ath_txq *txq = txtid->ac->txq; 1264 1265 if (txtid->state & AGGR_CLEANUP) 1266 return; 1267 1268 if (!(txtid->state & AGGR_ADDBA_COMPLETE)) { 1269 txtid->state &= ~AGGR_ADDBA_PROGRESS; 1270 return; 1271 } 1272 1273 ath_txq_lock(sc, txq); 1274 txtid->paused = true; 1275 1276 /* 1277 * If frames are still being transmitted for this TID, they will be 1278 * cleaned up during tx completion. To prevent race conditions, this 1279 * TID can only be reused after all in-progress subframes have been 1280 * completed. 1281 */ 1282 if (txtid->baw_head != txtid->baw_tail) 1283 txtid->state |= AGGR_CLEANUP; 1284 else 1285 txtid->state &= ~AGGR_ADDBA_COMPLETE; 1286 1287 ath_tx_flush_tid(sc, txtid); 1288 ath_txq_unlock_complete(sc, txq); 1289 } 1290 1291 void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc, 1292 struct ath_node *an) 1293 { 1294 struct ath_atx_tid *tid; 1295 struct ath_atx_ac *ac; 1296 struct ath_txq *txq; 1297 bool buffered; 1298 int tidno; 1299 1300 for (tidno = 0, tid = &an->tid[tidno]; 1301 tidno < IEEE80211_NUM_TIDS; tidno++, tid++) { 1302 1303 if (!tid->sched) 1304 continue; 1305 1306 ac = tid->ac; 1307 txq = ac->txq; 1308 1309 ath_txq_lock(sc, txq); 1310 1311 buffered = !skb_queue_empty(&tid->buf_q); 1312 1313 tid->sched = false; 1314 list_del(&tid->list); 1315 1316 if (ac->sched) { 1317 ac->sched = false; 1318 list_del(&ac->list); 1319 } 1320 1321 ath_txq_unlock(sc, txq); 1322 1323 ieee80211_sta_set_buffered(sta, tidno, buffered); 1324 } 1325 } 1326 1327 void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an) 1328 { 1329 struct ath_atx_tid *tid; 1330 struct ath_atx_ac *ac; 1331 struct ath_txq *txq; 1332 int tidno; 1333 1334 for (tidno = 0, tid = &an->tid[tidno]; 1335 tidno < IEEE80211_NUM_TIDS; tidno++, tid++) { 1336 1337 ac = tid->ac; 1338 txq = ac->txq; 1339 1340 ath_txq_lock(sc, txq); 1341 ac->clear_ps_filter = true; 1342 1343 if (!skb_queue_empty(&tid->buf_q) && !tid->paused) { 1344 ath_tx_queue_tid(txq, tid); 1345 ath_txq_schedule(sc, txq); 1346 } 1347 1348 ath_txq_unlock_complete(sc, txq); 1349 } 1350 } 1351 1352 void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) 1353 { 1354 struct ath_atx_tid *txtid; 1355 struct ath_node *an; 1356 1357 an = (struct ath_node *)sta->drv_priv; 1358 1359 txtid = ATH_AN_2_TID(an, tid); 1360 txtid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor; 1361 txtid->state |= AGGR_ADDBA_COMPLETE; 1362 txtid->state &= ~AGGR_ADDBA_PROGRESS; 1363 ath_tx_resume_tid(sc, txtid); 1364 } 1365 1366 /********************/ 1367 /* Queue Management */ 1368 /********************/ 1369 1370 struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) 1371 { 1372 struct ath_hw *ah = sc->sc_ah; 1373 struct ath9k_tx_queue_info qi; 1374 static const int subtype_txq_to_hwq[] = { 1375 [IEEE80211_AC_BE] = ATH_TXQ_AC_BE, 1376 [IEEE80211_AC_BK] = ATH_TXQ_AC_BK, 1377 [IEEE80211_AC_VI] = ATH_TXQ_AC_VI, 1378 [IEEE80211_AC_VO] = ATH_TXQ_AC_VO, 1379 }; 1380 int axq_qnum, i; 1381 1382 memset(&qi, 0, sizeof(qi)); 1383 qi.tqi_subtype = subtype_txq_to_hwq[subtype]; 1384 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT; 1385 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT; 1386 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT; 1387 qi.tqi_physCompBuf = 0; 1388 1389 /* 1390 * Enable interrupts only for EOL and DESC conditions. 1391 * We mark tx descriptors to receive a DESC interrupt 1392 * when a tx queue gets deep; otherwise waiting for the 1393 * EOL to reap descriptors. Note that this is done to 1394 * reduce interrupt load and this only defers reaping 1395 * descriptors, never transmitting frames. Aside from 1396 * reducing interrupts this also permits more concurrency. 1397 * The only potential downside is if the tx queue backs 1398 * up in which case the top half of the kernel may backup 1399 * due to a lack of tx descriptors. 1400 * 1401 * The UAPSD queue is an exception, since we take a desc- 1402 * based intr on the EOSP frames. 1403 */ 1404 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { 1405 qi.tqi_qflags = TXQ_FLAG_TXINT_ENABLE; 1406 } else { 1407 if (qtype == ATH9K_TX_QUEUE_UAPSD) 1408 qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE; 1409 else 1410 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE | 1411 TXQ_FLAG_TXDESCINT_ENABLE; 1412 } 1413 axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi); 1414 if (axq_qnum == -1) { 1415 /* 1416 * NB: don't print a message, this happens 1417 * normally on parts with too few tx queues 1418 */ 1419 return NULL; 1420 } 1421 if (!ATH_TXQ_SETUP(sc, axq_qnum)) { 1422 struct ath_txq *txq = &sc->tx.txq[axq_qnum]; 1423 1424 txq->axq_qnum = axq_qnum; 1425 txq->mac80211_qnum = -1; 1426 txq->axq_link = NULL; 1427 __skb_queue_head_init(&txq->complete_q); 1428 INIT_LIST_HEAD(&txq->axq_q); 1429 INIT_LIST_HEAD(&txq->axq_acq); 1430 spin_lock_init(&txq->axq_lock); 1431 txq->axq_depth = 0; 1432 txq->axq_ampdu_depth = 0; 1433 txq->axq_tx_inprogress = false; 1434 sc->tx.txqsetup |= 1<<axq_qnum; 1435 1436 txq->txq_headidx = txq->txq_tailidx = 0; 1437 for (i = 0; i < ATH_TXFIFO_DEPTH; i++) 1438 INIT_LIST_HEAD(&txq->txq_fifo[i]); 1439 } 1440 return &sc->tx.txq[axq_qnum]; 1441 } 1442 1443 int ath_txq_update(struct ath_softc *sc, int qnum, 1444 struct ath9k_tx_queue_info *qinfo) 1445 { 1446 struct ath_hw *ah = sc->sc_ah; 1447 int error = 0; 1448 struct ath9k_tx_queue_info qi; 1449 1450 BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum); 1451 1452 ath9k_hw_get_txq_props(ah, qnum, &qi); 1453 qi.tqi_aifs = qinfo->tqi_aifs; 1454 qi.tqi_cwmin = qinfo->tqi_cwmin; 1455 qi.tqi_cwmax = qinfo->tqi_cwmax; 1456 qi.tqi_burstTime = qinfo->tqi_burstTime; 1457 qi.tqi_readyTime = qinfo->tqi_readyTime; 1458 1459 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) { 1460 ath_err(ath9k_hw_common(sc->sc_ah), 1461 "Unable to update hardware queue %u!\n", qnum); 1462 error = -EIO; 1463 } else { 1464 ath9k_hw_resettxqueue(ah, qnum); 1465 } 1466 1467 return error; 1468 } 1469 1470 int ath_cabq_update(struct ath_softc *sc) 1471 { 1472 struct ath9k_tx_queue_info qi; 1473 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf; 1474 int qnum = sc->beacon.cabq->axq_qnum; 1475 1476 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi); 1477 /* 1478 * Ensure the readytime % is within the bounds. 1479 */ 1480 if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND) 1481 sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND; 1482 else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND) 1483 sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND; 1484 1485 qi.tqi_readyTime = (cur_conf->beacon_interval * 1486 sc->config.cabqReadytime) / 100; 1487 ath_txq_update(sc, qnum, &qi); 1488 1489 return 0; 1490 } 1491 1492 static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq, 1493 struct list_head *list) 1494 { 1495 struct ath_buf *bf, *lastbf; 1496 struct list_head bf_head; 1497 struct ath_tx_status ts; 1498 1499 memset(&ts, 0, sizeof(ts)); 1500 ts.ts_status = ATH9K_TX_FLUSH; 1501 INIT_LIST_HEAD(&bf_head); 1502 1503 while (!list_empty(list)) { 1504 bf = list_first_entry(list, struct ath_buf, list); 1505 1506 if (bf->bf_stale) { 1507 list_del(&bf->list); 1508 1509 ath_tx_return_buffer(sc, bf); 1510 continue; 1511 } 1512 1513 lastbf = bf->bf_lastbf; 1514 list_cut_position(&bf_head, list, &lastbf->list); 1515 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head); 1516 } 1517 } 1518 1519 /* 1520 * Drain a given TX queue (could be Beacon or Data) 1521 * 1522 * This assumes output has been stopped and 1523 * we do not need to block ath_tx_tasklet. 1524 */ 1525 void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq) 1526 { 1527 ath_txq_lock(sc, txq); 1528 1529 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { 1530 int idx = txq->txq_tailidx; 1531 1532 while (!list_empty(&txq->txq_fifo[idx])) { 1533 ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx]); 1534 1535 INCR(idx, ATH_TXFIFO_DEPTH); 1536 } 1537 txq->txq_tailidx = idx; 1538 } 1539 1540 txq->axq_link = NULL; 1541 txq->axq_tx_inprogress = false; 1542 ath_drain_txq_list(sc, txq, &txq->axq_q); 1543 1544 ath_txq_unlock_complete(sc, txq); 1545 } 1546 1547 bool ath_drain_all_txq(struct ath_softc *sc) 1548 { 1549 struct ath_hw *ah = sc->sc_ah; 1550 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1551 struct ath_txq *txq; 1552 int i; 1553 u32 npend = 0; 1554 1555 if (test_bit(SC_OP_INVALID, &sc->sc_flags)) 1556 return true; 1557 1558 ath9k_hw_abort_tx_dma(ah); 1559 1560 /* Check if any queue remains active */ 1561 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { 1562 if (!ATH_TXQ_SETUP(sc, i)) 1563 continue; 1564 1565 if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum)) 1566 npend |= BIT(i); 1567 } 1568 1569 if (npend) 1570 ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend); 1571 1572 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { 1573 if (!ATH_TXQ_SETUP(sc, i)) 1574 continue; 1575 1576 /* 1577 * The caller will resume queues with ieee80211_wake_queues. 1578 * Mark the queue as not stopped to prevent ath_tx_complete 1579 * from waking the queue too early. 1580 */ 1581 txq = &sc->tx.txq[i]; 1582 txq->stopped = false; 1583 ath_draintxq(sc, txq); 1584 } 1585 1586 return !npend; 1587 } 1588 1589 void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) 1590 { 1591 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum); 1592 sc->tx.txqsetup &= ~(1<<txq->axq_qnum); 1593 } 1594 1595 /* For each axq_acq entry, for each tid, try to schedule packets 1596 * for transmit until ampdu_depth has reached min Q depth. 1597 */ 1598 void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) 1599 { 1600 struct ath_atx_ac *ac, *ac_tmp, *last_ac; 1601 struct ath_atx_tid *tid, *last_tid; 1602 1603 if (test_bit(SC_OP_HW_RESET, &sc->sc_flags) || 1604 list_empty(&txq->axq_acq) || 1605 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) 1606 return; 1607 1608 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list); 1609 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list); 1610 1611 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) { 1612 last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list); 1613 list_del(&ac->list); 1614 ac->sched = false; 1615 1616 while (!list_empty(&ac->tid_q)) { 1617 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, 1618 list); 1619 list_del(&tid->list); 1620 tid->sched = false; 1621 1622 if (tid->paused) 1623 continue; 1624 1625 ath_tx_sched_aggr(sc, txq, tid); 1626 1627 /* 1628 * add tid to round-robin queue if more frames 1629 * are pending for the tid 1630 */ 1631 if (!skb_queue_empty(&tid->buf_q)) 1632 ath_tx_queue_tid(txq, tid); 1633 1634 if (tid == last_tid || 1635 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) 1636 break; 1637 } 1638 1639 if (!list_empty(&ac->tid_q) && !ac->sched) { 1640 ac->sched = true; 1641 list_add_tail(&ac->list, &txq->axq_acq); 1642 } 1643 1644 if (ac == last_ac || 1645 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) 1646 return; 1647 } 1648 } 1649 1650 /***********/ 1651 /* TX, DMA */ 1652 /***********/ 1653 1654 /* 1655 * Insert a chain of ath_buf (descriptors) on a txq and 1656 * assume the descriptors are already chained together by caller. 1657 */ 1658 static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, 1659 struct list_head *head, bool internal) 1660 { 1661 struct ath_hw *ah = sc->sc_ah; 1662 struct ath_common *common = ath9k_hw_common(ah); 1663 struct ath_buf *bf, *bf_last; 1664 bool puttxbuf = false; 1665 bool edma; 1666 1667 /* 1668 * Insert the frame on the outbound list and 1669 * pass it on to the hardware. 1670 */ 1671 1672 if (list_empty(head)) 1673 return; 1674 1675 edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA); 1676 bf = list_first_entry(head, struct ath_buf, list); 1677 bf_last = list_entry(head->prev, struct ath_buf, list); 1678 1679 ath_dbg(common, QUEUE, "qnum: %d, txq depth: %d\n", 1680 txq->axq_qnum, txq->axq_depth); 1681 1682 if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) { 1683 list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]); 1684 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH); 1685 puttxbuf = true; 1686 } else { 1687 list_splice_tail_init(head, &txq->axq_q); 1688 1689 if (txq->axq_link) { 1690 ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr); 1691 ath_dbg(common, XMIT, "link[%u] (%p)=%llx (%p)\n", 1692 txq->axq_qnum, txq->axq_link, 1693 ito64(bf->bf_daddr), bf->bf_desc); 1694 } else if (!edma) 1695 puttxbuf = true; 1696 1697 txq->axq_link = bf_last->bf_desc; 1698 } 1699 1700 if (puttxbuf) { 1701 TX_STAT_INC(txq->axq_qnum, puttxbuf); 1702 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr); 1703 ath_dbg(common, XMIT, "TXDP[%u] = %llx (%p)\n", 1704 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc); 1705 } 1706 1707 if (!edma) { 1708 TX_STAT_INC(txq->axq_qnum, txstart); 1709 ath9k_hw_txstart(ah, txq->axq_qnum); 1710 } 1711 1712 if (!internal) { 1713 txq->axq_depth++; 1714 if (bf_is_ampdu_not_probing(bf)) 1715 txq->axq_ampdu_depth++; 1716 } 1717 } 1718 1719 static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid, 1720 struct sk_buff *skb, struct ath_tx_control *txctl) 1721 { 1722 struct ath_frame_info *fi = get_frame_info(skb); 1723 struct list_head bf_head; 1724 struct ath_buf *bf; 1725 1726 /* 1727 * Do not queue to h/w when any of the following conditions is true: 1728 * - there are pending frames in software queue 1729 * - the TID is currently paused for ADDBA/BAR request 1730 * - seqno is not within block-ack window 1731 * - h/w queue depth exceeds low water mark 1732 */ 1733 if (!skb_queue_empty(&tid->buf_q) || tid->paused || 1734 !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) || 1735 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) { 1736 /* 1737 * Add this frame to software queue for scheduling later 1738 * for aggregation. 1739 */ 1740 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw); 1741 __skb_queue_tail(&tid->buf_q, skb); 1742 if (!txctl->an || !txctl->an->sleeping) 1743 ath_tx_queue_tid(txctl->txq, tid); 1744 return; 1745 } 1746 1747 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb); 1748 if (!bf) { 1749 ieee80211_free_txskb(sc->hw, skb); 1750 return; 1751 } 1752 1753 ath_set_rates(tid->an->vif, tid->an->sta, bf); 1754 bf->bf_state.bf_type = BUF_AMPDU; 1755 INIT_LIST_HEAD(&bf_head); 1756 list_add(&bf->list, &bf_head); 1757 1758 /* Add sub-frame to BAW */ 1759 ath_tx_addto_baw(sc, tid, bf->bf_state.seqno); 1760 1761 /* Queue to h/w without aggregation */ 1762 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw); 1763 bf->bf_lastbf = bf; 1764 ath_tx_fill_desc(sc, bf, txctl->txq, fi->framelen); 1765 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false); 1766 } 1767 1768 static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq, 1769 struct ath_atx_tid *tid, struct sk_buff *skb) 1770 { 1771 struct ath_frame_info *fi = get_frame_info(skb); 1772 struct list_head bf_head; 1773 struct ath_buf *bf; 1774 1775 bf = fi->bf; 1776 1777 INIT_LIST_HEAD(&bf_head); 1778 list_add_tail(&bf->list, &bf_head); 1779 bf->bf_state.bf_type = 0; 1780 1781 bf->bf_next = NULL; 1782 bf->bf_lastbf = bf; 1783 ath_tx_fill_desc(sc, bf, txq, fi->framelen); 1784 ath_tx_txqaddbuf(sc, txq, &bf_head, false); 1785 TX_STAT_INC(txq->axq_qnum, queued); 1786 } 1787 1788 static void setup_frame_info(struct ieee80211_hw *hw, 1789 struct ieee80211_sta *sta, 1790 struct sk_buff *skb, 1791 int framelen) 1792 { 1793 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1794 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key; 1795 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1796 const struct ieee80211_rate *rate; 1797 struct ath_frame_info *fi = get_frame_info(skb); 1798 struct ath_node *an = NULL; 1799 enum ath9k_key_type keytype; 1800 bool short_preamble = false; 1801 1802 /* 1803 * We check if Short Preamble is needed for the CTS rate by 1804 * checking the BSS's global flag. 1805 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used. 1806 */ 1807 if (tx_info->control.vif && 1808 tx_info->control.vif->bss_conf.use_short_preamble) 1809 short_preamble = true; 1810 1811 rate = ieee80211_get_rts_cts_rate(hw, tx_info); 1812 keytype = ath9k_cmn_get_hw_crypto_keytype(skb); 1813 1814 if (sta) 1815 an = (struct ath_node *) sta->drv_priv; 1816 1817 memset(fi, 0, sizeof(*fi)); 1818 if (hw_key) 1819 fi->keyix = hw_key->hw_key_idx; 1820 else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0) 1821 fi->keyix = an->ps_key; 1822 else 1823 fi->keyix = ATH9K_TXKEYIX_INVALID; 1824 fi->keytype = keytype; 1825 fi->framelen = framelen; 1826 fi->rtscts_rate = rate->hw_value; 1827 if (short_preamble) 1828 fi->rtscts_rate |= rate->hw_value_short; 1829 } 1830 1831 u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate) 1832 { 1833 struct ath_hw *ah = sc->sc_ah; 1834 struct ath9k_channel *curchan = ah->curchan; 1835 1836 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) && 1837 (curchan->channelFlags & CHANNEL_5GHZ) && 1838 (chainmask == 0x7) && (rate < 0x90)) 1839 return 0x3; 1840 else if (AR_SREV_9462(ah) && ath9k_hw_btcoex_is_enabled(ah) && 1841 IS_CCK_RATE(rate)) 1842 return 0x2; 1843 else 1844 return chainmask; 1845 } 1846 1847 /* 1848 * Assign a descriptor (and sequence number if necessary, 1849 * and map buffer for DMA. Frees skb on error 1850 */ 1851 static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, 1852 struct ath_txq *txq, 1853 struct ath_atx_tid *tid, 1854 struct sk_buff *skb) 1855 { 1856 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1857 struct ath_frame_info *fi = get_frame_info(skb); 1858 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1859 struct ath_buf *bf; 1860 int fragno; 1861 u16 seqno; 1862 1863 bf = ath_tx_get_buffer(sc); 1864 if (!bf) { 1865 ath_dbg(common, XMIT, "TX buffers are full\n"); 1866 return NULL; 1867 } 1868 1869 ATH_TXBUF_RESET(bf); 1870 1871 if (tid) { 1872 fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG; 1873 seqno = tid->seq_next; 1874 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT); 1875 1876 if (fragno) 1877 hdr->seq_ctrl |= cpu_to_le16(fragno); 1878 1879 if (!ieee80211_has_morefrags(hdr->frame_control)) 1880 INCR(tid->seq_next, IEEE80211_SEQ_MAX); 1881 1882 bf->bf_state.seqno = seqno; 1883 } 1884 1885 bf->bf_mpdu = skb; 1886 1887 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data, 1888 skb->len, DMA_TO_DEVICE); 1889 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) { 1890 bf->bf_mpdu = NULL; 1891 bf->bf_buf_addr = 0; 1892 ath_err(ath9k_hw_common(sc->sc_ah), 1893 "dma_mapping_error() on TX\n"); 1894 ath_tx_return_buffer(sc, bf); 1895 return NULL; 1896 } 1897 1898 fi->bf = bf; 1899 1900 return bf; 1901 } 1902 1903 /* Upon failure caller should free skb */ 1904 int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, 1905 struct ath_tx_control *txctl) 1906 { 1907 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1908 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1909 struct ieee80211_sta *sta = txctl->sta; 1910 struct ieee80211_vif *vif = info->control.vif; 1911 struct ath_softc *sc = hw->priv; 1912 struct ath_txq *txq = txctl->txq; 1913 struct ath_atx_tid *tid = NULL; 1914 struct ath_buf *bf; 1915 int padpos, padsize; 1916 int frmlen = skb->len + FCS_LEN; 1917 u8 tidno; 1918 int q; 1919 1920 /* NOTE: sta can be NULL according to net/mac80211.h */ 1921 if (sta) 1922 txctl->an = (struct ath_node *)sta->drv_priv; 1923 1924 if (info->control.hw_key) 1925 frmlen += info->control.hw_key->icv_len; 1926 1927 /* 1928 * As a temporary workaround, assign seq# here; this will likely need 1929 * to be cleaned up to work better with Beacon transmission and virtual 1930 * BSSes. 1931 */ 1932 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { 1933 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) 1934 sc->tx.seq_no += 0x10; 1935 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); 1936 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no); 1937 } 1938 1939 /* Add the padding after the header if this is not already done */ 1940 padpos = ieee80211_hdrlen(hdr->frame_control); 1941 padsize = padpos & 3; 1942 if (padsize && skb->len > padpos) { 1943 if (skb_headroom(skb) < padsize) 1944 return -ENOMEM; 1945 1946 skb_push(skb, padsize); 1947 memmove(skb->data, skb->data + padsize, padpos); 1948 hdr = (struct ieee80211_hdr *) skb->data; 1949 } 1950 1951 if ((vif && vif->type != NL80211_IFTYPE_AP && 1952 vif->type != NL80211_IFTYPE_AP_VLAN) || 1953 !ieee80211_is_data(hdr->frame_control)) 1954 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; 1955 1956 setup_frame_info(hw, sta, skb, frmlen); 1957 1958 /* 1959 * At this point, the vif, hw_key and sta pointers in the tx control 1960 * info are no longer valid (overwritten by the ath_frame_info data. 1961 */ 1962 1963 q = skb_get_queue_mapping(skb); 1964 1965 ath_txq_lock(sc, txq); 1966 if (txq == sc->tx.txq_map[q] && 1967 ++txq->pending_frames > sc->tx.txq_max_pending[q] && 1968 !txq->stopped) { 1969 ieee80211_stop_queue(sc->hw, q); 1970 txq->stopped = true; 1971 } 1972 1973 if (txctl->an && ieee80211_is_data_qos(hdr->frame_control)) { 1974 tidno = ieee80211_get_qos_ctl(hdr)[0] & 1975 IEEE80211_QOS_CTL_TID_MASK; 1976 tid = ATH_AN_2_TID(txctl->an, tidno); 1977 1978 WARN_ON(tid->ac->txq != txctl->txq); 1979 } 1980 1981 if ((info->flags & IEEE80211_TX_CTL_AMPDU) && tid) { 1982 /* 1983 * Try aggregation if it's a unicast data frame 1984 * and the destination is HT capable. 1985 */ 1986 ath_tx_send_ampdu(sc, tid, skb, txctl); 1987 goto out; 1988 } 1989 1990 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb); 1991 if (!bf) { 1992 if (txctl->paprd) 1993 dev_kfree_skb_any(skb); 1994 else 1995 ieee80211_free_txskb(sc->hw, skb); 1996 goto out; 1997 } 1998 1999 bf->bf_state.bfs_paprd = txctl->paprd; 2000 2001 if (txctl->paprd) 2002 bf->bf_state.bfs_paprd_timestamp = jiffies; 2003 2004 ath_set_rates(vif, sta, bf); 2005 ath_tx_send_normal(sc, txctl->txq, tid, skb); 2006 2007 out: 2008 ath_txq_unlock(sc, txq); 2009 2010 return 0; 2011 } 2012 2013 /*****************/ 2014 /* TX Completion */ 2015 /*****************/ 2016 2017 static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, 2018 int tx_flags, struct ath_txq *txq) 2019 { 2020 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 2021 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 2022 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data; 2023 int q, padpos, padsize; 2024 unsigned long flags; 2025 2026 ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb); 2027 2028 if (sc->sc_ah->caldata) 2029 sc->sc_ah->caldata->paprd_packet_sent = true; 2030 2031 if (!(tx_flags & ATH_TX_ERROR)) 2032 /* Frame was ACKed */ 2033 tx_info->flags |= IEEE80211_TX_STAT_ACK; 2034 2035 padpos = ieee80211_hdrlen(hdr->frame_control); 2036 padsize = padpos & 3; 2037 if (padsize && skb->len>padpos+padsize) { 2038 /* 2039 * Remove MAC header padding before giving the frame back to 2040 * mac80211. 2041 */ 2042 memmove(skb->data + padsize, skb->data, padpos); 2043 skb_pull(skb, padsize); 2044 } 2045 2046 spin_lock_irqsave(&sc->sc_pm_lock, flags); 2047 if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) { 2048 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK; 2049 ath_dbg(common, PS, 2050 "Going back to sleep after having received TX status (0x%lx)\n", 2051 sc->ps_flags & (PS_WAIT_FOR_BEACON | 2052 PS_WAIT_FOR_CAB | 2053 PS_WAIT_FOR_PSPOLL_DATA | 2054 PS_WAIT_FOR_TX_ACK)); 2055 } 2056 spin_unlock_irqrestore(&sc->sc_pm_lock, flags); 2057 2058 q = skb_get_queue_mapping(skb); 2059 if (txq == sc->tx.txq_map[q]) { 2060 if (WARN_ON(--txq->pending_frames < 0)) 2061 txq->pending_frames = 0; 2062 2063 if (txq->stopped && 2064 txq->pending_frames < sc->tx.txq_max_pending[q]) { 2065 ieee80211_wake_queue(sc->hw, q); 2066 txq->stopped = false; 2067 } 2068 } 2069 2070 __skb_queue_tail(&txq->complete_q, skb); 2071 } 2072 2073 static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, 2074 struct ath_txq *txq, struct list_head *bf_q, 2075 struct ath_tx_status *ts, int txok) 2076 { 2077 struct sk_buff *skb = bf->bf_mpdu; 2078 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 2079 unsigned long flags; 2080 int tx_flags = 0; 2081 2082 if (!txok) 2083 tx_flags |= ATH_TX_ERROR; 2084 2085 if (ts->ts_status & ATH9K_TXERR_FILT) 2086 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED; 2087 2088 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE); 2089 bf->bf_buf_addr = 0; 2090 2091 if (bf->bf_state.bfs_paprd) { 2092 if (time_after(jiffies, 2093 bf->bf_state.bfs_paprd_timestamp + 2094 msecs_to_jiffies(ATH_PAPRD_TIMEOUT))) 2095 dev_kfree_skb_any(skb); 2096 else 2097 complete(&sc->paprd_complete); 2098 } else { 2099 ath_debug_stat_tx(sc, bf, ts, txq, tx_flags); 2100 ath_tx_complete(sc, skb, tx_flags, txq); 2101 } 2102 /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't 2103 * accidentally reference it later. 2104 */ 2105 bf->bf_mpdu = NULL; 2106 2107 /* 2108 * Return the list of ath_buf of this mpdu to free queue 2109 */ 2110 spin_lock_irqsave(&sc->tx.txbuflock, flags); 2111 list_splice_tail_init(bf_q, &sc->tx.txbuf); 2112 spin_unlock_irqrestore(&sc->tx.txbuflock, flags); 2113 } 2114 2115 static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, 2116 struct ath_tx_status *ts, int nframes, int nbad, 2117 int txok) 2118 { 2119 struct sk_buff *skb = bf->bf_mpdu; 2120 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 2121 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 2122 struct ieee80211_hw *hw = sc->hw; 2123 struct ath_hw *ah = sc->sc_ah; 2124 u8 i, tx_rateindex; 2125 2126 if (txok) 2127 tx_info->status.ack_signal = ts->ts_rssi; 2128 2129 tx_rateindex = ts->ts_rateindex; 2130 WARN_ON(tx_rateindex >= hw->max_rates); 2131 2132 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) { 2133 tx_info->flags |= IEEE80211_TX_STAT_AMPDU; 2134 2135 BUG_ON(nbad > nframes); 2136 } 2137 tx_info->status.ampdu_len = nframes; 2138 tx_info->status.ampdu_ack_len = nframes - nbad; 2139 2140 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 && 2141 (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) { 2142 /* 2143 * If an underrun error is seen assume it as an excessive 2144 * retry only if max frame trigger level has been reached 2145 * (2 KB for single stream, and 4 KB for dual stream). 2146 * Adjust the long retry as if the frame was tried 2147 * hw->max_rate_tries times to affect how rate control updates 2148 * PER for the failed rate. 2149 * In case of congestion on the bus penalizing this type of 2150 * underruns should help hardware actually transmit new frames 2151 * successfully by eventually preferring slower rates. 2152 * This itself should also alleviate congestion on the bus. 2153 */ 2154 if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN | 2155 ATH9K_TX_DELIM_UNDERRUN)) && 2156 ieee80211_is_data(hdr->frame_control) && 2157 ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level) 2158 tx_info->status.rates[tx_rateindex].count = 2159 hw->max_rate_tries; 2160 } 2161 2162 for (i = tx_rateindex + 1; i < hw->max_rates; i++) { 2163 tx_info->status.rates[i].count = 0; 2164 tx_info->status.rates[i].idx = -1; 2165 } 2166 2167 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1; 2168 } 2169 2170 static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) 2171 { 2172 struct ath_hw *ah = sc->sc_ah; 2173 struct ath_common *common = ath9k_hw_common(ah); 2174 struct ath_buf *bf, *lastbf, *bf_held = NULL; 2175 struct list_head bf_head; 2176 struct ath_desc *ds; 2177 struct ath_tx_status ts; 2178 int status; 2179 2180 ath_dbg(common, QUEUE, "tx queue %d (%x), link %p\n", 2181 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum), 2182 txq->axq_link); 2183 2184 ath_txq_lock(sc, txq); 2185 for (;;) { 2186 if (test_bit(SC_OP_HW_RESET, &sc->sc_flags)) 2187 break; 2188 2189 if (list_empty(&txq->axq_q)) { 2190 txq->axq_link = NULL; 2191 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) 2192 ath_txq_schedule(sc, txq); 2193 break; 2194 } 2195 bf = list_first_entry(&txq->axq_q, struct ath_buf, list); 2196 2197 /* 2198 * There is a race condition that a BH gets scheduled 2199 * after sw writes TxE and before hw re-load the last 2200 * descriptor to get the newly chained one. 2201 * Software must keep the last DONE descriptor as a 2202 * holding descriptor - software does so by marking 2203 * it with the STALE flag. 2204 */ 2205 bf_held = NULL; 2206 if (bf->bf_stale) { 2207 bf_held = bf; 2208 if (list_is_last(&bf_held->list, &txq->axq_q)) 2209 break; 2210 2211 bf = list_entry(bf_held->list.next, struct ath_buf, 2212 list); 2213 } 2214 2215 lastbf = bf->bf_lastbf; 2216 ds = lastbf->bf_desc; 2217 2218 memset(&ts, 0, sizeof(ts)); 2219 status = ath9k_hw_txprocdesc(ah, ds, &ts); 2220 if (status == -EINPROGRESS) 2221 break; 2222 2223 TX_STAT_INC(txq->axq_qnum, txprocdesc); 2224 2225 /* 2226 * Remove ath_buf's of the same transmit unit from txq, 2227 * however leave the last descriptor back as the holding 2228 * descriptor for hw. 2229 */ 2230 lastbf->bf_stale = true; 2231 INIT_LIST_HEAD(&bf_head); 2232 if (!list_is_singular(&lastbf->list)) 2233 list_cut_position(&bf_head, 2234 &txq->axq_q, lastbf->list.prev); 2235 2236 if (bf_held) { 2237 list_del(&bf_held->list); 2238 ath_tx_return_buffer(sc, bf_held); 2239 } 2240 2241 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head); 2242 } 2243 ath_txq_unlock_complete(sc, txq); 2244 } 2245 2246 void ath_tx_tasklet(struct ath_softc *sc) 2247 { 2248 struct ath_hw *ah = sc->sc_ah; 2249 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1) & ah->intr_txqs; 2250 int i; 2251 2252 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { 2253 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i))) 2254 ath_tx_processq(sc, &sc->tx.txq[i]); 2255 } 2256 } 2257 2258 void ath_tx_edma_tasklet(struct ath_softc *sc) 2259 { 2260 struct ath_tx_status ts; 2261 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 2262 struct ath_hw *ah = sc->sc_ah; 2263 struct ath_txq *txq; 2264 struct ath_buf *bf, *lastbf; 2265 struct list_head bf_head; 2266 struct list_head *fifo_list; 2267 int status; 2268 2269 for (;;) { 2270 if (test_bit(SC_OP_HW_RESET, &sc->sc_flags)) 2271 break; 2272 2273 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts); 2274 if (status == -EINPROGRESS) 2275 break; 2276 if (status == -EIO) { 2277 ath_dbg(common, XMIT, "Error processing tx status\n"); 2278 break; 2279 } 2280 2281 /* Process beacon completions separately */ 2282 if (ts.qid == sc->beacon.beaconq) { 2283 sc->beacon.tx_processed = true; 2284 sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK); 2285 continue; 2286 } 2287 2288 txq = &sc->tx.txq[ts.qid]; 2289 2290 ath_txq_lock(sc, txq); 2291 2292 TX_STAT_INC(txq->axq_qnum, txprocdesc); 2293 2294 fifo_list = &txq->txq_fifo[txq->txq_tailidx]; 2295 if (list_empty(fifo_list)) { 2296 ath_txq_unlock(sc, txq); 2297 return; 2298 } 2299 2300 bf = list_first_entry(fifo_list, struct ath_buf, list); 2301 if (bf->bf_stale) { 2302 list_del(&bf->list); 2303 ath_tx_return_buffer(sc, bf); 2304 bf = list_first_entry(fifo_list, struct ath_buf, list); 2305 } 2306 2307 lastbf = bf->bf_lastbf; 2308 2309 INIT_LIST_HEAD(&bf_head); 2310 if (list_is_last(&lastbf->list, fifo_list)) { 2311 list_splice_tail_init(fifo_list, &bf_head); 2312 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH); 2313 2314 if (!list_empty(&txq->axq_q)) { 2315 struct list_head bf_q; 2316 2317 INIT_LIST_HEAD(&bf_q); 2318 txq->axq_link = NULL; 2319 list_splice_tail_init(&txq->axq_q, &bf_q); 2320 ath_tx_txqaddbuf(sc, txq, &bf_q, true); 2321 } 2322 } else { 2323 lastbf->bf_stale = true; 2324 if (bf != lastbf) 2325 list_cut_position(&bf_head, fifo_list, 2326 lastbf->list.prev); 2327 } 2328 2329 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head); 2330 ath_txq_unlock_complete(sc, txq); 2331 } 2332 } 2333 2334 /*****************/ 2335 /* Init, Cleanup */ 2336 /*****************/ 2337 2338 static int ath_txstatus_setup(struct ath_softc *sc, int size) 2339 { 2340 struct ath_descdma *dd = &sc->txsdma; 2341 u8 txs_len = sc->sc_ah->caps.txs_len; 2342 2343 dd->dd_desc_len = size * txs_len; 2344 dd->dd_desc = dmam_alloc_coherent(sc->dev, dd->dd_desc_len, 2345 &dd->dd_desc_paddr, GFP_KERNEL); 2346 if (!dd->dd_desc) 2347 return -ENOMEM; 2348 2349 return 0; 2350 } 2351 2352 static int ath_tx_edma_init(struct ath_softc *sc) 2353 { 2354 int err; 2355 2356 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE); 2357 if (!err) 2358 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc, 2359 sc->txsdma.dd_desc_paddr, 2360 ATH_TXSTATUS_RING_SIZE); 2361 2362 return err; 2363 } 2364 2365 int ath_tx_init(struct ath_softc *sc, int nbufs) 2366 { 2367 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 2368 int error = 0; 2369 2370 spin_lock_init(&sc->tx.txbuflock); 2371 2372 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf, 2373 "tx", nbufs, 1, 1); 2374 if (error != 0) { 2375 ath_err(common, 2376 "Failed to allocate tx descriptors: %d\n", error); 2377 return error; 2378 } 2379 2380 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf, 2381 "beacon", ATH_BCBUF, 1, 1); 2382 if (error != 0) { 2383 ath_err(common, 2384 "Failed to allocate beacon descriptors: %d\n", error); 2385 return error; 2386 } 2387 2388 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work); 2389 2390 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) 2391 error = ath_tx_edma_init(sc); 2392 2393 return error; 2394 } 2395 2396 void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an) 2397 { 2398 struct ath_atx_tid *tid; 2399 struct ath_atx_ac *ac; 2400 int tidno, acno; 2401 2402 for (tidno = 0, tid = &an->tid[tidno]; 2403 tidno < IEEE80211_NUM_TIDS; 2404 tidno++, tid++) { 2405 tid->an = an; 2406 tid->tidno = tidno; 2407 tid->seq_start = tid->seq_next = 0; 2408 tid->baw_size = WME_MAX_BA; 2409 tid->baw_head = tid->baw_tail = 0; 2410 tid->sched = false; 2411 tid->paused = false; 2412 tid->state &= ~AGGR_CLEANUP; 2413 __skb_queue_head_init(&tid->buf_q); 2414 acno = TID_TO_WME_AC(tidno); 2415 tid->ac = &an->ac[acno]; 2416 tid->state &= ~AGGR_ADDBA_COMPLETE; 2417 tid->state &= ~AGGR_ADDBA_PROGRESS; 2418 } 2419 2420 for (acno = 0, ac = &an->ac[acno]; 2421 acno < IEEE80211_NUM_ACS; acno++, ac++) { 2422 ac->sched = false; 2423 ac->txq = sc->tx.txq_map[acno]; 2424 INIT_LIST_HEAD(&ac->tid_q); 2425 } 2426 } 2427 2428 void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an) 2429 { 2430 struct ath_atx_ac *ac; 2431 struct ath_atx_tid *tid; 2432 struct ath_txq *txq; 2433 int tidno; 2434 2435 for (tidno = 0, tid = &an->tid[tidno]; 2436 tidno < IEEE80211_NUM_TIDS; tidno++, tid++) { 2437 2438 ac = tid->ac; 2439 txq = ac->txq; 2440 2441 ath_txq_lock(sc, txq); 2442 2443 if (tid->sched) { 2444 list_del(&tid->list); 2445 tid->sched = false; 2446 } 2447 2448 if (ac->sched) { 2449 list_del(&ac->list); 2450 tid->ac->sched = false; 2451 } 2452 2453 ath_tid_drain(sc, txq, tid); 2454 tid->state &= ~AGGR_ADDBA_COMPLETE; 2455 tid->state &= ~AGGR_CLEANUP; 2456 2457 ath_txq_unlock(sc, txq); 2458 } 2459 } 2460