/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define TIME_SYMBOLS(t)         ((t) >> 2)
#define TIME_SYMBOLS_HALFGI(t)  (((t) * 5 - 4) / 18)
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)


static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)
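/*
 * For example, MCS 7 on a 40 MHz channel carries 540 bits per OFDM
 * symbol; with the 3.6 us short-GI symbol time that is 540 / 3.6e-6
 * = 150 Mbps per spatial stream, the familiar advertised HT rate.
 */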
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
					   struct sk_buff *skb);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

/*********************/
/* Aggregation logic */
/*********************/

void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq)
	__acquires(&txq->axq_lock)
{
	spin_lock_bh(&txq->axq_lock);
}

void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq)
	__releases(&txq->axq_lock)
{
	spin_unlock_bh(&txq->axq_lock);
}

void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
	__releases(&txq->axq_lock)
{
	struct sk_buff_head q;
	struct sk_buff *skb;

	__skb_queue_head_init(&q);
	skb_queue_splice_init(&txq->complete_q, &q);
	spin_unlock_bh(&txq->axq_lock);

	while ((skb = __skb_dequeue(&q)))
		ieee80211_tx_status(sc->hw, skb);
}

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;

	WARN_ON(!tid->paused);

	ath_txq_lock(sc, txq);
	tid->paused = false;

	if (skb_queue_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	ath_txq_unlock_complete(sc, txq);
}

static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
{
	ieee80211_send_bar(tid->an->vif, tid->an->sta->addr, tid->tidno,
			   seqno << IEEE80211_SEQ_SEQ_SHIFT);
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;
	bool sendbar = false;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		if (!bf) {
			bf = ath_tx_setup_buffer(sc, txq, tid, skb);
			if (!bf) {
				ieee80211_free_txskb(sc->hw, skb);
				continue;
			}
		}

		if (fi->retries) {
			list_add_tail(&bf->list, &bf_head);
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
			sendbar = true;
		} else {
			ath_tx_send_normal(sc, txq, NULL, skb);
		}
	}

	if (tid->baw_head == tid->baw_tail) {
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_CLEANUP;
	}

	if (sendbar) {
		ath_txq_unlock(sc, txq);
		ath_send_bar(tid, tid->seq_start);
		ath_txq_lock(sc, txq);
	}
}

static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
		if (tid->bar_index >= 0)
			tid->bar_index--;
	}
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}
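/*
 * To illustrate the window bookkeeping above: ATH_BA_INDEX computes the
 * offset of a sequence number from seq_start modulo IEEE80211_SEQ_MAX
 * (4096), so with seq_start == 4090 a completed subframe with seqno 5
 * maps to index (5 - 4090) mod 4096 = 11, i.e. the bit 11 slots past
 * baw_head in the circular tx_buf bitmap.  seq_start only advances once
 * the oldest outstanding subframe (the bit at baw_head) is cleared.
 */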
/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)

{
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		if (!bf) {
			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
			continue;
		}

		list_add_tail(&bf->list, &bf_head);

		if (fi->retries)
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);

		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
	tid->bar_index = -1;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb, int count)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_buf *bf = fi->bf;
	struct ieee80211_hdr *hdr;
	int prev = fi->retries;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	fi->retries += count;

	if (prev > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   sizeof(*hdr), DMA_TO_DEVICE);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_tx_status *ts, int txok,
				int *nframes, int *nbad)
{
	struct ath_frame_info *fi;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		fi = get_frame_info(bf->bf_mpdu);
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}
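/*
 * Note on the bitmap above: WME_BA_BMP_SIZE is the 64-bit block-ack
 * bitmap reported by the hardware (ba_low/ba_high), so ba[] holds two
 * u32 words and ATH_BA_ISSET() tests one bit per subframe.  nbad ends
 * up as the number of subframes the block ack did not cover, which
 * ath_tx_rc_status() later feeds into the rate control statistics.
 */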
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head;
	struct sk_buff_head bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true, isba;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;
	bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
	int i, retries;
	int bar_index = -1;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	retries = ts->ts_longretry + 1;
	for (i = 0; i < ts->ts_rateindex; i++)
		retries += rates[i].count;

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			if (!bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);
	seq_first = tid->seq_start;
	isba = ts->ts_flags & ATH9K_TX_BA;

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted.
	 *
	 * Only BlockAcks have a TID and therefore normal Acks cannot be
	 * checked.
	 */
	if (isba && tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when a BA
			 * issue happens. The chip needs to be reset,
			 * but the AP code may have synchronization issues
			 * when performing an internal reset in this routine.
			 * Only enable the reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	__skb_queue_head_init(&bf_pending);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		u16 seqno = bf->bf_state.seqno;

		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else if ((tid->state & AGGR_CLEANUP) || !retry) {
			/*
			 * cleanup in progress, just fail
			 * the un-acked sub-frames
			 */
			txfail = 1;
		} else if (flush) {
			txpending = 1;
		} else if (fi->retries < ATH_MAX_SW_RETRIES) {
			if (txok || !an->sleeping)
				ath_tx_set_retry(sc, txq, bf->bf_mpdu,
						 retries);

			txpending = 1;
		} else {
			txfail = 1;
			txfail_cnt++;
			bar_index = max_t(int, bar_index,
				ATH_BA_INDEX(seq_first, seqno));
		}

		/*
		 * Make sure the last desc is reclaimed if it
		 * is not a holding desc.
		 */
		INIT_LIST_HEAD(&bf_head);
		if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
		    bf_next != NULL || !bf_last->bf_stale)
			list_move_tail(&bf->list, &bf_head);

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			ath_tx_update_baw(sc, tid, seqno);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok);
				rc_update = false;
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
				!txfail);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
			    bf->bf_next == NULL && bf_last->bf_stale) {
				struct ath_buf *tbf;

				tbf = ath_clone_txbuf(sc, bf_last);
				/*
				 * Update tx baw and complete the
				 * frame with failed status if we
				 * run out of tx buf.
				 */
				if (!tbf) {
					ath_tx_update_baw(sc, tid, seqno);

					ath_tx_complete_buf(sc, bf, txq,
							    &bf_head, ts, 0);
					bar_index = max_t(int, bar_index,
						ATH_BA_INDEX(seq_first, seqno));
					break;
				}

				fi->bf = tbf;
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			__skb_queue_tail(&bf_pending, skb);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!skb_queue_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_buffered(sta, tid->tidno, true);

		skb_queue_splice(&bf_pending, &tid->buf_q);
		if (!an->sleeping) {
			ath_tx_queue_tid(txq, tid);

			if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
				tid->ac->clear_ps_filter = true;
		}
	}

	if (bar_index >= 0) {
		u16 bar_seq = ATH_BA_INDEX2SEQ(seq_first, bar_index);

		if (BAW_WITHIN(tid->seq_start, tid->baw_size, bar_seq))
			tid->bar_index = ATH_BA_INDEX(tid->seq_start, bar_seq);

		ath_txq_unlock(sc, txq);
		ath_send_bar(tid, ATH_BA_INDEX2SEQ(seq_first, bar_index + 1));
		ath_txq_lock(sc, txq);
	}

	if (tid->state & AGGR_CLEANUP)
		ath_tx_flush_tid(sc, tid);

	rcu_read_unlock();

	if (needreset)
		ath9k_queue_reset(sc, RESET_TYPE_TX_ERROR);
}

static bool ath_lookup_legacy(struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	for (i = 0; i < 4; i++) {
		if (!rates[i].count || rates[i].idx < 0)
			break;

		if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
			return true;
	}

	return false;
}

static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, bt_aggr_limit, legacy = 0;
	int q = tid->ac->txq->mac80211_qnum;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms (or TXOP limited) transmit duration.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		int modeidx;

		if (!rates[i].count)
			continue;

		if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
			legacy = 1;
			break;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			modeidx = MCS_HT40;
		else
			modeidx = MCS_HT20;

		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			modeidx++;

		frmlen = sc->tx.max_aggr_framelen[q][modeidx][rates[i].idx];
		max_4ms_framelen = min(max_4ms_framelen, frmlen);
	}

	/*
	 * Limit aggregate size by the minimum rate if the selected rate is
	 * not a probe rate; if the selected rate is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	aggr_limit = min(max_4ms_framelen, (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * Override the default aggregation limit for BTCOEX.
	 */
	bt_aggr_limit = ath9k_btcoex_aggr_limit(sc, max_4ms_framelen);
	if (bt_aggr_limit)
		aggr_limit = bt_aggr_limit;

	/*
	 * The h/w can accept aggregates up to 16-bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}
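/*
 * The resulting limit is therefore the minimum over the whole rate
 * series, further clamped by the peer's ADDBA A-MPDU factor: e.g. a
 * station advertising ampdu_factor 1 yields maxampdu = 1 << (13 + 1) =
 * 16384 (see ath_tx_aggr_start below), so no aggregate for that node
 * exceeds 16 KB even if the rate would allow more within 4 ms.
 */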
/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen,
				  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, hardware requires some more padding between
	 * subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *        The hardware can keep up at lower rates, but not higher rates.
	 */
	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Add delimiter when using RTS/CTS with aggregation
	 * and non-enterprise AR9003 cards
	 */
	if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
	    (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
		ndelim = max(ndelim, FIRST_DESC_NDELIMS);

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}
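/*
 * Worked example for the density math above: with an MPDU density of
 * 16 us, MCS 7 at 20 MHz and full GI gives nsymbols = 16 / 4 = 4 and
 * nsymbits = 260, so minlen = (4 * 260) / 8 = 130 bytes.  A 60-byte
 * subframe then needs (130 - 60) / 4 = 17 extra 4-byte delimiters
 * (assuming ATH_AGGR_DELIM_SZ == 4) to stretch it to the minimum
 * spacing the receiver asked for.
 */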
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;
	u16 seqno;

	do {
		skb = skb_peek(&tid->buf_q);
		fi = get_frame_info(skb);
		bf = fi->bf;
		if (!fi->bf)
			bf = ath_tx_setup_buffer(sc, txq, tid, skb);

		if (!bf) {
			__skb_unlink(skb, &tid->buf_q);
			ieee80211_free_txskb(sc->hw, skb);
			continue;
		}

		bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
		seqno = bf->bf_state.seqno;

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) {
			struct ath_tx_status ts = {};
			struct list_head bf_head;

			INIT_LIST_HEAD(&bf_head);
			list_add(&bf->list, &bf_head);
			__skb_unlink(skb, &tid->buf_q);
			ath_tx_update_baw(sc, tid, seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
			continue;
		}

		if (!bf_first)
			bf_first = bf;

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
		     ath_lookup_legacy(bf))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
						!nframes);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		nframes++;
		bf->bf_next = NULL;

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, seqno);
		bf->bf_state.ndelim = ndelim;

		__skb_unlink(skb, &tid->buf_q);
		list_add_tail(&bf->list, bf_q);
		if (bf_prev)
			bf_prev->bf_next = bf;

		bf_prev = bf;

	} while (!skb_queue_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}

/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width  - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4us v/s 3.6 us for symbol time
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
			    int width, int half_gi, bool shortPreamble)
{
	u32 nbits, nsymbits, duration, nsymbols;
	int streams;

	/* find number of symbols: PLCP + data */
	streams = HT_RC_2_STREAMS(rix);
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* add up duration for legacy/ht training and signal fields */
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}
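/*
 * Sanity check of the formula: a 1200-byte MPDU at MCS 7 (one stream),
 * 20 MHz, long GI needs nsymbols = ceil((1200 * 8 + 22) / 260) = 38
 * symbols, i.e. 38 * 4 = 152 us of payload plus 36 us of training and
 * signal fields (L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(1)),
 * about 188 us on the air.
 */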
static int ath_max_framelen(int usec, int mcs, bool ht40, bool sgi)
{
	int streams = HT_RC_2_STREAMS(mcs);
	int symbols, bits;
	int bytes = 0;

	symbols = sgi ? TIME_SYMBOLS_HALFGI(usec) : TIME_SYMBOLS(usec);
	bits = symbols * bits_per_symbol[mcs % 8][ht40] * streams;
	bits -= OFDM_PLCP_BITS;
	bytes = bits / 8;
	bytes -= L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
	if (bytes > 65532)
		bytes = 65532;

	return bytes;
}
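/*
 * For the default 4096 us TXOP this gives, at MCS 7 / 20 MHz / long GI,
 * 1024 symbols * 260 bits - 22 PLCP bits = 266218 bits, or roughly
 * 33 KB of aggregate; only the fastest 40 MHz multi-stream rates come
 * anywhere near the 65532-byte clamp above.
 */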
void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop)
{
	u16 *cur_ht20, *cur_ht20_sgi, *cur_ht40, *cur_ht40_sgi;
	int mcs;

	/* 4ms is the default (and maximum) duration */
	if (!txop || txop > 4096)
		txop = 4096;

	cur_ht20 = sc->tx.max_aggr_framelen[queue][MCS_HT20];
	cur_ht20_sgi = sc->tx.max_aggr_framelen[queue][MCS_HT20_SGI];
	cur_ht40 = sc->tx.max_aggr_framelen[queue][MCS_HT40];
	cur_ht40_sgi = sc->tx.max_aggr_framelen[queue][MCS_HT40_SGI];
	for (mcs = 0; mcs < 32; mcs++) {
		cur_ht20[mcs] = ath_max_framelen(txop, mcs, false, false);
		cur_ht20_sgi[mcs] = ath_max_framelen(txop, mcs, false, true);
		cur_ht40[mcs] = ath_max_framelen(txop, mcs, true, false);
		cur_ht40_sgi[mcs] = ath_max_framelen(txop, mcs, true, true);
	}
}

static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_info *info, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
	int i;
	u8 rix = 0;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;
	hdr = (struct ieee80211_hdr *)skb->data;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);
	info->rtscts_rate = fi->rtscts_rate;

	for (i = 0; i < 4; i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		info->rates[i].Tries = rates[i].count;

		if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_CTSENA;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates */
			info->rates[i].Rate = rix | 0x80;
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);
			info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
				 is_40, is_sgi, is_sp);
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
				info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
			continue;
		}

		/* legacy rates */
		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		info->rates[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				info->rates[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

		if (bf->bf_state.bfs_paprd)
			info->rates[i].ChSel = ah->txchainmask;
		else
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);

		info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
			phy, rate->bitrate * 100, len, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
		info->flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (info->flags & ATH9K_TXDESC_RTSENA)
		info->flags &= ~ATH9K_TXDESC_CTSENA;
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}

static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_txq *txq, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
	struct ath_buf *bf_first = bf;
	struct ath_tx_info info;
	bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);

	memset(&info, 0, sizeof(info));
	info.is_first = true;
	info.is_last = true;
	info.txpower = MAX_RATE_POWER;
	info.qcu = txq->axq_qnum;

	info.flags = ATH9K_TXDESC_INTREQ;
	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		info.flags |= ATH9K_TXDESC_NOACK;
	if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
		info.flags |= ATH9K_TXDESC_LDPC;

	ath_buf_set_rate(sc, bf, &info, len);

	if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
		info.flags |= ATH9K_TXDESC_CLRDMASK;

	if (bf->bf_state.bfs_paprd)
		info.flags |= (u32) bf->bf_state.bfs_paprd << ATH9K_TXDESC_PAPRD_S;


	while (bf) {
		struct sk_buff *skb = bf->bf_mpdu;
		struct ath_frame_info *fi = get_frame_info(skb);

		info.type = get_hw_packet_type(skb);
		if (bf->bf_next)
			info.link = bf->bf_next->bf_daddr;
		else
			info.link = 0;

		info.buf_addr[0] = bf->bf_buf_addr;
		info.buf_len[0] = skb->len;
		info.pkt_len = fi->framelen;
		info.keyix = fi->keyix;
		info.keytype = fi->keytype;

		if (aggr) {
			if (bf == bf_first)
				info.aggr = AGGR_BUF_FIRST;
			else if (!bf->bf_next)
				info.aggr = AGGR_BUF_LAST;
			else
				info.aggr = AGGR_BUF_MIDDLE;

			info.ndelim = bf->bf_state.ndelim;
			info.aggr_len = len;
		}

		ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
		bf = bf->bf_next;
	}
}
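/*
 * Each subframe's descriptor is thus tagged with its position in the
 * A-MPDU (first/middle/last) and linked to the next via info.link, so
 * the DMA engine walks the aggregate as one chain while info.aggr_len
 * carries the total on-air length computed by ath_tx_form_aggr().
 */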
static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct ieee80211_tx_info *tx_info;
	struct list_head bf_q;
	int aggr_len;

	do {
		if (skb_queue_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);

		if (tid->ac->clear_ps_filter) {
			tid->ac->clear_ps_filter = false;
			tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
		} else {
			tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
		}

		/* if only one frame, send as non-aggregate */
		if (bf == bf->bf_lastbf) {
			aggr_len = get_frame_info(bf->bf_mpdu)->framelen;
			bf->bf_state.bf_type = BUF_AMPDU;
		} else {
			TX_STAT_INC(txq->axq_qnum, a_aggr);
		}

		ath_tx_fill_desc(sc, bf, txq, aggr_len);
		ath_tx_txqaddbuf(sc, txq, &bf_q, false);
	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;
	u8 density;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	/* update ampdu factor/density, they may have changed. This may happen
	 * in HT IBSS when a beacon with HT-info is received after the station
	 * has already been added.
	 */
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
		an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
				     sta->ht_cap.ampdu_factor);
		density = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density);
		an->mpdudensity = density;
	}

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start = txtid->seq_next;
	txtid->bar_index = -1;

	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

	return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	ath_txq_lock(sc, txq);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;

	ath_tx_flush_tid(sc, txtid);
	ath_txq_unlock_complete(sc, txq);
}

void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
		       struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	bool buffered;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {

		if (!tid->sched)
			continue;

		ac = tid->ac;
		txq = ac->txq;

		ath_txq_lock(sc, txq);

		buffered = !skb_queue_empty(&tid->buf_q);

		tid->sched = false;
		list_del(&tid->list);

		if (ac->sched) {
			ac->sched = false;
			list_del(&ac->list);
		}

		ath_txq_unlock(sc, txq);

		ieee80211_sta_set_buffered(sta, tidno, buffered);
	}
}

void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		ath_txq_lock(sc, txq);
		ac->clear_ps_filter = true;

		if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
			ath_tx_queue_tid(txq, tid);
			ath_txq_schedule(sc, txq);
		}

		ath_txq_unlock_complete(sc, txq);
	}
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	txtid = ATH_AN_2_TID(an, tid);
	txtid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
	txtid->state |= AGGR_ADDBA_COMPLETE;
	txtid->state &= ~AGGR_ADDBA_PROGRESS;
	ath_tx_resume_tid(sc, txtid);
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_tx_queue_info qi;
	static const int subtype_txq_to_hwq[] = {
		[IEEE80211_AC_BE] = ATH_TXQ_AC_BE,
		[IEEE80211_AC_BK] = ATH_TXQ_AC_BK,
		[IEEE80211_AC_VI] = ATH_TXQ_AC_VI,
		[IEEE80211_AC_VO] = ATH_TXQ_AC_VO,
	};
	int axq_qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors.  Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames.  Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (axq_qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
		struct ath_txq *txq = &sc->tx.txq[axq_qnum];

		txq->axq_qnum = axq_qnum;
		txq->mac80211_qnum = -1;
		txq->axq_link = NULL;
		__skb_queue_head_init(&txq->complete_q);
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_ampdu_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<axq_qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
	}
	return &sc->tx.txq[axq_qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (cur_conf->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
	return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
}

static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
			       struct list_head *list, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	ts.ts_status = ATH9K_TX_FLUSH;
	INIT_LIST_HEAD(&bf_head);

	while (!list_empty(list)) {
		bf = list_first_entry(list, struct ath_buf, list);

		if (bf->bf_stale) {
			list_del(&bf->list);

			ath_tx_return_buffer(sc, bf);
			continue;
		}

		lastbf = bf->bf_lastbf;
		list_cut_position(&bf_head, list, &lastbf->list);

		txq->axq_depth--;
		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth--;

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
					     retry_tx);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
	}
}

/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	ath_txq_lock(sc, txq);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		int idx = txq->txq_tailidx;

		while (!list_empty(&txq->txq_fifo[idx])) {
			ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
					   retry_tx);

			INCR(idx, ATH_TXFIFO_DEPTH);
		}
		txq->txq_tailidx = idx;
	}

	txq->axq_link = NULL;
	txq->axq_tx_inprogress = false;
	ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);

	/* flush any pending frames if aggregation is enabled */
	if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && !retry_tx)
		ath_txq_drain_pending_buffers(sc, txq);

	ath_txq_unlock_complete(sc, txq);
}

bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i;
	u32 npend = 0;

	if (test_bit(SC_OP_INVALID, &sc->sc_flags))
		return true;

	ath9k_hw_abort_tx_dma(ah);

	/* Check if any queue remains active */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum))
			npend |= BIT(i);
	}

	if (npend)
		ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		/*
		 * The caller will resume queues with ieee80211_wake_queues.
		 * Mark the queue as not stopped to prevent ath_tx_complete
		 * from waking the queue too early.
		 */
		txq = &sc->tx.txq[i];
		txq->stopped = false;
		ath_draintxq(sc, txq, retry_tx);
	}

	return !npend;
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}

/* For each axq_acq entry, for each tid, try to schedule packets
 * for transmit until ampdu_depth has reached min Q depth.
 */
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp, *last_ac;
	struct ath_atx_tid *tid, *last_tid;

	if (test_bit(SC_OP_HW_RESET, &sc->sc_flags) ||
	    list_empty(&txq->axq_acq) ||
	    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
		return;

	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
		list_del(&ac->list);
		ac->sched = false;

		while (!list_empty(&ac->tid_q)) {
			tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
					       list);
			list_del(&tid->list);
			tid->sched = false;

			if (tid->paused)
				continue;

			ath_tx_sched_aggr(sc, txq, tid);

			/*
			 * add tid to round-robin queue if more frames
			 * are pending for the tid
			 */
			if (!skb_queue_empty(&tid->buf_q))
				ath_tx_queue_tid(txq, tid);

			if (tid == last_tid ||
			    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
				break;
		}

		if (!list_empty(&ac->tid_q) && !ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, &txq->axq_acq);
		}

		if (ac == last_ac ||
		    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
			return;
	}
}

/***********/
/* TX, DMA */
/***********/

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *bf_last;
	bool puttxbuf = false;
	bool edma;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	bf = list_first_entry(head, struct ath_buf, list);
	bf_last = list_entry(head->prev, struct ath_buf, list);

	ath_dbg(common, QUEUE, "qnum: %d, txq depth: %d\n",
		txq->axq_qnum, txq->axq_depth);

	if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
		list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		puttxbuf = true;
	} else {
		list_splice_tail_init(head, &txq->axq_q);

		if (txq->axq_link) {
			ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
			ath_dbg(common, XMIT, "link[%u] (%p)=%llx (%p)\n",
				txq->axq_qnum, txq->axq_link,
				ito64(bf->bf_daddr), bf->bf_desc);
		} else if (!edma)
			puttxbuf = true;

		txq->axq_link = bf_last->bf_desc;
	}

	if (puttxbuf) {
		TX_STAT_INC(txq->axq_qnum, puttxbuf);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_dbg(common, XMIT, "TXDP[%u] = %llx (%p)\n",
			txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	}

	if (!edma) {
		TX_STAT_INC(txq->axq_qnum, txstart);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}

	if (!internal) {
		txq->axq_depth++;
		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth++;
	}
}
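/*
 * Two queueing models meet here: legacy MACs chain descriptors in
 * software via axq_link and are kicked with ath9k_hw_txstart(), while
 * EDMA chips (AR9003 and later) have the head of each frame chain
 * pushed into one of the ATH_TXFIFO_DEPTH FIFO slots with
 * ath9k_hw_puttxbuf().
 */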
static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
			      struct sk_buff *skb, struct ath_tx_control *txctl)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct list_head bf_head;
	struct ath_buf *bf;

	/*
	 * Do not queue to h/w when any of the following conditions is true:
	 * - there are pending frames in software queue
	 * - the TID is currently paused for ADDBA/BAR request
	 * - seqno is not within block-ack window
	 * - h/w queue depth exceeds low water mark
	 */
	if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
	    !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
	    txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
		__skb_queue_tail(&tid->buf_q, skb);
		if (!txctl->an || !txctl->an->sleeping)
			ath_tx_queue_tid(txctl->txq, tid);
		return;
	}

	bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
	if (!bf) {
		ieee80211_free_txskb(sc->hw, skb);
		return;
	}

	bf->bf_state.bf_type = BUF_AMPDU;
	INIT_LIST_HEAD(&bf_head);
	list_add(&bf->list, &bf_head);

	/* Add sub-frame to BAW */
	ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);

	/* Queue to h/w without aggregation */
	TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
	bf->bf_lastbf = bf;
	ath_tx_fill_desc(sc, bf, txctl->txq, fi->framelen);
	ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
}

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid, struct sk_buff *skb)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct list_head bf_head;
	struct ath_buf *bf;

	bf = fi->bf;

	INIT_LIST_HEAD(&bf_head);
	list_add_tail(&bf->list, &bf_head);
	bf->bf_state.bf_type = 0;

	bf->bf_next = NULL;
	bf->bf_lastbf = bf;
	ath_tx_fill_desc(sc, bf, txq, fi->framelen);
	ath_tx_txqaddbuf(sc, txq, &bf_head, false);
	TX_STAT_INC(txq->axq_qnum, queued);
}

static void setup_frame_info(struct ieee80211_hw *hw,
			     struct ieee80211_sta *sta,
			     struct sk_buff *skb,
			     int framelen)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	const struct ieee80211_rate *rate;
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_node *an = NULL;
	enum ath9k_key_type keytype;
	bool short_preamble = false;

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	if (tx_info->control.vif &&
	    tx_info->control.vif->bss_conf.use_short_preamble)
		short_preamble = true;

	rate = ieee80211_get_rts_cts_rate(hw, tx_info);
	keytype = ath9k_cmn_get_hw_crypto_keytype(skb);

	if (sta)
		an = (struct ath_node *) sta->drv_priv;

	memset(fi, 0, sizeof(*fi));
	if (hw_key)
		fi->keyix = hw_key->hw_key_idx;
	else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
		fi->keyix = an->ps_key;
	else
		fi->keyix = ATH9K_TXKEYIX_INVALID;
	fi->keytype = keytype;
	fi->framelen = framelen;
	fi->rtscts_rate = rate->hw_value;
	if (short_preamble)
		fi->rtscts_rate |= rate->hw_value_short;
}

u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_channel *curchan = ah->curchan;

	if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
	    (curchan->channelFlags & CHANNEL_5GHZ) &&
	    (chainmask == 0x7) && (rate < 0x90))
		return 0x3;
	else if (AR_SREV_9462(ah) && ath9k_hw_btcoex_is_enabled(ah) &&
		 IS_CCK_RATE(rate))
		return 0x2;
	else
		return chainmask;
}

/*
 * Assign a descriptor (and sequence number if necessary),
 * and map the buffer for DMA.  Frees the skb on error.
 */
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
					   struct sk_buff *skb)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ath_buf *bf;
	int fragno;
	u16 seqno;

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
		ath_dbg(common, XMIT, "TX buffers are full\n");
		return NULL;
	}

	ATH_TXBUF_RESET(bf);

	if (tid) {
		fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
		seqno = tid->seq_next;
		hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);

		if (fragno)
			hdr->seq_ctrl |= cpu_to_le16(fragno);

		if (!ieee80211_has_morefrags(hdr->frame_control))
			INCR(tid->seq_next, IEEE80211_SEQ_MAX);

		bf->bf_state.seqno = seqno;
	}

	bf->bf_mpdu = skb;

	bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
					 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
		bf->bf_mpdu = NULL;
		bf->bf_buf_addr = 0;
		ath_err(ath9k_hw_common(sc->sc_ah),
			"dma_mapping_error() on TX\n");
		ath_tx_return_buffer(sc, bf);
		return NULL;
	}

	fi->bf = bf;

	return bf;
}
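/*
 * Sequence control layout note: the 802.11 seq_ctrl field keeps the
 * fragment number in bits 0-3 and the sequence number in bits 4-15,
 * so with tid->seq_next == 100 the code above writes 100 << 4 = 0x640
 * and ORs any fragment number back into the low bits.
 */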
/* FIXME: tx power */
static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
			     struct ath_tx_control *txctl)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf;
	u8 tidno;

	if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && txctl->an &&
	    ieee80211_is_data_qos(hdr->frame_control)) {
		tidno = ieee80211_get_qos_ctl(hdr)[0] &
			IEEE80211_QOS_CTL_TID_MASK;
		tid = ATH_AN_2_TID(txctl->an, tidno);

		WARN_ON(tid->ac->txq != txctl->txq);
	}

	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
		/*
		 * Try aggregation if it's a unicast data frame
		 * and the destination is HT capable.
		 */
		ath_tx_send_ampdu(sc, tid, skb, txctl);
	} else {
		bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
		if (!bf) {
			if (txctl->paprd)
				dev_kfree_skb_any(skb);
			else
				ieee80211_free_txskb(sc->hw, skb);
			return;
		}

		bf->bf_state.bfs_paprd = txctl->paprd;

		if (txctl->paprd)
			bf->bf_state.bfs_paprd_timestamp = jiffies;

		ath_tx_send_normal(sc, txctl->txq, tid, skb);
	}
}

/* Upon failure caller should free skb */
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
		 struct ath_tx_control *txctl)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = txctl->sta;
	struct ieee80211_vif *vif = info->control.vif;
	struct ath_softc *sc = hw->priv;
	struct ath_txq *txq = txctl->txq;
	int padpos, padsize;
	int frmlen = skb->len + FCS_LEN;
	int q;

	/* NOTE: sta can be NULL according to net/mac80211.h */
	if (sta)
		txctl->an = (struct ath_node *)sta->drv_priv;

	if (info->control.hw_key)
		frmlen += info->control.hw_key->icv_len;

	/*
	 * As a temporary workaround, assign seq# here; this will likely need
	 * to be cleaned up to work better with Beacon transmission and virtual
	 * BSSes.
	 */
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
			sc->tx.seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
	}

	/* Add the padding after the header if this is not already done */
	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize)
			return -ENOMEM;

		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
		hdr = (struct ieee80211_hdr *) skb->data;
	}

	if ((vif && vif->type != NL80211_IFTYPE_AP &&
	     vif->type != NL80211_IFTYPE_AP_VLAN) ||
	    !ieee80211_is_data(hdr->frame_control))
		info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;

	setup_frame_info(hw, sta, skb, frmlen);

	/*
	 * At this point, the vif, hw_key and sta pointers in the tx control
	 * info are no longer valid (overwritten by the ath_frame_info data).

	if ((vif && vif->type != NL80211_IFTYPE_AP &&
	     vif->type != NL80211_IFTYPE_AP_VLAN) ||
	    !ieee80211_is_data(hdr->frame_control))
		info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;

	setup_frame_info(hw, sta, skb, frmlen);

	/*
	 * At this point, the vif, hw_key and sta pointers in the tx control
	 * info are no longer valid (overwritten by the ath_frame_info data).
	 */

	q = skb_get_queue_mapping(skb);

	ath_txq_lock(sc, txq);
	if (txq == sc->tx.txq_map[q] &&
	    ++txq->pending_frames > sc->tx.txq_max_pending[q] &&
	    !txq->stopped) {
		ieee80211_stop_queue(sc->hw, q);
		txq->stopped = true;
	}

	ath_tx_start_dma(sc, skb, txctl);

	ath_txq_unlock(sc, txq);

	return 0;
}

/*****************/
/* TX Completion */
/*****************/

static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int q, padpos, padsize;
	unsigned long flags;

	ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);

	if (sc->sc_ah->caldata)
		sc->sc_ah->caldata->paprd_packet_sent = true;

	if (!(tx_flags & ATH_TX_ERROR))
		/* Frame was ACKed */
		tx_info->flags |= IEEE80211_TX_STAT_ACK;

	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos + padsize) {
		/*
		 * Remove MAC header padding before giving the frame back to
		 * mac80211.
		 */
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	spin_lock_irqsave(&sc->sc_pm_lock, flags);
	if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
		sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
		ath_dbg(common, PS,
			"Going back to sleep after having received TX status (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}
	spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

	q = skb_get_queue_mapping(skb);
	if (txq == sc->tx.txq_map[q]) {
		if (WARN_ON(--txq->pending_frames < 0))
			txq->pending_frames = 0;

		if (txq->stopped &&
		    txq->pending_frames < sc->tx.txq_max_pending[q]) {
			ieee80211_wake_queue(sc->hw, q);
			txq->stopped = false;
		}
	}

	__skb_queue_tail(&txq->complete_q, skb);
}
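
/*
 * Editorial note (sketch, not from the original source): the wake
 * condition above pairs with the stop condition in ath_tx_start(),
 * giving per-queue flow control with simple hysteresis:
 *
 *	stop:  ++pending_frames > txq_max_pending[q] && !stopped
 *	wake:  stopped && pending_frames < txq_max_pending[q]
 *
 * so a queue stopped under backpressure is woken only once completions
 * have drained it back below the limit.
 */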

static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	unsigned long flags;
	int tx_flags = 0;

	if (!txok)
		tx_flags |= ATH_TX_ERROR;

	if (ts->ts_status & ATH9K_TXERR_FILT)
		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;

	dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
	bf->bf_buf_addr = 0;

	if (bf->bf_state.bfs_paprd) {
		if (time_after(jiffies,
			       bf->bf_state.bfs_paprd_timestamp +
			       msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
			dev_kfree_skb_any(skb);
		else
			complete(&sc->paprd_complete);
	} else {
		ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
		ath_tx_complete(sc, skb, tx_flags, txq);
	}
	/*
	 * At this point, skb (bf->bf_mpdu) is consumed; make sure we don't
	 * accidentally reference it later.
	 */
	bf->bf_mpdu = NULL;

	/*
	 * Return the list of ath_buf structures of this mpdu to the free
	 * queue.
	 */
	spin_lock_irqsave(&sc->tx.txbuflock, flags);
	list_splice_tail_init(bf_q, &sc->tx.txbuf);
	spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}

static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = sc->hw;
	struct ath_hw *ah = sc->sc_ah;
	u8 i, tx_rateindex;

	if (txok)
		tx_info->status.ack_signal = ts->ts_rssi;

	tx_rateindex = ts->ts_rateindex;
	WARN_ON(tx_rateindex >= hw->max_rates);

	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;

		BUG_ON(nbad > nframes);
	}
	tx_info->status.ampdu_len = nframes;
	tx_info->status.ampdu_ack_len = nframes - nbad;

	if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
	    (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
		/*
		 * If an underrun error is seen, treat it as an excessive
		 * retry only if the max frame trigger level has been reached
		 * (2 KB for single stream, and 4 KB for dual stream).
		 * Adjust the long retry as if the frame were tried
		 * hw->max_rate_tries times to affect how rate control updates
		 * PER for the failed rate.
		 * In case of congestion on the bus, penalizing this type of
		 * underrun should help the hardware actually transmit new
		 * frames successfully by eventually preferring slower rates.
		 * This itself should also alleviate congestion on the bus.
		 */
		if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
					     ATH9K_TX_DELIM_UNDERRUN)) &&
		    ieee80211_is_data(hdr->frame_control) &&
		    ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
			tx_info->status.rates[tx_rateindex].count =
				hw->max_rate_tries;
	}

	for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
		tx_info->status.rates[i].count = 0;
		tx_info->status.rates[i].idx = -1;
	}

	tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
}
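
/*
 * Worked example (illustrative, not from the original source): for a
 * 16-subframe A-MPDU of which 3 subframes miss the block ack,
 * ath_tx_rc_status() reports ampdu_len = 16 and ampdu_ack_len = 13;
 * mac80211's rate control derives the packet error rate from that
 * ratio.  Rate table entries past ts_rateindex are terminated with
 * idx = -1 so rate control only evaluates the rates actually tried.
 */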

static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_tx_status *ts, struct ath_buf *bf,
				  struct list_head *bf_head)
{
	int txok;

	txq->axq_depth--;
	txok = !(ts->ts_status & ATH9K_TXERR_MASK);
	txq->axq_tx_inprogress = false;
	if (bf_is_ampdu_not_probing(bf))
		txq->axq_ampdu_depth--;

	if (!bf_isampdu(bf)) {
		ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
		ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
	} else
		ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
		ath_txq_schedule(sc, txq);
}

static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_tx_status ts;
	int status;

	ath_dbg(common, QUEUE, "tx queue %d (%x), link %p\n",
		txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		txq->axq_link);

	ath_txq_lock(sc, txq);
	for (;;) {
		if (test_bit(SC_OP_HW_RESET, &sc->sc_flags))
			break;

		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
				ath_txq_schedule(sc, txq);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition where a BH gets scheduled after
		 * sw writes TxE and before hw re-loads the last descriptor
		 * to get the newly chained one.  Software must keep the last
		 * DONE descriptor as a holding descriptor - software does so
		 * by marking it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_stale) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &txq->axq_q))
				break;

			bf = list_entry(bf_held->list.next, struct ath_buf,
					list);
		}

		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc;

		memset(&ts, 0, sizeof(ts));
		status = ath9k_hw_txprocdesc(ah, ds, &ts);
		if (status == -EINPROGRESS)
			break;

		TX_STAT_INC(txq->axq_qnum, txprocdesc);

		/*
		 * Remove the ath_buf's of the same transmit unit from txq,
		 * but leave the last descriptor behind as the holding
		 * descriptor for hw.
		 */
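		/*
		 * Illustrative timeline (editorial, not from the original
		 * source): descriptors A -> B are chained; A completes and
		 * this BH runs before the hardware re-reads A's link field
		 * to fetch B.  Recycling A at that point would let the
		 * hardware chase a stale pointer, so A stays on axq_q,
		 * marked stale, and is only returned on a later pass via
		 * the bf_held path above.
		 */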
		lastbf->bf_stale = true;
		INIT_LIST_HEAD(&bf_head);
		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
					  &txq->axq_q, lastbf->list.prev);

		if (bf_held) {
			list_del(&bf_held->list);
			ath_tx_return_buffer(sc, bf_held);
		}

		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
	}
	ath_txq_unlock_complete(sc, txq);
}

void ath_tx_tasklet(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1) & ah->intr_txqs;
	int i;

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
			ath_tx_processq(sc, &sc->tx.txq[i]);
	}
}

void ath_tx_edma_tasklet(struct ath_softc *sc)
{
	struct ath_tx_status ts;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct ath_txq *txq;
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	int status;

	for (;;) {
		if (test_bit(SC_OP_HW_RESET, &sc->sc_flags))
			break;

		status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
		if (status == -EINPROGRESS)
			break;
		if (status == -EIO) {
			ath_dbg(common, XMIT, "Error processing tx status\n");
			break;
		}

		/* Process beacon completions separately */
		if (ts.qid == sc->beacon.beaconq) {
			sc->beacon.tx_processed = true;
			sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);
			continue;
		}

		txq = &sc->tx.txq[ts.qid];

		ath_txq_lock(sc, txq);

		TX_STAT_INC(txq->axq_qnum, txprocdesc);

		if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
			ath_txq_unlock(sc, txq);
			return;
		}

		bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
				      struct ath_buf, list);
		lastbf = bf->bf_lastbf;

		INIT_LIST_HEAD(&bf_head);
		list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
				  &lastbf->list);

		if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);

			if (!list_empty(&txq->axq_q)) {
				struct list_head bf_q;

				INIT_LIST_HEAD(&bf_q);
				txq->axq_link = NULL;
				list_splice_tail_init(&txq->axq_q, &bf_q);
				ath_tx_txqaddbuf(sc, txq, &bf_q, true);
			}
		}

		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
		ath_txq_unlock_complete(sc, txq);
	}
}

/*****************/
/* Init, Cleanup */
/*****************/

static int ath_txstatus_setup(struct ath_softc *sc, int size)
{
	struct ath_descdma *dd = &sc->txsdma;
	u8 txs_len = sc->sc_ah->caps.txs_len;

	dd->dd_desc_len = size * txs_len;
	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
					 &dd->dd_desc_paddr, GFP_KERNEL);
	if (!dd->dd_desc)
		return -ENOMEM;

	return 0;
}

static int ath_tx_edma_init(struct ath_softc *sc)
{
	int err;

	err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
	if (!err)
		ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
					  sc->txsdma.dd_desc_paddr,
					  ATH_TXSTATUS_RING_SIZE);

	return err;
}

static void ath_tx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_descdma *dd = &sc->txsdma;

	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);
}
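
/*
 * Illustrative sketch (editorial, not driver code; the helper name is
 * hypothetical): the EDMA status ring above is one coherent allocation
 * of an ATH_TXSTATUS_RING_SIZE-entry ring, with one fixed-size status
 * descriptor of caps.txs_len bytes per entry.  The hardware writes one
 * entry per completed frame and ath_tx_edma_tasklet() consumes them in
 * order via ath9k_hw_txprocdesc().
 */
static inline u32 example_txs_ring_bytes(int size, u8 txs_len)
{
	return size * txs_len;	/* bytes of coherent DMA memory */
}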

int ath_tx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int error = 0;

	spin_lock_init(&sc->tx.txbuflock);

	error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
				  "tx", nbufs, 1, 1);
	if (error != 0) {
		ath_err(common,
			"Failed to allocate tx descriptors: %d\n", error);
		goto err;
	}

	error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
				  "beacon", ATH_BCBUF, 1, 1);
	if (error != 0) {
		ath_err(common,
			"Failed to allocate beacon descriptors: %d\n", error);
		goto err;
	}

	INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		error = ath_tx_edma_init(sc);
		if (error)
			goto err;
	}

err:
	if (error != 0)
		ath_tx_cleanup(sc);

	return error;
}

void ath_tx_cleanup(struct ath_softc *sc)
{
	if (sc->beacon.bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);

	if (sc->tx.txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_tx_edma_cleanup(sc);
}

void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < IEEE80211_NUM_TIDS;
	     tidno++, tid++) {
		tid->an = an;
		tid->tidno = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size = WME_MAX_BA;
		tid->baw_head = tid->baw_tail = 0;
		tid->sched = false;
		tid->paused = false;
		tid->state &= ~AGGR_CLEANUP;
		__skb_queue_head_init(&tid->buf_q);
		acno = TID_TO_WME_AC(tidno);
		tid->ac = &an->ac[acno];
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_ADDBA_PROGRESS;
	}

	for (acno = 0, ac = &an->ac[acno];
	     acno < IEEE80211_NUM_ACS; acno++, ac++) {
		ac->sched = false;
		ac->txq = sc->tx.txq_map[acno];
		INIT_LIST_HEAD(&ac->tid_q);
	}
}

void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		ath_txq_lock(sc, txq);

		if (tid->sched) {
			list_del(&tid->list);
			tid->sched = false;
		}

		if (ac->sched) {
			list_del(&ac->list);
			tid->ac->sched = false;
		}

		ath_tid_drain(sc, txq, tid);
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_CLEANUP;

		ath_txq_unlock(sc, txq);
	}
}
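
/*
 * Mapping note (editorial, not from the original source): the
 * TID_TO_WME_AC() translation used in ath_tx_node_init() follows the
 * standard 802.11 UP-to-AC table:
 *
 *	TIDs 0, 3 -> best effort (BE)
 *	TIDs 1, 2 -> background  (BK)
 *	TIDs 4, 5 -> video       (VI)
 *	TIDs 6, 7 -> voice       (VO)
 *
 * so all eight TIDs of a node share the four access-category queues
 * set up in sc->tx.txq_map[].
 */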