1 /* 2 * Copyright (c) 2008-2011 Atheros Communications Inc. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for any 5 * purpose with or without fee is hereby granted, provided that the above 6 * copyright notice and this permission notice appear in all copies. 7 * 8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 15 */ 16 17 #include <linux/dma-mapping.h> 18 #include "ath9k.h" 19 #include "ar9003_mac.h" 20 21 #define BITS_PER_BYTE 8 22 #define OFDM_PLCP_BITS 22 23 #define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1) 24 #define L_STF 8 25 #define L_LTF 8 26 #define L_SIG 4 27 #define HT_SIG 8 28 #define HT_STF 4 29 #define HT_LTF(_ns) (4 * (_ns)) 30 #define SYMBOL_TIME(_ns) ((_ns) << 2) /* ns * 4 us */ 31 #define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) /* ns * 3.6 us */ 32 #define TIME_SYMBOLS(t) ((t) >> 2) 33 #define TIME_SYMBOLS_HALFGI(t) (((t) * 5 - 4) / 18) 34 #define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2) 35 #define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18) 36 37 38 static u16 bits_per_symbol[][2] = { 39 /* 20MHz 40MHz */ 40 { 26, 54 }, /* 0: BPSK */ 41 { 52, 108 }, /* 1: QPSK 1/2 */ 42 { 78, 162 }, /* 2: QPSK 3/4 */ 43 { 104, 216 }, /* 3: 16-QAM 1/2 */ 44 { 156, 324 }, /* 4: 16-QAM 3/4 */ 45 { 208, 432 }, /* 5: 64-QAM 2/3 */ 46 { 234, 486 }, /* 6: 64-QAM 3/4 */ 47 { 260, 540 }, /* 7: 64-QAM 5/6 */ 48 }; 49 50 #define IS_HT_RATE(_rate) ((_rate) & 0x80) 51 52 static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq, 53 struct ath_atx_tid *tid, struct sk_buff *skb); 54 static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, 55 int tx_flags, struct ath_txq *txq); 56 static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, 57 struct ath_txq *txq, struct list_head *bf_q, 58 struct ath_tx_status *ts, int txok); 59 static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, 60 struct list_head *head, bool internal); 61 static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, 62 struct ath_tx_status *ts, int nframes, int nbad, 63 int txok); 64 static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid, 65 int seqno); 66 static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, 67 struct ath_txq *txq, 68 struct ath_atx_tid *tid, 69 struct sk_buff *skb); 70 71 enum { 72 MCS_HT20, 73 MCS_HT20_SGI, 74 MCS_HT40, 75 MCS_HT40_SGI, 76 }; 77 78 /*********************/ 79 /* Aggregation logic */ 80 /*********************/ 81 82 void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq) 83 __acquires(&txq->axq_lock) 84 { 85 spin_lock_bh(&txq->axq_lock); 86 } 87 88 void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq) 89 __releases(&txq->axq_lock) 90 { 91 spin_unlock_bh(&txq->axq_lock); 92 } 93 94 void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq) 95 __releases(&txq->axq_lock) 96 { 97 struct sk_buff_head q; 98 struct sk_buff *skb; 99 100 __skb_queue_head_init(&q); 101 skb_queue_splice_init(&txq->complete_q, &q); 102 spin_unlock_bh(&txq->axq_lock); 103 104 while ((skb = __skb_dequeue(&q))) 105 ieee80211_tx_status(sc->hw, skb); 106 } 107 108 static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid) 109 { 110 struct ath_atx_ac *ac = tid->ac; 111 112 if (tid->paused) 113 return; 114 115 if (tid->sched) 116 return; 117 118 tid->sched = true; 119 list_add_tail(&tid->list, &ac->tid_q); 120 121 if (ac->sched) 122 return; 123 124 ac->sched = true; 125 list_add_tail(&ac->list, &txq->axq_acq); 126 } 127 128 static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid) 129 { 130 struct ath_txq *txq = tid->ac->txq; 131 132 WARN_ON(!tid->paused); 133 134 ath_txq_lock(sc, txq); 135 tid->paused = false; 136 137 if (skb_queue_empty(&tid->buf_q)) 138 goto unlock; 139 140 ath_tx_queue_tid(txq, tid); 141 ath_txq_schedule(sc, txq); 142 unlock: 143 ath_txq_unlock_complete(sc, txq); 144 } 145 146 static struct ath_frame_info *get_frame_info(struct sk_buff *skb) 147 { 148 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 149 BUILD_BUG_ON(sizeof(struct ath_frame_info) > 150 sizeof(tx_info->rate_driver_data)); 151 return (struct ath_frame_info *) &tx_info->rate_driver_data[0]; 152 } 153 154 static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno) 155 { 156 ieee80211_send_bar(tid->an->vif, tid->an->sta->addr, tid->tidno, 157 seqno << IEEE80211_SEQ_SEQ_SHIFT); 158 } 159 160 static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) 161 { 162 struct ath_txq *txq = tid->ac->txq; 163 struct sk_buff *skb; 164 struct ath_buf *bf; 165 struct list_head bf_head; 166 struct ath_tx_status ts; 167 struct ath_frame_info *fi; 168 bool sendbar = false; 169 170 INIT_LIST_HEAD(&bf_head); 171 172 memset(&ts, 0, sizeof(ts)); 173 174 while ((skb = __skb_dequeue(&tid->buf_q))) { 175 fi = get_frame_info(skb); 176 bf = fi->bf; 177 178 if (!bf) { 179 bf = ath_tx_setup_buffer(sc, txq, tid, skb); 180 if (!bf) { 181 ieee80211_free_txskb(sc->hw, skb); 182 continue; 183 } 184 } 185 186 if (fi->retries) { 187 list_add_tail(&bf->list, &bf_head); 188 ath_tx_update_baw(sc, tid, bf->bf_state.seqno); 189 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0); 190 sendbar = true; 191 } else { 192 ath_tx_send_normal(sc, txq, NULL, skb); 193 } 194 } 195 196 if (tid->baw_head == tid->baw_tail) { 197 tid->state &= ~AGGR_ADDBA_COMPLETE; 198 tid->state &= ~AGGR_CLEANUP; 199 } 200 201 if (sendbar) { 202 ath_txq_unlock(sc, txq); 203 ath_send_bar(tid, tid->seq_start); 204 ath_txq_lock(sc, txq); 205 } 206 } 207 208 static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid, 209 int seqno) 210 { 211 int index, cindex; 212 213 index = ATH_BA_INDEX(tid->seq_start, seqno); 214 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 215 216 __clear_bit(cindex, tid->tx_buf); 217 218 while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) { 219 INCR(tid->seq_start, IEEE80211_SEQ_MAX); 220 INCR(tid->baw_head, ATH_TID_MAX_BUFS); 221 if (tid->bar_index >= 0) 222 tid->bar_index--; 223 } 224 } 225 226 static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid, 227 u16 seqno) 228 { 229 int index, cindex; 230 231 index = ATH_BA_INDEX(tid->seq_start, seqno); 232 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 233 __set_bit(cindex, tid->tx_buf); 234 235 if (index >= ((tid->baw_tail - tid->baw_head) & 236 (ATH_TID_MAX_BUFS - 1))) { 237 tid->baw_tail = cindex; 238 INCR(tid->baw_tail, ATH_TID_MAX_BUFS); 239 } 240 } 241 242 /* 243 * TODO: For frame(s) that are in the retry state, we will reuse the 244 * sequence number(s) without setting the retry bit. The 245 * alternative is to give up on these and BAR the receiver's window 246 * forward. 247 */ 248 static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq, 249 struct ath_atx_tid *tid) 250 251 { 252 struct sk_buff *skb; 253 struct ath_buf *bf; 254 struct list_head bf_head; 255 struct ath_tx_status ts; 256 struct ath_frame_info *fi; 257 258 memset(&ts, 0, sizeof(ts)); 259 INIT_LIST_HEAD(&bf_head); 260 261 while ((skb = __skb_dequeue(&tid->buf_q))) { 262 fi = get_frame_info(skb); 263 bf = fi->bf; 264 265 if (!bf) { 266 ath_tx_complete(sc, skb, ATH_TX_ERROR, txq); 267 continue; 268 } 269 270 list_add_tail(&bf->list, &bf_head); 271 272 if (fi->retries) 273 ath_tx_update_baw(sc, tid, bf->bf_state.seqno); 274 275 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0); 276 } 277 278 tid->seq_next = tid->seq_start; 279 tid->baw_tail = tid->baw_head; 280 tid->bar_index = -1; 281 } 282 283 static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq, 284 struct sk_buff *skb, int count) 285 { 286 struct ath_frame_info *fi = get_frame_info(skb); 287 struct ath_buf *bf = fi->bf; 288 struct ieee80211_hdr *hdr; 289 int prev = fi->retries; 290 291 TX_STAT_INC(txq->axq_qnum, a_retries); 292 fi->retries += count; 293 294 if (prev > 0) 295 return; 296 297 hdr = (struct ieee80211_hdr *)skb->data; 298 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY); 299 dma_sync_single_for_device(sc->dev, bf->bf_buf_addr, 300 sizeof(*hdr), DMA_TO_DEVICE); 301 } 302 303 static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc) 304 { 305 struct ath_buf *bf = NULL; 306 307 spin_lock_bh(&sc->tx.txbuflock); 308 309 if (unlikely(list_empty(&sc->tx.txbuf))) { 310 spin_unlock_bh(&sc->tx.txbuflock); 311 return NULL; 312 } 313 314 bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list); 315 bf->bf_next = NULL; 316 list_del(&bf->list); 317 318 spin_unlock_bh(&sc->tx.txbuflock); 319 320 return bf; 321 } 322 323 static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf) 324 { 325 spin_lock_bh(&sc->tx.txbuflock); 326 list_add_tail(&bf->list, &sc->tx.txbuf); 327 spin_unlock_bh(&sc->tx.txbuflock); 328 } 329 330 static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf) 331 { 332 struct ath_buf *tbf; 333 334 tbf = ath_tx_get_buffer(sc); 335 if (WARN_ON(!tbf)) 336 return NULL; 337 338 ATH_TXBUF_RESET(tbf); 339 340 tbf->bf_mpdu = bf->bf_mpdu; 341 tbf->bf_buf_addr = bf->bf_buf_addr; 342 memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len); 343 tbf->bf_state = bf->bf_state; 344 345 return tbf; 346 } 347 348 static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf, 349 struct ath_tx_status *ts, int txok, 350 int *nframes, int *nbad) 351 { 352 struct ath_frame_info *fi; 353 u16 seq_st = 0; 354 u32 ba[WME_BA_BMP_SIZE >> 5]; 355 int ba_index; 356 int isaggr = 0; 357 358 *nbad = 0; 359 *nframes = 0; 360 361 isaggr = bf_isaggr(bf); 362 if (isaggr) { 363 seq_st = ts->ts_seqnum; 364 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3); 365 } 366 367 while (bf) { 368 fi = get_frame_info(bf->bf_mpdu); 369 ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno); 370 371 (*nframes)++; 372 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index))) 373 (*nbad)++; 374 375 bf = bf->bf_next; 376 } 377 } 378 379 380 static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, 381 struct ath_buf *bf, struct list_head *bf_q, 382 struct ath_tx_status *ts, int txok, bool retry) 383 { 384 struct ath_node *an = NULL; 385 struct sk_buff *skb; 386 struct ieee80211_sta *sta; 387 struct ieee80211_hw *hw = sc->hw; 388 struct ieee80211_hdr *hdr; 389 struct ieee80211_tx_info *tx_info; 390 struct ath_atx_tid *tid = NULL; 391 struct ath_buf *bf_next, *bf_last = bf->bf_lastbf; 392 struct list_head bf_head; 393 struct sk_buff_head bf_pending; 394 u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first; 395 u32 ba[WME_BA_BMP_SIZE >> 5]; 396 int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0; 397 bool rc_update = true, isba; 398 struct ieee80211_tx_rate rates[4]; 399 struct ath_frame_info *fi; 400 int nframes; 401 u8 tidno; 402 bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH); 403 int i, retries; 404 int bar_index = -1; 405 406 skb = bf->bf_mpdu; 407 hdr = (struct ieee80211_hdr *)skb->data; 408 409 tx_info = IEEE80211_SKB_CB(skb); 410 411 memcpy(rates, tx_info->control.rates, sizeof(rates)); 412 413 retries = ts->ts_longretry + 1; 414 for (i = 0; i < ts->ts_rateindex; i++) 415 retries += rates[i].count; 416 417 rcu_read_lock(); 418 419 sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2); 420 if (!sta) { 421 rcu_read_unlock(); 422 423 INIT_LIST_HEAD(&bf_head); 424 while (bf) { 425 bf_next = bf->bf_next; 426 427 if (!bf->bf_stale || bf_next != NULL) 428 list_move_tail(&bf->list, &bf_head); 429 430 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0); 431 432 bf = bf_next; 433 } 434 return; 435 } 436 437 an = (struct ath_node *)sta->drv_priv; 438 tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK; 439 tid = ATH_AN_2_TID(an, tidno); 440 seq_first = tid->seq_start; 441 isba = ts->ts_flags & ATH9K_TX_BA; 442 443 /* 444 * The hardware occasionally sends a tx status for the wrong TID. 445 * In this case, the BA status cannot be considered valid and all 446 * subframes need to be retransmitted 447 * 448 * Only BlockAcks have a TID and therefore normal Acks cannot be 449 * checked 450 */ 451 if (isba && tidno != ts->tid) 452 txok = false; 453 454 isaggr = bf_isaggr(bf); 455 memset(ba, 0, WME_BA_BMP_SIZE >> 3); 456 457 if (isaggr && txok) { 458 if (ts->ts_flags & ATH9K_TX_BA) { 459 seq_st = ts->ts_seqnum; 460 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3); 461 } else { 462 /* 463 * AR5416 can become deaf/mute when BA 464 * issue happens. Chip needs to be reset. 465 * But AP code may have sychronization issues 466 * when perform internal reset in this routine. 467 * Only enable reset in STA mode for now. 468 */ 469 if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION) 470 needreset = 1; 471 } 472 } 473 474 __skb_queue_head_init(&bf_pending); 475 476 ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad); 477 while (bf) { 478 u16 seqno = bf->bf_state.seqno; 479 480 txfail = txpending = sendbar = 0; 481 bf_next = bf->bf_next; 482 483 skb = bf->bf_mpdu; 484 tx_info = IEEE80211_SKB_CB(skb); 485 fi = get_frame_info(skb); 486 487 if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) { 488 /* transmit completion, subframe is 489 * acked by block ack */ 490 acked_cnt++; 491 } else if (!isaggr && txok) { 492 /* transmit completion */ 493 acked_cnt++; 494 } else if ((tid->state & AGGR_CLEANUP) || !retry) { 495 /* 496 * cleanup in progress, just fail 497 * the un-acked sub-frames 498 */ 499 txfail = 1; 500 } else if (flush) { 501 txpending = 1; 502 } else if (fi->retries < ATH_MAX_SW_RETRIES) { 503 if (txok || !an->sleeping) 504 ath_tx_set_retry(sc, txq, bf->bf_mpdu, 505 retries); 506 507 txpending = 1; 508 } else { 509 txfail = 1; 510 txfail_cnt++; 511 bar_index = max_t(int, bar_index, 512 ATH_BA_INDEX(seq_first, seqno)); 513 } 514 515 /* 516 * Make sure the last desc is reclaimed if it 517 * not a holding desc. 518 */ 519 INIT_LIST_HEAD(&bf_head); 520 if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) || 521 bf_next != NULL || !bf_last->bf_stale) 522 list_move_tail(&bf->list, &bf_head); 523 524 if (!txpending || (tid->state & AGGR_CLEANUP)) { 525 /* 526 * complete the acked-ones/xretried ones; update 527 * block-ack window 528 */ 529 ath_tx_update_baw(sc, tid, seqno); 530 531 if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) { 532 memcpy(tx_info->control.rates, rates, sizeof(rates)); 533 ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok); 534 rc_update = false; 535 } 536 537 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 538 !txfail); 539 } else { 540 /* retry the un-acked ones */ 541 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) && 542 bf->bf_next == NULL && bf_last->bf_stale) { 543 struct ath_buf *tbf; 544 545 tbf = ath_clone_txbuf(sc, bf_last); 546 /* 547 * Update tx baw and complete the 548 * frame with failed status if we 549 * run out of tx buf. 550 */ 551 if (!tbf) { 552 ath_tx_update_baw(sc, tid, seqno); 553 554 ath_tx_complete_buf(sc, bf, txq, 555 &bf_head, ts, 0); 556 bar_index = max_t(int, bar_index, 557 ATH_BA_INDEX(seq_first, seqno)); 558 break; 559 } 560 561 fi->bf = tbf; 562 } 563 564 /* 565 * Put this buffer to the temporary pending 566 * queue to retain ordering 567 */ 568 __skb_queue_tail(&bf_pending, skb); 569 } 570 571 bf = bf_next; 572 } 573 574 /* prepend un-acked frames to the beginning of the pending frame queue */ 575 if (!skb_queue_empty(&bf_pending)) { 576 if (an->sleeping) 577 ieee80211_sta_set_buffered(sta, tid->tidno, true); 578 579 skb_queue_splice(&bf_pending, &tid->buf_q); 580 if (!an->sleeping) { 581 ath_tx_queue_tid(txq, tid); 582 583 if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY)) 584 tid->ac->clear_ps_filter = true; 585 } 586 } 587 588 if (bar_index >= 0) { 589 u16 bar_seq = ATH_BA_INDEX2SEQ(seq_first, bar_index); 590 591 if (BAW_WITHIN(tid->seq_start, tid->baw_size, bar_seq)) 592 tid->bar_index = ATH_BA_INDEX(tid->seq_start, bar_seq); 593 594 ath_txq_unlock(sc, txq); 595 ath_send_bar(tid, ATH_BA_INDEX2SEQ(seq_first, bar_index + 1)); 596 ath_txq_lock(sc, txq); 597 } 598 599 if (tid->state & AGGR_CLEANUP) 600 ath_tx_flush_tid(sc, tid); 601 602 rcu_read_unlock(); 603 604 if (needreset) 605 ath9k_queue_reset(sc, RESET_TYPE_TX_ERROR); 606 } 607 608 static bool ath_lookup_legacy(struct ath_buf *bf) 609 { 610 struct sk_buff *skb; 611 struct ieee80211_tx_info *tx_info; 612 struct ieee80211_tx_rate *rates; 613 int i; 614 615 skb = bf->bf_mpdu; 616 tx_info = IEEE80211_SKB_CB(skb); 617 rates = tx_info->control.rates; 618 619 for (i = 0; i < 4; i++) { 620 if (!rates[i].count || rates[i].idx < 0) 621 break; 622 623 if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) 624 return true; 625 } 626 627 return false; 628 } 629 630 static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf, 631 struct ath_atx_tid *tid) 632 { 633 struct sk_buff *skb; 634 struct ieee80211_tx_info *tx_info; 635 struct ieee80211_tx_rate *rates; 636 u32 max_4ms_framelen, frmlen; 637 u16 aggr_limit, bt_aggr_limit, legacy = 0; 638 int q = tid->ac->txq->mac80211_qnum; 639 int i; 640 641 skb = bf->bf_mpdu; 642 tx_info = IEEE80211_SKB_CB(skb); 643 rates = tx_info->control.rates; 644 645 /* 646 * Find the lowest frame length among the rate series that will have a 647 * 4ms (or TXOP limited) transmit duration. 648 */ 649 max_4ms_framelen = ATH_AMPDU_LIMIT_MAX; 650 651 for (i = 0; i < 4; i++) { 652 int modeidx; 653 654 if (!rates[i].count) 655 continue; 656 657 if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) { 658 legacy = 1; 659 break; 660 } 661 662 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) 663 modeidx = MCS_HT40; 664 else 665 modeidx = MCS_HT20; 666 667 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI) 668 modeidx++; 669 670 frmlen = sc->tx.max_aggr_framelen[q][modeidx][rates[i].idx]; 671 max_4ms_framelen = min(max_4ms_framelen, frmlen); 672 } 673 674 /* 675 * limit aggregate size by the minimum rate if rate selected is 676 * not a probe rate, if rate selected is a probe rate then 677 * avoid aggregation of this packet. 678 */ 679 if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy) 680 return 0; 681 682 aggr_limit = min(max_4ms_framelen, (u32)ATH_AMPDU_LIMIT_MAX); 683 684 /* 685 * Override the default aggregation limit for BTCOEX. 686 */ 687 bt_aggr_limit = ath9k_btcoex_aggr_limit(sc, max_4ms_framelen); 688 if (bt_aggr_limit) 689 aggr_limit = bt_aggr_limit; 690 691 /* 692 * h/w can accept aggregates up to 16 bit lengths (65535). 693 * The IE, however can hold up to 65536, which shows up here 694 * as zero. Ignore 65536 since we are constrained by hw. 695 */ 696 if (tid->an->maxampdu) 697 aggr_limit = min(aggr_limit, tid->an->maxampdu); 698 699 return aggr_limit; 700 } 701 702 /* 703 * Returns the number of delimiters to be added to 704 * meet the minimum required mpdudensity. 705 */ 706 static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid, 707 struct ath_buf *bf, u16 frmlen, 708 bool first_subfrm) 709 { 710 #define FIRST_DESC_NDELIMS 60 711 struct sk_buff *skb = bf->bf_mpdu; 712 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 713 u32 nsymbits, nsymbols; 714 u16 minlen; 715 u8 flags, rix; 716 int width, streams, half_gi, ndelim, mindelim; 717 struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu); 718 719 /* Select standard number of delimiters based on frame length alone */ 720 ndelim = ATH_AGGR_GET_NDELIM(frmlen); 721 722 /* 723 * If encryption enabled, hardware requires some more padding between 724 * subframes. 725 * TODO - this could be improved to be dependent on the rate. 726 * The hardware can keep up at lower rates, but not higher rates 727 */ 728 if ((fi->keyix != ATH9K_TXKEYIX_INVALID) && 729 !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) 730 ndelim += ATH_AGGR_ENCRYPTDELIM; 731 732 /* 733 * Add delimiter when using RTS/CTS with aggregation 734 * and non enterprise AR9003 card 735 */ 736 if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) && 737 (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE)) 738 ndelim = max(ndelim, FIRST_DESC_NDELIMS); 739 740 /* 741 * Convert desired mpdu density from microeconds to bytes based 742 * on highest rate in rate series (i.e. first rate) to determine 743 * required minimum length for subframe. Take into account 744 * whether high rate is 20 or 40Mhz and half or full GI. 745 * 746 * If there is no mpdu density restriction, no further calculation 747 * is needed. 748 */ 749 750 if (tid->an->mpdudensity == 0) 751 return ndelim; 752 753 rix = tx_info->control.rates[0].idx; 754 flags = tx_info->control.rates[0].flags; 755 width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0; 756 half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0; 757 758 if (half_gi) 759 nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity); 760 else 761 nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity); 762 763 if (nsymbols == 0) 764 nsymbols = 1; 765 766 streams = HT_RC_2_STREAMS(rix); 767 nsymbits = bits_per_symbol[rix % 8][width] * streams; 768 minlen = (nsymbols * nsymbits) / BITS_PER_BYTE; 769 770 if (frmlen < minlen) { 771 mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ; 772 ndelim = max(mindelim, ndelim); 773 } 774 775 return ndelim; 776 } 777 778 static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, 779 struct ath_txq *txq, 780 struct ath_atx_tid *tid, 781 struct list_head *bf_q, 782 int *aggr_len) 783 { 784 #define PADBYTES(_len) ((4 - ((_len) % 4)) % 4) 785 struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL; 786 int rl = 0, nframes = 0, ndelim, prev_al = 0; 787 u16 aggr_limit = 0, al = 0, bpad = 0, 788 al_delta, h_baw = tid->baw_size / 2; 789 enum ATH_AGGR_STATUS status = ATH_AGGR_DONE; 790 struct ieee80211_tx_info *tx_info; 791 struct ath_frame_info *fi; 792 struct sk_buff *skb; 793 u16 seqno; 794 795 do { 796 skb = skb_peek(&tid->buf_q); 797 fi = get_frame_info(skb); 798 bf = fi->bf; 799 if (!fi->bf) 800 bf = ath_tx_setup_buffer(sc, txq, tid, skb); 801 802 if (!bf) { 803 __skb_unlink(skb, &tid->buf_q); 804 ieee80211_free_txskb(sc->hw, skb); 805 continue; 806 } 807 808 bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR; 809 seqno = bf->bf_state.seqno; 810 811 /* do not step over block-ack window */ 812 if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) { 813 status = ATH_AGGR_BAW_CLOSED; 814 break; 815 } 816 817 if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) { 818 struct ath_tx_status ts = {}; 819 struct list_head bf_head; 820 821 INIT_LIST_HEAD(&bf_head); 822 list_add(&bf->list, &bf_head); 823 __skb_unlink(skb, &tid->buf_q); 824 ath_tx_update_baw(sc, tid, seqno); 825 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0); 826 continue; 827 } 828 829 if (!bf_first) 830 bf_first = bf; 831 832 if (!rl) { 833 aggr_limit = ath_lookup_rate(sc, bf, tid); 834 rl = 1; 835 } 836 837 /* do not exceed aggregation limit */ 838 al_delta = ATH_AGGR_DELIM_SZ + fi->framelen; 839 840 if (nframes && 841 ((aggr_limit < (al + bpad + al_delta + prev_al)) || 842 ath_lookup_legacy(bf))) { 843 status = ATH_AGGR_LIMITED; 844 break; 845 } 846 847 tx_info = IEEE80211_SKB_CB(bf->bf_mpdu); 848 if (nframes && (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) 849 break; 850 851 /* do not exceed subframe limit */ 852 if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) { 853 status = ATH_AGGR_LIMITED; 854 break; 855 } 856 857 /* add padding for previous frame to aggregation length */ 858 al += bpad + al_delta; 859 860 /* 861 * Get the delimiters needed to meet the MPDU 862 * density for this node. 863 */ 864 ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen, 865 !nframes); 866 bpad = PADBYTES(al_delta) + (ndelim << 2); 867 868 nframes++; 869 bf->bf_next = NULL; 870 871 /* link buffers of this frame to the aggregate */ 872 if (!fi->retries) 873 ath_tx_addto_baw(sc, tid, seqno); 874 bf->bf_state.ndelim = ndelim; 875 876 __skb_unlink(skb, &tid->buf_q); 877 list_add_tail(&bf->list, bf_q); 878 if (bf_prev) 879 bf_prev->bf_next = bf; 880 881 bf_prev = bf; 882 883 } while (!skb_queue_empty(&tid->buf_q)); 884 885 *aggr_len = al; 886 887 return status; 888 #undef PADBYTES 889 } 890 891 /* 892 * rix - rate index 893 * pktlen - total bytes (delims + data + fcs + pads + pad delims) 894 * width - 0 for 20 MHz, 1 for 40 MHz 895 * half_gi - to use 4us v/s 3.6 us for symbol time 896 */ 897 static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen, 898 int width, int half_gi, bool shortPreamble) 899 { 900 u32 nbits, nsymbits, duration, nsymbols; 901 int streams; 902 903 /* find number of symbols: PLCP + data */ 904 streams = HT_RC_2_STREAMS(rix); 905 nbits = (pktlen << 3) + OFDM_PLCP_BITS; 906 nsymbits = bits_per_symbol[rix % 8][width] * streams; 907 nsymbols = (nbits + nsymbits - 1) / nsymbits; 908 909 if (!half_gi) 910 duration = SYMBOL_TIME(nsymbols); 911 else 912 duration = SYMBOL_TIME_HALFGI(nsymbols); 913 914 /* addup duration for legacy/ht training and signal fields */ 915 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams); 916 917 return duration; 918 } 919 920 static int ath_max_framelen(int usec, int mcs, bool ht40, bool sgi) 921 { 922 int streams = HT_RC_2_STREAMS(mcs); 923 int symbols, bits; 924 int bytes = 0; 925 926 symbols = sgi ? TIME_SYMBOLS_HALFGI(usec) : TIME_SYMBOLS(usec); 927 bits = symbols * bits_per_symbol[mcs % 8][ht40] * streams; 928 bits -= OFDM_PLCP_BITS; 929 bytes = bits / 8; 930 bytes -= L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams); 931 if (bytes > 65532) 932 bytes = 65532; 933 934 return bytes; 935 } 936 937 void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop) 938 { 939 u16 *cur_ht20, *cur_ht20_sgi, *cur_ht40, *cur_ht40_sgi; 940 int mcs; 941 942 /* 4ms is the default (and maximum) duration */ 943 if (!txop || txop > 4096) 944 txop = 4096; 945 946 cur_ht20 = sc->tx.max_aggr_framelen[queue][MCS_HT20]; 947 cur_ht20_sgi = sc->tx.max_aggr_framelen[queue][MCS_HT20_SGI]; 948 cur_ht40 = sc->tx.max_aggr_framelen[queue][MCS_HT40]; 949 cur_ht40_sgi = sc->tx.max_aggr_framelen[queue][MCS_HT40_SGI]; 950 for (mcs = 0; mcs < 32; mcs++) { 951 cur_ht20[mcs] = ath_max_framelen(txop, mcs, false, false); 952 cur_ht20_sgi[mcs] = ath_max_framelen(txop, mcs, false, true); 953 cur_ht40[mcs] = ath_max_framelen(txop, mcs, true, false); 954 cur_ht40_sgi[mcs] = ath_max_framelen(txop, mcs, true, true); 955 } 956 } 957 958 static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, 959 struct ath_tx_info *info, int len) 960 { 961 struct ath_hw *ah = sc->sc_ah; 962 struct sk_buff *skb; 963 struct ieee80211_tx_info *tx_info; 964 struct ieee80211_tx_rate *rates; 965 const struct ieee80211_rate *rate; 966 struct ieee80211_hdr *hdr; 967 struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu); 968 int i; 969 u8 rix = 0; 970 971 skb = bf->bf_mpdu; 972 tx_info = IEEE80211_SKB_CB(skb); 973 rates = tx_info->control.rates; 974 hdr = (struct ieee80211_hdr *)skb->data; 975 976 /* set dur_update_en for l-sig computation except for PS-Poll frames */ 977 info->dur_update = !ieee80211_is_pspoll(hdr->frame_control); 978 info->rtscts_rate = fi->rtscts_rate; 979 980 for (i = 0; i < 4; i++) { 981 bool is_40, is_sgi, is_sp; 982 int phy; 983 984 if (!rates[i].count || (rates[i].idx < 0)) 985 continue; 986 987 rix = rates[i].idx; 988 info->rates[i].Tries = rates[i].count; 989 990 if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) { 991 info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS; 992 info->flags |= ATH9K_TXDESC_RTSENA; 993 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { 994 info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS; 995 info->flags |= ATH9K_TXDESC_CTSENA; 996 } 997 998 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) 999 info->rates[i].RateFlags |= ATH9K_RATESERIES_2040; 1000 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI) 1001 info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI; 1002 1003 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI); 1004 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH); 1005 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE); 1006 1007 if (rates[i].flags & IEEE80211_TX_RC_MCS) { 1008 /* MCS rates */ 1009 info->rates[i].Rate = rix | 0x80; 1010 info->rates[i].ChSel = ath_txchainmask_reduction(sc, 1011 ah->txchainmask, info->rates[i].Rate); 1012 info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len, 1013 is_40, is_sgi, is_sp); 1014 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC)) 1015 info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC; 1016 continue; 1017 } 1018 1019 /* legacy rates */ 1020 rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx]; 1021 if ((tx_info->band == IEEE80211_BAND_2GHZ) && 1022 !(rate->flags & IEEE80211_RATE_ERP_G)) 1023 phy = WLAN_RC_PHY_CCK; 1024 else 1025 phy = WLAN_RC_PHY_OFDM; 1026 1027 info->rates[i].Rate = rate->hw_value; 1028 if (rate->hw_value_short) { 1029 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) 1030 info->rates[i].Rate |= rate->hw_value_short; 1031 } else { 1032 is_sp = false; 1033 } 1034 1035 if (bf->bf_state.bfs_paprd) 1036 info->rates[i].ChSel = ah->txchainmask; 1037 else 1038 info->rates[i].ChSel = ath_txchainmask_reduction(sc, 1039 ah->txchainmask, info->rates[i].Rate); 1040 1041 info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah, 1042 phy, rate->bitrate * 100, len, rix, is_sp); 1043 } 1044 1045 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */ 1046 if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit)) 1047 info->flags &= ~ATH9K_TXDESC_RTSENA; 1048 1049 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */ 1050 if (info->flags & ATH9K_TXDESC_RTSENA) 1051 info->flags &= ~ATH9K_TXDESC_CTSENA; 1052 } 1053 1054 static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb) 1055 { 1056 struct ieee80211_hdr *hdr; 1057 enum ath9k_pkt_type htype; 1058 __le16 fc; 1059 1060 hdr = (struct ieee80211_hdr *)skb->data; 1061 fc = hdr->frame_control; 1062 1063 if (ieee80211_is_beacon(fc)) 1064 htype = ATH9K_PKT_TYPE_BEACON; 1065 else if (ieee80211_is_probe_resp(fc)) 1066 htype = ATH9K_PKT_TYPE_PROBE_RESP; 1067 else if (ieee80211_is_atim(fc)) 1068 htype = ATH9K_PKT_TYPE_ATIM; 1069 else if (ieee80211_is_pspoll(fc)) 1070 htype = ATH9K_PKT_TYPE_PSPOLL; 1071 else 1072 htype = ATH9K_PKT_TYPE_NORMAL; 1073 1074 return htype; 1075 } 1076 1077 static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf, 1078 struct ath_txq *txq, int len) 1079 { 1080 struct ath_hw *ah = sc->sc_ah; 1081 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(bf->bf_mpdu); 1082 struct ath_buf *bf_first = bf; 1083 struct ath_tx_info info; 1084 bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR); 1085 1086 memset(&info, 0, sizeof(info)); 1087 info.is_first = true; 1088 info.is_last = true; 1089 info.txpower = MAX_RATE_POWER; 1090 info.qcu = txq->axq_qnum; 1091 1092 info.flags = ATH9K_TXDESC_INTREQ; 1093 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) 1094 info.flags |= ATH9K_TXDESC_NOACK; 1095 if (tx_info->flags & IEEE80211_TX_CTL_LDPC) 1096 info.flags |= ATH9K_TXDESC_LDPC; 1097 1098 ath_buf_set_rate(sc, bf, &info, len); 1099 1100 if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) 1101 info.flags |= ATH9K_TXDESC_CLRDMASK; 1102 1103 if (bf->bf_state.bfs_paprd) 1104 info.flags |= (u32) bf->bf_state.bfs_paprd << ATH9K_TXDESC_PAPRD_S; 1105 1106 1107 while (bf) { 1108 struct sk_buff *skb = bf->bf_mpdu; 1109 struct ath_frame_info *fi = get_frame_info(skb); 1110 1111 info.type = get_hw_packet_type(skb); 1112 if (bf->bf_next) 1113 info.link = bf->bf_next->bf_daddr; 1114 else 1115 info.link = 0; 1116 1117 info.buf_addr[0] = bf->bf_buf_addr; 1118 info.buf_len[0] = skb->len; 1119 info.pkt_len = fi->framelen; 1120 info.keyix = fi->keyix; 1121 info.keytype = fi->keytype; 1122 1123 if (aggr) { 1124 if (bf == bf_first) 1125 info.aggr = AGGR_BUF_FIRST; 1126 else if (!bf->bf_next) 1127 info.aggr = AGGR_BUF_LAST; 1128 else 1129 info.aggr = AGGR_BUF_MIDDLE; 1130 1131 info.ndelim = bf->bf_state.ndelim; 1132 info.aggr_len = len; 1133 } 1134 1135 ath9k_hw_set_txdesc(ah, bf->bf_desc, &info); 1136 bf = bf->bf_next; 1137 } 1138 } 1139 1140 static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq, 1141 struct ath_atx_tid *tid) 1142 { 1143 struct ath_buf *bf; 1144 enum ATH_AGGR_STATUS status; 1145 struct ieee80211_tx_info *tx_info; 1146 struct list_head bf_q; 1147 int aggr_len; 1148 1149 do { 1150 if (skb_queue_empty(&tid->buf_q)) 1151 return; 1152 1153 INIT_LIST_HEAD(&bf_q); 1154 1155 status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len); 1156 1157 /* 1158 * no frames picked up to be aggregated; 1159 * block-ack window is not open. 1160 */ 1161 if (list_empty(&bf_q)) 1162 break; 1163 1164 bf = list_first_entry(&bf_q, struct ath_buf, list); 1165 bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list); 1166 tx_info = IEEE80211_SKB_CB(bf->bf_mpdu); 1167 1168 if (tid->ac->clear_ps_filter) { 1169 tid->ac->clear_ps_filter = false; 1170 tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; 1171 } else { 1172 tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT; 1173 } 1174 1175 /* if only one frame, send as non-aggregate */ 1176 if (bf == bf->bf_lastbf) { 1177 aggr_len = get_frame_info(bf->bf_mpdu)->framelen; 1178 bf->bf_state.bf_type = BUF_AMPDU; 1179 } else { 1180 TX_STAT_INC(txq->axq_qnum, a_aggr); 1181 } 1182 1183 ath_tx_fill_desc(sc, bf, txq, aggr_len); 1184 ath_tx_txqaddbuf(sc, txq, &bf_q, false); 1185 } while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH && 1186 status != ATH_AGGR_BAW_CLOSED); 1187 } 1188 1189 int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, 1190 u16 tid, u16 *ssn) 1191 { 1192 struct ath_atx_tid *txtid; 1193 struct ath_node *an; 1194 u8 density; 1195 1196 an = (struct ath_node *)sta->drv_priv; 1197 txtid = ATH_AN_2_TID(an, tid); 1198 1199 if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE)) 1200 return -EAGAIN; 1201 1202 /* update ampdu factor/density, they may have changed. This may happen 1203 * in HT IBSS when a beacon with HT-info is received after the station 1204 * has already been added. 1205 */ 1206 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) { 1207 an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR + 1208 sta->ht_cap.ampdu_factor); 1209 density = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density); 1210 an->mpdudensity = density; 1211 } 1212 1213 txtid->state |= AGGR_ADDBA_PROGRESS; 1214 txtid->paused = true; 1215 *ssn = txtid->seq_start = txtid->seq_next; 1216 txtid->bar_index = -1; 1217 1218 memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf)); 1219 txtid->baw_head = txtid->baw_tail = 0; 1220 1221 return 0; 1222 } 1223 1224 void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) 1225 { 1226 struct ath_node *an = (struct ath_node *)sta->drv_priv; 1227 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid); 1228 struct ath_txq *txq = txtid->ac->txq; 1229 1230 if (txtid->state & AGGR_CLEANUP) 1231 return; 1232 1233 if (!(txtid->state & AGGR_ADDBA_COMPLETE)) { 1234 txtid->state &= ~AGGR_ADDBA_PROGRESS; 1235 return; 1236 } 1237 1238 ath_txq_lock(sc, txq); 1239 txtid->paused = true; 1240 1241 /* 1242 * If frames are still being transmitted for this TID, they will be 1243 * cleaned up during tx completion. To prevent race conditions, this 1244 * TID can only be reused after all in-progress subframes have been 1245 * completed. 1246 */ 1247 if (txtid->baw_head != txtid->baw_tail) 1248 txtid->state |= AGGR_CLEANUP; 1249 else 1250 txtid->state &= ~AGGR_ADDBA_COMPLETE; 1251 1252 ath_tx_flush_tid(sc, txtid); 1253 ath_txq_unlock_complete(sc, txq); 1254 } 1255 1256 void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc, 1257 struct ath_node *an) 1258 { 1259 struct ath_atx_tid *tid; 1260 struct ath_atx_ac *ac; 1261 struct ath_txq *txq; 1262 bool buffered; 1263 int tidno; 1264 1265 for (tidno = 0, tid = &an->tid[tidno]; 1266 tidno < WME_NUM_TID; tidno++, tid++) { 1267 1268 if (!tid->sched) 1269 continue; 1270 1271 ac = tid->ac; 1272 txq = ac->txq; 1273 1274 ath_txq_lock(sc, txq); 1275 1276 buffered = !skb_queue_empty(&tid->buf_q); 1277 1278 tid->sched = false; 1279 list_del(&tid->list); 1280 1281 if (ac->sched) { 1282 ac->sched = false; 1283 list_del(&ac->list); 1284 } 1285 1286 ath_txq_unlock(sc, txq); 1287 1288 ieee80211_sta_set_buffered(sta, tidno, buffered); 1289 } 1290 } 1291 1292 void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an) 1293 { 1294 struct ath_atx_tid *tid; 1295 struct ath_atx_ac *ac; 1296 struct ath_txq *txq; 1297 int tidno; 1298 1299 for (tidno = 0, tid = &an->tid[tidno]; 1300 tidno < WME_NUM_TID; tidno++, tid++) { 1301 1302 ac = tid->ac; 1303 txq = ac->txq; 1304 1305 ath_txq_lock(sc, txq); 1306 ac->clear_ps_filter = true; 1307 1308 if (!skb_queue_empty(&tid->buf_q) && !tid->paused) { 1309 ath_tx_queue_tid(txq, tid); 1310 ath_txq_schedule(sc, txq); 1311 } 1312 1313 ath_txq_unlock_complete(sc, txq); 1314 } 1315 } 1316 1317 void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) 1318 { 1319 struct ath_atx_tid *txtid; 1320 struct ath_node *an; 1321 1322 an = (struct ath_node *)sta->drv_priv; 1323 1324 txtid = ATH_AN_2_TID(an, tid); 1325 txtid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor; 1326 txtid->state |= AGGR_ADDBA_COMPLETE; 1327 txtid->state &= ~AGGR_ADDBA_PROGRESS; 1328 ath_tx_resume_tid(sc, txtid); 1329 } 1330 1331 /********************/ 1332 /* Queue Management */ 1333 /********************/ 1334 1335 static void ath_txq_drain_pending_buffers(struct ath_softc *sc, 1336 struct ath_txq *txq) 1337 { 1338 struct ath_atx_ac *ac, *ac_tmp; 1339 struct ath_atx_tid *tid, *tid_tmp; 1340 1341 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) { 1342 list_del(&ac->list); 1343 ac->sched = false; 1344 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) { 1345 list_del(&tid->list); 1346 tid->sched = false; 1347 ath_tid_drain(sc, txq, tid); 1348 } 1349 } 1350 } 1351 1352 struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) 1353 { 1354 struct ath_hw *ah = sc->sc_ah; 1355 struct ath9k_tx_queue_info qi; 1356 static const int subtype_txq_to_hwq[] = { 1357 [WME_AC_BE] = ATH_TXQ_AC_BE, 1358 [WME_AC_BK] = ATH_TXQ_AC_BK, 1359 [WME_AC_VI] = ATH_TXQ_AC_VI, 1360 [WME_AC_VO] = ATH_TXQ_AC_VO, 1361 }; 1362 int axq_qnum, i; 1363 1364 memset(&qi, 0, sizeof(qi)); 1365 qi.tqi_subtype = subtype_txq_to_hwq[subtype]; 1366 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT; 1367 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT; 1368 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT; 1369 qi.tqi_physCompBuf = 0; 1370 1371 /* 1372 * Enable interrupts only for EOL and DESC conditions. 1373 * We mark tx descriptors to receive a DESC interrupt 1374 * when a tx queue gets deep; otherwise waiting for the 1375 * EOL to reap descriptors. Note that this is done to 1376 * reduce interrupt load and this only defers reaping 1377 * descriptors, never transmitting frames. Aside from 1378 * reducing interrupts this also permits more concurrency. 1379 * The only potential downside is if the tx queue backs 1380 * up in which case the top half of the kernel may backup 1381 * due to a lack of tx descriptors. 1382 * 1383 * The UAPSD queue is an exception, since we take a desc- 1384 * based intr on the EOSP frames. 1385 */ 1386 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { 1387 qi.tqi_qflags = TXQ_FLAG_TXINT_ENABLE; 1388 } else { 1389 if (qtype == ATH9K_TX_QUEUE_UAPSD) 1390 qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE; 1391 else 1392 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE | 1393 TXQ_FLAG_TXDESCINT_ENABLE; 1394 } 1395 axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi); 1396 if (axq_qnum == -1) { 1397 /* 1398 * NB: don't print a message, this happens 1399 * normally on parts with too few tx queues 1400 */ 1401 return NULL; 1402 } 1403 if (!ATH_TXQ_SETUP(sc, axq_qnum)) { 1404 struct ath_txq *txq = &sc->tx.txq[axq_qnum]; 1405 1406 txq->axq_qnum = axq_qnum; 1407 txq->mac80211_qnum = -1; 1408 txq->axq_link = NULL; 1409 __skb_queue_head_init(&txq->complete_q); 1410 INIT_LIST_HEAD(&txq->axq_q); 1411 INIT_LIST_HEAD(&txq->axq_acq); 1412 spin_lock_init(&txq->axq_lock); 1413 txq->axq_depth = 0; 1414 txq->axq_ampdu_depth = 0; 1415 txq->axq_tx_inprogress = false; 1416 sc->tx.txqsetup |= 1<<axq_qnum; 1417 1418 txq->txq_headidx = txq->txq_tailidx = 0; 1419 for (i = 0; i < ATH_TXFIFO_DEPTH; i++) 1420 INIT_LIST_HEAD(&txq->txq_fifo[i]); 1421 } 1422 return &sc->tx.txq[axq_qnum]; 1423 } 1424 1425 int ath_txq_update(struct ath_softc *sc, int qnum, 1426 struct ath9k_tx_queue_info *qinfo) 1427 { 1428 struct ath_hw *ah = sc->sc_ah; 1429 int error = 0; 1430 struct ath9k_tx_queue_info qi; 1431 1432 BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum); 1433 1434 ath9k_hw_get_txq_props(ah, qnum, &qi); 1435 qi.tqi_aifs = qinfo->tqi_aifs; 1436 qi.tqi_cwmin = qinfo->tqi_cwmin; 1437 qi.tqi_cwmax = qinfo->tqi_cwmax; 1438 qi.tqi_burstTime = qinfo->tqi_burstTime; 1439 qi.tqi_readyTime = qinfo->tqi_readyTime; 1440 1441 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) { 1442 ath_err(ath9k_hw_common(sc->sc_ah), 1443 "Unable to update hardware queue %u!\n", qnum); 1444 error = -EIO; 1445 } else { 1446 ath9k_hw_resettxqueue(ah, qnum); 1447 } 1448 1449 return error; 1450 } 1451 1452 int ath_cabq_update(struct ath_softc *sc) 1453 { 1454 struct ath9k_tx_queue_info qi; 1455 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf; 1456 int qnum = sc->beacon.cabq->axq_qnum; 1457 1458 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi); 1459 /* 1460 * Ensure the readytime % is within the bounds. 1461 */ 1462 if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND) 1463 sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND; 1464 else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND) 1465 sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND; 1466 1467 qi.tqi_readyTime = (cur_conf->beacon_interval * 1468 sc->config.cabqReadytime) / 100; 1469 ath_txq_update(sc, qnum, &qi); 1470 1471 return 0; 1472 } 1473 1474 static bool bf_is_ampdu_not_probing(struct ath_buf *bf) 1475 { 1476 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu); 1477 return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE); 1478 } 1479 1480 static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq, 1481 struct list_head *list, bool retry_tx) 1482 { 1483 struct ath_buf *bf, *lastbf; 1484 struct list_head bf_head; 1485 struct ath_tx_status ts; 1486 1487 memset(&ts, 0, sizeof(ts)); 1488 ts.ts_status = ATH9K_TX_FLUSH; 1489 INIT_LIST_HEAD(&bf_head); 1490 1491 while (!list_empty(list)) { 1492 bf = list_first_entry(list, struct ath_buf, list); 1493 1494 if (bf->bf_stale) { 1495 list_del(&bf->list); 1496 1497 ath_tx_return_buffer(sc, bf); 1498 continue; 1499 } 1500 1501 lastbf = bf->bf_lastbf; 1502 list_cut_position(&bf_head, list, &lastbf->list); 1503 1504 txq->axq_depth--; 1505 if (bf_is_ampdu_not_probing(bf)) 1506 txq->axq_ampdu_depth--; 1507 1508 if (bf_isampdu(bf)) 1509 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0, 1510 retry_tx); 1511 else 1512 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0); 1513 } 1514 } 1515 1516 /* 1517 * Drain a given TX queue (could be Beacon or Data) 1518 * 1519 * This assumes output has been stopped and 1520 * we do not need to block ath_tx_tasklet. 1521 */ 1522 void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx) 1523 { 1524 ath_txq_lock(sc, txq); 1525 1526 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { 1527 int idx = txq->txq_tailidx; 1528 1529 while (!list_empty(&txq->txq_fifo[idx])) { 1530 ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx], 1531 retry_tx); 1532 1533 INCR(idx, ATH_TXFIFO_DEPTH); 1534 } 1535 txq->txq_tailidx = idx; 1536 } 1537 1538 txq->axq_link = NULL; 1539 txq->axq_tx_inprogress = false; 1540 ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx); 1541 1542 /* flush any pending frames if aggregation is enabled */ 1543 if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && !retry_tx) 1544 ath_txq_drain_pending_buffers(sc, txq); 1545 1546 ath_txq_unlock_complete(sc, txq); 1547 } 1548 1549 bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx) 1550 { 1551 struct ath_hw *ah = sc->sc_ah; 1552 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1553 struct ath_txq *txq; 1554 int i; 1555 u32 npend = 0; 1556 1557 if (test_bit(SC_OP_INVALID, &sc->sc_flags)) 1558 return true; 1559 1560 ath9k_hw_abort_tx_dma(ah); 1561 1562 /* Check if any queue remains active */ 1563 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { 1564 if (!ATH_TXQ_SETUP(sc, i)) 1565 continue; 1566 1567 if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum)) 1568 npend |= BIT(i); 1569 } 1570 1571 if (npend) 1572 ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend); 1573 1574 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { 1575 if (!ATH_TXQ_SETUP(sc, i)) 1576 continue; 1577 1578 /* 1579 * The caller will resume queues with ieee80211_wake_queues. 1580 * Mark the queue as not stopped to prevent ath_tx_complete 1581 * from waking the queue too early. 1582 */ 1583 txq = &sc->tx.txq[i]; 1584 txq->stopped = false; 1585 ath_draintxq(sc, txq, retry_tx); 1586 } 1587 1588 return !npend; 1589 } 1590 1591 void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) 1592 { 1593 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum); 1594 sc->tx.txqsetup &= ~(1<<txq->axq_qnum); 1595 } 1596 1597 /* For each axq_acq entry, for each tid, try to schedule packets 1598 * for transmit until ampdu_depth has reached min Q depth. 1599 */ 1600 void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) 1601 { 1602 struct ath_atx_ac *ac, *ac_tmp, *last_ac; 1603 struct ath_atx_tid *tid, *last_tid; 1604 1605 if (test_bit(SC_OP_HW_RESET, &sc->sc_flags) || 1606 list_empty(&txq->axq_acq) || 1607 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) 1608 return; 1609 1610 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list); 1611 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list); 1612 1613 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) { 1614 last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list); 1615 list_del(&ac->list); 1616 ac->sched = false; 1617 1618 while (!list_empty(&ac->tid_q)) { 1619 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, 1620 list); 1621 list_del(&tid->list); 1622 tid->sched = false; 1623 1624 if (tid->paused) 1625 continue; 1626 1627 ath_tx_sched_aggr(sc, txq, tid); 1628 1629 /* 1630 * add tid to round-robin queue if more frames 1631 * are pending for the tid 1632 */ 1633 if (!skb_queue_empty(&tid->buf_q)) 1634 ath_tx_queue_tid(txq, tid); 1635 1636 if (tid == last_tid || 1637 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) 1638 break; 1639 } 1640 1641 if (!list_empty(&ac->tid_q) && !ac->sched) { 1642 ac->sched = true; 1643 list_add_tail(&ac->list, &txq->axq_acq); 1644 } 1645 1646 if (ac == last_ac || 1647 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) 1648 return; 1649 } 1650 } 1651 1652 /***********/ 1653 /* TX, DMA */ 1654 /***********/ 1655 1656 /* 1657 * Insert a chain of ath_buf (descriptors) on a txq and 1658 * assume the descriptors are already chained together by caller. 1659 */ 1660 static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, 1661 struct list_head *head, bool internal) 1662 { 1663 struct ath_hw *ah = sc->sc_ah; 1664 struct ath_common *common = ath9k_hw_common(ah); 1665 struct ath_buf *bf, *bf_last; 1666 bool puttxbuf = false; 1667 bool edma; 1668 1669 /* 1670 * Insert the frame on the outbound list and 1671 * pass it on to the hardware. 1672 */ 1673 1674 if (list_empty(head)) 1675 return; 1676 1677 edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA); 1678 bf = list_first_entry(head, struct ath_buf, list); 1679 bf_last = list_entry(head->prev, struct ath_buf, list); 1680 1681 ath_dbg(common, QUEUE, "qnum: %d, txq depth: %d\n", 1682 txq->axq_qnum, txq->axq_depth); 1683 1684 if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) { 1685 list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]); 1686 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH); 1687 puttxbuf = true; 1688 } else { 1689 list_splice_tail_init(head, &txq->axq_q); 1690 1691 if (txq->axq_link) { 1692 ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr); 1693 ath_dbg(common, XMIT, "link[%u] (%p)=%llx (%p)\n", 1694 txq->axq_qnum, txq->axq_link, 1695 ito64(bf->bf_daddr), bf->bf_desc); 1696 } else if (!edma) 1697 puttxbuf = true; 1698 1699 txq->axq_link = bf_last->bf_desc; 1700 } 1701 1702 if (puttxbuf) { 1703 TX_STAT_INC(txq->axq_qnum, puttxbuf); 1704 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr); 1705 ath_dbg(common, XMIT, "TXDP[%u] = %llx (%p)\n", 1706 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc); 1707 } 1708 1709 if (!edma) { 1710 TX_STAT_INC(txq->axq_qnum, txstart); 1711 ath9k_hw_txstart(ah, txq->axq_qnum); 1712 } 1713 1714 if (!internal) { 1715 txq->axq_depth++; 1716 if (bf_is_ampdu_not_probing(bf)) 1717 txq->axq_ampdu_depth++; 1718 } 1719 } 1720 1721 static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid, 1722 struct sk_buff *skb, struct ath_tx_control *txctl) 1723 { 1724 struct ath_frame_info *fi = get_frame_info(skb); 1725 struct list_head bf_head; 1726 struct ath_buf *bf; 1727 1728 /* 1729 * Do not queue to h/w when any of the following conditions is true: 1730 * - there are pending frames in software queue 1731 * - the TID is currently paused for ADDBA/BAR request 1732 * - seqno is not within block-ack window 1733 * - h/w queue depth exceeds low water mark 1734 */ 1735 if (!skb_queue_empty(&tid->buf_q) || tid->paused || 1736 !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) || 1737 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) { 1738 /* 1739 * Add this frame to software queue for scheduling later 1740 * for aggregation. 1741 */ 1742 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw); 1743 __skb_queue_tail(&tid->buf_q, skb); 1744 if (!txctl->an || !txctl->an->sleeping) 1745 ath_tx_queue_tid(txctl->txq, tid); 1746 return; 1747 } 1748 1749 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb); 1750 if (!bf) { 1751 ieee80211_free_txskb(sc->hw, skb); 1752 return; 1753 } 1754 1755 bf->bf_state.bf_type = BUF_AMPDU; 1756 INIT_LIST_HEAD(&bf_head); 1757 list_add(&bf->list, &bf_head); 1758 1759 /* Add sub-frame to BAW */ 1760 ath_tx_addto_baw(sc, tid, bf->bf_state.seqno); 1761 1762 /* Queue to h/w without aggregation */ 1763 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw); 1764 bf->bf_lastbf = bf; 1765 ath_tx_fill_desc(sc, bf, txctl->txq, fi->framelen); 1766 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false); 1767 } 1768 1769 static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq, 1770 struct ath_atx_tid *tid, struct sk_buff *skb) 1771 { 1772 struct ath_frame_info *fi = get_frame_info(skb); 1773 struct list_head bf_head; 1774 struct ath_buf *bf; 1775 1776 bf = fi->bf; 1777 1778 INIT_LIST_HEAD(&bf_head); 1779 list_add_tail(&bf->list, &bf_head); 1780 bf->bf_state.bf_type = 0; 1781 1782 bf->bf_next = NULL; 1783 bf->bf_lastbf = bf; 1784 ath_tx_fill_desc(sc, bf, txq, fi->framelen); 1785 ath_tx_txqaddbuf(sc, txq, &bf_head, false); 1786 TX_STAT_INC(txq->axq_qnum, queued); 1787 } 1788 1789 static void setup_frame_info(struct ieee80211_hw *hw, 1790 struct ieee80211_sta *sta, 1791 struct sk_buff *skb, 1792 int framelen) 1793 { 1794 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1795 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key; 1796 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1797 const struct ieee80211_rate *rate; 1798 struct ath_frame_info *fi = get_frame_info(skb); 1799 struct ath_node *an = NULL; 1800 enum ath9k_key_type keytype; 1801 bool short_preamble = false; 1802 1803 /* 1804 * We check if Short Preamble is needed for the CTS rate by 1805 * checking the BSS's global flag. 1806 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used. 1807 */ 1808 if (tx_info->control.vif && 1809 tx_info->control.vif->bss_conf.use_short_preamble) 1810 short_preamble = true; 1811 1812 rate = ieee80211_get_rts_cts_rate(hw, tx_info); 1813 keytype = ath9k_cmn_get_hw_crypto_keytype(skb); 1814 1815 if (sta) 1816 an = (struct ath_node *) sta->drv_priv; 1817 1818 memset(fi, 0, sizeof(*fi)); 1819 if (hw_key) 1820 fi->keyix = hw_key->hw_key_idx; 1821 else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0) 1822 fi->keyix = an->ps_key; 1823 else 1824 fi->keyix = ATH9K_TXKEYIX_INVALID; 1825 fi->keytype = keytype; 1826 fi->framelen = framelen; 1827 fi->rtscts_rate = rate->hw_value; 1828 if (short_preamble) 1829 fi->rtscts_rate |= rate->hw_value_short; 1830 } 1831 1832 u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate) 1833 { 1834 struct ath_hw *ah = sc->sc_ah; 1835 struct ath9k_channel *curchan = ah->curchan; 1836 1837 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) && 1838 (curchan->channelFlags & CHANNEL_5GHZ) && 1839 (chainmask == 0x7) && (rate < 0x90)) 1840 return 0x3; 1841 else if (AR_SREV_9462(ah) && ath9k_hw_btcoex_is_enabled(ah) && 1842 IS_CCK_RATE(rate)) 1843 return 0x2; 1844 else 1845 return chainmask; 1846 } 1847 1848 /* 1849 * Assign a descriptor (and sequence number if necessary, 1850 * and map buffer for DMA. Frees skb on error 1851 */ 1852 static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, 1853 struct ath_txq *txq, 1854 struct ath_atx_tid *tid, 1855 struct sk_buff *skb) 1856 { 1857 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1858 struct ath_frame_info *fi = get_frame_info(skb); 1859 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1860 struct ath_buf *bf; 1861 int fragno; 1862 u16 seqno; 1863 1864 bf = ath_tx_get_buffer(sc); 1865 if (!bf) { 1866 ath_dbg(common, XMIT, "TX buffers are full\n"); 1867 return NULL; 1868 } 1869 1870 ATH_TXBUF_RESET(bf); 1871 1872 if (tid) { 1873 fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG; 1874 seqno = tid->seq_next; 1875 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT); 1876 1877 if (fragno) 1878 hdr->seq_ctrl |= cpu_to_le16(fragno); 1879 1880 if (!ieee80211_has_morefrags(hdr->frame_control)) 1881 INCR(tid->seq_next, IEEE80211_SEQ_MAX); 1882 1883 bf->bf_state.seqno = seqno; 1884 } 1885 1886 bf->bf_mpdu = skb; 1887 1888 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data, 1889 skb->len, DMA_TO_DEVICE); 1890 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) { 1891 bf->bf_mpdu = NULL; 1892 bf->bf_buf_addr = 0; 1893 ath_err(ath9k_hw_common(sc->sc_ah), 1894 "dma_mapping_error() on TX\n"); 1895 ath_tx_return_buffer(sc, bf); 1896 return NULL; 1897 } 1898 1899 fi->bf = bf; 1900 1901 return bf; 1902 } 1903 1904 /* FIXME: tx power */ 1905 static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb, 1906 struct ath_tx_control *txctl) 1907 { 1908 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1909 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1910 struct ath_atx_tid *tid = NULL; 1911 struct ath_buf *bf; 1912 u8 tidno; 1913 1914 if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && txctl->an && 1915 ieee80211_is_data_qos(hdr->frame_control)) { 1916 tidno = ieee80211_get_qos_ctl(hdr)[0] & 1917 IEEE80211_QOS_CTL_TID_MASK; 1918 tid = ATH_AN_2_TID(txctl->an, tidno); 1919 1920 WARN_ON(tid->ac->txq != txctl->txq); 1921 } 1922 1923 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) { 1924 /* 1925 * Try aggregation if it's a unicast data frame 1926 * and the destination is HT capable. 1927 */ 1928 ath_tx_send_ampdu(sc, tid, skb, txctl); 1929 } else { 1930 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb); 1931 if (!bf) { 1932 if (txctl->paprd) 1933 dev_kfree_skb_any(skb); 1934 else 1935 ieee80211_free_txskb(sc->hw, skb); 1936 return; 1937 } 1938 1939 bf->bf_state.bfs_paprd = txctl->paprd; 1940 1941 if (txctl->paprd) 1942 bf->bf_state.bfs_paprd_timestamp = jiffies; 1943 1944 ath_tx_send_normal(sc, txctl->txq, tid, skb); 1945 } 1946 } 1947 1948 /* Upon failure caller should free skb */ 1949 int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, 1950 struct ath_tx_control *txctl) 1951 { 1952 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1953 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1954 struct ieee80211_sta *sta = txctl->sta; 1955 struct ieee80211_vif *vif = info->control.vif; 1956 struct ath_softc *sc = hw->priv; 1957 struct ath_txq *txq = txctl->txq; 1958 int padpos, padsize; 1959 int frmlen = skb->len + FCS_LEN; 1960 int q; 1961 1962 /* NOTE: sta can be NULL according to net/mac80211.h */ 1963 if (sta) 1964 txctl->an = (struct ath_node *)sta->drv_priv; 1965 1966 if (info->control.hw_key) 1967 frmlen += info->control.hw_key->icv_len; 1968 1969 /* 1970 * As a temporary workaround, assign seq# here; this will likely need 1971 * to be cleaned up to work better with Beacon transmission and virtual 1972 * BSSes. 1973 */ 1974 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { 1975 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) 1976 sc->tx.seq_no += 0x10; 1977 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); 1978 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no); 1979 } 1980 1981 /* Add the padding after the header if this is not already done */ 1982 padpos = ath9k_cmn_padpos(hdr->frame_control); 1983 padsize = padpos & 3; 1984 if (padsize && skb->len > padpos) { 1985 if (skb_headroom(skb) < padsize) 1986 return -ENOMEM; 1987 1988 skb_push(skb, padsize); 1989 memmove(skb->data, skb->data + padsize, padpos); 1990 hdr = (struct ieee80211_hdr *) skb->data; 1991 } 1992 1993 if ((vif && vif->type != NL80211_IFTYPE_AP && 1994 vif->type != NL80211_IFTYPE_AP_VLAN) || 1995 !ieee80211_is_data(hdr->frame_control)) 1996 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; 1997 1998 setup_frame_info(hw, sta, skb, frmlen); 1999 2000 /* 2001 * At this point, the vif, hw_key and sta pointers in the tx control 2002 * info are no longer valid (overwritten by the ath_frame_info data. 2003 */ 2004 2005 q = skb_get_queue_mapping(skb); 2006 2007 ath_txq_lock(sc, txq); 2008 if (txq == sc->tx.txq_map[q] && 2009 ++txq->pending_frames > sc->tx.txq_max_pending[q] && 2010 !txq->stopped) { 2011 ieee80211_stop_queue(sc->hw, q); 2012 txq->stopped = true; 2013 } 2014 2015 ath_tx_start_dma(sc, skb, txctl); 2016 2017 ath_txq_unlock(sc, txq); 2018 2019 return 0; 2020 } 2021 2022 /*****************/ 2023 /* TX Completion */ 2024 /*****************/ 2025 2026 static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, 2027 int tx_flags, struct ath_txq *txq) 2028 { 2029 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 2030 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 2031 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data; 2032 int q, padpos, padsize; 2033 unsigned long flags; 2034 2035 ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb); 2036 2037 if (sc->sc_ah->caldata) 2038 sc->sc_ah->caldata->paprd_packet_sent = true; 2039 2040 if (!(tx_flags & ATH_TX_ERROR)) 2041 /* Frame was ACKed */ 2042 tx_info->flags |= IEEE80211_TX_STAT_ACK; 2043 2044 padpos = ath9k_cmn_padpos(hdr->frame_control); 2045 padsize = padpos & 3; 2046 if (padsize && skb->len>padpos+padsize) { 2047 /* 2048 * Remove MAC header padding before giving the frame back to 2049 * mac80211. 2050 */ 2051 memmove(skb->data + padsize, skb->data, padpos); 2052 skb_pull(skb, padsize); 2053 } 2054 2055 spin_lock_irqsave(&sc->sc_pm_lock, flags); 2056 if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) { 2057 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK; 2058 ath_dbg(common, PS, 2059 "Going back to sleep after having received TX status (0x%lx)\n", 2060 sc->ps_flags & (PS_WAIT_FOR_BEACON | 2061 PS_WAIT_FOR_CAB | 2062 PS_WAIT_FOR_PSPOLL_DATA | 2063 PS_WAIT_FOR_TX_ACK)); 2064 } 2065 spin_unlock_irqrestore(&sc->sc_pm_lock, flags); 2066 2067 q = skb_get_queue_mapping(skb); 2068 if (txq == sc->tx.txq_map[q]) { 2069 if (WARN_ON(--txq->pending_frames < 0)) 2070 txq->pending_frames = 0; 2071 2072 if (txq->stopped && 2073 txq->pending_frames < sc->tx.txq_max_pending[q]) { 2074 ieee80211_wake_queue(sc->hw, q); 2075 txq->stopped = false; 2076 } 2077 } 2078 2079 __skb_queue_tail(&txq->complete_q, skb); 2080 } 2081 2082 static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, 2083 struct ath_txq *txq, struct list_head *bf_q, 2084 struct ath_tx_status *ts, int txok) 2085 { 2086 struct sk_buff *skb = bf->bf_mpdu; 2087 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 2088 unsigned long flags; 2089 int tx_flags = 0; 2090 2091 if (!txok) 2092 tx_flags |= ATH_TX_ERROR; 2093 2094 if (ts->ts_status & ATH9K_TXERR_FILT) 2095 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED; 2096 2097 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE); 2098 bf->bf_buf_addr = 0; 2099 2100 if (bf->bf_state.bfs_paprd) { 2101 if (time_after(jiffies, 2102 bf->bf_state.bfs_paprd_timestamp + 2103 msecs_to_jiffies(ATH_PAPRD_TIMEOUT))) 2104 dev_kfree_skb_any(skb); 2105 else 2106 complete(&sc->paprd_complete); 2107 } else { 2108 ath_debug_stat_tx(sc, bf, ts, txq, tx_flags); 2109 ath_tx_complete(sc, skb, tx_flags, txq); 2110 } 2111 /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't 2112 * accidentally reference it later. 2113 */ 2114 bf->bf_mpdu = NULL; 2115 2116 /* 2117 * Return the list of ath_buf of this mpdu to free queue 2118 */ 2119 spin_lock_irqsave(&sc->tx.txbuflock, flags); 2120 list_splice_tail_init(bf_q, &sc->tx.txbuf); 2121 spin_unlock_irqrestore(&sc->tx.txbuflock, flags); 2122 } 2123 2124 static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, 2125 struct ath_tx_status *ts, int nframes, int nbad, 2126 int txok) 2127 { 2128 struct sk_buff *skb = bf->bf_mpdu; 2129 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 2130 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 2131 struct ieee80211_hw *hw = sc->hw; 2132 struct ath_hw *ah = sc->sc_ah; 2133 u8 i, tx_rateindex; 2134 2135 if (txok) 2136 tx_info->status.ack_signal = ts->ts_rssi; 2137 2138 tx_rateindex = ts->ts_rateindex; 2139 WARN_ON(tx_rateindex >= hw->max_rates); 2140 2141 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) { 2142 tx_info->flags |= IEEE80211_TX_STAT_AMPDU; 2143 2144 BUG_ON(nbad > nframes); 2145 } 2146 tx_info->status.ampdu_len = nframes; 2147 tx_info->status.ampdu_ack_len = nframes - nbad; 2148 2149 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 && 2150 (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) { 2151 /* 2152 * If an underrun error is seen assume it as an excessive 2153 * retry only if max frame trigger level has been reached 2154 * (2 KB for single stream, and 4 KB for dual stream). 2155 * Adjust the long retry as if the frame was tried 2156 * hw->max_rate_tries times to affect how rate control updates 2157 * PER for the failed rate. 2158 * In case of congestion on the bus penalizing this type of 2159 * underruns should help hardware actually transmit new frames 2160 * successfully by eventually preferring slower rates. 2161 * This itself should also alleviate congestion on the bus. 2162 */ 2163 if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN | 2164 ATH9K_TX_DELIM_UNDERRUN)) && 2165 ieee80211_is_data(hdr->frame_control) && 2166 ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level) 2167 tx_info->status.rates[tx_rateindex].count = 2168 hw->max_rate_tries; 2169 } 2170 2171 for (i = tx_rateindex + 1; i < hw->max_rates; i++) { 2172 tx_info->status.rates[i].count = 0; 2173 tx_info->status.rates[i].idx = -1; 2174 } 2175 2176 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1; 2177 } 2178 2179 static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq, 2180 struct ath_tx_status *ts, struct ath_buf *bf, 2181 struct list_head *bf_head) 2182 { 2183 int txok; 2184 2185 txq->axq_depth--; 2186 txok = !(ts->ts_status & ATH9K_TXERR_MASK); 2187 txq->axq_tx_inprogress = false; 2188 if (bf_is_ampdu_not_probing(bf)) 2189 txq->axq_ampdu_depth--; 2190 2191 if (!bf_isampdu(bf)) { 2192 ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok); 2193 ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok); 2194 } else 2195 ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true); 2196 2197 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) 2198 ath_txq_schedule(sc, txq); 2199 } 2200 2201 static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) 2202 { 2203 struct ath_hw *ah = sc->sc_ah; 2204 struct ath_common *common = ath9k_hw_common(ah); 2205 struct ath_buf *bf, *lastbf, *bf_held = NULL; 2206 struct list_head bf_head; 2207 struct ath_desc *ds; 2208 struct ath_tx_status ts; 2209 int status; 2210 2211 ath_dbg(common, QUEUE, "tx queue %d (%x), link %p\n", 2212 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum), 2213 txq->axq_link); 2214 2215 ath_txq_lock(sc, txq); 2216 for (;;) { 2217 if (test_bit(SC_OP_HW_RESET, &sc->sc_flags)) 2218 break; 2219 2220 if (list_empty(&txq->axq_q)) { 2221 txq->axq_link = NULL; 2222 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) 2223 ath_txq_schedule(sc, txq); 2224 break; 2225 } 2226 bf = list_first_entry(&txq->axq_q, struct ath_buf, list); 2227 2228 /* 2229 * There is a race condition that a BH gets scheduled 2230 * after sw writes TxE and before hw re-load the last 2231 * descriptor to get the newly chained one. 2232 * Software must keep the last DONE descriptor as a 2233 * holding descriptor - software does so by marking 2234 * it with the STALE flag. 2235 */ 2236 bf_held = NULL; 2237 if (bf->bf_stale) { 2238 bf_held = bf; 2239 if (list_is_last(&bf_held->list, &txq->axq_q)) 2240 break; 2241 2242 bf = list_entry(bf_held->list.next, struct ath_buf, 2243 list); 2244 } 2245 2246 lastbf = bf->bf_lastbf; 2247 ds = lastbf->bf_desc; 2248 2249 memset(&ts, 0, sizeof(ts)); 2250 status = ath9k_hw_txprocdesc(ah, ds, &ts); 2251 if (status == -EINPROGRESS) 2252 break; 2253 2254 TX_STAT_INC(txq->axq_qnum, txprocdesc); 2255 2256 /* 2257 * Remove ath_buf's of the same transmit unit from txq, 2258 * however leave the last descriptor back as the holding 2259 * descriptor for hw. 2260 */ 2261 lastbf->bf_stale = true; 2262 INIT_LIST_HEAD(&bf_head); 2263 if (!list_is_singular(&lastbf->list)) 2264 list_cut_position(&bf_head, 2265 &txq->axq_q, lastbf->list.prev); 2266 2267 if (bf_held) { 2268 list_del(&bf_held->list); 2269 ath_tx_return_buffer(sc, bf_held); 2270 } 2271 2272 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head); 2273 } 2274 ath_txq_unlock_complete(sc, txq); 2275 } 2276 2277 void ath_tx_tasklet(struct ath_softc *sc) 2278 { 2279 struct ath_hw *ah = sc->sc_ah; 2280 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1) & ah->intr_txqs; 2281 int i; 2282 2283 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { 2284 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i))) 2285 ath_tx_processq(sc, &sc->tx.txq[i]); 2286 } 2287 } 2288 2289 void ath_tx_edma_tasklet(struct ath_softc *sc) 2290 { 2291 struct ath_tx_status ts; 2292 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 2293 struct ath_hw *ah = sc->sc_ah; 2294 struct ath_txq *txq; 2295 struct ath_buf *bf, *lastbf; 2296 struct list_head bf_head; 2297 int status; 2298 2299 for (;;) { 2300 if (test_bit(SC_OP_HW_RESET, &sc->sc_flags)) 2301 break; 2302 2303 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts); 2304 if (status == -EINPROGRESS) 2305 break; 2306 if (status == -EIO) { 2307 ath_dbg(common, XMIT, "Error processing tx status\n"); 2308 break; 2309 } 2310 2311 /* Process beacon completions separately */ 2312 if (ts.qid == sc->beacon.beaconq) { 2313 sc->beacon.tx_processed = true; 2314 sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK); 2315 continue; 2316 } 2317 2318 txq = &sc->tx.txq[ts.qid]; 2319 2320 ath_txq_lock(sc, txq); 2321 2322 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) { 2323 ath_txq_unlock(sc, txq); 2324 return; 2325 } 2326 2327 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx], 2328 struct ath_buf, list); 2329 lastbf = bf->bf_lastbf; 2330 2331 INIT_LIST_HEAD(&bf_head); 2332 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx], 2333 &lastbf->list); 2334 2335 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) { 2336 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH); 2337 2338 if (!list_empty(&txq->axq_q)) { 2339 struct list_head bf_q; 2340 2341 INIT_LIST_HEAD(&bf_q); 2342 txq->axq_link = NULL; 2343 list_splice_tail_init(&txq->axq_q, &bf_q); 2344 ath_tx_txqaddbuf(sc, txq, &bf_q, true); 2345 } 2346 } 2347 2348 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head); 2349 ath_txq_unlock_complete(sc, txq); 2350 } 2351 } 2352 2353 /*****************/ 2354 /* Init, Cleanup */ 2355 /*****************/ 2356 2357 static int ath_txstatus_setup(struct ath_softc *sc, int size) 2358 { 2359 struct ath_descdma *dd = &sc->txsdma; 2360 u8 txs_len = sc->sc_ah->caps.txs_len; 2361 2362 dd->dd_desc_len = size * txs_len; 2363 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len, 2364 &dd->dd_desc_paddr, GFP_KERNEL); 2365 if (!dd->dd_desc) 2366 return -ENOMEM; 2367 2368 return 0; 2369 } 2370 2371 static int ath_tx_edma_init(struct ath_softc *sc) 2372 { 2373 int err; 2374 2375 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE); 2376 if (!err) 2377 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc, 2378 sc->txsdma.dd_desc_paddr, 2379 ATH_TXSTATUS_RING_SIZE); 2380 2381 return err; 2382 } 2383 2384 static void ath_tx_edma_cleanup(struct ath_softc *sc) 2385 { 2386 struct ath_descdma *dd = &sc->txsdma; 2387 2388 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc, 2389 dd->dd_desc_paddr); 2390 } 2391 2392 int ath_tx_init(struct ath_softc *sc, int nbufs) 2393 { 2394 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 2395 int error = 0; 2396 2397 spin_lock_init(&sc->tx.txbuflock); 2398 2399 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf, 2400 "tx", nbufs, 1, 1); 2401 if (error != 0) { 2402 ath_err(common, 2403 "Failed to allocate tx descriptors: %d\n", error); 2404 goto err; 2405 } 2406 2407 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf, 2408 "beacon", ATH_BCBUF, 1, 1); 2409 if (error != 0) { 2410 ath_err(common, 2411 "Failed to allocate beacon descriptors: %d\n", error); 2412 goto err; 2413 } 2414 2415 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work); 2416 2417 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { 2418 error = ath_tx_edma_init(sc); 2419 if (error) 2420 goto err; 2421 } 2422 2423 err: 2424 if (error != 0) 2425 ath_tx_cleanup(sc); 2426 2427 return error; 2428 } 2429 2430 void ath_tx_cleanup(struct ath_softc *sc) 2431 { 2432 if (sc->beacon.bdma.dd_desc_len != 0) 2433 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf); 2434 2435 if (sc->tx.txdma.dd_desc_len != 0) 2436 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf); 2437 2438 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) 2439 ath_tx_edma_cleanup(sc); 2440 } 2441 2442 void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an) 2443 { 2444 struct ath_atx_tid *tid; 2445 struct ath_atx_ac *ac; 2446 int tidno, acno; 2447 2448 for (tidno = 0, tid = &an->tid[tidno]; 2449 tidno < WME_NUM_TID; 2450 tidno++, tid++) { 2451 tid->an = an; 2452 tid->tidno = tidno; 2453 tid->seq_start = tid->seq_next = 0; 2454 tid->baw_size = WME_MAX_BA; 2455 tid->baw_head = tid->baw_tail = 0; 2456 tid->sched = false; 2457 tid->paused = false; 2458 tid->state &= ~AGGR_CLEANUP; 2459 __skb_queue_head_init(&tid->buf_q); 2460 acno = TID_TO_WME_AC(tidno); 2461 tid->ac = &an->ac[acno]; 2462 tid->state &= ~AGGR_ADDBA_COMPLETE; 2463 tid->state &= ~AGGR_ADDBA_PROGRESS; 2464 } 2465 2466 for (acno = 0, ac = &an->ac[acno]; 2467 acno < WME_NUM_AC; acno++, ac++) { 2468 ac->sched = false; 2469 ac->txq = sc->tx.txq_map[acno]; 2470 INIT_LIST_HEAD(&ac->tid_q); 2471 } 2472 } 2473 2474 void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an) 2475 { 2476 struct ath_atx_ac *ac; 2477 struct ath_atx_tid *tid; 2478 struct ath_txq *txq; 2479 int tidno; 2480 2481 for (tidno = 0, tid = &an->tid[tidno]; 2482 tidno < WME_NUM_TID; tidno++, tid++) { 2483 2484 ac = tid->ac; 2485 txq = ac->txq; 2486 2487 ath_txq_lock(sc, txq); 2488 2489 if (tid->sched) { 2490 list_del(&tid->list); 2491 tid->sched = false; 2492 } 2493 2494 if (ac->sched) { 2495 list_del(&ac->list); 2496 tid->ac->sched = false; 2497 } 2498 2499 ath_tid_drain(sc, txq, tid); 2500 tid->state &= ~AGGR_ADDBA_COMPLETE; 2501 tid->state &= ~AGGR_CLEANUP; 2502 2503 ath_txq_unlock(sc, txq); 2504 } 2505 } 2506