/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_MCS(_rc)        ((_rc) & 0x0f)
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

#define OFDM_SIFS_TIME          16

static u32 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
	{    52,  108 },     /*  8: BPSK */
	{   104,  216 },     /*  9: QPSK 1/2 */
	{   156,  324 },     /* 10: QPSK 3/4 */
	{   208,  432 },     /* 11: 16-QAM 1/2 */
	{   312,  648 },     /* 12: 16-QAM 3/4 */
	{   416,  864 },     /* 13: 64-QAM 2/3 */
	{   468,  972 },     /* 14: 64-QAM 3/4 */
	{   520, 1080 },     /* 15: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_atx_tid *tid,
				  struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq,
				struct list_head *bf_q,
				int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf);
static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
			      int txok);
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds,
			     int nbad, int txok, bool update_rc);

enum {
	MCS_DEFAULT,
	MCS_HT40,
	MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[3][16] = {
	[MCS_DEFAULT] = {
		3216,  6434,  9650,  12868, 19304, 25740,  28956,  32180,
		6430,  12860, 19300, 25736, 38600, 51472,  57890,  64320,
	},
	[MCS_HT40] = {
		6684,  13368, 20052, 26738, 40104, 53476,  60156,  66840,
		13360, 26720, 40080, 53440, 80160, 106880, 120240, 133600,
	},
	[MCS_HT40_SGI] = {
		/* TODO: Only MCS 7 and 15 updated, recalculate the rest */
		6684,  13368, 20052, 26738, 40104, 53476,  60156,  74200,
		13360, 26720, 40080, 53440, 80160, 106880, 120240, 148400,
	}
};


/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct
ath_atx_tid *tid) 101 { 102 struct ath_atx_ac *ac = tid->ac; 103 104 if (tid->paused) 105 return; 106 107 if (tid->sched) 108 return; 109 110 tid->sched = true; 111 list_add_tail(&tid->list, &ac->tid_q); 112 113 if (ac->sched) 114 return; 115 116 ac->sched = true; 117 list_add_tail(&ac->list, &txq->axq_acq); 118 } 119 120 static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid) 121 { 122 struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum]; 123 124 spin_lock_bh(&txq->axq_lock); 125 tid->paused++; 126 spin_unlock_bh(&txq->axq_lock); 127 } 128 129 static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid) 130 { 131 struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum]; 132 133 BUG_ON(tid->paused <= 0); 134 spin_lock_bh(&txq->axq_lock); 135 136 tid->paused--; 137 138 if (tid->paused > 0) 139 goto unlock; 140 141 if (list_empty(&tid->buf_q)) 142 goto unlock; 143 144 ath_tx_queue_tid(txq, tid); 145 ath_txq_schedule(sc, txq); 146 unlock: 147 spin_unlock_bh(&txq->axq_lock); 148 } 149 150 static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) 151 { 152 struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum]; 153 struct ath_buf *bf; 154 struct list_head bf_head; 155 INIT_LIST_HEAD(&bf_head); 156 157 BUG_ON(tid->paused <= 0); 158 spin_lock_bh(&txq->axq_lock); 159 160 tid->paused--; 161 162 if (tid->paused > 0) { 163 spin_unlock_bh(&txq->axq_lock); 164 return; 165 } 166 167 while (!list_empty(&tid->buf_q)) { 168 bf = list_first_entry(&tid->buf_q, struct ath_buf, list); 169 BUG_ON(bf_isretried(bf)); 170 list_move_tail(&bf->list, &bf_head); 171 ath_tx_send_ht_normal(sc, txq, tid, &bf_head); 172 } 173 174 spin_unlock_bh(&txq->axq_lock); 175 } 176 177 static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid, 178 int seqno) 179 { 180 int index, cindex; 181 182 index = ATH_BA_INDEX(tid->seq_start, seqno); 183 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 184 185 tid->tx_buf[cindex] = NULL; 186 187 while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) { 188 INCR(tid->seq_start, IEEE80211_SEQ_MAX); 189 INCR(tid->baw_head, ATH_TID_MAX_BUFS); 190 } 191 } 192 193 static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid, 194 struct ath_buf *bf) 195 { 196 int index, cindex; 197 198 if (bf_isretried(bf)) 199 return; 200 201 index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno); 202 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 203 204 BUG_ON(tid->tx_buf[cindex] != NULL); 205 tid->tx_buf[cindex] = bf; 206 207 if (index >= ((tid->baw_tail - tid->baw_head) & 208 (ATH_TID_MAX_BUFS - 1))) { 209 tid->baw_tail = cindex; 210 INCR(tid->baw_tail, ATH_TID_MAX_BUFS); 211 } 212 } 213 214 /* 215 * TODO: For frame(s) that are in the retry state, we will reuse the 216 * sequence number(s) without setting the retry bit. The 217 * alternative is to give up on these and BAR the receiver's window 218 * forward. 
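 * (Going the BAR route would mean sending a BlockAckReq with a fresh
 * starting sequence number, so the receiver flushes its reorder buffer
 * and moves its window past the dropped subframes.)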
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	struct list_head bf_head;
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;

		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		if (bf_isretried(bf))
			ath_tx_update_baw(sc, tid, bf->bf_seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;

	bf->bf_state.bf_type |= BUF_RETRY;
	bf->bf_retries++;
	TX_STAT_INC(txq->axq_qnum, a_retries);

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	spin_lock_bh(&sc->tx.txbuflock);
	if (WARN_ON(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}
	tbf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&tbf->list);
	spin_unlock_bh(&sc->tx.txbuflock);

	ATH_TXBUF_RESET(tbf);

	tbf->aphy = bf->aphy;
	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	*(tbf->bf_desc) = *(bf->bf_desc);
	tbf->bf_state = bf->bf_state;
	tbf->bf_dmacontext = bf->bf_dmacontext;

	return tbf;
}

static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 int txok)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct ath_desc *ds = bf_last->bf_desc;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);
	hw = bf->aphy->hw;

	rcu_read_lock();

	/* XXX: use ieee80211_find_sta! */
	sta = ieee80211_find_sta_by_hw(hw, hdr->addr1);
	if (!sta) {
		rcu_read_unlock();
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tid = ATH_AN_2_TID(an, bf->bf_tidno);

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ATH_DS_TX_BA(ds)) {
			seq_st = ATH_DS_BA_SEQ(ds);
			memcpy(ba, ATH_DS_BA_BITMAP(ds),
			       WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when a BA
			 * issue happens. The chip needs to be reset.
			 * But the AP code may have synchronization issues
			 * when performing an internal reset in this routine.
			 * Only enable the reset in STA mode for now.
339 */ 340 if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION) 341 needreset = 1; 342 } 343 } 344 345 INIT_LIST_HEAD(&bf_pending); 346 INIT_LIST_HEAD(&bf_head); 347 348 nbad = ath_tx_num_badfrms(sc, bf, txok); 349 while (bf) { 350 txfail = txpending = 0; 351 bf_next = bf->bf_next; 352 353 if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) { 354 /* transmit completion, subframe is 355 * acked by block ack */ 356 acked_cnt++; 357 } else if (!isaggr && txok) { 358 /* transmit completion */ 359 acked_cnt++; 360 } else { 361 if (!(tid->state & AGGR_CLEANUP) && 362 ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) { 363 if (bf->bf_retries < ATH_MAX_SW_RETRIES) { 364 ath_tx_set_retry(sc, txq, bf); 365 txpending = 1; 366 } else { 367 bf->bf_state.bf_type |= BUF_XRETRY; 368 txfail = 1; 369 sendbar = 1; 370 txfail_cnt++; 371 } 372 } else { 373 /* 374 * cleanup in progress, just fail 375 * the un-acked sub-frames 376 */ 377 txfail = 1; 378 } 379 } 380 381 if (bf_next == NULL) { 382 /* 383 * Make sure the last desc is reclaimed if it 384 * not a holding desc. 385 */ 386 if (!bf_last->bf_stale) 387 list_move_tail(&bf->list, &bf_head); 388 else 389 INIT_LIST_HEAD(&bf_head); 390 } else { 391 BUG_ON(list_empty(bf_q)); 392 list_move_tail(&bf->list, &bf_head); 393 } 394 395 if (!txpending) { 396 /* 397 * complete the acked-ones/xretried ones; update 398 * block-ack window 399 */ 400 spin_lock_bh(&txq->axq_lock); 401 ath_tx_update_baw(sc, tid, bf->bf_seqno); 402 spin_unlock_bh(&txq->axq_lock); 403 404 if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) { 405 ath_tx_rc_status(bf, ds, nbad, txok, true); 406 rc_update = false; 407 } else { 408 ath_tx_rc_status(bf, ds, nbad, txok, false); 409 } 410 411 ath_tx_complete_buf(sc, bf, txq, &bf_head, !txfail, sendbar); 412 } else { 413 /* retry the un-acked ones */ 414 if (bf->bf_next == NULL && bf_last->bf_stale) { 415 struct ath_buf *tbf; 416 417 tbf = ath_clone_txbuf(sc, bf_last); 418 /* 419 * Update tx baw and complete the frame with 420 * failed status if we run out of tx buf 421 */ 422 if (!tbf) { 423 spin_lock_bh(&txq->axq_lock); 424 ath_tx_update_baw(sc, tid, 425 bf->bf_seqno); 426 spin_unlock_bh(&txq->axq_lock); 427 428 bf->bf_state.bf_type |= BUF_XRETRY; 429 ath_tx_rc_status(bf, ds, nbad, 430 0, false); 431 ath_tx_complete_buf(sc, bf, txq, 432 &bf_head, 0, 0); 433 break; 434 } 435 436 ath9k_hw_cleartxdesc(sc->sc_ah, tbf->bf_desc); 437 list_add_tail(&tbf->list, &bf_head); 438 } else { 439 /* 440 * Clear descriptor status words for 441 * software retry 442 */ 443 ath9k_hw_cleartxdesc(sc->sc_ah, bf->bf_desc); 444 } 445 446 /* 447 * Put this buffer to the temporary pending 448 * queue to retain ordering 449 */ 450 list_splice_tail_init(&bf_head, &bf_pending); 451 } 452 453 bf = bf_next; 454 } 455 456 if (tid->state & AGGR_CLEANUP) { 457 if (tid->baw_head == tid->baw_tail) { 458 tid->state &= ~AGGR_ADDBA_COMPLETE; 459 tid->state &= ~AGGR_CLEANUP; 460 461 /* send buffered frames as singles */ 462 ath_tx_flush_tid(sc, tid); 463 } 464 rcu_read_unlock(); 465 return; 466 } 467 468 /* prepend un-acked frames to the beginning of the pending frame queue */ 469 if (!list_empty(&bf_pending)) { 470 spin_lock_bh(&txq->axq_lock); 471 list_splice(&bf_pending, &tid->buf_q); 472 ath_tx_queue_tid(txq, tid); 473 spin_unlock_bh(&txq->axq_lock); 474 } 475 476 rcu_read_unlock(); 477 478 if (needreset) 479 ath_reset(sc, false); 480 } 481 482 static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf, 483 struct ath_atx_tid *tid) 484 { 485 struct sk_buff *skb; 486 struct 
ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx = MCS_HT40_SGI;
			else if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_DEFAULT;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * Limit the aggregate size by the minimum rate if the selected rate
	 * is not a probe rate; if the selected rate is a probe rate, avoid
	 * aggregating this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, half_gi, ndelim, mindelim;

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, the hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *        The hardware can keep up at lower rates, but not at
	 *        higher rates.
	 */
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert the desired mpdu density from microseconds to bytes based
	 * on the highest rate in the rate series (i.e. the first rate) to
	 * determine the required minimum length for a subframe. Take into
	 * account whether the high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ?
1 : 0; 592 593 if (half_gi) 594 nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity); 595 else 596 nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity); 597 598 if (nsymbols == 0) 599 nsymbols = 1; 600 601 nsymbits = bits_per_symbol[rix][width]; 602 minlen = (nsymbols * nsymbits) / BITS_PER_BYTE; 603 604 if (frmlen < minlen) { 605 mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ; 606 ndelim = max(mindelim, ndelim); 607 } 608 609 return ndelim; 610 } 611 612 static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, 613 struct ath_txq *txq, 614 struct ath_atx_tid *tid, 615 struct list_head *bf_q) 616 { 617 #define PADBYTES(_len) ((4 - ((_len) % 4)) % 4) 618 struct ath_buf *bf, *bf_first, *bf_prev = NULL; 619 int rl = 0, nframes = 0, ndelim, prev_al = 0; 620 u16 aggr_limit = 0, al = 0, bpad = 0, 621 al_delta, h_baw = tid->baw_size / 2; 622 enum ATH_AGGR_STATUS status = ATH_AGGR_DONE; 623 624 bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list); 625 626 do { 627 bf = list_first_entry(&tid->buf_q, struct ath_buf, list); 628 629 /* do not step over block-ack window */ 630 if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) { 631 status = ATH_AGGR_BAW_CLOSED; 632 break; 633 } 634 635 if (!rl) { 636 aggr_limit = ath_lookup_rate(sc, bf, tid); 637 rl = 1; 638 } 639 640 /* do not exceed aggregation limit */ 641 al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen; 642 643 if (nframes && 644 (aggr_limit < (al + bpad + al_delta + prev_al))) { 645 status = ATH_AGGR_LIMITED; 646 break; 647 } 648 649 /* do not exceed subframe limit */ 650 if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) { 651 status = ATH_AGGR_LIMITED; 652 break; 653 } 654 nframes++; 655 656 /* add padding for previous frame to aggregation length */ 657 al += bpad + al_delta; 658 659 /* 660 * Get the delimiters needed to meet the MPDU 661 * density for this node. 662 */ 663 ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen); 664 bpad = PADBYTES(al_delta) + (ndelim << 2); 665 666 bf->bf_next = NULL; 667 bf->bf_desc->ds_link = 0; 668 669 /* link buffers of this frame to the aggregate */ 670 ath_tx_addto_baw(sc, tid, bf); 671 ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim); 672 list_move_tail(&bf->list, bf_q); 673 if (bf_prev) { 674 bf_prev->bf_next = bf; 675 bf_prev->bf_desc->ds_link = bf->bf_daddr; 676 } 677 bf_prev = bf; 678 679 } while (!list_empty(&tid->buf_q)); 680 681 bf_first->bf_al = al; 682 bf_first->bf_nframes = nframes; 683 684 return status; 685 #undef PADBYTES 686 } 687 688 static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq, 689 struct ath_atx_tid *tid) 690 { 691 struct ath_buf *bf; 692 enum ATH_AGGR_STATUS status; 693 struct list_head bf_q; 694 695 do { 696 if (list_empty(&tid->buf_q)) 697 return; 698 699 INIT_LIST_HEAD(&bf_q); 700 701 status = ath_tx_form_aggr(sc, txq, tid, &bf_q); 702 703 /* 704 * no frames picked up to be aggregated; 705 * block-ack window is not open. 
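		 * (In that case ath_tx_form_aggr() returned
		 * ATH_AGGR_BAW_CLOSED without moving anything onto bf_q,
		 * so there is nothing to hand to the hardware yet.)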
706 */ 707 if (list_empty(&bf_q)) 708 break; 709 710 bf = list_first_entry(&bf_q, struct ath_buf, list); 711 bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list); 712 713 /* if only one frame, send as non-aggregate */ 714 if (bf->bf_nframes == 1) { 715 bf->bf_state.bf_type &= ~BUF_AGGR; 716 ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc); 717 ath_buf_set_rate(sc, bf); 718 ath_tx_txqaddbuf(sc, txq, &bf_q); 719 continue; 720 } 721 722 /* setup first desc of aggregate */ 723 bf->bf_state.bf_type |= BUF_AGGR; 724 ath_buf_set_rate(sc, bf); 725 ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al); 726 727 /* anchor last desc of aggregate */ 728 ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc); 729 730 ath_tx_txqaddbuf(sc, txq, &bf_q); 731 TX_STAT_INC(txq->axq_qnum, a_aggr); 732 733 } while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH && 734 status != ATH_AGGR_BAW_CLOSED); 735 } 736 737 void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, 738 u16 tid, u16 *ssn) 739 { 740 struct ath_atx_tid *txtid; 741 struct ath_node *an; 742 743 an = (struct ath_node *)sta->drv_priv; 744 txtid = ATH_AN_2_TID(an, tid); 745 txtid->state |= AGGR_ADDBA_PROGRESS; 746 ath_tx_pause_tid(sc, txtid); 747 *ssn = txtid->seq_start; 748 } 749 750 void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) 751 { 752 struct ath_node *an = (struct ath_node *)sta->drv_priv; 753 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid); 754 struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum]; 755 struct ath_buf *bf; 756 struct list_head bf_head; 757 INIT_LIST_HEAD(&bf_head); 758 759 if (txtid->state & AGGR_CLEANUP) 760 return; 761 762 if (!(txtid->state & AGGR_ADDBA_COMPLETE)) { 763 txtid->state &= ~AGGR_ADDBA_PROGRESS; 764 return; 765 } 766 767 ath_tx_pause_tid(sc, txtid); 768 769 /* drop all software retried frames and mark this TID */ 770 spin_lock_bh(&txq->axq_lock); 771 while (!list_empty(&txtid->buf_q)) { 772 bf = list_first_entry(&txtid->buf_q, struct ath_buf, list); 773 if (!bf_isretried(bf)) { 774 /* 775 * NB: it's based on the assumption that 776 * software retried frame will always stay 777 * at the head of software queue. 
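		 * (The assumption holds because new frames are appended to
		 * tid->buf_q with list_move_tail(), while software-retried
		 * frames are spliced back in at the head of the queue by
		 * ath_tx_complete_aggr().)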
778 */ 779 break; 780 } 781 list_move_tail(&bf->list, &bf_head); 782 ath_tx_update_baw(sc, txtid, bf->bf_seqno); 783 ath_tx_complete_buf(sc, bf, txq, &bf_head, 0, 0); 784 } 785 spin_unlock_bh(&txq->axq_lock); 786 787 if (txtid->baw_head != txtid->baw_tail) { 788 txtid->state |= AGGR_CLEANUP; 789 } else { 790 txtid->state &= ~AGGR_ADDBA_COMPLETE; 791 ath_tx_flush_tid(sc, txtid); 792 } 793 } 794 795 void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) 796 { 797 struct ath_atx_tid *txtid; 798 struct ath_node *an; 799 800 an = (struct ath_node *)sta->drv_priv; 801 802 if (sc->sc_flags & SC_OP_TXAGGR) { 803 txtid = ATH_AN_2_TID(an, tid); 804 txtid->baw_size = 805 IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor; 806 txtid->state |= AGGR_ADDBA_COMPLETE; 807 txtid->state &= ~AGGR_ADDBA_PROGRESS; 808 ath_tx_resume_tid(sc, txtid); 809 } 810 } 811 812 bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno) 813 { 814 struct ath_atx_tid *txtid; 815 816 if (!(sc->sc_flags & SC_OP_TXAGGR)) 817 return false; 818 819 txtid = ATH_AN_2_TID(an, tidno); 820 821 if (!(txtid->state & (AGGR_ADDBA_COMPLETE | AGGR_ADDBA_PROGRESS))) 822 return true; 823 return false; 824 } 825 826 /********************/ 827 /* Queue Management */ 828 /********************/ 829 830 static void ath_txq_drain_pending_buffers(struct ath_softc *sc, 831 struct ath_txq *txq) 832 { 833 struct ath_atx_ac *ac, *ac_tmp; 834 struct ath_atx_tid *tid, *tid_tmp; 835 836 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) { 837 list_del(&ac->list); 838 ac->sched = false; 839 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) { 840 list_del(&tid->list); 841 tid->sched = false; 842 ath_tid_drain(sc, txq, tid); 843 } 844 } 845 } 846 847 struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) 848 { 849 struct ath_hw *ah = sc->sc_ah; 850 struct ath_common *common = ath9k_hw_common(ah); 851 struct ath9k_tx_queue_info qi; 852 int qnum; 853 854 memset(&qi, 0, sizeof(qi)); 855 qi.tqi_subtype = subtype; 856 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT; 857 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT; 858 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT; 859 qi.tqi_physCompBuf = 0; 860 861 /* 862 * Enable interrupts only for EOL and DESC conditions. 863 * We mark tx descriptors to receive a DESC interrupt 864 * when a tx queue gets deep; otherwise waiting for the 865 * EOL to reap descriptors. Note that this is done to 866 * reduce interrupt load and this only defers reaping 867 * descriptors, never transmitting frames. Aside from 868 * reducing interrupts this also permits more concurrency. 869 * The only potential downside is if the tx queue backs 870 * up in which case the top half of the kernel may backup 871 * due to a lack of tx descriptors. 872 * 873 * The UAPSD queue is an exception, since we take a desc- 874 * based intr on the EOSP frames. 
875 */ 876 if (qtype == ATH9K_TX_QUEUE_UAPSD) 877 qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE; 878 else 879 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE | 880 TXQ_FLAG_TXDESCINT_ENABLE; 881 qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi); 882 if (qnum == -1) { 883 /* 884 * NB: don't print a message, this happens 885 * normally on parts with too few tx queues 886 */ 887 return NULL; 888 } 889 if (qnum >= ARRAY_SIZE(sc->tx.txq)) { 890 ath_print(common, ATH_DBG_FATAL, 891 "qnum %u out of range, max %u!\n", 892 qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq)); 893 ath9k_hw_releasetxqueue(ah, qnum); 894 return NULL; 895 } 896 if (!ATH_TXQ_SETUP(sc, qnum)) { 897 struct ath_txq *txq = &sc->tx.txq[qnum]; 898 899 txq->axq_qnum = qnum; 900 txq->axq_link = NULL; 901 INIT_LIST_HEAD(&txq->axq_q); 902 INIT_LIST_HEAD(&txq->axq_acq); 903 spin_lock_init(&txq->axq_lock); 904 txq->axq_depth = 0; 905 txq->axq_tx_inprogress = false; 906 sc->tx.txqsetup |= 1<<qnum; 907 } 908 return &sc->tx.txq[qnum]; 909 } 910 911 int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype) 912 { 913 int qnum; 914 915 switch (qtype) { 916 case ATH9K_TX_QUEUE_DATA: 917 if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) { 918 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL, 919 "HAL AC %u out of range, max %zu!\n", 920 haltype, ARRAY_SIZE(sc->tx.hwq_map)); 921 return -1; 922 } 923 qnum = sc->tx.hwq_map[haltype]; 924 break; 925 case ATH9K_TX_QUEUE_BEACON: 926 qnum = sc->beacon.beaconq; 927 break; 928 case ATH9K_TX_QUEUE_CAB: 929 qnum = sc->beacon.cabq->axq_qnum; 930 break; 931 default: 932 qnum = -1; 933 } 934 return qnum; 935 } 936 937 struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb) 938 { 939 struct ath_txq *txq = NULL; 940 u16 skb_queue = skb_get_queue_mapping(skb); 941 int qnum; 942 943 qnum = ath_get_hal_qnum(skb_queue, sc); 944 txq = &sc->tx.txq[qnum]; 945 946 spin_lock_bh(&txq->axq_lock); 947 948 if (txq->axq_depth >= (ATH_TXBUF - 20)) { 949 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_XMIT, 950 "TX queue: %d is full, depth: %d\n", 951 qnum, txq->axq_depth); 952 ath_mac80211_stop_queue(sc, skb_queue); 953 txq->stopped = 1; 954 spin_unlock_bh(&txq->axq_lock); 955 return NULL; 956 } 957 958 spin_unlock_bh(&txq->axq_lock); 959 960 return txq; 961 } 962 963 int ath_txq_update(struct ath_softc *sc, int qnum, 964 struct ath9k_tx_queue_info *qinfo) 965 { 966 struct ath_hw *ah = sc->sc_ah; 967 int error = 0; 968 struct ath9k_tx_queue_info qi; 969 970 if (qnum == sc->beacon.beaconq) { 971 /* 972 * XXX: for beacon queue, we just save the parameter. 973 * It will be picked up by ath_beaconq_config when 974 * it's necessary. 975 */ 976 sc->beacon.beacon_qi = *qinfo; 977 return 0; 978 } 979 980 BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum); 981 982 ath9k_hw_get_txq_props(ah, qnum, &qi); 983 qi.tqi_aifs = qinfo->tqi_aifs; 984 qi.tqi_cwmin = qinfo->tqi_cwmin; 985 qi.tqi_cwmax = qinfo->tqi_cwmax; 986 qi.tqi_burstTime = qinfo->tqi_burstTime; 987 qi.tqi_readyTime = qinfo->tqi_readyTime; 988 989 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) { 990 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL, 991 "Unable to update hardware queue %u!\n", qnum); 992 error = -EIO; 993 } else { 994 ath9k_hw_resettxqueue(ah, qnum); 995 } 996 997 return error; 998 } 999 1000 int ath_cabq_update(struct ath_softc *sc) 1001 { 1002 struct ath9k_tx_queue_info qi; 1003 int qnum = sc->beacon.cabq->axq_qnum; 1004 1005 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi); 1006 /* 1007 * Ensure the readytime % is within the bounds. 
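	 * For example, cabqReadytime = 10 gives the CAB queue 10% of the
	 * beacon interval in the tqi_readyTime computation below.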
1008 */ 1009 if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND) 1010 sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND; 1011 else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND) 1012 sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND; 1013 1014 qi.tqi_readyTime = (sc->beacon_interval * 1015 sc->config.cabqReadytime) / 100; 1016 ath_txq_update(sc, qnum, &qi); 1017 1018 return 0; 1019 } 1020 1021 /* 1022 * Drain a given TX queue (could be Beacon or Data) 1023 * 1024 * This assumes output has been stopped and 1025 * we do not need to block ath_tx_tasklet. 1026 */ 1027 void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx) 1028 { 1029 struct ath_buf *bf, *lastbf; 1030 struct list_head bf_head; 1031 1032 INIT_LIST_HEAD(&bf_head); 1033 1034 for (;;) { 1035 spin_lock_bh(&txq->axq_lock); 1036 1037 if (list_empty(&txq->axq_q)) { 1038 txq->axq_link = NULL; 1039 spin_unlock_bh(&txq->axq_lock); 1040 break; 1041 } 1042 1043 bf = list_first_entry(&txq->axq_q, struct ath_buf, list); 1044 1045 if (bf->bf_stale) { 1046 list_del(&bf->list); 1047 spin_unlock_bh(&txq->axq_lock); 1048 1049 spin_lock_bh(&sc->tx.txbuflock); 1050 list_add_tail(&bf->list, &sc->tx.txbuf); 1051 spin_unlock_bh(&sc->tx.txbuflock); 1052 continue; 1053 } 1054 1055 lastbf = bf->bf_lastbf; 1056 if (!retry_tx) 1057 lastbf->bf_desc->ds_txstat.ts_flags = 1058 ATH9K_TX_SW_ABORTED; 1059 1060 /* remove ath_buf's of the same mpdu from txq */ 1061 list_cut_position(&bf_head, &txq->axq_q, &lastbf->list); 1062 txq->axq_depth--; 1063 1064 spin_unlock_bh(&txq->axq_lock); 1065 1066 if (bf_isampdu(bf)) 1067 ath_tx_complete_aggr(sc, txq, bf, &bf_head, 0); 1068 else 1069 ath_tx_complete_buf(sc, bf, txq, &bf_head, 0, 0); 1070 } 1071 1072 spin_lock_bh(&txq->axq_lock); 1073 txq->axq_tx_inprogress = false; 1074 spin_unlock_bh(&txq->axq_lock); 1075 1076 /* flush any pending frames if aggregation is enabled */ 1077 if (sc->sc_flags & SC_OP_TXAGGR) { 1078 if (!retry_tx) { 1079 spin_lock_bh(&txq->axq_lock); 1080 ath_txq_drain_pending_buffers(sc, txq); 1081 spin_unlock_bh(&txq->axq_lock); 1082 } 1083 } 1084 } 1085 1086 void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx) 1087 { 1088 struct ath_hw *ah = sc->sc_ah; 1089 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1090 struct ath_txq *txq; 1091 int i, npend = 0; 1092 1093 if (sc->sc_flags & SC_OP_INVALID) 1094 return; 1095 1096 /* Stop beacon queue */ 1097 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); 1098 1099 /* Stop data queues */ 1100 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { 1101 if (ATH_TXQ_SETUP(sc, i)) { 1102 txq = &sc->tx.txq[i]; 1103 ath9k_hw_stoptxdma(ah, txq->axq_qnum); 1104 npend += ath9k_hw_numtxpending(ah, txq->axq_qnum); 1105 } 1106 } 1107 1108 if (npend) { 1109 int r; 1110 1111 ath_print(common, ATH_DBG_FATAL, 1112 "Unable to stop TxDMA. 
Reset HAL!\n"); 1113 1114 spin_lock_bh(&sc->sc_resetlock); 1115 r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false); 1116 if (r) 1117 ath_print(common, ATH_DBG_FATAL, 1118 "Unable to reset hardware; reset status %d\n", 1119 r); 1120 spin_unlock_bh(&sc->sc_resetlock); 1121 } 1122 1123 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { 1124 if (ATH_TXQ_SETUP(sc, i)) 1125 ath_draintxq(sc, &sc->tx.txq[i], retry_tx); 1126 } 1127 } 1128 1129 void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) 1130 { 1131 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum); 1132 sc->tx.txqsetup &= ~(1<<txq->axq_qnum); 1133 } 1134 1135 void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) 1136 { 1137 struct ath_atx_ac *ac; 1138 struct ath_atx_tid *tid; 1139 1140 if (list_empty(&txq->axq_acq)) 1141 return; 1142 1143 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list); 1144 list_del(&ac->list); 1145 ac->sched = false; 1146 1147 do { 1148 if (list_empty(&ac->tid_q)) 1149 return; 1150 1151 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list); 1152 list_del(&tid->list); 1153 tid->sched = false; 1154 1155 if (tid->paused) 1156 continue; 1157 1158 ath_tx_sched_aggr(sc, txq, tid); 1159 1160 /* 1161 * add tid to round-robin queue if more frames 1162 * are pending for the tid 1163 */ 1164 if (!list_empty(&tid->buf_q)) 1165 ath_tx_queue_tid(txq, tid); 1166 1167 break; 1168 } while (!list_empty(&ac->tid_q)); 1169 1170 if (!list_empty(&ac->tid_q)) { 1171 if (!ac->sched) { 1172 ac->sched = true; 1173 list_add_tail(&ac->list, &txq->axq_acq); 1174 } 1175 } 1176 } 1177 1178 int ath_tx_setup(struct ath_softc *sc, int haltype) 1179 { 1180 struct ath_txq *txq; 1181 1182 if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) { 1183 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL, 1184 "HAL AC %u out of range, max %zu!\n", 1185 haltype, ARRAY_SIZE(sc->tx.hwq_map)); 1186 return 0; 1187 } 1188 txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype); 1189 if (txq != NULL) { 1190 sc->tx.hwq_map[haltype] = txq->axq_qnum; 1191 return 1; 1192 } else 1193 return 0; 1194 } 1195 1196 /***********/ 1197 /* TX, DMA */ 1198 /***********/ 1199 1200 /* 1201 * Insert a chain of ath_buf (descriptors) on a txq and 1202 * assume the descriptors are already chained together by caller. 1203 */ 1204 static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, 1205 struct list_head *head) 1206 { 1207 struct ath_hw *ah = sc->sc_ah; 1208 struct ath_common *common = ath9k_hw_common(ah); 1209 struct ath_buf *bf; 1210 1211 /* 1212 * Insert the frame on the outbound list and 1213 * pass it on to the hardware. 
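	 * If the queue's descriptor chain is empty (axq_link == NULL) the
	 * first buffer is written directly to TXDP; otherwise it is linked
	 * in behind the previous chain via *axq_link before
	 * ath9k_hw_txstart() is called.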
1214 */ 1215 1216 if (list_empty(head)) 1217 return; 1218 1219 bf = list_first_entry(head, struct ath_buf, list); 1220 1221 list_splice_tail_init(head, &txq->axq_q); 1222 txq->axq_depth++; 1223 1224 ath_print(common, ATH_DBG_QUEUE, 1225 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth); 1226 1227 if (txq->axq_link == NULL) { 1228 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr); 1229 ath_print(common, ATH_DBG_XMIT, 1230 "TXDP[%u] = %llx (%p)\n", 1231 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc); 1232 } else { 1233 *txq->axq_link = bf->bf_daddr; 1234 ath_print(common, ATH_DBG_XMIT, "link[%u] (%p)=%llx (%p)\n", 1235 txq->axq_qnum, txq->axq_link, 1236 ito64(bf->bf_daddr), bf->bf_desc); 1237 } 1238 txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link); 1239 ath9k_hw_txstart(ah, txq->axq_qnum); 1240 } 1241 1242 static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc) 1243 { 1244 struct ath_buf *bf = NULL; 1245 1246 spin_lock_bh(&sc->tx.txbuflock); 1247 1248 if (unlikely(list_empty(&sc->tx.txbuf))) { 1249 spin_unlock_bh(&sc->tx.txbuflock); 1250 return NULL; 1251 } 1252 1253 bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list); 1254 list_del(&bf->list); 1255 1256 spin_unlock_bh(&sc->tx.txbuflock); 1257 1258 return bf; 1259 } 1260 1261 static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid, 1262 struct list_head *bf_head, 1263 struct ath_tx_control *txctl) 1264 { 1265 struct ath_buf *bf; 1266 1267 bf = list_first_entry(bf_head, struct ath_buf, list); 1268 bf->bf_state.bf_type |= BUF_AMPDU; 1269 TX_STAT_INC(txctl->txq->axq_qnum, a_queued); 1270 1271 /* 1272 * Do not queue to h/w when any of the following conditions is true: 1273 * - there are pending frames in software queue 1274 * - the TID is currently paused for ADDBA/BAR request 1275 * - seqno is not within block-ack window 1276 * - h/w queue depth exceeds low water mark 1277 */ 1278 if (!list_empty(&tid->buf_q) || tid->paused || 1279 !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) || 1280 txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) { 1281 /* 1282 * Add this frame to software queue for scheduling later 1283 * for aggregation. 
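	 * (Roughly, BAW_WITHIN(seq_start, baw_size, seqno) checks that the
	 * frame falls inside the transmit window: e.g. with seq_start = 0x100
	 * and baw_size = 64, only sequence numbers 0x100-0x13f may go to the
	 * hardware; anything else stays in the software queue until the
	 * window advances.)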
1284 */ 1285 list_move_tail(&bf->list, &tid->buf_q); 1286 ath_tx_queue_tid(txctl->txq, tid); 1287 return; 1288 } 1289 1290 /* Add sub-frame to BAW */ 1291 ath_tx_addto_baw(sc, tid, bf); 1292 1293 /* Queue to h/w without aggregation */ 1294 bf->bf_nframes = 1; 1295 bf->bf_lastbf = bf; 1296 ath_buf_set_rate(sc, bf); 1297 ath_tx_txqaddbuf(sc, txctl->txq, bf_head); 1298 } 1299 1300 static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq, 1301 struct ath_atx_tid *tid, 1302 struct list_head *bf_head) 1303 { 1304 struct ath_buf *bf; 1305 1306 bf = list_first_entry(bf_head, struct ath_buf, list); 1307 bf->bf_state.bf_type &= ~BUF_AMPDU; 1308 1309 /* update starting sequence number for subsequent ADDBA request */ 1310 INCR(tid->seq_start, IEEE80211_SEQ_MAX); 1311 1312 bf->bf_nframes = 1; 1313 bf->bf_lastbf = bf; 1314 ath_buf_set_rate(sc, bf); 1315 ath_tx_txqaddbuf(sc, txq, bf_head); 1316 TX_STAT_INC(txq->axq_qnum, queued); 1317 } 1318 1319 static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq, 1320 struct list_head *bf_head) 1321 { 1322 struct ath_buf *bf; 1323 1324 bf = list_first_entry(bf_head, struct ath_buf, list); 1325 1326 bf->bf_lastbf = bf; 1327 bf->bf_nframes = 1; 1328 ath_buf_set_rate(sc, bf); 1329 ath_tx_txqaddbuf(sc, txq, bf_head); 1330 TX_STAT_INC(txq->axq_qnum, queued); 1331 } 1332 1333 static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb) 1334 { 1335 struct ieee80211_hdr *hdr; 1336 enum ath9k_pkt_type htype; 1337 __le16 fc; 1338 1339 hdr = (struct ieee80211_hdr *)skb->data; 1340 fc = hdr->frame_control; 1341 1342 if (ieee80211_is_beacon(fc)) 1343 htype = ATH9K_PKT_TYPE_BEACON; 1344 else if (ieee80211_is_probe_resp(fc)) 1345 htype = ATH9K_PKT_TYPE_PROBE_RESP; 1346 else if (ieee80211_is_atim(fc)) 1347 htype = ATH9K_PKT_TYPE_ATIM; 1348 else if (ieee80211_is_pspoll(fc)) 1349 htype = ATH9K_PKT_TYPE_PSPOLL; 1350 else 1351 htype = ATH9K_PKT_TYPE_NORMAL; 1352 1353 return htype; 1354 } 1355 1356 static bool is_pae(struct sk_buff *skb) 1357 { 1358 struct ieee80211_hdr *hdr; 1359 __le16 fc; 1360 1361 hdr = (struct ieee80211_hdr *)skb->data; 1362 fc = hdr->frame_control; 1363 1364 if (ieee80211_is_data(fc)) { 1365 if (ieee80211_is_nullfunc(fc) || 1366 /* Port Access Entity (IEEE 802.1X) */ 1367 (skb->protocol == cpu_to_be16(ETH_P_PAE))) { 1368 return true; 1369 } 1370 } 1371 1372 return false; 1373 } 1374 1375 static int get_hw_crypto_keytype(struct sk_buff *skb) 1376 { 1377 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1378 1379 if (tx_info->control.hw_key) { 1380 if (tx_info->control.hw_key->alg == ALG_WEP) 1381 return ATH9K_KEY_TYPE_WEP; 1382 else if (tx_info->control.hw_key->alg == ALG_TKIP) 1383 return ATH9K_KEY_TYPE_TKIP; 1384 else if (tx_info->control.hw_key->alg == ALG_CCMP) 1385 return ATH9K_KEY_TYPE_AES; 1386 } 1387 1388 return ATH9K_KEY_TYPE_CLEAR; 1389 } 1390 1391 static void assign_aggr_tid_seqno(struct sk_buff *skb, 1392 struct ath_buf *bf) 1393 { 1394 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1395 struct ieee80211_hdr *hdr; 1396 struct ath_node *an; 1397 struct ath_atx_tid *tid; 1398 __le16 fc; 1399 u8 *qc; 1400 1401 if (!tx_info->control.sta) 1402 return; 1403 1404 an = (struct ath_node *)tx_info->control.sta->drv_priv; 1405 hdr = (struct ieee80211_hdr *)skb->data; 1406 fc = hdr->frame_control; 1407 1408 if (ieee80211_is_data_qos(fc)) { 1409 qc = ieee80211_get_qos_ctl(hdr); 1410 bf->bf_tidno = qc[0] & 0xf; 1411 } 1412 1413 /* 1414 * For HT capable stations, we save tidno for later use. 
1415 * We also override seqno set by upper layer with the one 1416 * in tx aggregation state. 1417 */ 1418 tid = ATH_AN_2_TID(an, bf->bf_tidno); 1419 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT); 1420 bf->bf_seqno = tid->seq_next; 1421 INCR(tid->seq_next, IEEE80211_SEQ_MAX); 1422 } 1423 1424 static int setup_tx_flags(struct ath_softc *sc, struct sk_buff *skb, 1425 struct ath_txq *txq) 1426 { 1427 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1428 int flags = 0; 1429 1430 flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */ 1431 flags |= ATH9K_TXDESC_INTREQ; 1432 1433 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) 1434 flags |= ATH9K_TXDESC_NOACK; 1435 1436 return flags; 1437 } 1438 1439 /* 1440 * rix - rate index 1441 * pktlen - total bytes (delims + data + fcs + pads + pad delims) 1442 * width - 0 for 20 MHz, 1 for 40 MHz 1443 * half_gi - to use 4us v/s 3.6 us for symbol time 1444 */ 1445 static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf, 1446 int width, int half_gi, bool shortPreamble) 1447 { 1448 u32 nbits, nsymbits, duration, nsymbols; 1449 int streams, pktlen; 1450 1451 pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen; 1452 1453 /* find number of symbols: PLCP + data */ 1454 nbits = (pktlen << 3) + OFDM_PLCP_BITS; 1455 nsymbits = bits_per_symbol[rix][width]; 1456 nsymbols = (nbits + nsymbits - 1) / nsymbits; 1457 1458 if (!half_gi) 1459 duration = SYMBOL_TIME(nsymbols); 1460 else 1461 duration = SYMBOL_TIME_HALFGI(nsymbols); 1462 1463 /* addup duration for legacy/ht training and signal fields */ 1464 streams = HT_RC_2_STREAMS(rix); 1465 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams); 1466 1467 return duration; 1468 } 1469 1470 static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf) 1471 { 1472 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1473 struct ath9k_11n_rate_series series[4]; 1474 struct sk_buff *skb; 1475 struct ieee80211_tx_info *tx_info; 1476 struct ieee80211_tx_rate *rates; 1477 const struct ieee80211_rate *rate; 1478 struct ieee80211_hdr *hdr; 1479 int i, flags = 0; 1480 u8 rix = 0, ctsrate = 0; 1481 bool is_pspoll; 1482 1483 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4); 1484 1485 skb = bf->bf_mpdu; 1486 tx_info = IEEE80211_SKB_CB(skb); 1487 rates = tx_info->control.rates; 1488 hdr = (struct ieee80211_hdr *)skb->data; 1489 is_pspoll = ieee80211_is_pspoll(hdr->frame_control); 1490 1491 /* 1492 * We check if Short Preamble is needed for the CTS rate by 1493 * checking the BSS's global flag. 1494 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used. 1495 */ 1496 rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info); 1497 ctsrate = rate->hw_value; 1498 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT) 1499 ctsrate |= rate->hw_value_short; 1500 1501 /* 1502 * ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. 1503 * Check the first rate in the series to decide whether RTS/CTS 1504 * or CTS-to-self has to be used. 
 */
	if (rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
		flags = ATH9K_TXDESC_CTSENA;
	else if (rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
		flags = ATH9K_TXDESC_RTSENA;

	/* FIXME: Handle aggregation protection */
	if (sc->config.ath_aggr_prot &&
	    (!bf_isaggr(bf) || (bf_isaggr(bf) && bf->bf_al < 8192))) {
		flags = ATH9K_TXDESC_RTSENA;
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit))
		flags &= ~(ATH9K_TXDESC_RTSENA);

	for (i = 0; i < 4; i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		series[i].Tries = rates[i].count;
		series[i].ChSel = common->tx_chainmask;

		if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			series[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates */
			series[i].Rate = rix | 0x80;
			series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
				 is_40, is_sgi, is_sp);
			continue;
		}

		/* legacy rates */
		/* look up the rate entry for this series before the ERP check */
		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		series[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				series[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

		series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
			phy, rate->bitrate * 100, bf->bf_frmlen, rix, is_sp);
	}

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
				     bf->bf_lastbf->bf_desc,
				     !is_pspoll, ctsrate,
				     0, series, 4, flags);

	if (sc->config.ath_aggr_prot && flags)
		ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
}

static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
			       struct sk_buff *skb,
			       struct ath_tx_control *txctl)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int hdrlen;
	__le16 fc;
	int padpos, padsize;

	tx_info->pad[0] = 0;
	switch (txctl->frame_type) {
	case ATH9K_NOT_INTERNAL:
		break;
	case ATH9K_INT_PAUSE:
		tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_PAUSE;
		/* fall through */
	case ATH9K_INT_UNPAUSE:
		tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_INTERNAL;
		break;
	}
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;

	ATH_TXBUF_RESET(bf);

	bf->aphy = aphy;
	bf->bf_frmlen = skb->len + FCS_LEN;
	/* Remove the padding size from bf_frmlen, if any */
	padpos =
ath9k_cmn_padpos(hdr->frame_control); 1613 padsize = padpos & 3; 1614 if (padsize && skb->len>padpos+padsize) { 1615 bf->bf_frmlen -= padsize; 1616 } 1617 1618 if (conf_is_ht(&hw->conf)) 1619 bf->bf_state.bf_type |= BUF_HT; 1620 1621 bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq); 1622 1623 bf->bf_keytype = get_hw_crypto_keytype(skb); 1624 if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) { 1625 bf->bf_frmlen += tx_info->control.hw_key->icv_len; 1626 bf->bf_keyix = tx_info->control.hw_key->hw_key_idx; 1627 } else { 1628 bf->bf_keyix = ATH9K_TXKEYIX_INVALID; 1629 } 1630 1631 if (ieee80211_is_data_qos(fc) && bf_isht(bf) && 1632 (sc->sc_flags & SC_OP_TXAGGR)) 1633 assign_aggr_tid_seqno(skb, bf); 1634 1635 bf->bf_mpdu = skb; 1636 1637 bf->bf_dmacontext = dma_map_single(sc->dev, skb->data, 1638 skb->len, DMA_TO_DEVICE); 1639 if (unlikely(dma_mapping_error(sc->dev, bf->bf_dmacontext))) { 1640 bf->bf_mpdu = NULL; 1641 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL, 1642 "dma_mapping_error() on TX\n"); 1643 return -ENOMEM; 1644 } 1645 1646 bf->bf_buf_addr = bf->bf_dmacontext; 1647 1648 /* tag if this is a nullfunc frame to enable PS when AP acks it */ 1649 if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc)) { 1650 bf->bf_isnullfunc = true; 1651 sc->sc_flags &= ~SC_OP_NULLFUNC_COMPLETED; 1652 } else 1653 bf->bf_isnullfunc = false; 1654 1655 return 0; 1656 } 1657 1658 /* FIXME: tx power */ 1659 static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf, 1660 struct ath_tx_control *txctl) 1661 { 1662 struct sk_buff *skb = bf->bf_mpdu; 1663 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1664 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1665 struct ath_node *an = NULL; 1666 struct list_head bf_head; 1667 struct ath_desc *ds; 1668 struct ath_atx_tid *tid; 1669 struct ath_hw *ah = sc->sc_ah; 1670 int frm_type; 1671 __le16 fc; 1672 1673 frm_type = get_hw_packet_type(skb); 1674 fc = hdr->frame_control; 1675 1676 INIT_LIST_HEAD(&bf_head); 1677 list_add_tail(&bf->list, &bf_head); 1678 1679 ds = bf->bf_desc; 1680 ds->ds_link = 0; 1681 ds->ds_data = bf->bf_buf_addr; 1682 1683 ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER, 1684 bf->bf_keyix, bf->bf_keytype, bf->bf_flags); 1685 1686 ath9k_hw_filltxdesc(ah, ds, 1687 skb->len, /* segment length */ 1688 true, /* first segment */ 1689 true, /* last segment */ 1690 ds); /* first descriptor */ 1691 1692 spin_lock_bh(&txctl->txq->axq_lock); 1693 1694 if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) && 1695 tx_info->control.sta) { 1696 an = (struct ath_node *)tx_info->control.sta->drv_priv; 1697 tid = ATH_AN_2_TID(an, bf->bf_tidno); 1698 1699 if (!ieee80211_is_data_qos(fc)) { 1700 ath_tx_send_normal(sc, txctl->txq, &bf_head); 1701 goto tx_done; 1702 } 1703 1704 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && !is_pae(skb)) { 1705 /* 1706 * Try aggregation if it's a unicast data frame 1707 * and the destination is HT capable. 1708 */ 1709 ath_tx_send_ampdu(sc, tid, &bf_head, txctl); 1710 } else { 1711 /* 1712 * Send this frame as regular when ADDBA 1713 * exchange is neither complete nor pending. 
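			 * (This is the case when mac80211 did not set
			 * IEEE80211_TX_CTL_AMPDU on the frame, or when the
			 * frame is a PAE/nullfunc frame that must not be
			 * aggregated; see is_pae() above.)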
			 */
			ath_tx_send_ht_normal(sc, txctl->txq,
					      tid, &bf_head);
		}
	} else {
		ath_tx_send_normal(sc, txctl->txq, &bf_head);
	}

tx_done:
	spin_unlock_bh(&txctl->txq->axq_lock);
}

/* Upon failure caller should free skb */
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
		 struct ath_tx_control *txctl)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_buf *bf;
	int r;

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
		ath_print(common, ATH_DBG_XMIT, "TX buffers are full\n");
		return -1;
	}

	r = ath_tx_setup_buffer(hw, bf, skb, txctl);
	if (unlikely(r)) {
		struct ath_txq *txq = txctl->txq;

		ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");

		/* Upon ath_tx_processq() this TX queue will be resumed; we
		 * guarantee this will happen by knowing beforehand that
		 * we will at least have to run TX completion on one buffer
		 * on the queue */
		spin_lock_bh(&txq->axq_lock);
		if (sc->tx.txq[txq->axq_qnum].axq_depth > 1) {
			ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
			txq->stopped = 1;
		}
		spin_unlock_bh(&txq->axq_lock);

		spin_lock_bh(&sc->tx.txbuflock);
		list_add_tail(&bf->list, &sc->tx.txbuf);
		spin_unlock_bh(&sc->tx.txbuflock);

		return r;
	}

	ath_tx_start_dma(sc, bf, txctl);

	return 0;
}

void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	int padpos, padsize;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath_tx_control txctl;

	memset(&txctl, 0, sizeof(struct ath_tx_control));

	/*
	 * As a temporary workaround, assign seq# here; this will likely need
	 * to be cleaned up to work better with Beacon transmission and virtual
	 * BSSes.
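	 * The sequence number occupies the upper 12 bits of seq_ctrl and the
	 * fragment number the low four bits, which is why sc->tx.seq_no below
	 * is advanced in steps of 0x10 and only on the first fragment.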
1787 */ 1788 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { 1789 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) 1790 sc->tx.seq_no += 0x10; 1791 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); 1792 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no); 1793 } 1794 1795 /* Add the padding after the header if this is not already done */ 1796 padpos = ath9k_cmn_padpos(hdr->frame_control); 1797 padsize = padpos & 3; 1798 if (padsize && skb->len>padpos) { 1799 if (skb_headroom(skb) < padsize) { 1800 ath_print(common, ATH_DBG_XMIT, 1801 "TX CABQ padding failed\n"); 1802 dev_kfree_skb_any(skb); 1803 return; 1804 } 1805 skb_push(skb, padsize); 1806 memmove(skb->data, skb->data + padsize, padpos); 1807 } 1808 1809 txctl.txq = sc->beacon.cabq; 1810 1811 ath_print(common, ATH_DBG_XMIT, 1812 "transmitting CABQ packet, skb: %p\n", skb); 1813 1814 if (ath_tx_start(hw, skb, &txctl) != 0) { 1815 ath_print(common, ATH_DBG_XMIT, "CABQ TX failed\n"); 1816 goto exit; 1817 } 1818 1819 return; 1820 exit: 1821 dev_kfree_skb_any(skb); 1822 } 1823 1824 /*****************/ 1825 /* TX Completion */ 1826 /*****************/ 1827 1828 static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, 1829 struct ath_wiphy *aphy, int tx_flags) 1830 { 1831 struct ieee80211_hw *hw = sc->hw; 1832 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1833 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1834 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data; 1835 int padpos, padsize; 1836 1837 ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb); 1838 1839 if (aphy) 1840 hw = aphy->hw; 1841 1842 if (tx_flags & ATH_TX_BAR) 1843 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; 1844 1845 if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) { 1846 /* Frame was ACKed */ 1847 tx_info->flags |= IEEE80211_TX_STAT_ACK; 1848 } 1849 1850 padpos = ath9k_cmn_padpos(hdr->frame_control); 1851 padsize = padpos & 3; 1852 if (padsize && skb->len>padpos+padsize) { 1853 /* 1854 * Remove MAC header padding before giving the frame back to 1855 * mac80211. 
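	 * The transmit path inserted "padpos & 3" pad bytes behind the 802.11
	 * header to 4-byte-align the payload; the memmove below shifts the
	 * header back up against the payload and skb_pull() trims the pad
	 * from the front of the skb.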
1856 */ 1857 memmove(skb->data + padsize, skb->data, padpos); 1858 skb_pull(skb, padsize); 1859 } 1860 1861 if (sc->sc_flags & SC_OP_WAIT_FOR_TX_ACK) { 1862 sc->sc_flags &= ~SC_OP_WAIT_FOR_TX_ACK; 1863 ath_print(common, ATH_DBG_PS, 1864 "Going back to sleep after having " 1865 "received TX status (0x%x)\n", 1866 sc->sc_flags & (SC_OP_WAIT_FOR_BEACON | 1867 SC_OP_WAIT_FOR_CAB | 1868 SC_OP_WAIT_FOR_PSPOLL_DATA | 1869 SC_OP_WAIT_FOR_TX_ACK)); 1870 } 1871 1872 if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL)) 1873 ath9k_tx_status(hw, skb); 1874 else 1875 ieee80211_tx_status(hw, skb); 1876 } 1877 1878 static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, 1879 struct ath_txq *txq, 1880 struct list_head *bf_q, 1881 int txok, int sendbar) 1882 { 1883 struct sk_buff *skb = bf->bf_mpdu; 1884 unsigned long flags; 1885 int tx_flags = 0; 1886 1887 if (sendbar) 1888 tx_flags = ATH_TX_BAR; 1889 1890 if (!txok) { 1891 tx_flags |= ATH_TX_ERROR; 1892 1893 if (bf_isxretried(bf)) 1894 tx_flags |= ATH_TX_XRETRY; 1895 } 1896 1897 dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE); 1898 ath_tx_complete(sc, skb, bf->aphy, tx_flags); 1899 ath_debug_stat_tx(sc, txq, bf); 1900 1901 /* 1902 * Return the list of ath_buf of this mpdu to free queue 1903 */ 1904 spin_lock_irqsave(&sc->tx.txbuflock, flags); 1905 list_splice_tail_init(bf_q, &sc->tx.txbuf); 1906 spin_unlock_irqrestore(&sc->tx.txbuflock, flags); 1907 } 1908 1909 static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf, 1910 int txok) 1911 { 1912 struct ath_buf *bf_last = bf->bf_lastbf; 1913 struct ath_desc *ds = bf_last->bf_desc; 1914 u16 seq_st = 0; 1915 u32 ba[WME_BA_BMP_SIZE >> 5]; 1916 int ba_index; 1917 int nbad = 0; 1918 int isaggr = 0; 1919 1920 if (ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED) 1921 return 0; 1922 1923 isaggr = bf_isaggr(bf); 1924 if (isaggr) { 1925 seq_st = ATH_DS_BA_SEQ(ds); 1926 memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3); 1927 } 1928 1929 while (bf) { 1930 ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno); 1931 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index))) 1932 nbad++; 1933 1934 bf = bf->bf_next; 1935 } 1936 1937 return nbad; 1938 } 1939 1940 static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds, 1941 int nbad, int txok, bool update_rc) 1942 { 1943 struct sk_buff *skb = bf->bf_mpdu; 1944 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1945 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1946 struct ieee80211_hw *hw = bf->aphy->hw; 1947 u8 i, tx_rateindex; 1948 1949 if (txok) 1950 tx_info->status.ack_signal = ds->ds_txstat.ts_rssi; 1951 1952 tx_rateindex = ds->ds_txstat.ts_rateindex; 1953 WARN_ON(tx_rateindex >= hw->max_rates); 1954 1955 if (update_rc) 1956 tx_info->pad[0] |= ATH_TX_INFO_UPDATE_RC; 1957 if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) 1958 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED; 1959 1960 if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 && 1961 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) { 1962 if (ieee80211_is_data(hdr->frame_control)) { 1963 if (ds->ds_txstat.ts_flags & 1964 (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN)) 1965 tx_info->pad[0] |= ATH_TX_INFO_UNDERRUN; 1966 if ((ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY) || 1967 (ds->ds_txstat.ts_status & ATH9K_TXERR_FIFO)) 1968 tx_info->pad[0] |= ATH_TX_INFO_XRETRY; 1969 tx_info->status.ampdu_len = bf->bf_nframes; 1970 tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad; 1971 } 1972 } 1973 1974 for (i = 
	for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
		tx_info->status.rates[i].count = 0;
		tx_info->status.rates[i].idx = -1;
	}

	tx_info->status.rates[tx_rateindex].count = bf->bf_retries + 1;
}

static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
{
	int qnum;

	spin_lock_bh(&txq->axq_lock);
	if (txq->stopped &&
	    sc->tx.txq[txq->axq_qnum].axq_depth <= (ATH_TXBUF - 20)) {
		qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc);
		if (qnum != -1) {
			ath_mac80211_start_queue(sc, qnum);
			txq->stopped = 0;
		}
	}
	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	int txok;
	int status;

	ath_print(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
		  txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		  txq->axq_link);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);
		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			spin_unlock_bh(&txq->axq_lock);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition that a BH gets scheduled
		 * after sw writes TxE and before hw re-load the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_stale) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &txq->axq_q)) {
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_entry(bf_held->list.next,
						struct ath_buf, list);
			}
		}

		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc;

		status = ath9k_hw_txprocdesc(ah, ds);
		if (status == -EINPROGRESS) {
			spin_unlock_bh(&txq->axq_lock);
			break;
		}

		/*
		 * We now know the nullfunc frame has been ACKed so we
		 * can disable RX.
		 */
		if (bf->bf_isnullfunc &&
		    (ds->ds_txstat.ts_status & ATH9K_TX_ACKED)) {
			if ((sc->sc_flags & SC_OP_PS_ENABLED)) {
				sc->ps_enabled = true;
				ath9k_hw_setrxabort(sc->sc_ah, 1);
			} else
				sc->sc_flags |= SC_OP_NULLFUNC_COMPLETED;
		}

		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
		lastbf->bf_stale = true;
		INIT_LIST_HEAD(&bf_head);
		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
					  &txq->axq_q, lastbf->list.prev);

		txq->axq_depth--;
		txok = !(ds->ds_txstat.ts_status & ATH9K_TXERR_MASK);
		txq->axq_tx_inprogress = false;
		spin_unlock_bh(&txq->axq_lock);

		if (bf_held) {
			spin_lock_bh(&sc->tx.txbuflock);
			list_move_tail(&bf_held->list, &sc->tx.txbuf);
			spin_unlock_bh(&sc->tx.txbuflock);
		}

		if (!bf_isampdu(bf)) {
			/*
			 * This frame is sent out as a single frame.
			 * Use hardware retry status for this frame.
			 */
			bf->bf_retries = ds->ds_txstat.ts_longretry;
			if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_state.bf_type |= BUF_XRETRY;
			ath_tx_rc_status(bf, ds, 0, txok, true);
		}
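		/*
		 * Aggregates go through the A-MPDU completion path, which
		 * walks the block-ack bitmap and retries or completes each
		 * subframe individually; single frames are completed
		 * directly here.
		 */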
		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, txok);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, txok, 0);

		ath_wake_mac80211_queue(sc, txq);

		spin_lock_bh(&txq->axq_lock);
		if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
}

static void ath_tx_complete_poll_work(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc,
					    tx_complete_work.work);
	struct ath_txq *txq;
	int i;
	bool needreset = false;

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			spin_lock_bh(&txq->axq_lock);
			if (txq->axq_depth) {
				if (txq->axq_tx_inprogress) {
					needreset = true;
					spin_unlock_bh(&txq->axq_lock);
					break;
				} else {
					txq->axq_tx_inprogress = true;
				}
			}
			spin_unlock_bh(&txq->axq_lock);
		}

	if (needreset) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
			  "tx hung, resetting the chip\n");
		ath9k_ps_wakeup(sc);
		ath_reset(sc, false);
		ath9k_ps_restore(sc);
	}

	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
				     msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
}

void ath_tx_tasklet(struct ath_softc *sc)
{
	int i;
	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);

	ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
			ath_tx_processq(sc, &sc->tx.txq[i]);
	}
}

/*****************/
/* Init, Cleanup */
/*****************/

int ath_tx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int error = 0;

	spin_lock_init(&sc->tx.txbuflock);

	error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
				  "tx", nbufs, 1);
	if (error != 0) {
		ath_print(common, ATH_DBG_FATAL,
			  "Failed to allocate tx descriptors: %d\n", error);
		goto err;
	}

	error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
				  "beacon", ATH_BCBUF, 1);
	if (error != 0) {
		ath_print(common, ATH_DBG_FATAL,
			  "Failed to allocate beacon descriptors: %d\n", error);
		goto err;
	}

	INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);

err:
	if (error != 0)
		ath_tx_cleanup(sc);

	return error;
}

void ath_tx_cleanup(struct ath_softc *sc)
{
	if (sc->beacon.bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);

	if (sc->tx.txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
}

void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID;
	     tidno++, tid++) {
		tid->an = an;
		tid->tidno = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size = WME_MAX_BA;
		tid->baw_head = tid->baw_tail = 0;
		tid->sched = false;
		tid->paused = false;
		tid->state &= ~AGGR_CLEANUP;
		INIT_LIST_HEAD(&tid->buf_q);
		acno = TID_TO_WME_AC(tidno);
		tid->ac = &an->ac[acno];
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_ADDBA_PROGRESS;
	}
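	/*
	 * Map each WMM access category onto the hardware transmit queue
	 * that was set up for it, so per-TID traffic is scheduled on the
	 * right ath_txq.
	 */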
	for (acno = 0, ac = &an->ac[acno];
	     acno < WME_NUM_AC; acno++, ac++) {
		ac->sched = false;
		INIT_LIST_HEAD(&ac->tid_q);

		switch (acno) {
		case WME_AC_BE:
			ac->qnum = ath_tx_get_qnum(sc,
				   ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
			break;
		case WME_AC_BK:
			ac->qnum = ath_tx_get_qnum(sc,
				   ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BK);
			break;
		case WME_AC_VI:
			ac->qnum = ath_tx_get_qnum(sc,
				   ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VI);
			break;
		case WME_AC_VO:
			ac->qnum = ath_tx_get_qnum(sc,
				   ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VO);
			break;
		}
	}
}

void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	int i;
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;
	struct ath_txq *txq;

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];

			spin_lock(&txq->axq_lock);

			list_for_each_entry_safe(ac,
					ac_tmp, &txq->axq_acq, list) {
				tid = list_first_entry(&ac->tid_q,
						struct ath_atx_tid, list);
				if (tid && tid->an != an)
					continue;
				list_del(&ac->list);
				ac->sched = false;

				list_for_each_entry_safe(tid,
						tid_tmp, &ac->tid_q, list) {
					list_del(&tid->list);
					tid->sched = false;
					ath_tid_drain(sc, txq, tid);
					tid->state &= ~AGGR_ADDBA_COMPLETE;
					tid->state &= ~AGGR_CLEANUP;
				}
			}

			spin_unlock(&txq->axq_lock);
		}
	}
}