// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include "mt76.h"

static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;
	dma_addr_t addr;
	u8 *txwi;
	int size;

	size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
	txwi = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
	if (!txwi)
		return NULL;

	addr = dma_map_single(dev->dev, txwi, dev->drv->txwi_size,
			      DMA_TO_DEVICE);
	t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
	t->dma_addr = addr;

	return t;
}

static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock_bh(&dev->lock);
	if (!list_empty(&dev->txwi_cache)) {
		t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock_bh(&dev->lock);

	return t;
}

struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

	if (t)
		return t;

	return mt76_alloc_txwi(dev);
}

void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock_bh(&dev->lock);
	list_add(&t->list, &dev->txwi_cache);
	spin_unlock_bh(&dev->lock);
}
EXPORT_SYMBOL_GPL(mt76_put_txwi);

void mt76_tx_free(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	while ((t = __mt76_get_txwi(dev)) != NULL)
		dma_unmap_single(dev->dev, t->dma_addr, dev->drv->txwi_size,
				 DMA_TO_DEVICE);
}

static int
mt76_txq_get_qid(struct ieee80211_txq *txq)
{
	if (!txq->sta)
		return MT_TXQ_BE;

	return txq->ac;
}

static void
mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    !ieee80211_is_data_present(hdr->frame_control))
		return;

	mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
}

void
mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
		    __acquires(&dev->status_list.lock)
{
	__skb_queue_head_init(list);
	spin_lock_bh(&dev->status_list.lock);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_lock);

void
mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
		      __releases(&dev->status_list.lock)
{
	struct ieee80211_hw *hw;
	struct sk_buff *skb;

	spin_unlock_bh(&dev->status_list.lock);

	while ((skb = __skb_dequeue(list)) != NULL) {
		hw = mt76_tx_status_get_hw(dev, skb);
		ieee80211_tx_status(hw, skb);
	}
}
EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);

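/* Complete a tx status entry once both the DMA and the tx status
 * completions have been recorded in cb->flags, then move the skb to the
 * caller's list so the status can be reported outside of the
 * status_list lock.
 */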
static void
__mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
			  struct sk_buff_head *list)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	u8 done = MT_TX_CB_DMA_DONE | MT_TX_CB_TXS_DONE;

	flags |= cb->flags;
	cb->flags = flags;

	if ((flags & done) != done)
		return;

	__skb_unlink(skb, &dev->status_list);

	/* Tx status can be unreliable. If it fails, mark the frame as ACKed */
	if (flags & MT_TX_CB_TXS_FAILED) {
		ieee80211_tx_info_clear_status(info);
		info->status.rates[0].idx = -1;
		info->flags |= IEEE80211_TX_STAT_ACK;
	}

	__skb_queue_tail(list, skb);
}

void
mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			struct sk_buff_head *list)
{
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE, list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_done);

int
mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
		       struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	int pid;

	if (!wcid)
		return MT_PACKET_ID_NO_ACK;

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		return MT_PACKET_ID_NO_ACK;

	if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
			     IEEE80211_TX_CTL_RATE_CTRL_PROBE)))
		return MT_PACKET_ID_NO_SKB;

	spin_lock_bh(&dev->status_list.lock);

	memset(cb, 0, sizeof(*cb));
	wcid->packet_id = (wcid->packet_id + 1) & MT_PACKET_ID_MASK;
	if (wcid->packet_id == MT_PACKET_ID_NO_ACK ||
	    wcid->packet_id == MT_PACKET_ID_NO_SKB)
		wcid->packet_id = MT_PACKET_ID_FIRST;

	pid = wcid->packet_id;
	cb->wcid = wcid->idx;
	cb->pktid = pid;
	cb->jiffies = jiffies;

	__skb_queue_tail(&dev->status_list, skb);
	spin_unlock_bh(&dev->status_list.lock);

	return pid;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_add);

struct sk_buff *
mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
		       struct sk_buff_head *list)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&dev->status_list, skb, tmp) {
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);

		if (wcid && cb->wcid != wcid->idx)
			continue;

		if (cb->pktid == pktid)
			return skb;

		if (pktid >= 0 && !time_after(jiffies, cb->jiffies +
					      MT_TX_STATUS_SKB_TIMEOUT))
			continue;

		__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED |
						    MT_TX_CB_TXS_DONE, list);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get);

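/* Walk the status list for @wcid (or for all stations when @wcid is NULL)
 * and complete entries whose status report has timed out; with @flush set,
 * complete every matching entry unconditionally.
 */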
void
mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid, bool flush)
{
	struct sk_buff_head list;

	mt76_tx_status_lock(dev, &list);
	mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
	mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_check);

void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct ieee80211_hw *hw;
	struct sk_buff_head list;

	if (!skb->prev) {
		hw = mt76_tx_status_get_hw(dev, skb);
		ieee80211_free_txskb(hw, skb);
		return;
	}

	mt76_tx_status_lock(dev, &list);
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
	mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_complete_skb);

void
mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
	struct mt76_wcid *wcid, struct sk_buff *skb)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct mt76_queue *q;
	int qid = skb_get_queue_mapping(skb);
	bool ext_phy = phy != &dev->phy;

	if (WARN_ON(qid >= MT_TXQ_PSD)) {
		qid = MT_TXQ_BE;
		skb_set_queue_mapping(skb, qid);
	}

	if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) &&
	    !ieee80211_is_data(hdr->frame_control) &&
	    !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
		qid = MT_TXQ_PSD;
		skb_set_queue_mapping(skb, qid);
	}

	if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
		ieee80211_get_tx_rates(info->control.vif, sta, skb,
				       info->control.rates, 1);

	if (sta && ieee80211_is_data_qos(hdr->frame_control)) {
		struct ieee80211_txq *txq;
		struct mt76_txq *mtxq;
		u8 tid;

		tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
		txq = sta->txq[tid];
		mtxq = (struct mt76_txq *)txq->drv_priv;

		if (mtxq->aggr)
			mt76_check_agg_ssn(mtxq, skb);
	}

	if (ext_phy)
		info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;

	q = dev->q_tx[qid].q;

	spin_lock_bh(&q->lock);
	dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, sta);
	dev->queue_ops->kick(dev, q);

	if (q->queued > q->ndesc - 8 && !q->stopped) {
		ieee80211_stop_queue(phy->hw, skb_get_queue_mapping(skb));
		q->stopped = true;
	}

	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_tx);

static struct sk_buff *
mt76_txq_dequeue(struct mt76_phy *phy, struct mt76_txq *mtxq, bool ps)
{
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	struct ieee80211_tx_info *info;
	bool ext_phy = phy != &phy->dev->phy;
	struct sk_buff *skb;

	skb = skb_dequeue(&mtxq->retry_q);
	if (skb) {
		u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;

		if (ps && skb_queue_empty(&mtxq->retry_q))
			ieee80211_sta_set_buffered(txq->sta, tid, false);

		return skb;
	}

	skb = ieee80211_tx_dequeue(phy->hw, txq);
	if (!skb)
		return NULL;

	info = IEEE80211_SKB_CB(skb);
	if (ext_phy)
		info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;

	return skb;
}

static void
mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
		  struct sk_buff *skb, bool last)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
	if (last)
		info->flags |= IEEE80211_TX_STATUS_EOSP |
			       IEEE80211_TX_CTL_REQ_TX_STATUS;

	mt76_skb_set_moredata(skb, !last);
	dev->queue_ops->tx_queue_skb(dev, MT_TXQ_PSD, skb, wcid, sta);
}

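/* mac80211 .release_buffered_frames callback: pull up to @nframes buffered
 * frames for the TIDs set in @tids and queue them on the PS queue, marking
 * the last frame as the end of the service period.
 */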
void
mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
			     u16 tids, int nframes,
			     enum ieee80211_frame_release_type reason,
			     bool more_data)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct sk_buff *last_skb = NULL;
	struct mt76_queue *hwq = dev->q_tx[MT_TXQ_PSD].q;
	int i;

	spin_lock_bh(&hwq->lock);
	for (i = 0; tids && nframes; i++, tids >>= 1) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;
		struct sk_buff *skb;

		if (!(tids & 1))
			continue;

		do {
			skb = mt76_txq_dequeue(phy, mtxq, true);
			if (!skb)
				break;

			if (mtxq->aggr)
				mt76_check_agg_ssn(mtxq, skb);

			nframes--;
			if (last_skb)
				mt76_queue_ps_skb(dev, sta, last_skb, false);

			last_skb = skb;
		} while (nframes);
	}

	if (last_skb) {
		mt76_queue_ps_skb(dev, sta, last_skb, true);
		dev->queue_ops->kick(dev, hwq);
	} else {
		ieee80211_sta_eosp(sta);
	}

	spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);

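/* Dequeue a burst of frames from @mtxq and queue them to hardware, reusing
 * the rate selection of the first frame for the rest of the burst. A-MPDU
 * traffic is limited to 16 frames per burst, other traffic to 3.
 */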
static int
mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_sw_queue *sq,
		    struct mt76_txq *mtxq)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	enum mt76_txq_id qid = mt76_txq_get_qid(txq);
	struct mt76_wcid *wcid = mtxq->wcid;
	struct mt76_queue *hwq = sq->q;
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	int n_frames = 1, limit;
	struct ieee80211_tx_rate tx_rate;
	bool ampdu;
	bool probe;
	int idx;

	if (test_bit(MT_WCID_FLAG_PS, &wcid->flags))
		return 0;

	skb = mt76_txq_dequeue(phy, mtxq, false);
	if (!skb)
		return 0;

	info = IEEE80211_SKB_CB(skb);
	if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
		ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
				       info->control.rates, 1);
	tx_rate = info->control.rates[0];

	probe = (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
	ampdu = IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_AMPDU;
	limit = ampdu ? 16 : 3;

	if (ampdu)
		mt76_check_agg_ssn(mtxq, skb);

	idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, txq->sta);

	if (idx < 0)
		return idx;

	do {
		bool cur_ampdu;

		if (probe)
			break;

		if (test_bit(MT76_RESET, &phy->state))
			return -EBUSY;

		skb = mt76_txq_dequeue(phy, mtxq, false);
		if (!skb)
			break;

		info = IEEE80211_SKB_CB(skb);
		cur_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;

		if (ampdu != cur_ampdu ||
		    (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
			skb_queue_tail(&mtxq->retry_q, skb);
			break;
		}

		info->control.rates[0] = tx_rate;

		if (cur_ampdu)
			mt76_check_agg_ssn(mtxq, skb);

		idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid,
						   txq->sta);
		if (idx < 0)
			return idx;

		n_frames++;
	} while (n_frames < limit);

	if (!probe) {
		hwq->entry[idx].qid = sq - dev->q_tx;
		hwq->entry[idx].schedule = true;
		sq->swq_queued++;
	}

	dev->queue_ops->kick(dev, hwq);

	return n_frames;
}

static int
mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
{
	struct mt76_dev *dev = phy->dev;
	struct mt76_sw_queue *sq = &dev->q_tx[qid];
	struct mt76_queue *hwq = sq->q;
	struct ieee80211_txq *txq;
	struct mt76_txq *mtxq;
	struct mt76_wcid *wcid;
	int ret = 0;

	spin_lock_bh(&hwq->lock);
	while (1) {
		if (sq->swq_queued >= 4)
			break;

		if (test_bit(MT76_RESET, &phy->state)) {
			ret = -EBUSY;
			break;
		}

		txq = ieee80211_next_txq(phy->hw, qid);
		if (!txq)
			break;

		mtxq = (struct mt76_txq *)txq->drv_priv;
		wcid = mtxq->wcid;
		if (wcid && test_bit(MT_WCID_FLAG_PS, &wcid->flags))
			continue;

		if (mtxq->send_bar && mtxq->aggr) {
			struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
			struct ieee80211_sta *sta = txq->sta;
			struct ieee80211_vif *vif = txq->vif;
			u16 agg_ssn = mtxq->agg_ssn;
			u8 tid = txq->tid;

			mtxq->send_bar = false;
			spin_unlock_bh(&hwq->lock);
			ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
			spin_lock_bh(&hwq->lock);
		}

		ret += mt76_txq_send_burst(phy, sq, mtxq);
		ieee80211_return_txq(phy->hw, txq,
				     !skb_queue_empty(&mtxq->retry_q));
	}
	spin_unlock_bh(&hwq->lock);

	return ret;
}

void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
{
	struct mt76_dev *dev = phy->dev;
	struct mt76_sw_queue *sq = &dev->q_tx[qid];
	int len;

	if (qid >= 4)
		return;

	if (sq->swq_queued >= 4)
		return;

	rcu_read_lock();

	do {
		ieee80211_txq_schedule_start(phy->hw, qid);
		len = mt76_txq_schedule_list(phy, qid);
		ieee80211_txq_schedule_end(phy->hw, qid);
	} while (len > 0);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);

void mt76_txq_schedule_all(struct mt76_phy *phy)
{
	int i;

	for (i = 0; i <= MT_TXQ_BK; i++)
		mt76_txq_schedule(phy, i);
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);

void mt76_tx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;

	mt76_txq_schedule_all(&dev->phy);
	if (dev->phy2)
		mt76_txq_schedule_all(dev->phy2);
}

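/* Flag a station's aggregation-enabled tx queues so that a BlockAck Request
 * is sent for each of them when scheduling resumes (only when @send_bar is
 * set).
 */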
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
			 bool send_bar)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_queue *hwq;
		struct mt76_txq *mtxq;

		if (!txq)
			continue;

		mtxq = (struct mt76_txq *)txq->drv_priv;
		hwq = mtxq->swq->q;

		spin_lock_bh(&hwq->lock);
		mtxq->send_bar = mtxq->aggr && send_bar;
		spin_unlock_bh(&hwq->lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);

void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;

	if (!test_bit(MT76_STATE_RUNNING, &phy->state))
		return;

	tasklet_schedule(&dev->tx_tasklet);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);

void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
	struct ieee80211_hw *hw;
	struct mt76_txq *mtxq;
	struct sk_buff *skb;

	if (!txq)
		return;

	mtxq = (struct mt76_txq *)txq->drv_priv;

	while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL) {
		hw = mt76_tx_status_get_hw(dev, skb);
		ieee80211_free_txskb(hw, skb);
	}
}
EXPORT_SYMBOL_GPL(mt76_txq_remove);

void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
	struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;

	skb_queue_head_init(&mtxq->retry_q);

	mtxq->swq = &dev->q_tx[mt76_txq_get_qid(txq)];
}
EXPORT_SYMBOL_GPL(mt76_txq_init);

u8 mt76_ac_to_hwq(u8 ac)
{
	static const u8 wmm_queue_map[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 2,
		[IEEE80211_AC_VO] = 3,
	};

	if (WARN_ON(ac >= IEEE80211_NUM_ACS))
		return 0;

	return wmm_queue_map[ac];
}
EXPORT_SYMBOL_GPL(mt76_ac_to_hwq);