// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include "mt76.h"

static int
mt76_txq_get_qid(struct ieee80211_txq *txq)
{
	if (!txq->sta)
		return MT_TXQ_BE;

	return txq->ac;
}

void
mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_txq *txq;
	struct mt76_txq *mtxq;
	u8 tid;

	if (!sta || !ieee80211_is_data_qos(hdr->frame_control) ||
	    !ieee80211_is_data_present(hdr->frame_control))
		return;

	tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
	txq = sta->txq[tid];
	mtxq = (struct mt76_txq *)txq->drv_priv;
	if (!mtxq->aggr)
		return;

	mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
}
EXPORT_SYMBOL_GPL(mt76_tx_check_agg_ssn);

void
mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
		    __acquires(&dev->status_lock)
{
	__skb_queue_head_init(list);
	spin_lock_bh(&dev->status_lock);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_lock);

void
mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
		      __releases(&dev->status_lock)
{
	struct ieee80211_hw *hw;
	struct sk_buff *skb;

	spin_unlock_bh(&dev->status_lock);

	rcu_read_lock();
	while ((skb = __skb_dequeue(list)) != NULL) {
		struct ieee80211_tx_status status = {
			.skb = skb,
			.info = IEEE80211_SKB_CB(skb),
		};
		struct ieee80211_rate_status rs = {};
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
		struct mt76_wcid *wcid;

		wcid = rcu_dereference(dev->wcid[cb->wcid]);
		if (wcid) {
			status.sta = wcid_to_sta(wcid);
			if (status.sta && (wcid->rate.flags || wcid->rate.legacy)) {
				rs.rate_idx = wcid->rate;
				status.rates = &rs;
				status.n_rates = 1;
			} else {
				status.n_rates = 0;
			}
		}

		hw = mt76_tx_status_get_hw(dev, skb);
		spin_lock_bh(&dev->rx_lock);
		ieee80211_tx_status_ext(hw, &status);
		spin_unlock_bh(&dev->rx_lock);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);

static void
__mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
			  struct sk_buff_head *list)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	u8 done = MT_TX_CB_DMA_DONE | MT_TX_CB_TXS_DONE;

	flags |= cb->flags;
	cb->flags = flags;

	if ((flags & done) != done)
		return;

	/* Tx status can be unreliable. if it fails, mark the frame as ACKed */
	if (flags & MT_TX_CB_TXS_FAILED) {
		info->status.rates[0].count = 0;
		info->status.rates[0].idx = -1;
		info->flags |= IEEE80211_TX_STAT_ACK;
	}

	__skb_queue_tail(list, skb);
}

void
mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			struct sk_buff_head *list)
{
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE, list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_done);

int
mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
		       struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	int pid;

	memset(cb, 0, sizeof(*cb));

	if (!wcid || !rcu_access_pointer(dev->wcid[wcid->idx]))
		return MT_PACKET_ID_NO_ACK;

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		return MT_PACKET_ID_NO_ACK;

	if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
			     IEEE80211_TX_CTL_RATE_CTRL_PROBE))) {
		if (mtk_wed_device_active(&dev->mmio.wed) &&
		    ((info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) ||
		     ieee80211_is_data(hdr->frame_control)))
			return MT_PACKET_ID_WED;

		return MT_PACKET_ID_NO_SKB;
	}

	spin_lock_bh(&dev->status_lock);

	pid = idr_alloc(&wcid->pktid, skb, MT_PACKET_ID_FIRST,
			MT_PACKET_ID_MASK, GFP_ATOMIC);
	if (pid < 0) {
		pid = MT_PACKET_ID_NO_SKB;
		goto out;
	}

	cb->wcid = wcid->idx;
	cb->pktid = pid;

	if (list_empty(&wcid->list))
		list_add_tail(&wcid->list, &dev->wcid_list);

out:
	spin_unlock_bh(&dev->status_lock);

	return pid;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_add);

struct sk_buff *
mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
		       struct sk_buff_head *list)
{
	struct sk_buff *skb;
	int id;

	lockdep_assert_held(&dev->status_lock);

	skb = idr_remove(&wcid->pktid, pktid);
	if (skb)
		goto out;

	/* look for stale entries in the wcid idr queue */
	idr_for_each_entry(&wcid->pktid, skb, id) {
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);

		if (pktid >= 0) {
			if (!(cb->flags & MT_TX_CB_DMA_DONE))
				continue;

			if (time_is_after_jiffies(cb->jiffies +
						  MT_TX_STATUS_SKB_TIMEOUT))
				continue;
		}

		/* It has been too long since DMA_DONE, time out this packet
		 * and stop waiting for TXS callback.
		 */
		idr_remove(&wcid->pktid, cb->pktid);
		__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED |
						    MT_TX_CB_TXS_DONE, list);
	}

out:
	if (idr_is_empty(&wcid->pktid))
		list_del_init(&wcid->list);

	return skb;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get);

void
mt76_tx_status_check(struct mt76_dev *dev, bool flush)
{
	struct mt76_wcid *wcid, *tmp;
	struct sk_buff_head list;

	mt76_tx_status_lock(dev, &list);
	list_for_each_entry_safe(wcid, tmp, &dev->wcid_list, list)
		mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
	mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_check);

static void
mt76_tx_check_non_aql(struct mt76_dev *dev, struct mt76_wcid *wcid,
		      struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	int pending;

	if (!wcid || info->tx_time_est)
		return;

	pending = atomic_dec_return(&wcid->non_aql_packets);
	if (pending < 0)
		atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
}

void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb,
			    struct list_head *free_list)
{
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	struct ieee80211_tx_status status = {
		.skb = skb,
		.free_list = free_list,
	};
	struct mt76_wcid *wcid = NULL;
	struct ieee80211_hw *hw;
	struct sk_buff_head list;

	rcu_read_lock();

	if (wcid_idx < ARRAY_SIZE(dev->wcid))
		wcid = rcu_dereference(dev->wcid[wcid_idx]);

	mt76_tx_check_non_aql(dev, wcid, skb);

#ifdef CONFIG_NL80211_TESTMODE
	if (mt76_is_testmode_skb(dev, skb, &hw)) {
		struct mt76_phy *phy = hw->priv;

		if (skb == phy->test.tx_skb)
			phy->test.tx_done++;
		if (phy->test.tx_queued == phy->test.tx_done)
			wake_up(&dev->tx_wait);

		dev_kfree_skb_any(skb);
		goto out;
	}
#endif

	if (cb->pktid < MT_PACKET_ID_FIRST) {
		struct ieee80211_rate_status rs = {};

		hw = mt76_tx_status_get_hw(dev, skb);
		status.sta = wcid_to_sta(wcid);
		if (status.sta && (wcid->rate.flags || wcid->rate.legacy)) {
			rs.rate_idx = wcid->rate;
			status.rates = &rs;
			status.n_rates = 1;
		}
		spin_lock_bh(&dev->rx_lock);
		ieee80211_tx_status_ext(hw, &status);
		spin_unlock_bh(&dev->rx_lock);
		goto out;
	}

	mt76_tx_status_lock(dev, &list);
	cb->jiffies = jiffies;
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
	mt76_tx_status_unlock(dev, &list);

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(__mt76_tx_complete_skb);

static int
__mt76_tx_queue_skb(struct mt76_phy *phy, int qid, struct sk_buff *skb,
		    struct mt76_wcid *wcid, struct ieee80211_sta *sta,
		    bool *stop)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_queue *q = phy->q_tx[qid];
	struct mt76_dev *dev = phy->dev;
	bool non_aql;
	int pending;
	int idx;

	non_aql = !info->tx_time_est;
	idx = dev->queue_ops->tx_queue_skb(dev, q, qid, skb, wcid, sta);
	if (idx < 0 || !sta)
		return idx;

	wcid = (struct mt76_wcid *)sta->drv_priv;
	q->entry[idx].wcid = wcid->idx;

	if (!non_aql)
		return idx;

	pending = atomic_inc_return(&wcid->non_aql_packets);
	if (stop && pending >= MT_MAX_NON_AQL_PKT)
		*stop = true;

	return idx;
}

void
mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
	struct mt76_wcid *wcid, struct sk_buff *skb)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct mt76_queue *q;
	int qid = skb_get_queue_mapping(skb);

	if (mt76_testmode_enabled(phy)) {
		ieee80211_free_txskb(phy->hw, skb);
		return;
	}

	if (WARN_ON(qid >= MT_TXQ_PSD)) {
		qid = MT_TXQ_BE;
		skb_set_queue_mapping(skb, qid);
	}

	if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) &&
	    !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
	    !ieee80211_is_data(hdr->frame_control) &&
	    !ieee80211_is_bufferable_mmpdu(skb)) {
		qid = MT_TXQ_PSD;
	}

	if (wcid && !(wcid->tx_info & MT_WCID_TX_INFO_SET))
		ieee80211_get_tx_rates(info->control.vif, sta, skb,
				       info->control.rates, 1);

	info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx);
	q = phy->q_tx[qid];

	spin_lock_bh(&q->lock);
	__mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL);
	dev->queue_ops->kick(dev, q);
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_tx);

static struct sk_buff *
mt76_txq_dequeue(struct mt76_phy *phy, struct mt76_txq *mtxq)
{
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;

	skb = ieee80211_tx_dequeue(phy->hw, txq);
	if (!skb)
		return NULL;

	info = IEEE80211_SKB_CB(skb);
	info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx);

	return skb;
}

static void
mt76_queue_ps_skb(struct mt76_phy *phy, struct ieee80211_sta *sta,
		  struct sk_buff *skb, bool last)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
	if (last)
		info->flags |= IEEE80211_TX_STATUS_EOSP |
			       IEEE80211_TX_CTL_REQ_TX_STATUS;

	mt76_skb_set_moredata(skb, !last);
	__mt76_tx_queue_skb(phy, MT_TXQ_PSD, skb, wcid, sta, NULL);
}

void
mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
			     u16 tids, int nframes,
			     enum ieee80211_frame_release_type reason,
			     bool more_data)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct sk_buff *last_skb = NULL;
	struct mt76_queue *hwq = phy->q_tx[MT_TXQ_PSD];
	int i;

	spin_lock_bh(&hwq->lock);
	for (i = 0; tids && nframes; i++, tids >>= 1) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;
		struct sk_buff *skb;

		if (!(tids & 1))
			continue;

		do {
			skb = mt76_txq_dequeue(phy, mtxq);
			if (!skb)
				break;

			nframes--;
			if (last_skb)
				mt76_queue_ps_skb(phy, sta, last_skb, false);

			last_skb = skb;
		} while (nframes);
	}

	if (last_skb) {
		mt76_queue_ps_skb(phy, sta, last_skb, true);
		dev->queue_ops->kick(dev, hwq);
	} else {
		ieee80211_sta_eosp(sta);
	}

	spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);

static bool
mt76_txq_stopped(struct mt76_queue *q)
{
	return q->stopped || q->blocked ||
	       q->queued + MT_TXQ_FREE_THR >= q->ndesc;
}

static int
mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
		    struct mt76_txq *mtxq, struct mt76_wcid *wcid)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	enum mt76_txq_id qid = mt76_txq_get_qid(txq);
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	int n_frames = 1;
	bool stop = false;
	int idx;

	if (test_bit(MT_WCID_FLAG_PS, &wcid->flags))
		return 0;

	if (atomic_read(&wcid->non_aql_packets) >= MT_MAX_NON_AQL_PKT)
		return 0;

	skb = mt76_txq_dequeue(phy, mtxq);
	if (!skb)
		return 0;

	info = IEEE80211_SKB_CB(skb);
	if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
		ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
				       info->control.rates, 1);

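	/* Queue the frame dequeued above, then keep bursting from this txq
	 * until the hardware queue fills up, too many non-AQL frames are in
	 * flight, or a reset is pending.
	 */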
	spin_lock(&q->lock);
	idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
	spin_unlock(&q->lock);
	if (idx < 0)
		return idx;

	do {
		if (test_bit(MT76_RESET, &phy->state))
			return -EBUSY;

		if (stop || mt76_txq_stopped(q))
			break;

		skb = mt76_txq_dequeue(phy, mtxq);
		if (!skb)
			break;

		info = IEEE80211_SKB_CB(skb);
		if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
			ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
					       info->control.rates, 1);

		spin_lock(&q->lock);
		idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
		spin_unlock(&q->lock);
		if (idx < 0)
			break;

		n_frames++;
	} while (1);

	spin_lock(&q->lock);
	dev->queue_ops->kick(dev, q);
	spin_unlock(&q->lock);

	return n_frames;
}

static int
mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
{
	struct mt76_queue *q = phy->q_tx[qid];
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_txq *txq;
	struct mt76_txq *mtxq;
	struct mt76_wcid *wcid;
	int ret = 0;

	while (1) {
		int n_frames = 0;

		if (test_bit(MT76_RESET, &phy->state))
			return -EBUSY;

		if (dev->queue_ops->tx_cleanup &&
		    q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) {
			dev->queue_ops->tx_cleanup(dev, q, false);
		}

		txq = ieee80211_next_txq(phy->hw, qid);
		if (!txq)
			break;

		mtxq = (struct mt76_txq *)txq->drv_priv;
		wcid = rcu_dereference(dev->wcid[mtxq->wcid]);
		if (!wcid || test_bit(MT_WCID_FLAG_PS, &wcid->flags))
			continue;

		if (mtxq->send_bar && mtxq->aggr) {
			struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
			struct ieee80211_sta *sta = txq->sta;
			struct ieee80211_vif *vif = txq->vif;
			u16 agg_ssn = mtxq->agg_ssn;
			u8 tid = txq->tid;

			mtxq->send_bar = false;
			ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
		}

		if (!mt76_txq_stopped(q))
			n_frames = mt76_txq_send_burst(phy, q, mtxq, wcid);

		ieee80211_return_txq(phy->hw, txq, false);

		if (unlikely(n_frames < 0))
			return n_frames;

		ret += n_frames;
	}

	return ret;
}

void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
{
	int len;

	if (qid >= 4)
		return;

	local_bh_disable();
	rcu_read_lock();

	do {
		ieee80211_txq_schedule_start(phy->hw, qid);
		len = mt76_txq_schedule_list(phy, qid);
		ieee80211_txq_schedule_end(phy->hw, qid);
	} while (len > 0);

	rcu_read_unlock();
	local_bh_enable();
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);

void mt76_txq_schedule_all(struct mt76_phy *phy)
{
	int i;

	for (i = 0; i <= MT_TXQ_BK; i++)
		mt76_txq_schedule(phy, i);
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);

void mt76_tx_worker_run(struct mt76_dev *dev)
{
	struct mt76_phy *phy;
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
		phy = dev->phys[i];
		if (!phy)
			continue;

		mt76_txq_schedule_all(phy);
	}

#ifdef CONFIG_NL80211_TESTMODE
	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
		phy = dev->phys[i];
		if (!phy || !phy->test.tx_pending)
			continue;

		mt76_testmode_tx_pending(phy);
	}
#endif
}
EXPORT_SYMBOL_GPL(mt76_tx_worker_run);

void mt76_tx_worker(struct mt76_worker *w)
{
	struct mt76_dev *dev = container_of(w, struct mt76_dev, tx_worker);

	mt76_tx_worker_run(dev);
}

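/* When send_bar is set, flag the station's aggregating txqs so that the
 * scheduler (mt76_txq_schedule_list()) emits a BAR before queueing further
 * frames for them.
 */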
void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta,
			 bool send_bar)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_queue *hwq;
		struct mt76_txq *mtxq;

		if (!txq)
			continue;

		hwq = phy->q_tx[mt76_txq_get_qid(txq)];
		mtxq = (struct mt76_txq *)txq->drv_priv;

		spin_lock_bh(&hwq->lock);
		mtxq->send_bar = mtxq->aggr && send_bar;
		spin_unlock_bh(&hwq->lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);

void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;

	if (!test_bit(MT76_STATE_RUNNING, &phy->state))
		return;

	mt76_worker_schedule(&dev->tx_worker);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);

u8 mt76_ac_to_hwq(u8 ac)
{
	static const u8 wmm_queue_map[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 2,
		[IEEE80211_AC_VO] = 3,
	};

	if (WARN_ON(ac >= IEEE80211_NUM_ACS))
		return 0;

	return wmm_queue_map[ac];
}
EXPORT_SYMBOL_GPL(mt76_ac_to_hwq);

int mt76_skb_adjust_pad(struct sk_buff *skb, int pad)
{
	struct sk_buff *iter, *last = skb;

	/* First packet of a A-MSDU burst keeps track of the whole burst
	 * length, need to update length of it and the last packet.
	 */
	skb_walk_frags(skb, iter) {
		last = iter;
		if (!iter->next) {
			skb->data_len += pad;
			skb->len += pad;
			break;
		}
	}

	if (skb_pad(last, pad))
		return -ENOMEM;

	__skb_put(last, pad);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_skb_adjust_pad);

void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
			    struct mt76_queue_entry *e)
{
	if (e->skb)
		dev->drv->tx_complete_skb(dev, e);

	spin_lock_bh(&q->lock);
	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_queue_tx_complete);

void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
{
	struct mt76_phy *phy = &dev->phy;
	struct mt76_queue *q = phy->q_tx[0];

	if (blocked == q->blocked)
		return;

	q->blocked = blocked;

	phy = dev->phys[MT_BAND1];
	if (phy) {
		q = phy->q_tx[0];
		q->blocked = blocked;
	}
	phy = dev->phys[MT_BAND2];
	if (phy) {
		q = phy->q_tx[0];
		q->blocked = blocked;
	}

	if (!blocked)
		mt76_worker_schedule(&dev->tx_worker);
}
EXPORT_SYMBOL_GPL(__mt76_set_tx_blocked);

int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
{
	int token;

	spin_lock_bh(&dev->token_lock);

	token = idr_alloc(&dev->token, *ptxwi, 0, dev->token_size, GFP_ATOMIC);
	if (token >= 0)
		dev->token_count++;

#ifdef CONFIG_NET_MEDIATEK_SOC_WED
	if (mtk_wed_device_active(&dev->mmio.wed) &&
	    token >= dev->mmio.wed.wlan.token_start)
		dev->wed_token_count++;
#endif

	if (dev->token_count >= dev->token_size - MT76_TOKEN_FREE_THR)
		__mt76_set_tx_blocked(dev, true);

	spin_unlock_bh(&dev->token_lock);

	return token;
}
EXPORT_SYMBOL_GPL(mt76_token_consume);

int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
			  struct mt76_txwi_cache *t, dma_addr_t phys)
{
	int token;

	spin_lock_bh(&dev->rx_token_lock);
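	/* Allocate an rx token id and record which buffer pointer and DMA
	 * address it refers to.
	 */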
	token = idr_alloc(&dev->rx_token, t, 0, dev->rx_token_size,
			  GFP_ATOMIC);
	if (token >= 0) {
		t->ptr = ptr;
		t->dma_addr = phys;
	}
	spin_unlock_bh(&dev->rx_token_lock);

	return token;
}
EXPORT_SYMBOL_GPL(mt76_rx_token_consume);

struct mt76_txwi_cache *
mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
{
	struct mt76_txwi_cache *txwi;

	spin_lock_bh(&dev->token_lock);

	txwi = idr_remove(&dev->token, token);
	if (txwi) {
		dev->token_count--;

#ifdef CONFIG_NET_MEDIATEK_SOC_WED
		if (mtk_wed_device_active(&dev->mmio.wed) &&
		    token >= dev->mmio.wed.wlan.token_start &&
		    --dev->wed_token_count == 0)
			wake_up(&dev->tx_wait);
#endif
	}

	if (dev->token_count < dev->token_size - MT76_TOKEN_FREE_THR &&
	    dev->phy.q_tx[0]->blocked)
		*wake = true;

	spin_unlock_bh(&dev->token_lock);

	return txwi;
}
EXPORT_SYMBOL_GPL(mt76_token_release);

struct mt76_txwi_cache *
mt76_rx_token_release(struct mt76_dev *dev, int token)
{
	struct mt76_txwi_cache *t;

	spin_lock_bh(&dev->rx_token_lock);
	t = idr_remove(&dev->rx_token, token);
	spin_unlock_bh(&dev->rx_token_lock);

	return t;
}
EXPORT_SYMBOL_GPL(mt76_rx_token_release);