// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include "mt76.h"

static int
mt76_txq_get_qid(struct ieee80211_txq *txq)
{
	if (!txq->sta)
		return MT_TXQ_BE;

	return txq->ac;
}

void
mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_txq *txq;
	struct mt76_txq *mtxq;
	u8 tid;

	if (!sta || !ieee80211_is_data_qos(hdr->frame_control) ||
	    !ieee80211_is_data_present(hdr->frame_control))
		return;

	tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
	txq = sta->txq[tid];
	mtxq = (struct mt76_txq *)txq->drv_priv;
	if (!mtxq->aggr)
		return;

	mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
}
EXPORT_SYMBOL_GPL(mt76_tx_check_agg_ssn);

void
mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
	__acquires(&dev->status_lock)
{
	__skb_queue_head_init(list);
	spin_lock_bh(&dev->status_lock);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_lock);

void
mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
	__releases(&dev->status_lock)
{
	struct ieee80211_hw *hw;
	struct sk_buff *skb;

	spin_unlock_bh(&dev->status_lock);

	rcu_read_lock();
	while ((skb = __skb_dequeue(list)) != NULL) {
		struct ieee80211_tx_status status = {
			.skb = skb,
			.info = IEEE80211_SKB_CB(skb),
		};
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
		struct mt76_wcid *wcid;

		wcid = rcu_dereference(dev->wcid[cb->wcid]);
		if (wcid) {
			status.sta = wcid_to_sta(wcid);
			status.rates = NULL;
			status.n_rates = 0;
		}

		hw = mt76_tx_status_get_hw(dev, skb);
		ieee80211_tx_status_ext(hw, &status);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);

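/*
 * TX status tracking: frames registered via mt76_tx_status_skb_add() are
 * kept in the per-wcid pktid IDR and are only reported to mac80211 once
 * both MT_TX_CB_DMA_DONE and MT_TX_CB_TXS_DONE have been recorded here.
 * A typical driver-side flow is roughly (sketch, error handling omitted):
 *
 *	pid = mt76_tx_status_skb_add(dev, wcid, skb);
 *	// ... hardware reports TX status for pid ...
 *	mt76_tx_status_lock(dev, &list);
 *	skb = mt76_tx_status_skb_get(dev, wcid, pid, &list);
 *	if (skb)	// fill info->status from the report, then:
 *		mt76_tx_status_skb_done(dev, skb, &list);
 *	mt76_tx_status_unlock(dev, &list);
 */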
static void
__mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
			  struct sk_buff_head *list)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	u8 done = MT_TX_CB_DMA_DONE | MT_TX_CB_TXS_DONE;

	flags |= cb->flags;
	cb->flags = flags;

	if ((flags & done) != done)
		return;

	/* Tx status can be unreliable. If it fails, mark the frame as ACKed */
	if (flags & MT_TX_CB_TXS_FAILED) {
		info->status.rates[0].count = 0;
		info->status.rates[0].idx = -1;
		info->flags |= IEEE80211_TX_STAT_ACK;
	}

	__skb_queue_tail(list, skb);
}

void
mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			struct sk_buff_head *list)
{
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE, list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_done);

int
mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
		       struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	int pid;

	memset(cb, 0, sizeof(*cb));

	if (!wcid || !rcu_access_pointer(dev->wcid[wcid->idx]))
		return MT_PACKET_ID_NO_ACK;

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		return MT_PACKET_ID_NO_ACK;

	if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
			     IEEE80211_TX_CTL_RATE_CTRL_PROBE)))
		return MT_PACKET_ID_NO_SKB;

	spin_lock_bh(&dev->status_lock);

	pid = idr_alloc(&wcid->pktid, skb, MT_PACKET_ID_FIRST,
			MT_PACKET_ID_MASK, GFP_ATOMIC);
	if (pid < 0) {
		pid = MT_PACKET_ID_NO_SKB;
		goto out;
	}

	cb->wcid = wcid->idx;
	cb->pktid = pid;

	if (list_empty(&wcid->list))
		list_add_tail(&wcid->list, &dev->wcid_list);

out:
	spin_unlock_bh(&dev->status_lock);

	return pid;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_add);

struct sk_buff *
mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
		       struct sk_buff_head *list)
{
	struct sk_buff *skb;
	int id;

	lockdep_assert_held(&dev->status_lock);

	skb = idr_remove(&wcid->pktid, pktid);
	if (skb)
		goto out;

	/* look for stale entries in the wcid idr queue */
	idr_for_each_entry(&wcid->pktid, skb, id) {
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);

		if (pktid >= 0) {
			if (!(cb->flags & MT_TX_CB_DMA_DONE))
				continue;

			if (time_is_after_jiffies(cb->jiffies +
						  MT_TX_STATUS_SKB_TIMEOUT))
				continue;
		}

		/* It has been too long since DMA_DONE, time out this packet
		 * and stop waiting for TXS callback.
		 */
		idr_remove(&wcid->pktid, cb->pktid);
		__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED |
						    MT_TX_CB_TXS_DONE, list);
	}

out:
	if (idr_is_empty(&wcid->pktid))
		list_del_init(&wcid->list);

	return skb;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get);

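/*
 * Walk all wcids with pending status entries and complete stale ones.
 * With flush set, pktid is forced to -1, so every pending entry is
 * completed immediately instead of waiting for the DMA-done and timeout
 * checks in mt76_tx_status_skb_get().
 */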
void
mt76_tx_status_check(struct mt76_dev *dev, bool flush)
{
	struct mt76_wcid *wcid, *tmp;
	struct sk_buff_head list;

	mt76_tx_status_lock(dev, &list);
	list_for_each_entry_safe(wcid, tmp, &dev->wcid_list, list)
		mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
	mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_check);

static void
mt76_tx_check_non_aql(struct mt76_dev *dev, struct mt76_wcid *wcid,
		      struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	int pending;

	if (!wcid || info->tx_time_est)
		return;

	pending = atomic_dec_return(&wcid->non_aql_packets);
	if (pending < 0)
		atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
}

void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb,
			    struct list_head *free_list)
{
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	struct ieee80211_tx_status status = {
		.skb = skb,
		.free_list = free_list,
	};
	struct mt76_wcid *wcid = NULL;
	struct ieee80211_hw *hw;
	struct sk_buff_head list;

	rcu_read_lock();

	if (wcid_idx < ARRAY_SIZE(dev->wcid))
		wcid = rcu_dereference(dev->wcid[wcid_idx]);

	mt76_tx_check_non_aql(dev, wcid, skb);

#ifdef CONFIG_NL80211_TESTMODE
	if (mt76_is_testmode_skb(dev, skb, &hw)) {
		struct mt76_phy *phy = hw->priv;

		if (skb == phy->test.tx_skb)
			phy->test.tx_done++;
		if (phy->test.tx_queued == phy->test.tx_done)
			wake_up(&dev->tx_wait);

		dev_kfree_skb_any(skb);
		goto out;
	}
#endif

	if (cb->pktid < MT_PACKET_ID_FIRST) {
		hw = mt76_tx_status_get_hw(dev, skb);
		status.sta = wcid_to_sta(wcid);
		ieee80211_tx_status_ext(hw, &status);
		goto out;
	}

	mt76_tx_status_lock(dev, &list);
	cb->jiffies = jiffies;
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
	mt76_tx_status_unlock(dev, &list);

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(__mt76_tx_complete_skb);

static int
__mt76_tx_queue_skb(struct mt76_phy *phy, int qid, struct sk_buff *skb,
		    struct mt76_wcid *wcid, struct ieee80211_sta *sta,
		    bool *stop)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_queue *q = phy->q_tx[qid];
	struct mt76_dev *dev = phy->dev;
	bool non_aql;
	int pending;
	int idx;

	non_aql = !info->tx_time_est;
	idx = dev->queue_ops->tx_queue_skb(dev, q, qid, skb, wcid, sta);
	if (idx < 0 || !sta)
		return idx;

	wcid = (struct mt76_wcid *)sta->drv_priv;
	q->entry[idx].wcid = wcid->idx;

	if (!non_aql)
		return idx;

	pending = atomic_inc_return(&wcid->non_aql_packets);
	if (stop && pending >= MT_MAX_NON_AQL_PKT)
		*stop = true;

	return idx;
}

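/*
 * Queue a single frame directly on the hardware queue for its AC and kick
 * the queue. Non-bufferable management frames are redirected to MT_TXQ_PSD
 * on devices that set MT_DRV_HW_MGMT_TXQ.
 */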
void
mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
	struct mt76_wcid *wcid, struct sk_buff *skb)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct mt76_queue *q;
	int qid = skb_get_queue_mapping(skb);

	if (mt76_testmode_enabled(phy)) {
		ieee80211_free_txskb(phy->hw, skb);
		return;
	}

	if (WARN_ON(qid >= MT_TXQ_PSD)) {
		qid = MT_TXQ_BE;
		skb_set_queue_mapping(skb, qid);
	}

	if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) &&
	    !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
	    !ieee80211_is_data(hdr->frame_control) &&
	    !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
		qid = MT_TXQ_PSD;
	}

	if (wcid && !(wcid->tx_info & MT_WCID_TX_INFO_SET))
		ieee80211_get_tx_rates(info->control.vif, sta, skb,
				       info->control.rates, 1);

	info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx);
	q = phy->q_tx[qid];

	spin_lock_bh(&q->lock);
	__mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL);
	dev->queue_ops->kick(dev, q);
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_tx);

static struct sk_buff *
mt76_txq_dequeue(struct mt76_phy *phy, struct mt76_txq *mtxq)
{
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;

	skb = ieee80211_tx_dequeue(phy->hw, txq);
	if (!skb)
		return NULL;

	info = IEEE80211_SKB_CB(skb);
	info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx);

	return skb;
}

static void
mt76_queue_ps_skb(struct mt76_phy *phy, struct ieee80211_sta *sta,
		  struct sk_buff *skb, bool last)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
	if (last)
		info->flags |= IEEE80211_TX_STATUS_EOSP |
			       IEEE80211_TX_CTL_REQ_TX_STATUS;

	mt76_skb_set_moredata(skb, !last);
	__mt76_tx_queue_skb(phy, MT_TXQ_PSD, skb, wcid, sta, NULL);
}

void
mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
			     u16 tids, int nframes,
			     enum ieee80211_frame_release_type reason,
			     bool more_data)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct sk_buff *last_skb = NULL;
	struct mt76_queue *hwq = phy->q_tx[MT_TXQ_PSD];
	int i;

	spin_lock_bh(&hwq->lock);
	for (i = 0; tids && nframes; i++, tids >>= 1) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;
		struct sk_buff *skb;

		if (!(tids & 1))
			continue;

		do {
			skb = mt76_txq_dequeue(phy, mtxq);
			if (!skb)
				break;

			nframes--;
			if (last_skb)
				mt76_queue_ps_skb(phy, sta, last_skb, false);

			last_skb = skb;
		} while (nframes);
	}

	if (last_skb) {
		mt76_queue_ps_skb(phy, sta, last_skb, true);
		dev->queue_ops->kick(dev, hwq);
	} else {
		ieee80211_sta_eosp(sta);
	}

	spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);

static bool
mt76_txq_stopped(struct mt76_queue *q)
{
	return q->stopped || q->blocked ||
	       q->queued + MT_TXQ_FREE_THR >= q->ndesc;
}

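/*
 * Pull frames from a mac80211 txq and queue them on the hardware queue
 * until the queue fills up, a reset is in progress, or the non-AQL packet
 * limit stops the burst. Returns the number of frames queued (0 if nothing
 * could be sent) or a negative error code.
 */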
static int
mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
		    struct mt76_txq *mtxq, struct mt76_wcid *wcid)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	enum mt76_txq_id qid = mt76_txq_get_qid(txq);
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	int n_frames = 1;
	bool stop = false;
	int idx;

	if (test_bit(MT_WCID_FLAG_PS, &wcid->flags))
		return 0;

	if (atomic_read(&wcid->non_aql_packets) >= MT_MAX_NON_AQL_PKT)
		return 0;

	skb = mt76_txq_dequeue(phy, mtxq);
	if (!skb)
		return 0;

	info = IEEE80211_SKB_CB(skb);
	if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
		ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
				       info->control.rates, 1);

	spin_lock(&q->lock);
	idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
	spin_unlock(&q->lock);
	if (idx < 0)
		return idx;

	do {
		if (test_bit(MT76_RESET, &phy->state))
			return -EBUSY;

		if (stop || mt76_txq_stopped(q))
			break;

		skb = mt76_txq_dequeue(phy, mtxq);
		if (!skb)
			break;

		info = IEEE80211_SKB_CB(skb);
		if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
			ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
					       info->control.rates, 1);

		spin_lock(&q->lock);
		idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
		spin_unlock(&q->lock);
		if (idx < 0)
			break;

		n_frames++;
	} while (1);

	spin_lock(&q->lock);
	dev->queue_ops->kick(dev, q);
	spin_unlock(&q->lock);

	return n_frames;
}

static int
mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
{
	struct mt76_queue *q = phy->q_tx[qid];
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_txq *txq;
	struct mt76_txq *mtxq;
	struct mt76_wcid *wcid;
	int ret = 0;

	while (1) {
		int n_frames = 0;

		if (test_bit(MT76_RESET, &phy->state))
			return -EBUSY;

		if (dev->queue_ops->tx_cleanup &&
		    q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) {
			dev->queue_ops->tx_cleanup(dev, q, false);
		}

		txq = ieee80211_next_txq(phy->hw, qid);
		if (!txq)
			break;

		mtxq = (struct mt76_txq *)txq->drv_priv;
		wcid = rcu_dereference(dev->wcid[mtxq->wcid]);
		if (!wcid || test_bit(MT_WCID_FLAG_PS, &wcid->flags))
			continue;

		if (mtxq->send_bar && mtxq->aggr) {
			struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
			struct ieee80211_sta *sta = txq->sta;
			struct ieee80211_vif *vif = txq->vif;
			u16 agg_ssn = mtxq->agg_ssn;
			u8 tid = txq->tid;

			mtxq->send_bar = false;
			ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
		}

		if (!mt76_txq_stopped(q))
			n_frames = mt76_txq_send_burst(phy, q, mtxq, wcid);

		ieee80211_return_txq(phy->hw, txq, false);

		if (unlikely(n_frames < 0))
			return n_frames;

		ret += n_frames;
	}

	return ret;
}

void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
{
	int len;

	if (qid >= 4)
		return;

	local_bh_disable();
	rcu_read_lock();

	do {
		ieee80211_txq_schedule_start(phy->hw, qid);
		len = mt76_txq_schedule_list(phy, qid);
		ieee80211_txq_schedule_end(phy->hw, qid);
	} while (len > 0);

	rcu_read_unlock();
	local_bh_enable();
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);

void mt76_txq_schedule_all(struct mt76_phy *phy)
{
	int i;

	for (i = 0; i <= MT_TXQ_BK; i++)
		mt76_txq_schedule(phy, i);
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);

void mt76_tx_worker_run(struct mt76_dev *dev)
{
	struct mt76_phy *phy;
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
		phy = dev->phys[i];
		if (!phy)
			continue;

		mt76_txq_schedule_all(phy);
	}

#ifdef CONFIG_NL80211_TESTMODE
	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
		phy = dev->phys[i];
		if (!phy || !phy->test.tx_pending)
			continue;

		mt76_testmode_tx_pending(phy);
	}
#endif
}
EXPORT_SYMBOL_GPL(mt76_tx_worker_run);

void mt76_tx_worker(struct mt76_worker *w)
{
	struct mt76_dev *dev = container_of(w, struct mt76_dev, tx_worker);

	mt76_tx_worker_run(dev);
}

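/*
 * Mark the station's txqs so that a BlockAckReq is sent for active
 * aggregation sessions once scheduling resumes (see the send_bar handling
 * in mt76_txq_schedule_list()). Typically used on powersave or similar
 * state transitions.
 */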
void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta,
			 bool send_bar)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_queue *hwq;
		struct mt76_txq *mtxq;

		if (!txq)
			continue;

		hwq = phy->q_tx[mt76_txq_get_qid(txq)];
		mtxq = (struct mt76_txq *)txq->drv_priv;

		spin_lock_bh(&hwq->lock);
		mtxq->send_bar = mtxq->aggr && send_bar;
		spin_unlock_bh(&hwq->lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);

void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;

	if (!test_bit(MT76_STATE_RUNNING, &phy->state))
		return;

	mt76_worker_schedule(&dev->tx_worker);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);

u8 mt76_ac_to_hwq(u8 ac)
{
	static const u8 wmm_queue_map[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 2,
		[IEEE80211_AC_VO] = 3,
	};

	if (WARN_ON(ac >= IEEE80211_NUM_ACS))
		return 0;

	return wmm_queue_map[ac];
}
EXPORT_SYMBOL_GPL(mt76_ac_to_hwq);

int mt76_skb_adjust_pad(struct sk_buff *skb, int pad)
{
	struct sk_buff *iter, *last = skb;

	/* First packet of an A-MSDU burst keeps track of the whole burst
	 * length, so the length of both it and the last packet needs to be
	 * updated.
	 */
	skb_walk_frags(skb, iter) {
		last = iter;
		if (!iter->next) {
			skb->data_len += pad;
			skb->len += pad;
			break;
		}
	}

	if (skb_pad(last, pad))
		return -ENOMEM;

	__skb_put(last, pad);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_skb_adjust_pad);

void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
			    struct mt76_queue_entry *e)
{
	if (e->skb)
		dev->drv->tx_complete_skb(dev, e);

	spin_lock_bh(&q->lock);
	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_queue_tx_complete);

void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
{
	struct mt76_phy *phy = &dev->phy;
	struct mt76_queue *q = phy->q_tx[0];

	if (blocked == q->blocked)
		return;

	q->blocked = blocked;

	phy = dev->phys[MT_BAND1];
	if (phy) {
		q = phy->q_tx[0];
		q->blocked = blocked;
	}
	phy = dev->phys[MT_BAND2];
	if (phy) {
		q = phy->q_tx[0];
		q->blocked = blocked;
	}

	if (!blocked)
		mt76_worker_schedule(&dev->tx_worker);
}
EXPORT_SYMBOL_GPL(__mt76_set_tx_blocked);

int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
{
	int token;

	spin_lock_bh(&dev->token_lock);

	token = idr_alloc(&dev->token, *ptxwi, 0, dev->token_size, GFP_ATOMIC);
	if (token >= 0)
		dev->token_count++;

#ifdef CONFIG_NET_MEDIATEK_SOC_WED
	if (mtk_wed_device_active(&dev->mmio.wed) &&
	    token >= dev->mmio.wed.wlan.token_start)
		dev->wed_token_count++;
#endif

	if (dev->token_count >= dev->token_size - MT76_TOKEN_FREE_THR)
		__mt76_set_tx_blocked(dev, true);

	spin_unlock_bh(&dev->token_lock);

	return token;
}
EXPORT_SYMBOL_GPL(mt76_token_consume);

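/*
 * Release a TX token previously allocated by mt76_token_consume(). Sets
 * *wake when enough tokens have been freed and the queues were blocked,
 * so the caller can unblock tx again.
 */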
struct mt76_txwi_cache *
mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
{
	struct mt76_txwi_cache *txwi;

	spin_lock_bh(&dev->token_lock);

	txwi = idr_remove(&dev->token, token);
	if (txwi) {
		dev->token_count--;

#ifdef CONFIG_NET_MEDIATEK_SOC_WED
		if (mtk_wed_device_active(&dev->mmio.wed) &&
		    token >= dev->mmio.wed.wlan.token_start &&
		    --dev->wed_token_count == 0)
			wake_up(&dev->tx_wait);
#endif
	}

	if (dev->token_count < dev->token_size - MT76_TOKEN_FREE_THR &&
	    dev->phy.q_tx[0]->blocked)
		*wake = true;

	spin_unlock_bh(&dev->token_lock);

	return txwi;
}
EXPORT_SYMBOL_GPL(mt76_token_release);