// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include "mt76.h"

static int
mt76_txq_get_qid(struct ieee80211_txq *txq)
{
	if (!txq->sta)
		return MT_TXQ_BE;

	return txq->ac;
}

void
mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_txq *txq;
	struct mt76_txq *mtxq;
	u8 tid;

	if (!sta || !ieee80211_is_data_qos(hdr->frame_control) ||
	    !ieee80211_is_data_present(hdr->frame_control))
		return;

	tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
	txq = sta->txq[tid];
	mtxq = (struct mt76_txq *)txq->drv_priv;
	if (!mtxq->aggr)
		return;

	mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
}
EXPORT_SYMBOL_GPL(mt76_tx_check_agg_ssn);

void
mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
	__acquires(&dev->status_list.lock)
{
	__skb_queue_head_init(list);
	spin_lock_bh(&dev->status_list.lock);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_lock);

void
mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
	__releases(&dev->status_list.lock)
{
	struct ieee80211_hw *hw;
	struct sk_buff *skb;

	spin_unlock_bh(&dev->status_list.lock);

	while ((skb = __skb_dequeue(list)) != NULL) {
		hw = mt76_tx_status_get_hw(dev, skb);
		ieee80211_tx_status(hw, skb);
	}
}
EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);
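
/* Record the given completion flag(s) for the frame; once both
 * MT_TX_CB_DMA_DONE and MT_TX_CB_TXS_DONE have been reported, unlink the
 * frame from the status list and queue it on the caller's list for
 * delivery to mac80211.
 */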
static void
__mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
			  struct sk_buff_head *list)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	u8 done = MT_TX_CB_DMA_DONE | MT_TX_CB_TXS_DONE;

	flags |= cb->flags;
	cb->flags = flags;

	if ((flags & done) != done)
		return;

	__skb_unlink(skb, &dev->status_list);

	/* Tx status can be unreliable. If it fails, mark the frame as ACKed. */
	if (flags & MT_TX_CB_TXS_FAILED) {
		ieee80211_tx_info_clear_status(info);
		info->status.rates[0].idx = -1;
		info->flags |= IEEE80211_TX_STAT_ACK;
	}

	__skb_queue_tail(list, skb);
}

void
mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			struct sk_buff_head *list)
{
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE, list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_done);

int
mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
		       struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	int pid;

	if (!wcid)
		return MT_PACKET_ID_NO_ACK;

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		return MT_PACKET_ID_NO_ACK;

	if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
			     IEEE80211_TX_CTL_RATE_CTRL_PROBE)))
		return MT_PACKET_ID_NO_SKB;

	spin_lock_bh(&dev->status_list.lock);

	memset(cb, 0, sizeof(*cb));
	wcid->packet_id = (wcid->packet_id + 1) & MT_PACKET_ID_MASK;
	if (wcid->packet_id == MT_PACKET_ID_NO_ACK ||
	    wcid->packet_id == MT_PACKET_ID_NO_SKB)
		wcid->packet_id = MT_PACKET_ID_FIRST;

	pid = wcid->packet_id;
	cb->wcid = wcid->idx;
	cb->pktid = pid;
	cb->jiffies = jiffies;

	__skb_queue_tail(&dev->status_list, skb);
	spin_unlock_bh(&dev->status_list.lock);

	return pid;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_add);

struct sk_buff *
mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
		       struct sk_buff_head *list)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&dev->status_list, skb, tmp) {
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);

		if (wcid && cb->wcid != wcid->idx)
			continue;

		if (cb->pktid == pktid)
			return skb;

		if (pktid >= 0 && !time_after(jiffies, cb->jiffies +
					      MT_TX_STATUS_SKB_TIMEOUT))
			continue;

		__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED |
						    MT_TX_CB_TXS_DONE, list);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get);
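
/* Reap pending tx status frames: with flush set, complete every frame still
 * on the status list (pktid -1 matches all entries); otherwise only frames
 * whose status report has timed out are completed.
 */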
void
mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid, bool flush)
{
	struct sk_buff_head list;

	mt76_tx_status_lock(dev, &list);
	mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
	mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_check);

static void
mt76_tx_check_non_aql(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_wcid *wcid;
	int pending;

	if (info->tx_time_est)
		return;

	if (wcid_idx >= ARRAY_SIZE(dev->wcid))
		return;

	rcu_read_lock();

	wcid = rcu_dereference(dev->wcid[wcid_idx]);
	if (wcid) {
		pending = atomic_dec_return(&wcid->non_aql_packets);
		if (pending < 0)
			atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
	}

	rcu_read_unlock();
}

void mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb)
{
	struct ieee80211_hw *hw;
	struct sk_buff_head list;

	mt76_tx_check_non_aql(dev, wcid_idx, skb);

#ifdef CONFIG_NL80211_TESTMODE
	if (mt76_is_testmode_skb(dev, skb, &hw)) {
		struct mt76_phy *phy = hw->priv;

		if (skb == phy->test.tx_skb)
			phy->test.tx_done++;
		if (phy->test.tx_queued == phy->test.tx_done)
			wake_up(&dev->tx_wait);

		ieee80211_free_txskb(hw, skb);
		return;
	}
#endif

	if (!skb->prev) {
		hw = mt76_tx_status_get_hw(dev, skb);
		ieee80211_free_txskb(hw, skb);
		return;
	}

	mt76_tx_status_lock(dev, &list);
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
	mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_complete_skb);

static int
__mt76_tx_queue_skb(struct mt76_phy *phy, int qid, struct sk_buff *skb,
		    struct mt76_wcid *wcid, struct ieee80211_sta *sta,
		    bool *stop)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_queue *q = phy->q_tx[qid];
	struct mt76_dev *dev = phy->dev;
	bool non_aql;
	int pending;
	int idx;

	non_aql = !info->tx_time_est;
	idx = dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta);
	if (idx < 0 || !sta || !non_aql)
		return idx;

	wcid = (struct mt76_wcid *)sta->drv_priv;
	q->entry[idx].wcid = wcid->idx;
	pending = atomic_inc_return(&wcid->non_aql_packets);
	if (stop && pending >= MT_MAX_NON_AQL_PKT)
		*stop = true;

	return idx;
}

void
mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
	struct mt76_wcid *wcid, struct sk_buff *skb)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct mt76_queue *q;
	int qid = skb_get_queue_mapping(skb);
	bool ext_phy = phy != &dev->phy;

	if (mt76_testmode_enabled(phy)) {
		ieee80211_free_txskb(phy->hw, skb);
		return;
	}

	if (WARN_ON(qid >= MT_TXQ_PSD)) {
		qid = MT_TXQ_BE;
		skb_set_queue_mapping(skb, qid);
	}

	if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) &&
	    !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
	    !ieee80211_is_data(hdr->frame_control) &&
	    !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
		qid = MT_TXQ_PSD;
		skb_set_queue_mapping(skb, qid);
	}

	if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
		ieee80211_get_tx_rates(info->control.vif, sta, skb,
				       info->control.rates, 1);

	if (ext_phy)
		info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;

	q = phy->q_tx[qid];

	spin_lock_bh(&q->lock);
	__mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL);
	dev->queue_ops->kick(dev, q);
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_tx);
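
/* Fetch the next frame for this txq from mac80211; frames transmitted on the
 * secondary PHY are tagged for the extension PHY hardware queue.
 */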
static struct sk_buff *
mt76_txq_dequeue(struct mt76_phy *phy, struct mt76_txq *mtxq)
{
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	struct ieee80211_tx_info *info;
	bool ext_phy = phy != &phy->dev->phy;
	struct sk_buff *skb;

	skb = ieee80211_tx_dequeue(phy->hw, txq);
	if (!skb)
		return NULL;

	info = IEEE80211_SKB_CB(skb);
	if (ext_phy)
		info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;

	return skb;
}

static void
mt76_queue_ps_skb(struct mt76_phy *phy, struct ieee80211_sta *sta,
		  struct sk_buff *skb, bool last)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
	if (last)
		info->flags |= IEEE80211_TX_STATUS_EOSP |
			       IEEE80211_TX_CTL_REQ_TX_STATUS;

	mt76_skb_set_moredata(skb, !last);
	__mt76_tx_queue_skb(phy, MT_TXQ_PSD, skb, wcid, sta, NULL);
}

void
mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
			     u16 tids, int nframes,
			     enum ieee80211_frame_release_type reason,
			     bool more_data)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct sk_buff *last_skb = NULL;
	struct mt76_queue *hwq = phy->q_tx[MT_TXQ_PSD];
	int i;

	spin_lock_bh(&hwq->lock);
	for (i = 0; tids && nframes; i++, tids >>= 1) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;
		struct sk_buff *skb;

		if (!(tids & 1))
			continue;

		do {
			skb = mt76_txq_dequeue(phy, mtxq);
			if (!skb)
				break;

			nframes--;
			if (last_skb)
				mt76_queue_ps_skb(phy, sta, last_skb, false);

			last_skb = skb;
		} while (nframes);
	}

	if (last_skb) {
		mt76_queue_ps_skb(phy, sta, last_skb, true);
		dev->queue_ops->kick(dev, hwq);
	} else {
		ieee80211_sta_eosp(sta);
	}

	spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);

static bool
mt76_txq_stopped(struct mt76_queue *q)
{
	return q->stopped || q->blocked ||
	       q->queued + MT_TXQ_FREE_THR >= q->ndesc;
}

static int
mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
		    struct mt76_txq *mtxq)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	enum mt76_txq_id qid = mt76_txq_get_qid(txq);
	struct mt76_wcid *wcid = mtxq->wcid;
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	int n_frames = 1;
	bool stop = false;
	int idx;

	if (test_bit(MT_WCID_FLAG_PS, &wcid->flags))
		return 0;

	if (atomic_read(&wcid->non_aql_packets) >= MT_MAX_NON_AQL_PKT)
		return 0;

	skb = mt76_txq_dequeue(phy, mtxq);
	if (!skb)
		return 0;

	info = IEEE80211_SKB_CB(skb);
	if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
		ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
				       info->control.rates, 1);

	idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
	if (idx < 0)
		return idx;
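
	/* Keep pushing frames from the same txq until the hardware queue
	 * fills up, the PHY enters power save or reset, the station's
	 * non-AQL packet budget runs out, or the txq runs dry.
	 */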
	do {
		if (test_bit(MT76_STATE_PM, &phy->state) ||
		    test_bit(MT76_RESET, &phy->state))
			return -EBUSY;

		if (stop || mt76_txq_stopped(q))
			break;

		skb = mt76_txq_dequeue(phy, mtxq);
		if (!skb)
			break;

		info = IEEE80211_SKB_CB(skb);
		if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
			ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
					       info->control.rates, 1);

		idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
		if (idx < 0)
			break;

		n_frames++;
	} while (1);

	dev->queue_ops->kick(dev, q);

	return n_frames;
}

static int
mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
{
	struct mt76_queue *q = phy->q_tx[qid];
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_txq *txq;
	struct mt76_txq *mtxq;
	struct mt76_wcid *wcid;
	int ret = 0;

	while (1) {
		if (test_bit(MT76_STATE_PM, &phy->state) ||
		    test_bit(MT76_RESET, &phy->state)) {
			ret = -EBUSY;
			break;
		}

		if (dev->queue_ops->tx_cleanup &&
		    q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) {
			dev->queue_ops->tx_cleanup(dev, q, false);
		}

		txq = ieee80211_next_txq(phy->hw, qid);
		if (!txq)
			break;

		mtxq = (struct mt76_txq *)txq->drv_priv;
		wcid = mtxq->wcid;
		if (wcid && test_bit(MT_WCID_FLAG_PS, &wcid->flags))
			continue;

		spin_lock_bh(&q->lock);

		if (mtxq->send_bar && mtxq->aggr) {
			struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
			struct ieee80211_sta *sta = txq->sta;
			struct ieee80211_vif *vif = txq->vif;
			u16 agg_ssn = mtxq->agg_ssn;
			u8 tid = txq->tid;

			mtxq->send_bar = false;
			spin_unlock_bh(&q->lock);
			ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
			spin_lock_bh(&q->lock);
		}

		if (!mt76_txq_stopped(q))
			ret += mt76_txq_send_burst(phy, q, mtxq);

		spin_unlock_bh(&q->lock);

		ieee80211_return_txq(phy->hw, txq, false);
	}

	return ret;
}

void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
{
	int len;

	if (qid >= 4)
		return;

	rcu_read_lock();

	do {
		ieee80211_txq_schedule_start(phy->hw, qid);
		len = mt76_txq_schedule_list(phy, qid);
		ieee80211_txq_schedule_end(phy->hw, qid);
	} while (len > 0);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);

void mt76_txq_schedule_all(struct mt76_phy *phy)
{
	int i;

	for (i = 0; i <= MT_TXQ_BK; i++)
		mt76_txq_schedule(phy, i);
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);

void mt76_tx_worker(struct mt76_worker *w)
{
	struct mt76_dev *dev = container_of(w, struct mt76_dev, tx_worker);

	mt76_txq_schedule_all(&dev->phy);
	if (dev->phy2)
		mt76_txq_schedule_all(dev->phy2);

#ifdef CONFIG_NL80211_TESTMODE
	if (dev->phy.test.tx_pending)
		mt76_testmode_tx_pending(&dev->phy);
	if (dev->phy2 && dev->phy2->test.tx_pending)
		mt76_testmode_tx_pending(dev->phy2);
#endif
}

void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta,
			 bool send_bar)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_queue *hwq;
		struct mt76_txq *mtxq;

		if (!txq)
			continue;

		hwq = phy->q_tx[mt76_txq_get_qid(txq)];
		mtxq = (struct mt76_txq *)txq->drv_priv;

		spin_lock_bh(&hwq->lock);
		mtxq->send_bar = mtxq->aggr && send_bar;
		spin_unlock_bh(&hwq->lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);

void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;

	if (!test_bit(MT76_STATE_RUNNING, &phy->state))
		return;
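
	/* No frames are dequeued here; wake the tx worker, which drains the
	 * txqs via mt76_txq_schedule_all().
	 */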
	mt76_worker_schedule(&dev->tx_worker);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);

u8 mt76_ac_to_hwq(u8 ac)
{
	static const u8 wmm_queue_map[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 2,
		[IEEE80211_AC_VO] = 3,
	};

	if (WARN_ON(ac >= IEEE80211_NUM_ACS))
		return 0;

	return wmm_queue_map[ac];
}
EXPORT_SYMBOL_GPL(mt76_ac_to_hwq);

int mt76_skb_adjust_pad(struct sk_buff *skb, int pad)
{
	struct sk_buff *iter, *last = skb;

	/* The first packet of an A-MSDU burst keeps track of the whole burst
	 * length, so update its length as well as that of the last packet.
	 */
	skb_walk_frags(skb, iter) {
		last = iter;
		if (!iter->next) {
			skb->data_len += pad;
			skb->len += pad;
			break;
		}
	}

	if (skb_pad(last, pad))
		return -ENOMEM;

	__skb_put(last, pad);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_skb_adjust_pad);

void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
			    struct mt76_queue_entry *e)
{
	if (e->skb)
		dev->drv->tx_complete_skb(dev, e);

	spin_lock_bh(&q->lock);
	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_queue_tx_complete);