// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include "mt76.h"

static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
        struct mt76_txwi_cache *t;
        dma_addr_t addr;
        u8 *txwi;
        int size;

        size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
        txwi = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
        if (!txwi)
                return NULL;

        addr = dma_map_single(dev->dev, txwi, dev->drv->txwi_size,
                              DMA_TO_DEVICE);
        t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
        t->dma_addr = addr;

        return t;
}

static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
        struct mt76_txwi_cache *t = NULL;

        spin_lock_bh(&dev->lock);
        if (!list_empty(&dev->txwi_cache)) {
                t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
                                     list);
                list_del(&t->list);
        }
        spin_unlock_bh(&dev->lock);

        return t;
}

struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
        struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

        if (t)
                return t;

        return mt76_alloc_txwi(dev);
}

void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
        if (!t)
                return;

        spin_lock_bh(&dev->lock);
        list_add(&t->list, &dev->txwi_cache);
        spin_unlock_bh(&dev->lock);
}
EXPORT_SYMBOL_GPL(mt76_put_txwi);

void mt76_tx_free(struct mt76_dev *dev)
{
        struct mt76_txwi_cache *t;

        while ((t = __mt76_get_txwi(dev)) != NULL)
                dma_unmap_single(dev->dev, t->dma_addr, dev->drv->txwi_size,
                                 DMA_TO_DEVICE);
}
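
/*
 * Illustrative sketch only (not used by the core): the bus code (DMA/USB)
 * is the consumer of the txwi cache above and roughly follows this pattern:
 * grab a pre-mapped txwi before queueing a frame and return it to the cache
 * once the hardware has consumed the descriptor at t->dma_addr.
 */
static inline void mt76_txwi_recycle_sketch(struct mt76_dev *dev)
{
        struct mt76_txwi_cache *t;

        t = mt76_get_txwi(dev);
        if (!t)
                return;

        /* ... fill the descriptor behind t->dma_addr and queue the frame,
         * then, on TX completion, put the entry back for reuse:
         */
        mt76_put_txwi(dev, t);
}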
static int
mt76_txq_get_qid(struct ieee80211_txq *txq)
{
        if (!txq->sta)
                return MT_TXQ_BE;

        return txq->ac;
}

static void
mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

        if (!ieee80211_is_data_qos(hdr->frame_control) ||
            !ieee80211_is_data_present(hdr->frame_control))
                return;

        mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
}

void
mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
                   __acquires(&dev->status_list.lock)
{
        __skb_queue_head_init(list);
        spin_lock_bh(&dev->status_list.lock);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_lock);

void
mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
                      __releases(&dev->status_list.lock)
{
        struct ieee80211_hw *hw;
        struct sk_buff *skb;

        spin_unlock_bh(&dev->status_list.lock);

        while ((skb = __skb_dequeue(list)) != NULL) {
                hw = mt76_tx_status_get_hw(dev, skb);
                ieee80211_tx_status(hw, skb);
        }
}
EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);

static void
__mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
                          struct sk_buff_head *list)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
        u8 done = MT_TX_CB_DMA_DONE | MT_TX_CB_TXS_DONE;

        flags |= cb->flags;
        cb->flags = flags;

        if ((flags & done) != done)
                return;

        __skb_unlink(skb, &dev->status_list);

        /* Tx status can be unreliable. If it fails, mark the frame as ACKed */
        if (flags & MT_TX_CB_TXS_FAILED) {
                ieee80211_tx_info_clear_status(info);
                info->status.rates[0].idx = -1;
                info->flags |= IEEE80211_TX_STAT_ACK;
        }

        __skb_queue_tail(list, skb);
}

void
mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
                        struct sk_buff_head *list)
{
        __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE, list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_done);

int
mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
                       struct sk_buff *skb)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
        int pid;

        if (!wcid)
                return MT_PACKET_ID_NO_ACK;

        if (info->flags & IEEE80211_TX_CTL_NO_ACK)
                return MT_PACKET_ID_NO_ACK;

        if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
                             IEEE80211_TX_CTL_RATE_CTRL_PROBE)))
                return MT_PACKET_ID_NO_SKB;

        spin_lock_bh(&dev->status_list.lock);

        memset(cb, 0, sizeof(*cb));
        wcid->packet_id = (wcid->packet_id + 1) & MT_PACKET_ID_MASK;
        if (wcid->packet_id == MT_PACKET_ID_NO_ACK ||
            wcid->packet_id == MT_PACKET_ID_NO_SKB)
                wcid->packet_id = MT_PACKET_ID_FIRST;

        pid = wcid->packet_id;
        cb->wcid = wcid->idx;
        cb->pktid = pid;
        cb->jiffies = jiffies;

        __skb_queue_tail(&dev->status_list, skb);
        spin_unlock_bh(&dev->status_list.lock);

        return pid;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_add);

struct sk_buff *
mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
                       struct sk_buff_head *list)
{
        struct sk_buff *skb, *tmp;

        skb_queue_walk_safe(&dev->status_list, skb, tmp) {
                struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);

                if (wcid && cb->wcid != wcid->idx)
                        continue;

                if (cb->pktid == pktid)
                        return skb;

                if (pktid >= 0 && !time_after(jiffies, cb->jiffies +
                                              MT_TX_STATUS_SKB_TIMEOUT))
                        continue;

                __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED |
                                                    MT_TX_CB_TXS_DONE, list);
        }

        return NULL;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get);

void
mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid, bool flush)
{
        struct sk_buff_head list;

        mt76_tx_status_lock(dev, &list);
        mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
        mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_check);

void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb)
{
        struct ieee80211_hw *hw;
        struct sk_buff_head list;

        if (!skb->prev) {
                hw = mt76_tx_status_get_hw(dev, skb);
                ieee80211_free_txskb(hw, skb);
                return;
        }

        mt76_tx_status_lock(dev, &list);
        __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
        mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_complete_skb);
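
/*
 * Illustrative sketch only: how a chip driver's TX-status event handler is
 * expected to use the status_list helpers above.  The wcid/pktid pair is
 * assumed to be decoded from a hardware TX-status event; real drivers also
 * fill in rate and ACK information on the skb before completing it.
 */
static inline void
mt76_tx_status_usage_sketch(struct mt76_dev *dev, struct mt76_wcid *wcid,
                            int pktid)
{
        struct sk_buff_head list;
        struct sk_buff *skb;

        mt76_tx_status_lock(dev, &list);
        skb = mt76_tx_status_skb_get(dev, wcid, pktid, &list);
        if (skb)
                mt76_tx_status_skb_done(dev, skb, &list);
        mt76_tx_status_unlock(dev, &list);
}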
void
mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
        struct mt76_wcid *wcid, struct sk_buff *skb)
{
        struct mt76_dev *dev = phy->dev;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct mt76_queue *q;
        int qid = skb_get_queue_mapping(skb);
        bool ext_phy = phy != &dev->phy;

        if (WARN_ON(qid >= MT_TXQ_PSD)) {
                qid = MT_TXQ_BE;
                skb_set_queue_mapping(skb, qid);
        }

        if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
                ieee80211_get_tx_rates(info->control.vif, sta, skb,
                                       info->control.rates, 1);

        if (sta && ieee80211_is_data_qos(hdr->frame_control)) {
                struct ieee80211_txq *txq;
                struct mt76_txq *mtxq;
                u8 tid;

                tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
                txq = sta->txq[tid];
                mtxq = (struct mt76_txq *)txq->drv_priv;

                if (mtxq->aggr)
                        mt76_check_agg_ssn(mtxq, skb);
        }

        if (ext_phy)
                info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;

        q = dev->q_tx[qid].q;

        spin_lock_bh(&q->lock);
        dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, sta);
        dev->queue_ops->kick(dev, q);

        if (q->queued > q->ndesc - 8 && !q->stopped) {
                ieee80211_stop_queue(phy->hw, skb_get_queue_mapping(skb));
                q->stopped = true;
        }

        spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_tx);
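
/*
 * Illustrative sketch only: a minimal mac80211 .tx driver callback feeding
 * frames into mt76_tx().  It assumes the driver embeds struct mt76_wcid at
 * the start of its per-station drv_priv (as the mt76 chip drivers do) and
 * simply drops frames without a station; real drivers instead fall back to
 * a per-device "global" wcid for such frames.
 */
static inline void
mt76_driver_tx_sketch(struct ieee80211_hw *hw,
                      struct ieee80211_tx_control *control,
                      struct sk_buff *skb)
{
        struct mt76_phy *phy = hw->priv;
        struct ieee80211_sta *sta = control->sta;

        if (!sta) {
                ieee80211_free_txskb(hw, skb);
                return;
        }

        mt76_tx(phy, sta, (struct mt76_wcid *)sta->drv_priv, skb);
}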
static struct sk_buff *
mt76_txq_dequeue(struct mt76_phy *phy, struct mt76_txq *mtxq, bool ps)
{
        struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
        struct ieee80211_tx_info *info;
        bool ext_phy = phy != &phy->dev->phy;
        struct sk_buff *skb;

        skb = skb_dequeue(&mtxq->retry_q);
        if (skb) {
                u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;

                if (ps && skb_queue_empty(&mtxq->retry_q))
                        ieee80211_sta_set_buffered(txq->sta, tid, false);

                return skb;
        }

        skb = ieee80211_tx_dequeue(phy->hw, txq);
        if (!skb)
                return NULL;

        info = IEEE80211_SKB_CB(skb);
        if (ext_phy)
                info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;

        return skb;
}

static void
mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
                  struct sk_buff *skb, bool last)
{
        struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

        info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
        if (last)
                info->flags |= IEEE80211_TX_STATUS_EOSP |
                               IEEE80211_TX_CTL_REQ_TX_STATUS;

        mt76_skb_set_moredata(skb, !last);
        dev->queue_ops->tx_queue_skb(dev, MT_TXQ_PSD, skb, wcid, sta);
}

void
mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
                             u16 tids, int nframes,
                             enum ieee80211_frame_release_type reason,
                             bool more_data)
{
        struct mt76_phy *phy = hw->priv;
        struct mt76_dev *dev = phy->dev;
        struct sk_buff *last_skb = NULL;
        struct mt76_queue *hwq = dev->q_tx[MT_TXQ_PSD].q;
        int i;

        spin_lock_bh(&hwq->lock);
        for (i = 0; tids && nframes; i++, tids >>= 1) {
                struct ieee80211_txq *txq = sta->txq[i];
                struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;
                struct sk_buff *skb;

                if (!(tids & 1))
                        continue;

                do {
                        skb = mt76_txq_dequeue(phy, mtxq, true);
                        if (!skb)
                                break;

                        if (mtxq->aggr)
                                mt76_check_agg_ssn(mtxq, skb);

                        nframes--;
                        if (last_skb)
                                mt76_queue_ps_skb(dev, sta, last_skb, false);

                        last_skb = skb;
                } while (nframes);
        }

        if (last_skb) {
                mt76_queue_ps_skb(dev, sta, last_skb, true);
                dev->queue_ops->kick(dev, hwq);
        } else {
                ieee80211_sta_eosp(sta);
        }

        spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);

static int
mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_sw_queue *sq,
                    struct mt76_txq *mtxq)
{
        struct mt76_dev *dev = phy->dev;
        struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
        enum mt76_txq_id qid = mt76_txq_get_qid(txq);
        struct mt76_wcid *wcid = mtxq->wcid;
        struct mt76_queue *hwq = sq->q;
        struct ieee80211_tx_info *info;
        struct sk_buff *skb;
        int n_frames = 1, limit;
        struct ieee80211_tx_rate tx_rate;
        bool ampdu;
        bool probe;
        int idx;

        if (test_bit(MT_WCID_FLAG_PS, &wcid->flags))
                return 0;

        skb = mt76_txq_dequeue(phy, mtxq, false);
        if (!skb)
                return 0;

        info = IEEE80211_SKB_CB(skb);
        if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
                ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
                                       info->control.rates, 1);
        tx_rate = info->control.rates[0];

        probe = (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
        ampdu = IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_AMPDU;
        limit = ampdu ? 16 : 3;

        if (ampdu)
                mt76_check_agg_ssn(mtxq, skb);

        idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, txq->sta);

        if (idx < 0)
                return idx;

        do {
                bool cur_ampdu;

                if (probe)
                        break;

                if (test_bit(MT76_RESET, &phy->state))
                        return -EBUSY;

                skb = mt76_txq_dequeue(phy, mtxq, false);
                if (!skb)
                        break;

                info = IEEE80211_SKB_CB(skb);
                cur_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;

                if (ampdu != cur_ampdu ||
                    (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
                        skb_queue_tail(&mtxq->retry_q, skb);
                        break;
                }

                info->control.rates[0] = tx_rate;

                if (cur_ampdu)
                        mt76_check_agg_ssn(mtxq, skb);

                idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid,
                                                   txq->sta);
                if (idx < 0)
                        return idx;

                n_frames++;
        } while (n_frames < limit);

        if (!probe) {
                hwq->entry[idx].qid = sq - dev->q_tx;
                hwq->entry[idx].schedule = true;
                sq->swq_queued++;
        }

        dev->queue_ops->kick(dev, hwq);

        return n_frames;
}

static int
mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
{
        struct mt76_dev *dev = phy->dev;
        struct mt76_sw_queue *sq = &dev->q_tx[qid];
        struct mt76_queue *hwq = sq->q;
        struct ieee80211_txq *txq;
        struct mt76_txq *mtxq;
        struct mt76_wcid *wcid;
        int ret = 0;

        spin_lock_bh(&hwq->lock);
        while (1) {
                if (sq->swq_queued >= 4)
                        break;

                if (test_bit(MT76_RESET, &phy->state)) {
                        ret = -EBUSY;
                        break;
                }

                txq = ieee80211_next_txq(phy->hw, qid);
                if (!txq)
                        break;

                mtxq = (struct mt76_txq *)txq->drv_priv;
                wcid = mtxq->wcid;
                if (wcid && test_bit(MT_WCID_FLAG_PS, &wcid->flags))
                        continue;

                if (mtxq->send_bar && mtxq->aggr) {
                        struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
                        struct ieee80211_sta *sta = txq->sta;
                        struct ieee80211_vif *vif = txq->vif;
                        u16 agg_ssn = mtxq->agg_ssn;
                        u8 tid = txq->tid;

                        mtxq->send_bar = false;
                        spin_unlock_bh(&hwq->lock);
                        ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
                        spin_lock_bh(&hwq->lock);
                }

                ret += mt76_txq_send_burst(phy, sq, mtxq);
                ieee80211_return_txq(phy->hw, txq,
                                     !skb_queue_empty(&mtxq->retry_q));
        }
        spin_unlock_bh(&hwq->lock);

        return ret;
}

void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
{
        struct mt76_dev *dev = phy->dev;
        struct mt76_sw_queue *sq = &dev->q_tx[qid];
        int len;

        if (qid >= 4)
                return;

        if (sq->swq_queued >= 4)
                return;

        rcu_read_lock();

        do {
                ieee80211_txq_schedule_start(phy->hw, qid);
                len = mt76_txq_schedule_list(phy, qid);
                ieee80211_txq_schedule_end(phy->hw, qid);
        } while (len > 0);

        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);

void mt76_txq_schedule_all(struct mt76_phy *phy)
{
        int i;

        for (i = 0; i <= MT_TXQ_BK; i++)
                mt76_txq_schedule(phy, i);
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);

void mt76_tx_tasklet(unsigned long data)
{
        struct mt76_dev *dev = (struct mt76_dev *)data;

        mt76_txq_schedule_all(&dev->phy);
        if (dev->phy2)
                mt76_txq_schedule_all(dev->phy2);
}
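
/*
 * Note: the TX tasklet above is owned by the bus code.  A minimal sketch of
 * the expected setup (done in the DMA/USB init paths, not in this file) is:
 *
 *      tasklet_init(&dev->tx_tasklet, mt76_tx_tasklet, (unsigned long)dev);
 *
 * so that TX completions and mt76_wake_tx_queue() below can kick the
 * scheduler.
 */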
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
                         bool send_bar)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
                struct ieee80211_txq *txq = sta->txq[i];
                struct mt76_queue *hwq;
                struct mt76_txq *mtxq;

                if (!txq)
                        continue;

                mtxq = (struct mt76_txq *)txq->drv_priv;
                hwq = mtxq->swq->q;

                spin_lock_bh(&hwq->lock);
                mtxq->send_bar = mtxq->aggr && send_bar;
                spin_unlock_bh(&hwq->lock);
        }
}
EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);

void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
        struct mt76_phy *phy = hw->priv;
        struct mt76_dev *dev = phy->dev;

        if (!test_bit(MT76_STATE_RUNNING, &phy->state))
                return;

        tasklet_schedule(&dev->tx_tasklet);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);

void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
        struct ieee80211_hw *hw;
        struct mt76_txq *mtxq;
        struct sk_buff *skb;

        if (!txq)
                return;

        mtxq = (struct mt76_txq *)txq->drv_priv;

        while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL) {
                hw = mt76_tx_status_get_hw(dev, skb);
                ieee80211_free_txskb(hw, skb);
        }
}
EXPORT_SYMBOL_GPL(mt76_txq_remove);

void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
        struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;

        skb_queue_head_init(&mtxq->retry_q);

        mtxq->swq = &dev->q_tx[mt76_txq_get_qid(txq)];
}
EXPORT_SYMBOL_GPL(mt76_txq_init);

u8 mt76_ac_to_hwq(u8 ac)
{
        static const u8 wmm_queue_map[] = {
                [IEEE80211_AC_BE] = 0,
                [IEEE80211_AC_BK] = 1,
                [IEEE80211_AC_VI] = 2,
                [IEEE80211_AC_VO] = 3,
        };

        if (WARN_ON(ac >= IEEE80211_NUM_ACS))
                return 0;

        return wmm_queue_map[ac];
}
EXPORT_SYMBOL_GPL(mt76_ac_to_hwq);
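
/*
 * Illustrative note: chip drivers typically wire the exported helpers in
 * this file straight into their struct ieee80211_ops, e.g.:
 *
 *      .wake_tx_queue           = mt76_wake_tx_queue,
 *      .release_buffered_frames = mt76_release_buffered_frames,
 *      .tx                      = <driver-specific wrapper around mt76_tx()>,
 */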