// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include "mt76.h"

static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;
	dma_addr_t addr;
	u8 *txwi;
	int size;

	size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
	txwi = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
	if (!txwi)
		return NULL;

	addr = dma_map_single(dev->dev, txwi, dev->drv->txwi_size,
			      DMA_TO_DEVICE);
	t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
	t->dma_addr = addr;

	return t;
}

static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock_bh(&dev->lock);
	if (!list_empty(&dev->txwi_cache)) {
		t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock_bh(&dev->lock);

	return t;
}

struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

	if (t)
		return t;

	return mt76_alloc_txwi(dev);
}

void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock_bh(&dev->lock);
	list_add(&t->list, &dev->txwi_cache);
	spin_unlock_bh(&dev->lock);
}
EXPORT_SYMBOL_GPL(mt76_put_txwi);

void mt76_tx_free(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	while ((t = __mt76_get_txwi(dev)) != NULL)
		dma_unmap_single(dev->dev, t->dma_addr, dev->drv->txwi_size,
				 DMA_TO_DEVICE);
}

static int
mt76_txq_get_qid(struct ieee80211_txq *txq)
{
	if (!txq->sta)
		return MT_TXQ_BE;

	return txq->ac;
}

static void
mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    !ieee80211_is_data_present(hdr->frame_control))
		return;

	mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
}

void
mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
		    __acquires(&dev->status_list.lock)
{
	__skb_queue_head_init(list);
	spin_lock_bh(&dev->status_list.lock);
	__acquire(&dev->status_list.lock);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_lock);

void
mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
		      __releases(&dev->status_list.unlock)
{
	struct sk_buff *skb;

	spin_unlock_bh(&dev->status_list.lock);
	__release(&dev->status_list.unlock);

	while ((skb = __skb_dequeue(list)) != NULL)
		ieee80211_tx_status(dev->hw, skb);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);

static void
__mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
			  struct sk_buff_head *list)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	u8 done = MT_TX_CB_DMA_DONE | MT_TX_CB_TXS_DONE;

	flags |= cb->flags;
	cb->flags = flags;

	if ((flags & done) != done)
		return;

	__skb_unlink(skb, &dev->status_list);

	/* Tx status can be unreliable. if it fails, mark the frame as ACKed */
	if (flags & MT_TX_CB_TXS_FAILED) {
		ieee80211_tx_info_clear_status(info);
		info->status.rates[0].idx = -1;
		info->flags |= IEEE80211_TX_STAT_ACK;
	}

	__skb_queue_tail(list, skb);
}

void
mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			struct sk_buff_head *list)
{
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE, list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_done);

int
mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
		       struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	int pid;

	if (!wcid)
		return MT_PACKET_ID_NO_ACK;

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		return MT_PACKET_ID_NO_ACK;

	if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
			     IEEE80211_TX_CTL_RATE_CTRL_PROBE)))
		return MT_PACKET_ID_NO_SKB;

	spin_lock_bh(&dev->status_list.lock);

	memset(cb, 0, sizeof(*cb));
	wcid->packet_id = (wcid->packet_id + 1) & MT_PACKET_ID_MASK;
	if (wcid->packet_id == MT_PACKET_ID_NO_ACK ||
	    wcid->packet_id == MT_PACKET_ID_NO_SKB)
		wcid->packet_id = MT_PACKET_ID_FIRST;

	pid = wcid->packet_id;
	cb->wcid = wcid->idx;
	cb->pktid = pid;
	cb->jiffies = jiffies;

	__skb_queue_tail(&dev->status_list, skb);
	spin_unlock_bh(&dev->status_list.lock);

	return pid;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_add);

struct sk_buff *
mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
		       struct sk_buff_head *list)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&dev->status_list, skb, tmp) {
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);

		if (wcid && cb->wcid != wcid->idx)
			continue;

		if (cb->pktid == pktid)
			return skb;

		if (pktid >= 0 && !time_after(jiffies, cb->jiffies +
					      MT_TX_STATUS_SKB_TIMEOUT))
			continue;

		__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED |
						    MT_TX_CB_TXS_DONE, list);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get);

void
mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid, bool flush)
{
	struct sk_buff_head list;

	mt76_tx_status_lock(dev, &list);
	mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
	mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_check);

void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct sk_buff_head list;

	if (!skb->prev) {
		ieee80211_free_txskb(dev->hw, skb);
		return;
	}

	mt76_tx_status_lock(dev, &list);
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
	mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_complete_skb);

void
mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
	struct mt76_wcid *wcid, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct mt76_queue *q;
	int qid = skb_get_queue_mapping(skb);

	if (WARN_ON(qid >= MT_TXQ_PSD)) {
		qid = MT_TXQ_BE;
		skb_set_queue_mapping(skb, qid);
	}

	if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
		ieee80211_get_tx_rates(info->control.vif, sta, skb,
				       info->control.rates, 1);

	if (sta && ieee80211_is_data_qos(hdr->frame_control)) {
		struct ieee80211_txq *txq;
		struct mt76_txq *mtxq;
		u8 tid;

		tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
		txq = sta->txq[tid];
		mtxq = (struct mt76_txq *)txq->drv_priv;

		if (mtxq->aggr)
			mt76_check_agg_ssn(mtxq, skb);
	}

	q = dev->q_tx[qid].q;

	spin_lock_bh(&q->lock);
	dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, sta);
	dev->queue_ops->kick(dev, q);

	if (q->queued > q->ndesc - 8 && !q->stopped) {
		ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
		q->stopped = true;
	}

	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_tx);

static struct sk_buff *
mt76_txq_dequeue(struct mt76_dev *dev, struct mt76_txq *mtxq, bool ps)
{
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	struct sk_buff *skb;

	skb = skb_dequeue(&mtxq->retry_q);
	if (skb) {
		u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;

		if (ps && skb_queue_empty(&mtxq->retry_q))
			ieee80211_sta_set_buffered(txq->sta, tid, false);

		return skb;
	}

	skb = ieee80211_tx_dequeue(dev->hw, txq);
	if (!skb)
		return NULL;

	return skb;
}

static void
mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
		  struct sk_buff *skb, bool last)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
	if (last)
		info->flags |= IEEE80211_TX_STATUS_EOSP |
			       IEEE80211_TX_CTL_REQ_TX_STATUS;

	mt76_skb_set_moredata(skb, !last);
	dev->queue_ops->tx_queue_skb(dev, MT_TXQ_PSD, skb, wcid, sta);
}

void
mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
			     u16 tids, int nframes,
			     enum ieee80211_frame_release_type reason,
			     bool more_data)
{
	struct mt76_dev *dev = hw->priv;
	struct sk_buff *last_skb = NULL;
	struct mt76_queue *hwq = dev->q_tx[MT_TXQ_PSD].q;
	int i;

	spin_lock_bh(&hwq->lock);
	for (i = 0; tids && nframes; i++, tids >>= 1) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;
		struct sk_buff *skb;

		if (!(tids & 1))
			continue;

		do {
			skb = mt76_txq_dequeue(dev, mtxq, true);
			if (!skb)
				break;

			if (mtxq->aggr)
				mt76_check_agg_ssn(mtxq, skb);

			nframes--;
			if (last_skb)
				mt76_queue_ps_skb(dev, sta, last_skb, false);

			last_skb = skb;
		} while (nframes);
	}

	if (last_skb) {
		mt76_queue_ps_skb(dev, sta, last_skb, true);
		dev->queue_ops->kick(dev, hwq);
	} else {
		ieee80211_sta_eosp(sta);
	}

	spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);

static int
mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_sw_queue *sq,
		    struct mt76_txq *mtxq)
{
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	enum mt76_txq_id qid = mt76_txq_get_qid(txq);
	struct mt76_wcid *wcid = mtxq->wcid;
	struct mt76_queue *hwq = sq->q;
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	int n_frames = 1, limit;
	struct ieee80211_tx_rate tx_rate;
	bool ampdu;
	bool probe;
	int idx;

	if (test_bit(MT_WCID_FLAG_PS, &wcid->flags))
		return 0;

	skb = mt76_txq_dequeue(dev, mtxq, false);
	if (!skb)
		return 0;

	info = IEEE80211_SKB_CB(skb);
	if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
		ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
				       info->control.rates, 1);
	tx_rate = info->control.rates[0];

	probe = (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
	ampdu = IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_AMPDU;
	limit = ampdu ? 16 : 3;

	if (ampdu)
		mt76_check_agg_ssn(mtxq, skb);

	idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, txq->sta);

	if (idx < 0)
		return idx;

	do {
		bool cur_ampdu;

		if (probe)
			break;

		if (test_bit(MT76_RESET, &dev->state))
			return -EBUSY;

		skb = mt76_txq_dequeue(dev, mtxq, false);
		if (!skb)
			break;

		info = IEEE80211_SKB_CB(skb);
		cur_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;

		if (ampdu != cur_ampdu ||
		    (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
			skb_queue_tail(&mtxq->retry_q, skb);
			break;
		}

		info->control.rates[0] = tx_rate;

		if (cur_ampdu)
			mt76_check_agg_ssn(mtxq, skb);

		idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid,
						   txq->sta);
		if (idx < 0)
			return idx;

		n_frames++;
	} while (n_frames < limit);

	if (!probe) {
		hwq->entry[idx].qid = sq - dev->q_tx;
		hwq->entry[idx].schedule = true;
		sq->swq_queued++;
	}

	dev->queue_ops->kick(dev, hwq);

	return n_frames;
}

static int
mt76_txq_schedule_list(struct mt76_dev *dev, enum mt76_txq_id qid)
{
	struct mt76_sw_queue *sq = &dev->q_tx[qid];
	struct mt76_queue *hwq = sq->q;
	struct ieee80211_txq *txq;
	struct mt76_txq *mtxq;
	struct mt76_wcid *wcid;
	int ret = 0;

	spin_lock_bh(&hwq->lock);
	while (1) {
		if (sq->swq_queued >= 4)
			break;

		if (test_bit(MT76_RESET, &dev->state)) {
			ret = -EBUSY;
			break;
		}

		txq = ieee80211_next_txq(dev->hw, qid);
		if (!txq)
			break;

		mtxq = (struct mt76_txq *)txq->drv_priv;
		wcid = mtxq->wcid;
		if (wcid && test_bit(MT_WCID_FLAG_PS, &wcid->flags))
			continue;

		if (mtxq->send_bar && mtxq->aggr) {
			struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
			struct ieee80211_sta *sta = txq->sta;
			struct ieee80211_vif *vif = txq->vif;
			u16 agg_ssn = mtxq->agg_ssn;
			u8 tid = txq->tid;

			mtxq->send_bar = false;
			spin_unlock_bh(&hwq->lock);
			ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
			spin_lock_bh(&hwq->lock);
		}

		ret += mt76_txq_send_burst(dev, sq, mtxq);
		ieee80211_return_txq(dev->hw, txq,
				     !skb_queue_empty(&mtxq->retry_q));
	}
	spin_unlock_bh(&hwq->lock);

	return ret;
}

void mt76_txq_schedule(struct mt76_dev *dev, enum mt76_txq_id qid)
{
	struct mt76_sw_queue *sq = &dev->q_tx[qid];
	int len;

	if (qid >= 4)
		return;

	if (sq->swq_queued >= 4)
		return;

	rcu_read_lock();

	do {
		ieee80211_txq_schedule_start(dev->hw, qid);
		len = mt76_txq_schedule_list(dev, qid);
		ieee80211_txq_schedule_end(dev->hw, qid);
	} while (len > 0);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);

void mt76_txq_schedule_all(struct mt76_dev *dev)
{
	int i;

	for (i = 0; i <= MT_TXQ_BK; i++)
		mt76_txq_schedule(dev, i);
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);

void mt76_tx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;

	mt76_txq_schedule_all(dev);
}

void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
			 bool send_bar)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_queue *hwq;
		struct mt76_txq *mtxq;

		if (!txq)
			continue;

		mtxq = (struct mt76_txq *)txq->drv_priv;
		hwq = mtxq->swq->q;

		spin_lock_bh(&hwq->lock);
		mtxq->send_bar = mtxq->aggr && send_bar;
		spin_unlock_bh(&hwq->lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);

void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
	struct mt76_dev *dev = hw->priv;

	if (!test_bit(MT76_STATE_RUNNING, &dev->state))
		return;

	tasklet_schedule(&dev->tx_tasklet);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);

void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
	struct mt76_txq *mtxq;
	struct sk_buff *skb;

	if (!txq)
		return;

	mtxq = (struct mt76_txq *)txq->drv_priv;

	while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL)
		ieee80211_free_txskb(dev->hw, skb);
}
EXPORT_SYMBOL_GPL(mt76_txq_remove);

void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
	struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;

	skb_queue_head_init(&mtxq->retry_q);

	mtxq->swq = &dev->q_tx[mt76_txq_get_qid(txq)];
}
EXPORT_SYMBOL_GPL(mt76_txq_init);

u8 mt76_ac_to_hwq(u8 ac)
{
	static const u8 wmm_queue_map[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 2,
		[IEEE80211_AC_VO] = 3,
	};

	if (WARN_ON(ac >= IEEE80211_NUM_ACS))
		return 0;

	return wmm_queue_map[ac];
}
EXPORT_SYMBOL_GPL(mt76_ac_to_hwq);