/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "mt76.h"

static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;
	dma_addr_t addr;
	int size;

	size = (sizeof(*t) + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
	t = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
	if (!t)
		return NULL;

	addr = dma_map_single(dev->dev, &t->txwi, sizeof(t->txwi),
			      DMA_TO_DEVICE);
	t->dma_addr = addr;

	return t;
}

static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock_bh(&dev->lock);
	if (!list_empty(&dev->txwi_cache)) {
		t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock_bh(&dev->lock);

	return t;
}

struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

	if (t)
		return t;

	return mt76_alloc_txwi(dev);
}

void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock_bh(&dev->lock);
	list_add(&t->list, &dev->txwi_cache);
	spin_unlock_bh(&dev->lock);
}

void mt76_tx_free(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	while ((t = __mt76_get_txwi(dev)) != NULL)
		dma_unmap_single(dev->dev, t->dma_addr, sizeof(t->txwi),
				 DMA_TO_DEVICE);
}

static int
mt76_txq_get_qid(struct ieee80211_txq *txq)
{
	if (!txq->sta)
		return MT_TXQ_BE;

	return txq->ac;
}

static void
mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    !ieee80211_is_data_present(hdr->frame_control))
		return;

	mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
}

void
mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
	__acquires(&dev->status_list.lock)
{
	__skb_queue_head_init(list);
	spin_lock_bh(&dev->status_list.lock);
	__acquire(&dev->status_list.lock);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_lock);

void
mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
	__releases(&dev->status_list.lock)
{
	struct sk_buff *skb;

	spin_unlock_bh(&dev->status_list.lock);
	__release(&dev->status_list.lock);

	while ((skb = __skb_dequeue(list)) != NULL)
		ieee80211_tx_status(dev->hw, skb);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);

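/*
 * A status skb stays on dev->status_list until both the DMA completion
 * (MT_TX_CB_DMA_DONE) and the tx status report (MT_TX_CB_TXS_DONE) have
 * been seen; only then is it unlinked and moved to the caller's completion
 * list. A failed status report is reported to mac80211 as an ACK with no
 * rate information, since hardware tx status can be unreliable.
 */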
static void
__mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
			  struct sk_buff_head *list)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	u8 done = MT_TX_CB_DMA_DONE | MT_TX_CB_TXS_DONE;

	flags |= cb->flags;
	cb->flags = flags;

	if ((flags & done) != done)
		return;

	__skb_unlink(skb, &dev->status_list);

	/* Tx status can be unreliable. If it fails, mark the frame as ACKed */
	if (flags & MT_TX_CB_TXS_FAILED) {
		ieee80211_tx_info_clear_status(info);
		info->status.rates[0].idx = -1;
		info->flags |= IEEE80211_TX_STAT_ACK;
	}

	__skb_queue_tail(list, skb);
}

void
mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			struct sk_buff_head *list)
{
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE, list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_done);

int
mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
		       struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	int pid;

	if (!wcid)
		return MT_PACKET_ID_NO_ACK;

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		return MT_PACKET_ID_NO_ACK;

	if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
			     IEEE80211_TX_CTL_RATE_CTRL_PROBE)))
		return MT_PACKET_ID_NO_SKB;

	spin_lock_bh(&dev->status_list.lock);

	memset(cb, 0, sizeof(*cb));
	wcid->packet_id = (wcid->packet_id + 1) & MT_PACKET_ID_MASK;
	if (wcid->packet_id == MT_PACKET_ID_NO_ACK ||
	    wcid->packet_id == MT_PACKET_ID_NO_SKB)
		wcid->packet_id = MT_PACKET_ID_FIRST;

	pid = wcid->packet_id;
	cb->wcid = wcid->idx;
	cb->pktid = pid;
	cb->jiffies = jiffies;

	__skb_queue_tail(&dev->status_list, skb);
	spin_unlock_bh(&dev->status_list.lock);

	return pid;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_add);

struct sk_buff *
mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
		       struct sk_buff_head *list)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&dev->status_list, skb, tmp) {
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);

		if (wcid && cb->wcid != wcid->idx)
			continue;

		if (cb->pktid == pktid)
			return skb;

		if (pktid >= 0 &&
		    !time_after(jiffies, cb->jiffies + MT_TX_STATUS_SKB_TIMEOUT))
			continue;

		__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED |
					  MT_TX_CB_TXS_DONE, list);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get);

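/*
 * Time out or flush pending tx status skbs. When flush is set, pktid -1
 * matches no queued entry and skips the timeout check, so every entry is
 * completed immediately; otherwise only entries older than
 * MT_TX_STATUS_SKB_TIMEOUT are expired.
 */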
void
mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid, bool flush)
{
	struct sk_buff_head list;

	mt76_tx_status_lock(dev, &list);
	mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
	mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_check);

void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct sk_buff_head list;

	if (!skb->prev) {
		ieee80211_free_txskb(dev->hw, skb);
		return;
	}

	mt76_tx_status_lock(dev, &list);
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
	mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_complete_skb);

void
mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
	struct mt76_wcid *wcid, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct mt76_queue *q;
	int qid = skb_get_queue_mapping(skb);

	if (WARN_ON(qid >= MT_TXQ_PSD)) {
		qid = MT_TXQ_BE;
		skb_set_queue_mapping(skb, qid);
	}

	if (!wcid->tx_rate_set)
		ieee80211_get_tx_rates(info->control.vif, sta, skb,
				       info->control.rates, 1);

	if (sta && ieee80211_is_data_qos(hdr->frame_control)) {
		struct ieee80211_txq *txq;
		struct mt76_txq *mtxq;
		u8 tid;

		tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
		txq = sta->txq[tid];
		mtxq = (struct mt76_txq *)txq->drv_priv;

		if (mtxq->aggr)
			mt76_check_agg_ssn(mtxq, skb);
	}

	q = &dev->q_tx[qid];

	spin_lock_bh(&q->lock);
	dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta);
	dev->queue_ops->kick(dev, q);

	if (q->queued > q->ndesc - 8)
		ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_tx);

static struct sk_buff *
mt76_txq_dequeue(struct mt76_dev *dev, struct mt76_txq *mtxq, bool ps)
{
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	struct sk_buff *skb;

	skb = skb_dequeue(&mtxq->retry_q);
	if (skb) {
		u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;

		if (ps && skb_queue_empty(&mtxq->retry_q))
			ieee80211_sta_set_buffered(txq->sta, tid, false);

		return skb;
	}

	return ieee80211_tx_dequeue(dev->hw, txq);
}

static void
mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
		  struct sk_buff *skb, bool last)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_queue *hwq = &dev->q_tx[MT_TXQ_PSD];

	info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
	if (last)
		info->flags |= IEEE80211_TX_STATUS_EOSP |
			       IEEE80211_TX_CTL_REQ_TX_STATUS;

	mt76_skb_set_moredata(skb, !last);
	dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, sta);
}

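/*
 * mac80211 powersave release: pull up to nframes buffered frames for the
 * TIDs set in tids and queue them on the PSD hardware queue. The final
 * frame is sent with EOSP set and tx status requested, so that mac80211
 * can close the service period.
 */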
void
mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
			     u16 tids, int nframes,
			     enum ieee80211_frame_release_type reason,
			     bool more_data)
{
	struct mt76_dev *dev = hw->priv;
	struct sk_buff *last_skb = NULL;
	struct mt76_queue *hwq = &dev->q_tx[MT_TXQ_PSD];
	int i;

	spin_lock_bh(&hwq->lock);
	for (i = 0; tids && nframes; i++, tids >>= 1) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;
		struct sk_buff *skb;

		if (!(tids & 1))
			continue;

		do {
			skb = mt76_txq_dequeue(dev, mtxq, true);
			if (!skb)
				break;

			if (mtxq->aggr)
				mt76_check_agg_ssn(mtxq, skb);

			nframes--;
			if (last_skb)
				mt76_queue_ps_skb(dev, sta, last_skb, false);

			last_skb = skb;
		} while (nframes);
	}

	if (last_skb) {
		mt76_queue_ps_skb(dev, sta, last_skb, true);
		dev->queue_ops->kick(dev, hwq);
	}
	spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);

static int
mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
		    struct mt76_txq *mtxq, bool *empty)
{
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	struct ieee80211_tx_info *info;
	struct mt76_wcid *wcid = mtxq->wcid;
	struct sk_buff *skb;
	int n_frames = 1, limit;
	struct ieee80211_tx_rate tx_rate;
	bool ampdu;
	bool probe;
	int idx;

	if (test_bit(MT_WCID_FLAG_PS, &wcid->flags)) {
		*empty = true;
		return 0;
	}

	skb = mt76_txq_dequeue(dev, mtxq, false);
	if (!skb) {
		*empty = true;
		return 0;
	}

	info = IEEE80211_SKB_CB(skb);
	if (!wcid->tx_rate_set)
		ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
				       info->control.rates, 1);
	tx_rate = info->control.rates[0];

	probe = (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
	ampdu = IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_AMPDU;
	limit = ampdu ? 16 : 3;

	if (ampdu)
		mt76_check_agg_ssn(mtxq, skb);

	idx = dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, txq->sta);

	if (idx < 0)
		return idx;

	do {
		bool cur_ampdu;

		if (probe)
			break;

		if (test_bit(MT76_OFFCHANNEL, &dev->state) ||
		    test_bit(MT76_RESET, &dev->state))
			return -EBUSY;

		skb = mt76_txq_dequeue(dev, mtxq, false);
		if (!skb) {
			*empty = true;
			break;
		}

		info = IEEE80211_SKB_CB(skb);
		cur_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;

		if (ampdu != cur_ampdu ||
		    (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
			skb_queue_tail(&mtxq->retry_q, skb);
			break;
		}

		info->control.rates[0] = tx_rate;

		if (cur_ampdu)
			mt76_check_agg_ssn(mtxq, skb);

		idx = dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid,
						   txq->sta);
		if (idx < 0)
			return idx;

		n_frames++;
	} while (n_frames < limit);

	if (!probe) {
		hwq->swq_queued++;
		hwq->entry[idx].schedule = true;
	}

	dev->queue_ops->kick(dev, hwq);

	return n_frames;
}

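/*
 * Round-robin scheduler: take each pending software queue off the head of
 * hwq->swq, send one burst from it and re-queue it at the tail if it still
 * has frames. A pending BlockAck Request is sent with the queue lock
 * dropped, restarting the walk afterwards. One pass ends once the queue
 * that was last at entry has been serviced.
 */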
static int
mt76_txq_schedule_list(struct mt76_dev *dev, struct mt76_queue *hwq)
{
	struct mt76_txq *mtxq, *mtxq_last;
	int len = 0;

restart:
	mtxq_last = list_last_entry(&hwq->swq, struct mt76_txq, list);
	while (!list_empty(&hwq->swq)) {
		bool empty = false;
		int cur;

		if (test_bit(MT76_OFFCHANNEL, &dev->state) ||
		    test_bit(MT76_RESET, &dev->state))
			return -EBUSY;

		mtxq = list_first_entry(&hwq->swq, struct mt76_txq, list);
		if (mtxq->send_bar && mtxq->aggr) {
			struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
			struct ieee80211_sta *sta = txq->sta;
			struct ieee80211_vif *vif = txq->vif;
			u16 agg_ssn = mtxq->agg_ssn;
			u8 tid = txq->tid;

			mtxq->send_bar = false;
			spin_unlock_bh(&hwq->lock);
			ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
			spin_lock_bh(&hwq->lock);
			goto restart;
		}

		list_del_init(&mtxq->list);

		cur = mt76_txq_send_burst(dev, hwq, mtxq, &empty);
		if (!empty)
			list_add_tail(&mtxq->list, &hwq->swq);

		if (cur < 0)
			return cur;

		len += cur;

		if (mtxq == mtxq_last)
			break;
	}

	return len;
}

void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq)
{
	int len;

	rcu_read_lock();
	do {
		if (hwq->swq_queued >= 4 || list_empty(&hwq->swq))
			break;

		len = mt76_txq_schedule_list(dev, hwq);
	} while (len > 0);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);

void mt76_txq_schedule_all(struct mt76_dev *dev)
{
	int i;

	for (i = 0; i <= MT_TXQ_BK; i++) {
		struct mt76_queue *q = &dev->q_tx[i];

		spin_lock_bh(&q->lock);
		mt76_txq_schedule(dev, q);
		spin_unlock_bh(&q->lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);

void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
			 bool send_bar)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_txq *mtxq;

		if (!txq)
			continue;

		mtxq = (struct mt76_txq *)txq->drv_priv;

		spin_lock_bh(&mtxq->hwq->lock);
		mtxq->send_bar = mtxq->aggr && send_bar;
		if (!list_empty(&mtxq->list))
			list_del_init(&mtxq->list);
		spin_unlock_bh(&mtxq->hwq->lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);

void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
	struct mt76_dev *dev = hw->priv;
	struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;
	struct mt76_queue *hwq = mtxq->hwq;

	spin_lock_bh(&hwq->lock);
	if (list_empty(&mtxq->list))
		list_add_tail(&mtxq->list, &hwq->swq);
	mt76_txq_schedule(dev, hwq);
	spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);

void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
	struct mt76_txq *mtxq;
	struct mt76_queue *hwq;
	struct sk_buff *skb;

	if (!txq)
		return;

	mtxq = (struct mt76_txq *)txq->drv_priv;
	hwq = mtxq->hwq;

	spin_lock_bh(&hwq->lock);
	if (!list_empty(&mtxq->list))
		list_del_init(&mtxq->list);
	spin_unlock_bh(&hwq->lock);

	while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL)
		ieee80211_free_txskb(dev->hw, skb);
}
EXPORT_SYMBOL_GPL(mt76_txq_remove);

void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
	struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;

	INIT_LIST_HEAD(&mtxq->list);
	skb_queue_head_init(&mtxq->retry_q);

	mtxq->hwq = &dev->q_tx[mt76_txq_get_qid(txq)];
}
EXPORT_SYMBOL_GPL(mt76_txq_init);

u8 mt76_ac_to_hwq(u8 ac)
{
	static const u8 wmm_queue_map[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 2,
		[IEEE80211_AC_VO] = 3,
	};

	if (WARN_ON(ac >= IEEE80211_NUM_ACS))
		return 0;

	return wmm_queue_map[ac];
}
EXPORT_SYMBOL_GPL(mt76_ac_to_hwq);