/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "mt76.h"

static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;
	dma_addr_t addr;
	int size;

	/* round the allocation up to a full cache line, so the DMA-mapped
	 * txwi does not share a cache line with unrelated data
	 */
	size = (sizeof(*t) + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
	t = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
	if (!t)
		return NULL;

	addr = dma_map_single(dev->dev, &t->txwi, sizeof(t->txwi),
			      DMA_TO_DEVICE);
	t->dma_addr = addr;

	return t;
}

static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock_bh(&dev->lock);
	if (!list_empty(&dev->txwi_cache)) {
		t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock_bh(&dev->lock);

	return t;
}

static struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

	if (t)
		return t;

	return mt76_alloc_txwi(dev);
}

void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock_bh(&dev->lock);
	list_add(&t->list, &dev->txwi_cache);
	spin_unlock_bh(&dev->lock);
}

void mt76_tx_free(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	while ((t = __mt76_get_txwi(dev)) != NULL)
		dma_unmap_single(dev->dev, t->dma_addr, sizeof(t->txwi),
				 DMA_TO_DEVICE);
}

static int
mt76_txq_get_qid(struct ieee80211_txq *txq)
{
	if (!txq->sta)
		return MT_TXQ_BE;

	return txq->ac;
}

int mt76_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		      struct sk_buff *skb, struct mt76_wcid *wcid,
		      struct ieee80211_sta *sta)
{
	struct mt76_queue_entry e;
	struct mt76_txwi_cache *t;
	struct mt76_queue_buf buf[32];
	struct sk_buff *iter;
	dma_addr_t addr;
	int len;
	u32 tx_info = 0;
	int n, ret;

	t = mt76_get_txwi(dev);
	if (!t) {
		ieee80211_free_txskb(dev->hw, skb);
		return -ENOMEM;
	}

	dma_sync_single_for_cpu(dev->dev, t->dma_addr, sizeof(t->txwi),
				DMA_TO_DEVICE);
	ret = dev->drv->tx_prepare_skb(dev, &t->txwi, skb, q, wcid, sta,
				       &tx_info);
	dma_sync_single_for_device(dev->dev, t->dma_addr, sizeof(t->txwi),
				   DMA_TO_DEVICE);
	if (ret < 0)
		goto free;

	len = skb->len - skb->data_len;
	addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev->dev, addr)) {
		ret = -ENOMEM;
		goto free;
	}

	n = 0;
	buf[n].addr = t->dma_addr;
	buf[n++].len = dev->drv->txwi_size;
	buf[n].addr = addr;
	buf[n++].len = len;

	skb_walk_frags(skb, iter) {
		if (n == ARRAY_SIZE(buf))
			goto unmap;

		addr = dma_map_single(dev->dev, iter->data, iter->len,
				      DMA_TO_DEVICE);
		if (dma_mapping_error(dev->dev, addr))
			goto unmap;

		buf[n].addr = addr;
		buf[n++].len = iter->len;
	}

	/* each hardware descriptor holds up to two buffers */
	if (q->queued + (n + 1) / 2 >= q->ndesc - 1)
		goto unmap;

	return dev->queue_ops->add_buf(dev, q, buf, n, tx_info, skb, t);

unmap:
	ret = -ENOMEM;
	/* skip buf[0]: the txwi mapping is cached and unmapped separately */
	for (n--; n > 0; n--)
		dma_unmap_single(dev->dev, buf[n].addr, buf[n].len,
				 DMA_TO_DEVICE);

free:
	e.skb = skb;
	e.txwi = t;
	dev->drv->tx_complete_skb(dev, q, &e, true);
	mt76_put_txwi(dev, t);
	return ret;
}
EXPORT_SYMBOL_GPL(mt76_tx_queue_skb);

void
mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
	struct mt76_wcid *wcid, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_queue *q;
	int qid = skb_get_queue_mapping(skb);

	if (WARN_ON(qid >= MT_TXQ_PSD)) {
		qid = MT_TXQ_BE;
		skb_set_queue_mapping(skb, qid);
	}

	if (!wcid->tx_rate_set)
		ieee80211_get_tx_rates(info->control.vif, sta, skb,
				       info->control.rates, 1);

	q = &dev->q_tx[qid];

	spin_lock_bh(&q->lock);
	mt76_tx_queue_skb(dev, q, skb, wcid, sta);
	dev->queue_ops->kick(dev, q);

	if (q->queued > q->ndesc - 8)
		ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_tx);

static struct sk_buff *
mt76_txq_dequeue(struct mt76_dev *dev, struct mt76_txq *mtxq, bool ps)
{
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	struct sk_buff *skb;

	skb = skb_dequeue(&mtxq->retry_q);
	if (skb) {
		u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;

		if (ps && skb_queue_empty(&mtxq->retry_q))
			ieee80211_sta_set_buffered(txq->sta, tid, false);

		return skb;
	}

	skb = ieee80211_tx_dequeue(dev->hw, txq);
	if (!skb)
		return NULL;

	return skb;
}

static void
mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return;

	mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
}

static void
mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
		  struct sk_buff *skb, bool last)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *) sta->drv_priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_queue *hwq = &dev->q_tx[MT_TXQ_PSD];

	info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
	if (last)
		info->flags |= IEEE80211_TX_STATUS_EOSP;

	mt76_skb_set_moredata(skb, !last);
	mt76_tx_queue_skb(dev, hwq, skb, wcid, sta);
}

void
mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
			     u16 tids, int nframes,
			     enum ieee80211_frame_release_type reason,
			     bool more_data)
{
	struct mt76_dev *dev = hw->priv;
	struct sk_buff *last_skb = NULL;
	struct mt76_queue *hwq = &dev->q_tx[MT_TXQ_PSD];
	int i;

	spin_lock_bh(&hwq->lock);
	for (i = 0; tids && nframes; i++, tids >>= 1) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
		struct sk_buff *skb;

		if (!(tids & 1))
			continue;

		do {
			skb = mt76_txq_dequeue(dev, mtxq, true);
			if (!skb)
				break;

			if (mtxq->aggr)
				mt76_check_agg_ssn(mtxq, skb);

			/* queue frames one step behind, so the final frame
			 * can be flagged as the end of the service period
			 */
			nframes--;
			if (last_skb)
				mt76_queue_ps_skb(dev, sta, last_skb, false);

			last_skb = skb;
		} while (nframes);
	}

	if (last_skb) {
		mt76_queue_ps_skb(dev, sta, last_skb, true);
		dev->queue_ops->kick(dev, hwq);
	}
	spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);

static int
mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
		    struct mt76_txq *mtxq, bool *empty)
{
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	struct ieee80211_tx_info *info;
	struct mt76_wcid *wcid = mtxq->wcid;
	struct sk_buff *skb;
	int n_frames = 1, limit;
	struct ieee80211_tx_rate tx_rate;
	bool ampdu;
	bool probe;
	int idx;

	skb = mt76_txq_dequeue(dev, mtxq, false);
	if (!skb) {
		*empty = true;
		return 0;
	}

	info = IEEE80211_SKB_CB(skb);
	if (!wcid->tx_rate_set)
		ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
				       info->control.rates, 1);
	tx_rate = info->control.rates[0];

	probe = (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
	ampdu = IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_AMPDU;
	/* allow longer bursts when aggregation is active */
	limit = ampdu ? 16 : 3;

	if (ampdu)
		mt76_check_agg_ssn(mtxq, skb);

	idx = mt76_tx_queue_skb(dev, hwq, skb, wcid, txq->sta);

	if (idx < 0)
		return idx;

	do {
		bool cur_ampdu;

		if (probe)
			break;

		if (test_bit(MT76_OFFCHANNEL, &dev->state) ||
		    test_bit(MT76_RESET, &dev->state))
			return -EBUSY;

		skb = mt76_txq_dequeue(dev, mtxq, false);
		if (!skb) {
			*empty = true;
			break;
		}

		info = IEEE80211_SKB_CB(skb);
		cur_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;

		if (ampdu != cur_ampdu ||
		    (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
			skb_queue_tail(&mtxq->retry_q, skb);
			break;
		}

		info->control.rates[0] = tx_rate;

		if (cur_ampdu)
			mt76_check_agg_ssn(mtxq, skb);

		idx = mt76_tx_queue_skb(dev, hwq, skb, wcid, txq->sta);
		if (idx < 0)
			return idx;

		n_frames++;
	} while (n_frames < limit);

	if (!probe) {
		hwq->swq_queued++;
		hwq->entry[idx].schedule = true;
	}

	dev->queue_ops->kick(dev, hwq);

	return n_frames;
}

static int
mt76_txq_schedule_list(struct mt76_dev *dev, struct mt76_queue *hwq)
{
	struct mt76_txq *mtxq, *mtxq_last;
	int len = 0;

restart:
	mtxq_last = list_last_entry(&hwq->swq, struct mt76_txq, list);
	while (!list_empty(&hwq->swq)) {
		bool empty = false;
		int cur;

		if (test_bit(MT76_OFFCHANNEL, &dev->state) ||
		    test_bit(MT76_RESET, &dev->state))
			return -EBUSY;

		mtxq = list_first_entry(&hwq->swq, struct mt76_txq, list);
		if (mtxq->send_bar && mtxq->aggr) {
			struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
			struct ieee80211_sta *sta = txq->sta;
			struct ieee80211_vif *vif = txq->vif;
			u16 agg_ssn = mtxq->agg_ssn;
			u8 tid = txq->tid;

			mtxq->send_bar = false;
			spin_unlock_bh(&hwq->lock);
			ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
			spin_lock_bh(&hwq->lock);
			goto restart;
		}

		list_del_init(&mtxq->list);

		cur = mt76_txq_send_burst(dev, hwq, mtxq, &empty);
		if (!empty)
			list_add_tail(&mtxq->list, &hwq->swq);

		if (cur < 0)
			return cur;

		len += cur;

		if (mtxq == mtxq_last)
			break;
	}

	return len;
}

void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq)
{
	int len;

	rcu_read_lock();
	do {
		if (hwq->swq_queued >= 4 || list_empty(&hwq->swq))
			break;

		len = mt76_txq_schedule_list(dev, hwq);
	} while (len > 0);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);

void mt76_txq_schedule_all(struct mt76_dev *dev)
{
	int i;

	for (i = 0; i <= MT_TXQ_BK; i++) {
		struct mt76_queue *q = &dev->q_tx[i];

		spin_lock_bh(&q->lock);
		mt76_txq_schedule(dev, q);
		spin_unlock_bh(&q->lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);

void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
			 bool send_bar)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;

		spin_lock_bh(&mtxq->hwq->lock);
		mtxq->send_bar = mtxq->aggr && send_bar;
		if (!list_empty(&mtxq->list))
			list_del_init(&mtxq->list);
		spin_unlock_bh(&mtxq->hwq->lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);

void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
	struct mt76_dev *dev = hw->priv;
	struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
	struct mt76_queue *hwq = mtxq->hwq;

	spin_lock_bh(&hwq->lock);
	if (list_empty(&mtxq->list))
		list_add_tail(&mtxq->list, &hwq->swq);
	mt76_txq_schedule(dev, hwq);
	spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);

void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
	struct mt76_txq *mtxq;
	struct mt76_queue *hwq;
	struct sk_buff *skb;

	if (!txq)
		return;

	mtxq = (struct mt76_txq *) txq->drv_priv;
	hwq = mtxq->hwq;

	spin_lock_bh(&hwq->lock);
	if (!list_empty(&mtxq->list))
		list_del(&mtxq->list);
	spin_unlock_bh(&hwq->lock);

	while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL)
		ieee80211_free_txskb(dev->hw, skb);
}
EXPORT_SYMBOL_GPL(mt76_txq_remove);

void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
	struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;

	INIT_LIST_HEAD(&mtxq->list);
	skb_queue_head_init(&mtxq->retry_q);

	mtxq->hwq = &dev->q_tx[mt76_txq_get_qid(txq)];
}
EXPORT_SYMBOL_GPL(mt76_txq_init);