/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "mt76.h"

/* Allocate a txwi descriptor, padded to a full cache line, and map it for
 * DMA to the device.
 */
static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;
	dma_addr_t addr;
	int size;

	size = (sizeof(*t) + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
	t = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
	if (!t)
		return NULL;

	addr = dma_map_single(dev->dev, &t->txwi, sizeof(t->txwi),
			      DMA_TO_DEVICE);
	t->dma_addr = addr;

	return t;
}

static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock_bh(&dev->lock);
	if (!list_empty(&dev->txwi_cache)) {
		t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock_bh(&dev->lock);

	return t;
}

struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

	if (t)
		return t;

	return mt76_alloc_txwi(dev);
}

void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock_bh(&dev->lock);
	list_add(&t->list, &dev->txwi_cache);
	spin_unlock_bh(&dev->lock);
}

/* Unmap all cached txwi buffers; the memory itself is devm-managed and is
 * released together with the device.
 */
void mt76_tx_free(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	while ((t = __mt76_get_txwi(dev)) != NULL)
		dma_unmap_single(dev->dev, t->dma_addr, sizeof(t->txwi),
				 DMA_TO_DEVICE);
}

static int
mt76_txq_get_qid(struct ieee80211_txq *txq)
{
	if (!txq->sta)
		return MT_TXQ_BE;

	return txq->ac;
}

/* Remember the sequence number following this QoS data frame; it is used as
 * the SSN when a BlockAckReq is sent after the queue has been stopped.
 */
static void
mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    !ieee80211_is_data_present(hdr->frame_control))
		return;

	mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
}

void
mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
	struct mt76_wcid *wcid, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct mt76_queue *q;
	int qid = skb_get_queue_mapping(skb);

	if (WARN_ON(qid >= MT_TXQ_PSD)) {
		qid = MT_TXQ_BE;
		skb_set_queue_mapping(skb, qid);
	}

	if (!wcid->tx_rate_set)
		ieee80211_get_tx_rates(info->control.vif, sta, skb,
				       info->control.rates, 1);

	if (sta && ieee80211_is_data_qos(hdr->frame_control)) {
		struct ieee80211_txq *txq;
		struct mt76_txq *mtxq;
		u8 tid;

		tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
		txq = sta->txq[tid];
		mtxq = (struct mt76_txq *) txq->drv_priv;

		if (mtxq->aggr)
			mt76_check_agg_ssn(mtxq, skb);
	}

	q = &dev->q_tx[qid];

	spin_lock_bh(&q->lock);
	dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta);
	dev->queue_ops->kick(dev, q);

	/* stop the mac80211 queue when the hardware queue is almost full */
	if (q->queued > q->ndesc - 8)
		ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_tx);

/* Fetch the next frame for a txq, preferring frames left over from a
 * previous burst before pulling new ones from mac80211.
 */
static struct sk_buff *
mt76_txq_dequeue(struct mt76_dev *dev, struct mt76_txq *mtxq, bool ps)
{
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	struct sk_buff *skb;

	skb = skb_dequeue(&mtxq->retry_q);
	if (skb) {
		u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;

		if (ps && skb_queue_empty(&mtxq->retry_q))
			ieee80211_sta_set_buffered(txq->sta, tid, false);

		return skb;
	}

	skb = ieee80211_tx_dequeue(dev->hw, txq);
	if (!skb)
		return NULL;

	return skb;
}

/* Queue a powersave response frame on the PSD hardware queue */
static void
mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
		  struct sk_buff *skb, bool last)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *) sta->drv_priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_queue *hwq = &dev->q_tx[MT_TXQ_PSD];

	info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
	if (last)
		info->flags |= IEEE80211_TX_STATUS_EOSP;

	mt76_skb_set_moredata(skb, !last);
	dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, sta);
}

void
mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
			     u16 tids, int nframes,
			     enum ieee80211_frame_release_type reason,
			     bool more_data)
{
	struct mt76_dev *dev = hw->priv;
	struct sk_buff *last_skb = NULL;
	struct mt76_queue *hwq = &dev->q_tx[MT_TXQ_PSD];
	int i;

	spin_lock_bh(&hwq->lock);
	for (i = 0; tids && nframes; i++, tids >>= 1) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
		struct sk_buff *skb;

		if (!(tids & 1))
			continue;

		do {
			skb = mt76_txq_dequeue(dev, mtxq, true);
			if (!skb)
				break;

			if (mtxq->aggr)
				mt76_check_agg_ssn(mtxq, skb);

			nframes--;
			if (last_skb)
				mt76_queue_ps_skb(dev, sta, last_skb, false);

			last_skb = skb;
		} while (nframes);
	}

	/* the last released frame carries EOSP and triggers the queue kick */
	if (last_skb) {
		mt76_queue_ps_skb(dev, sta, last_skb, true);
		dev->queue_ops->kick(dev, hwq);
	}
	spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);

/* Send a burst of frames from a single txq, reusing the tx rate of the
 * first frame for the whole burst.
 */
static int
mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
		    struct mt76_txq *mtxq, bool *empty)
{
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	struct ieee80211_tx_info *info;
	struct mt76_wcid *wcid = mtxq->wcid;
	struct sk_buff *skb;
	int n_frames = 1, limit;
	struct ieee80211_tx_rate tx_rate;
	bool ampdu;
	bool probe;
	int idx;

	skb = mt76_txq_dequeue(dev, mtxq, false);
	if (!skb) {
		*empty = true;
		return 0;
	}

	info = IEEE80211_SKB_CB(skb);
	if (!wcid->tx_rate_set)
		ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
				       info->control.rates, 1);
	tx_rate = info->control.rates[0];

	probe = (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
	ampdu = IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_AMPDU;
	limit = ampdu ? 16 : 3;

	if (ampdu)
		mt76_check_agg_ssn(mtxq, skb);

	idx = dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, txq->sta);

	if (idx < 0)
		return idx;

	do {
		bool cur_ampdu;

		if (probe)
			break;

		if (test_bit(MT76_OFFCHANNEL, &dev->state) ||
		    test_bit(MT76_RESET, &dev->state))
			return -EBUSY;

		skb = mt76_txq_dequeue(dev, mtxq, false);
		if (!skb) {
			*empty = true;
			break;
		}

		info = IEEE80211_SKB_CB(skb);
		cur_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;

		/* end the burst when the aggregation state changes or a
		 * rate-control probe frame shows up
		 */
		if (ampdu != cur_ampdu ||
		    (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
			skb_queue_tail(&mtxq->retry_q, skb);
			break;
		}

		info->control.rates[0] = tx_rate;

		if (cur_ampdu)
			mt76_check_agg_ssn(mtxq, skb);

		idx = dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid,
						   txq->sta);
		if (idx < 0)
			return idx;

		n_frames++;
	} while (n_frames < limit);

	if (!probe) {
		hwq->swq_queued++;
		hwq->entry[idx].schedule = true;
	}

	dev->queue_ops->kick(dev, hwq);

	return n_frames;
}

/* Service the pending software queues round-robin, sending a BlockAckReq
 * first for any queue that has one pending.
 */
static int
mt76_txq_schedule_list(struct mt76_dev *dev, struct mt76_queue *hwq)
{
	struct mt76_txq *mtxq, *mtxq_last;
	int len = 0;

restart:
	mtxq_last = list_last_entry(&hwq->swq, struct mt76_txq, list);
	while (!list_empty(&hwq->swq)) {
		bool empty = false;
		int cur;

		if (test_bit(MT76_OFFCHANNEL, &dev->state) ||
		    test_bit(MT76_RESET, &dev->state))
			return -EBUSY;

		mtxq = list_first_entry(&hwq->swq, struct mt76_txq, list);
		if (mtxq->send_bar && mtxq->aggr) {
			struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
			struct ieee80211_sta *sta = txq->sta;
			struct ieee80211_vif *vif = txq->vif;
			u16 agg_ssn = mtxq->agg_ssn;
			u8 tid = txq->tid;

			mtxq->send_bar = false;
			spin_unlock_bh(&hwq->lock);
			ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
			spin_lock_bh(&hwq->lock);
			goto restart;
		}

		list_del_init(&mtxq->list);

		cur = mt76_txq_send_burst(dev, hwq, mtxq, &empty);
		if (!empty)
			list_add_tail(&mtxq->list, &hwq->swq);

		if (cur < 0)
			return cur;

		len += cur;

		if (mtxq == mtxq_last)
			break;
	}

	return len;
}

void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq)
{
	int len;

	rcu_read_lock();
	do {
		/* limit the number of scheduled bursts in flight per queue */
		if (hwq->swq_queued >= 4 || list_empty(&hwq->swq))
			break;

		len = mt76_txq_schedule_list(dev, hwq);
	} while (len > 0);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);

void mt76_txq_schedule_all(struct mt76_dev *dev)
{
	int i;

	for (i = 0; i <= MT_TXQ_BK; i++) {
		struct mt76_queue *q = &dev->q_tx[i];

		spin_lock_bh(&q->lock);
		mt76_txq_schedule(dev, q);
		spin_unlock_bh(&q->lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);

/* Remove a station's txqs from scheduling, optionally flagging that a
 * BlockAckReq should be sent once the queues are serviced again.
 */
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
			 bool send_bar)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;

		spin_lock_bh(&mtxq->hwq->lock);
		mtxq->send_bar = mtxq->aggr && send_bar;
		if (!list_empty(&mtxq->list))
			list_del_init(&mtxq->list);
		spin_unlock_bh(&mtxq->hwq->lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);
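
/* wake_tx_queue handler: add the txq to its hardware queue's software
 * scheduling list (if it is not already queued) and run the scheduler.
 */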
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
	struct mt76_dev *dev = hw->priv;
	struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
	struct mt76_queue *hwq = mtxq->hwq;

	spin_lock_bh(&hwq->lock);
	if (list_empty(&mtxq->list))
		list_add_tail(&mtxq->list, &hwq->swq);
	mt76_txq_schedule(dev, hwq);
	spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);

void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
	struct mt76_txq *mtxq;
	struct mt76_queue *hwq;
	struct sk_buff *skb;

	if (!txq)
		return;

	mtxq = (struct mt76_txq *) txq->drv_priv;
	hwq = mtxq->hwq;

	spin_lock_bh(&hwq->lock);
	if (!list_empty(&mtxq->list))
		list_del(&mtxq->list);
	spin_unlock_bh(&hwq->lock);

	while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL)
		ieee80211_free_txskb(dev->hw, skb);
}
EXPORT_SYMBOL_GPL(mt76_txq_remove);

void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
	struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;

	INIT_LIST_HEAD(&mtxq->list);
	skb_queue_head_init(&mtxq->retry_q);

	mtxq->hwq = &dev->q_tx[mt76_txq_get_qid(txq)];
}
EXPORT_SYMBOL_GPL(mt76_txq_init);

u8 mt76_ac_to_hwq(u8 ac)
{
	static const u8 wmm_queue_map[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 2,
		[IEEE80211_AC_VO] = 3,
	};

	if (WARN_ON(ac >= IEEE80211_NUM_ACS))
		return 0;

	return wmm_queue_map[ac];
}
EXPORT_SYMBOL_GPL(mt76_ac_to_hwq);