// SPDX-License-Identifier: GPL-2.0-only
/*
 * HT handling
 *
 * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi>
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2005-2006, Devicescape Software, Inc.
 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
 * Copyright 2007-2010, Intel Corporation
 * Copyright(c) 2015-2017 Intel Deutschland GmbH
 * Copyright (C) 2018 - 2019 Intel Corporation
 */

#include <linux/ieee80211.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
#include "wme.h"

/**
 * DOC: TX A-MPDU aggregation
 *
 * Aggregation on the TX side requires setting the hardware flag
 * %IEEE80211_HW_AMPDU_AGGREGATION. The driver will then be handed
 * packets with a flag indicating A-MPDU aggregation. The driver
 * or device is responsible for actually aggregating the frames,
 * as well as deciding how many and which to aggregate.
 *
 * When TX aggregation is started by some subsystem (usually the rate
 * control algorithm would be appropriate) by calling the
 * ieee80211_start_tx_ba_session() function, the driver will be
 * notified via its @ampdu_action function, with the
 * %IEEE80211_AMPDU_TX_START action.
 *
 * In response to that, the driver is later required to call the
 * ieee80211_start_tx_ba_cb_irqsafe() function, which will really
 * start the aggregation session after the peer has also responded.
 * If the peer responds negatively, the session will be stopped
 * again right away. Note that it is possible for the aggregation
 * session to be stopped before the driver has indicated that it
 * is done setting it up, in which case it must not indicate the
 * setup completion.
 *
 * Also note that, since we also need to wait for a response from
 * the peer, the driver is notified of the completion of the
 * handshake by the %IEEE80211_AMPDU_TX_OPERATIONAL action to the
 * @ampdu_action callback.
 *
 * Similarly, when the aggregation session is stopped by the peer
 * or something calling ieee80211_stop_tx_ba_session(), the driver's
 * @ampdu_action function will be called with the action
 * %IEEE80211_AMPDU_TX_STOP. In this case, the call must not fail,
 * and the driver must later call ieee80211_stop_tx_ba_cb_irqsafe().
 * Note that the sta can get destroyed before the BA tear down is
 * complete.
 */
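
/*
 * Illustrative sketch (hypothetical, guarded out of the build): one
 * way a driver could implement the TX side of the @ampdu_action
 * contract described above. All my_* names are placeholders; the
 * handling of the stop variants follows the mac80211 driver API
 * documentation.
 */
#if 0
static int my_ampdu_action(struct ieee80211_hw *hw,
			   struct ieee80211_vif *vif,
			   struct ieee80211_ampdu_params *params)
{
	switch (params->action) {
	case IEEE80211_AMPDU_TX_START:
		/*
		 * Prepare hardware/firmware state; once done, tell
		 * mac80211 to proceed with the ADDBA handshake.
		 */
		ieee80211_start_tx_ba_cb_irqsafe(vif, params->sta->addr,
						 params->tid);
		return 0;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		/*
		 * Handshake complete: frames flagged with
		 * IEEE80211_TX_CTL_AMPDU may now be aggregated,
		 * up to params->buf_size subframes.
		 */
		return 0;
	case IEEE80211_AMPDU_TX_STOP_CONT:
		/* must not fail; complete the teardown asynchronously */
		ieee80211_stop_tx_ba_cb_irqsafe(vif, params->sta->addr,
						params->tid);
		return 0;
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
		/*
		 * The station is going away: drop remaining frames;
		 * mac80211 does not wait for a callback here.
		 */
		return 0;
	default:
		/* RX aggregation actions omitted in this sketch */
		return -EOPNOTSUPP;
	}
}
#endif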

static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
					 const u8 *da, u16 tid,
					 u8 dialog_token, u16 start_seq_num,
					 u16 agg_size, u16 timeout)
{
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_mgmt *mgmt;
	u16 capab;

	skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);

	if (!skb)
		return;

	skb_reserve(skb, local->hw.extra_tx_headroom);
	mgmt = skb_put_zero(skb, 24);
	memcpy(mgmt->da, da, ETH_ALEN);
	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
	if (sdata->vif.type == NL80211_IFTYPE_AP ||
	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
	    sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
		memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
	else if (sdata->vif.type == NL80211_IFTYPE_STATION)
		memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
	else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
		memcpy(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN);

	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_ACTION);

	skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_req));

	mgmt->u.action.category = WLAN_CATEGORY_BACK;
	mgmt->u.action.u.addba_req.action_code = WLAN_ACTION_ADDBA_REQ;

	mgmt->u.action.u.addba_req.dialog_token = dialog_token;
	capab = (u16)(1 << 0);		/* bit 0: A-MSDU support */
	capab |= (u16)(1 << 1);		/* bit 1: aggregation policy */
	capab |= (u16)(tid << 2);	/* bits 5:2: TID number */
	capab |= (u16)(agg_size << 6);	/* bits 15:6: max aggregation size */

	mgmt->u.action.u.addba_req.capab = cpu_to_le16(capab);

	mgmt->u.action.u.addba_req.timeout = cpu_to_le16(timeout);
	mgmt->u.action.u.addba_req.start_seq_num =
					cpu_to_le16(start_seq_num << 4);

	ieee80211_tx_skb(sdata, skb);
}
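
/*
 * Worked example (illustrative only): for tid = 3 and agg_size = 64,
 * the capability field built above is
 *
 *	capab = BIT(0)		A-MSDU supported
 *	      | BIT(1)		immediate Block Ack policy
 *	      | (3 << 2)	TID
 *	      | (64 << 6)	buffer size
 *	      = 0x100f
 *
 * and a starting sequence number of, say, 100 goes into the upper
 * twelve bits of the Starting Sequence Control field as 100 << 4
 * (the low four bits hold the fragment number, always zero here).
 */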

void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_bar *bar;
	u16 bar_control = 0;

	skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom);
	if (!skb)
		return;

	skb_reserve(skb, local->hw.extra_tx_headroom);
	bar = skb_put_zero(skb, sizeof(*bar));
	bar->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
					 IEEE80211_STYPE_BACK_REQ);
	memcpy(bar->ra, ra, ETH_ALEN);
	memcpy(bar->ta, sdata->vif.addr, ETH_ALEN);
	bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL;
	bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA;
	bar_control |= (u16)(tid << IEEE80211_BAR_CTRL_TID_INFO_SHIFT);
	bar->control = cpu_to_le16(bar_control);
	bar->start_seq_num = cpu_to_le16(ssn);

	IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
					IEEE80211_TX_CTL_REQ_TX_STATUS;
	ieee80211_tx_skb_tid(sdata, skb, tid);
}
EXPORT_SYMBOL(ieee80211_send_bar);

void ieee80211_assign_tid_tx(struct sta_info *sta, int tid,
			     struct tid_ampdu_tx *tid_tx)
{
	lockdep_assert_held(&sta->ampdu_mlme.mtx);
	lockdep_assert_held(&sta->lock);
	rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], tid_tx);
}

/*
 * When multiple aggregation sessions on multiple stations
 * are being created/destroyed simultaneously, we need to
 * refcount the global queue stop caused by that in order
 * not to get into a situation where one of the aggregation
 * setups or teardowns re-enables queues before the other
 * is ready to handle that.
 *
 * These two functions take care of this issue by keeping
 * a global "agg_queue_stop" refcount.
 */
static void __acquires(agg_queue)
ieee80211_stop_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
{
	int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];

	/* we do refcounting here, so don't use the queue reason refcounting */

	if (atomic_inc_return(&sdata->local->agg_queue_stop[queue]) == 1)
		ieee80211_stop_queue_by_reason(
			&sdata->local->hw, queue,
			IEEE80211_QUEUE_STOP_REASON_AGGREGATION,
			false);
	__acquire(agg_queue);
}

static void __releases(agg_queue)
ieee80211_wake_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
{
	int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];

	if (atomic_dec_return(&sdata->local->agg_queue_stop[queue]) == 0)
		ieee80211_wake_queue_by_reason(
			&sdata->local->hw, queue,
			IEEE80211_QUEUE_STOP_REASON_AGGREGATION,
			false);
	__release(agg_queue);
}
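
/*
 * Worked scenario (illustrative only): sessions A and B share a
 * hardware queue. A starts teardown and stops the queue (refcount
 * 0 -> 1), then B does the same (1 -> 2). When A finishes and calls
 * the wake function (2 -> 1), the queue stays stopped; only when B
 * also finishes (1 -> 0) is the queue actually woken. Without the
 * refcount, A's wake would have re-enabled the queue under B.
 */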

static void
ieee80211_agg_stop_txq(struct sta_info *sta, int tid)
{
	struct ieee80211_txq *txq = sta->sta.txq[tid];
	struct ieee80211_sub_if_data *sdata;
	struct fq *fq;
	struct txq_info *txqi;

	if (!txq)
		return;

	txqi = to_txq_info(txq);
	sdata = vif_to_sdata(txq->vif);
	fq = &sdata->local->fq;

	/* Lock here to protect against further seqno updates on dequeue */
	spin_lock_bh(&fq->lock);
	set_bit(IEEE80211_TXQ_STOP, &txqi->flags);
	spin_unlock_bh(&fq->lock);
}

static void
ieee80211_agg_start_txq(struct sta_info *sta, int tid, bool enable)
{
	struct ieee80211_txq *txq = sta->sta.txq[tid];
	struct txq_info *txqi;

	if (!txq)
		return;

	txqi = to_txq_info(txq);

	if (enable)
		set_bit(IEEE80211_TXQ_AMPDU, &txqi->flags);
	else
		clear_bit(IEEE80211_TXQ_AMPDU, &txqi->flags);

	clear_bit(IEEE80211_TXQ_STOP, &txqi->flags);
	local_bh_disable();
	rcu_read_lock();
	schedule_and_wake_txq(sta->sdata->local, txqi);
	rcu_read_unlock();
	local_bh_enable();
}

/*
 * Splice packets from the station's pending queue to the local pending
 * queue; requires a later call to ieee80211_agg_splice_finish().
 */
static void __acquires(agg_queue)
ieee80211_agg_splice_packets(struct ieee80211_sub_if_data *sdata,
			     struct tid_ampdu_tx *tid_tx, u16 tid)
{
	struct ieee80211_local *local = sdata->local;
	int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
	unsigned long flags;

	ieee80211_stop_queue_agg(sdata, tid);

	if (WARN(!tid_tx,
		 "TID %d gone but expected when splicing aggregates from the pending queue\n",
		 tid))
		return;

	if (!skb_queue_empty(&tid_tx->pending)) {
		spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
		/* copy over remaining packets */
		skb_queue_splice_tail_init(&tid_tx->pending,
					   &local->pending[queue]);
		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
	}
}

static void __releases(agg_queue)
ieee80211_agg_splice_finish(struct ieee80211_sub_if_data *sdata, u16 tid)
{
	ieee80211_wake_queue_agg(sdata, tid);
}

static void ieee80211_remove_tid_tx(struct sta_info *sta, int tid)
{
	struct tid_ampdu_tx *tid_tx;

	lockdep_assert_held(&sta->ampdu_mlme.mtx);
	lockdep_assert_held(&sta->lock);

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	/*
	 * When we get here, the TX path will not be lockless any more
	 * wrt. aggregation, since the OPERATIONAL bit has long been
	 * cleared. Thus it will block on getting the lock, if it occurs.
	 * So if we stop the queue now, we will not get any more packets,
	 * and any that might be being processed will wait for us here,
	 * thereby guaranteeing that no packets go to the tid_tx pending
	 * queue any more.
	 */

	ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid);

	/* future packets must not find the tid_tx struct any more */
	ieee80211_assign_tid_tx(sta, tid, NULL);

	ieee80211_agg_splice_finish(sta->sdata, tid);
	ieee80211_agg_start_txq(sta, tid, false);

	kfree_rcu(tid_tx, rcu_head);
}

int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
				    enum ieee80211_agg_stop_reason reason)
{
	struct ieee80211_local *local = sta->local;
	struct tid_ampdu_tx *tid_tx;
	struct ieee80211_ampdu_params params = {
		.sta = &sta->sta,
		.tid = tid,
		.buf_size = 0,
		.amsdu = false,
		.timeout = 0,
		.ssn = 0,
	};
	int ret;

	lockdep_assert_held(&sta->ampdu_mlme.mtx);

	switch (reason) {
	case AGG_STOP_DECLINED:
	case AGG_STOP_LOCAL_REQUEST:
	case AGG_STOP_PEER_REQUEST:
		params.action = IEEE80211_AMPDU_TX_STOP_CONT;
		break;
	case AGG_STOP_DESTROY_STA:
		params.action = IEEE80211_AMPDU_TX_STOP_FLUSH;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	spin_lock_bh(&sta->lock);

	/* free the struct pending for start, if present */
	tid_tx = sta->ampdu_mlme.tid_start_tx[tid];
	kfree(tid_tx);
	sta->ampdu_mlme.tid_start_tx[tid] = NULL;

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
	if (!tid_tx) {
		spin_unlock_bh(&sta->lock);
		return -ENOENT;
	}

	/*
	 * If we're already stopping, ignore any new requests to stop,
	 * unless we're destroying the session, in which case notify
	 * the driver.
	 */
	if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		spin_unlock_bh(&sta->lock);
		if (reason != AGG_STOP_DESTROY_STA)
			return -EALREADY;
		params.action = IEEE80211_AMPDU_TX_STOP_FLUSH_CONT;
		ret = drv_ampdu_action(local, sta->sdata, &params);
		WARN_ON_ONCE(ret);
		return 0;
	}

	if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
		/* not even started yet! */
		ieee80211_assign_tid_tx(sta, tid, NULL);
		spin_unlock_bh(&sta->lock);
		kfree_rcu(tid_tx, rcu_head);
		return 0;
	}

	set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);

	ieee80211_agg_stop_txq(sta, tid);

	spin_unlock_bh(&sta->lock);

	ht_dbg(sta->sdata, "Tx BA session stop requested for %pM tid %u\n",
	       sta->sta.addr, tid);

	del_timer_sync(&tid_tx->addba_resp_timer);
	del_timer_sync(&tid_tx->session_timer);

	/*
	 * After this, packets are no longer handed right through to the
	 * driver but are put onto tid_tx->pending instead, with locking
	 * to ensure proper access.
	 */
	clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);

	/*
	 * There might be a few packets being processed right now (on
	 * another CPU) that have already gotten past the aggregation
	 * check when it was still OPERATIONAL and consequently have
	 * IEEE80211_TX_CTL_AMPDU set. In that case, this code might
	 * call into the driver at the same time or even before the
	 * TX path calls into it, which could confuse the driver.
	 *
	 * Wait for all currently running TX paths to finish before
	 * telling the driver. New packets will not go through since
	 * the aggregation session is no longer OPERATIONAL.
	 */
	if (!local->in_reconfig)
		synchronize_net();

	tid_tx->stop_initiator = reason == AGG_STOP_PEER_REQUEST ?
					WLAN_BACK_RECIPIENT :
					WLAN_BACK_INITIATOR;
	tid_tx->tx_stop = reason == AGG_STOP_LOCAL_REQUEST;

	ret = drv_ampdu_action(local, sta->sdata, &params);

	/* HW shall not deny going back to legacy */
	if (WARN_ON(ret)) {
		/*
		 * Pending packets may get stuck in this case...
		 * Not bothering with a workaround for now.
		 */
	}

	/*
	 * In the case of AGG_STOP_DESTROY_STA, the driver won't
	 * necessarily call ieee80211_stop_tx_ba_cb(), so this may
	 * seem like we can leave the tid_tx data pending forever.
	 * This is true, in a way, but "forever" is only until the
	 * station struct is actually destroyed. In the meantime,
	 * leaving it around ensures that we don't transmit packets
	 * to the driver on this TID which might confuse it.
	 */

	return 0;
}

/*
 * After sending an ADDBA request, we start a timer to wait for the
 * ADDBA response to arrive from the recipient. If the timer expires,
 * sta_addba_resp_timer_expired() runs.
 */
static void sta_addba_resp_timer_expired(struct timer_list *t)
{
	struct tid_ampdu_tx *tid_tx = from_timer(tid_tx, t, addba_resp_timer);
	struct sta_info *sta = tid_tx->sta;
	u8 tid = tid_tx->tid;

	/* check if the TID is still expecting an addBA response */
	if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) {
		ht_dbg(sta->sdata,
		       "timer expired on %pM tid %d not expecting addBA response\n",
		       sta->sta.addr, tid);
		return;
	}

	ht_dbg(sta->sdata, "addBA response timer expired on %pM tid %d\n",
	       sta->sta.addr, tid);

	ieee80211_stop_tx_ba_session(&sta->sta, tid);
}

void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
{
	struct tid_ampdu_tx *tid_tx;
	struct ieee80211_local *local = sta->local;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_ampdu_params params = {
		.sta = &sta->sta,
		.action = IEEE80211_AMPDU_TX_START,
		.tid = tid,
		.buf_size = 0,
		.amsdu = false,
		.timeout = 0,
	};
	int ret;
	u16 buf_size;

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	/*
	 * Start queuing up packets for this aggregation session.
	 * We're going to release them once the driver is OK with
	 * that.
	 */
	clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);

	ieee80211_agg_stop_txq(sta, tid);

	/*
	 * Make sure no packets are being processed. This ensures that
	 * we have a valid starting sequence number and that in-flight
	 * packets have been flushed out and no packets for this TID
	 * will go into the driver during the ampdu_action call.
	 */
	synchronize_net();

	params.ssn = sta->tid_seq[tid] >> 4;
	ret = drv_ampdu_action(local, sdata, &params);
	if (ret) {
		ht_dbg(sdata,
		       "BA request denied - HW unavailable for %pM tid %d\n",
		       sta->sta.addr, tid);
		spin_lock_bh(&sta->lock);
		ieee80211_agg_splice_packets(sdata, tid_tx, tid);
		ieee80211_assign_tid_tx(sta, tid, NULL);
		ieee80211_agg_splice_finish(sdata, tid);
		spin_unlock_bh(&sta->lock);

		ieee80211_agg_start_txq(sta, tid, false);

		kfree_rcu(tid_tx, rcu_head);
		return;
	}

	/* activate the timer for the recipient's addBA response */
	mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
	ht_dbg(sdata, "activated addBA response timer on %pM tid %d\n",
	       sta->sta.addr, tid);

	spin_lock_bh(&sta->lock);
	sta->ampdu_mlme.last_addba_req_time[tid] = jiffies;
	sta->ampdu_mlme.addba_req_num[tid]++;
	spin_unlock_bh(&sta->lock);

	if (sta->sta.he_cap.has_he) {
		buf_size = local->hw.max_tx_aggregation_subframes;
	} else {
		/*
		 * We really should use what the driver told us it will
		 * transmit as the maximum, but certain APs (e.g. the
		 * LinkSys WRT120N with FW v1.0.07 build 002 Jun 18 2012)
		 * will crash when we use a lower number.
		 */
		buf_size = IEEE80211_MAX_AMPDU_BUF_HT;
	}

	/* send the AddBA request */
	ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
				     tid_tx->dialog_token, params.ssn,
				     buf_size, tid_tx->timeout);
}

/*
 * After accepting the ADDBA response, we activate a session timer
 * that is effectively reset after each frame that we send.
 */
static void sta_tx_agg_session_timer_expired(struct timer_list *t)
{
	struct tid_ampdu_tx *tid_tx = from_timer(tid_tx, t, session_timer);
	struct sta_info *sta = tid_tx->sta;
	u8 tid = tid_tx->tid;
	unsigned long timeout;

	if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state))
		return;

	timeout = tid_tx->last_tx + TU_TO_JIFFIES(tid_tx->timeout);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&tid_tx->session_timer, timeout);
		return;
	}

	ht_dbg(sta->sdata, "tx session timer expired on %pM tid %d\n",
	       sta->sta.addr, tid);

	ieee80211_stop_tx_ba_session(&sta->sta, tid);
}
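
/*
 * Timing note (illustrative only): the session timeout is in TUs
 * (1 TU = 1024 usec), so e.g. a negotiated timeout of 5000 TUs is
 * roughly 5.12 seconds of TX inactivity before teardown. Rather than
 * re-arming the timer on every transmitted frame, the TX path only
 * stamps tid_tx->last_tx; the deferrable timer above then checks on
 * expiry whether the deadline has moved and re-arms itself if so.
 */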

int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
				  u16 timeout)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct tid_ampdu_tx *tid_tx;
	int ret = 0;

	trace_api_start_tx_ba_session(pubsta, tid);

	if (WARN(sta->reserved_tid == tid,
		 "Requested to start BA session on reserved tid=%d", tid))
		return -EINVAL;

	if (!pubsta->ht_cap.ht_supported)
		return -EINVAL;

	if (WARN_ON_ONCE(!local->ops->ampdu_action))
		return -EINVAL;

	if ((tid >= IEEE80211_NUM_TIDS) ||
	    !ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) ||
	    ieee80211_hw_check(&local->hw, TX_AMPDU_SETUP_IN_HW))
		return -EINVAL;

	if (WARN_ON(tid >= IEEE80211_FIRST_TSPEC_TSID))
		return -EINVAL;

	ht_dbg(sdata, "Open BA session requested for %pM tid %u\n",
	       pubsta->addr, tid);

	if (sdata->vif.type != NL80211_IFTYPE_STATION &&
	    sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
	    sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
	    sdata->vif.type != NL80211_IFTYPE_AP &&
	    sdata->vif.type != NL80211_IFTYPE_ADHOC)
		return -EINVAL;

	if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
		ht_dbg(sdata,
		       "BA sessions blocked - Denying BA session request %pM tid %d\n",
		       sta->sta.addr, tid);
		return -EINVAL;
	}

	/*
	 * 802.11n-2009 11.5.1.1: If the initiating STA is an HT STA, is a
	 * member of an IBSS, and has no other existing Block Ack agreement
	 * with the recipient STA, then the initiating STA shall transmit a
	 * Probe Request frame to the recipient STA and shall not transmit an
	 * ADDBA Request frame unless it receives a Probe Response frame
	 * from the recipient within dot11ADDBAFailureTimeout.
	 *
	 * The probe request mechanism for ADDBA is currently not implemented,
	 * but we only build up Block Ack sessions with HT STAs anyway. This
	 * information is set when we receive bss info from a probe response
	 * or a beacon.
	 */
	if (sta->sdata->vif.type == NL80211_IFTYPE_ADHOC &&
	    !sta->sta.ht_cap.ht_supported) {
		ht_dbg(sdata,
		       "BA request denied - IBSS STA %pM does not advertise HT support\n",
		       pubsta->addr);
		return -EINVAL;
	}

	spin_lock_bh(&sta->lock);

	/* we have tried too many times, the receiver does not want A-MPDU */
	if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
		ret = -EBUSY;
		goto err_unlock_sta;
	}

	/*
	 * If we have tried more than HT_AGG_BURST_RETRIES times, we
	 * spread our requests in time to avoid stalling the connection
	 * for too long.
	 */
	if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_BURST_RETRIES &&
	    time_before(jiffies, sta->ampdu_mlme.last_addba_req_time[tid] +
			HT_AGG_RETRIES_PERIOD)) {
		ht_dbg(sdata,
		       "BA request denied - %d failed requests on %pM tid %u\n",
		       sta->ampdu_mlme.addba_req_num[tid], sta->sta.addr, tid);
		ret = -EBUSY;
		goto err_unlock_sta;
	}

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
	/* make sure the TID is not already in an aggregation flow */
	if (tid_tx || sta->ampdu_mlme.tid_start_tx[tid]) {
		ht_dbg(sdata,
		       "BA request denied - session is not idle on %pM tid %u\n",
		       sta->sta.addr, tid);
		ret = -EAGAIN;
		goto err_unlock_sta;
	}

	/* prepare A-MPDU MLME for Tx aggregation */
	tid_tx = kzalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
	if (!tid_tx) {
		ret = -ENOMEM;
		goto err_unlock_sta;
	}

	skb_queue_head_init(&tid_tx->pending);
	__set_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);

	tid_tx->timeout = timeout;
	tid_tx->sta = sta;
	tid_tx->tid = tid;

	/* response timer */
	timer_setup(&tid_tx->addba_resp_timer, sta_addba_resp_timer_expired, 0);

	/* tx timer */
	timer_setup(&tid_tx->session_timer,
		    sta_tx_agg_session_timer_expired, TIMER_DEFERRABLE);

	/* assign a dialog token */
	sta->ampdu_mlme.dialog_token_allocator++;
	tid_tx->dialog_token = sta->ampdu_mlme.dialog_token_allocator;

	/*
	 * Finally, assign it to the start array; the work item will
	 * collect it and move it to the normal array.
	 */
	sta->ampdu_mlme.tid_start_tx[tid] = tid_tx;

	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);

	/* this flow continues off the work item */
 err_unlock_sta:
	spin_unlock_bh(&sta->lock);
	return ret;
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
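
/*
 * Illustrative caller (hypothetical, guarded out of the build): rate
 * control or a driver can kick off a session once a peer looks like
 * a good aggregation candidate. A non-zero return simply means "not
 * now" (too many retries, session already up, BA blocked, ...).
 */
#if 0
static void my_maybe_start_agg(struct ieee80211_sta *pubsta, u16 tid)
{
	/* 5000 TU inactivity timeout; 0 would mean no timeout */
	if (ieee80211_start_tx_ba_session(pubsta, tid, 5000))
		return; /* declined for now, may retry later */

	/* the ADDBA handshake now proceeds asynchronously */
}
#endif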

static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
					 struct sta_info *sta, u16 tid)
{
	struct tid_ampdu_tx *tid_tx;
	struct ieee80211_ampdu_params params = {
		.sta = &sta->sta,
		.action = IEEE80211_AMPDU_TX_OPERATIONAL,
		.tid = tid,
		.timeout = 0,
		.ssn = 0,
	};

	lockdep_assert_held(&sta->ampdu_mlme.mtx);

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
	params.buf_size = tid_tx->buf_size;
	params.amsdu = tid_tx->amsdu;

	ht_dbg(sta->sdata, "Aggregation is on for %pM tid %d\n",
	       sta->sta.addr, tid);

	drv_ampdu_action(local, sta->sdata, &params);

	/*
	 * Synchronize with the TX path: while we are splicing, the TX
	 * path must block on the lock, so it won't put more packets
	 * onto pending.
	 */
	spin_lock_bh(&sta->lock);

	ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid);
	/*
	 * Now mark as operational. This will be visible
	 * in the TX path, and lets it go lock-free in
	 * the common case.
	 */
	set_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
	ieee80211_agg_splice_finish(sta->sdata, tid);

	spin_unlock_bh(&sta->lock);

	ieee80211_agg_start_txq(sta, tid, true);
}

void ieee80211_start_tx_ba_cb(struct sta_info *sta, int tid,
			      struct tid_ampdu_tx *tid_tx)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;

	if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
		return;

	if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
		ieee80211_agg_tx_operational(local, sta, tid);
}

static struct tid_ampdu_tx *
ieee80211_lookup_tid_tx(struct ieee80211_sub_if_data *sdata,
			const u8 *ra, u16 tid, struct sta_info **sta)
{
	struct tid_ampdu_tx *tid_tx;

	if (tid >= IEEE80211_NUM_TIDS) {
		ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
		       tid, IEEE80211_NUM_TIDS);
		return NULL;
	}

	*sta = sta_info_get_bss(sdata, ra);
	if (!*sta) {
		ht_dbg(sdata, "Could not find station: %pM\n", ra);
		return NULL;
	}

	tid_tx = rcu_dereference((*sta)->ampdu_mlme.tid_tx[tid]);

	if (WARN_ON(!tid_tx))
		ht_dbg(sdata, "addBA was not requested!\n");

	return tid_tx;
}

void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
				      const u8 *ra, u16 tid)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;
	struct tid_ampdu_tx *tid_tx;

	trace_api_start_tx_ba_cb(sdata, ra, tid);

	rcu_read_lock();
	tid_tx = ieee80211_lookup_tid_tx(sdata, ra, tid, &sta);
	if (!tid_tx)
		goto out;

	set_bit(HT_AGG_STATE_START_CB, &tid_tx->state);
	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
 out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);

int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
				   enum ieee80211_agg_stop_reason reason)
{
	int ret;

	mutex_lock(&sta->ampdu_mlme.mtx);

	ret = ___ieee80211_stop_tx_ba_session(sta, tid, reason);

	mutex_unlock(&sta->ampdu_mlme.mtx);

	return ret;
}

int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct tid_ampdu_tx *tid_tx;
	int ret = 0;

	trace_api_stop_tx_ba_session(pubsta, tid);

	if (!local->ops->ampdu_action)
		return -EINVAL;

	if (tid >= IEEE80211_NUM_TIDS)
		return -EINVAL;

	spin_lock_bh(&sta->lock);
	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	if (!tid_tx) {
		ret = -ENOENT;
		goto unlock;
	}

	WARN(sta->reserved_tid == tid,
	     "Requested to stop BA session on reserved tid=%d", tid);

	if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		/* stopping already in progress */
		ret = 0;
		goto unlock;
	}

	set_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state);
	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);

 unlock:
	spin_unlock_bh(&sta->lock);
	return ret;
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);

void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid,
			     struct tid_ampdu_tx *tid_tx)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	bool send_delba = false;

	ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n",
	       sta->sta.addr, tid);

	spin_lock_bh(&sta->lock);

	if (!test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		ht_dbg(sdata,
		       "unexpected callback to A-MPDU stop for %pM tid %d\n",
		       sta->sta.addr, tid);
		goto unlock_sta;
	}

	if (tid_tx->stop_initiator == WLAN_BACK_INITIATOR && tid_tx->tx_stop)
		send_delba = true;

	ieee80211_remove_tid_tx(sta, tid);

 unlock_sta:
	spin_unlock_bh(&sta->lock);

	if (send_delba)
		ieee80211_send_delba(sdata, sta->sta.addr, tid,
			WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
}

void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
				     const u8 *ra, u16 tid)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;
	struct tid_ampdu_tx *tid_tx;

	trace_api_stop_tx_ba_cb(sdata, ra, tid);

	rcu_read_lock();
	tid_tx = ieee80211_lookup_tid_tx(sdata, ra, tid, &sta);
	if (!tid_tx)
		goto out;

	set_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state);
	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
 out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);

void ieee80211_process_addba_resp(struct ieee80211_local *local,
				  struct sta_info *sta,
				  struct ieee80211_mgmt *mgmt,
				  size_t len)
{
	struct tid_ampdu_tx *tid_tx;
	struct ieee80211_txq *txq;
	u16 capab, tid, buf_size;
	bool amsdu;

	capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
	amsdu = capab & IEEE80211_ADDBA_PARAM_AMSDU_MASK;
	tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
	buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;
	buf_size = min(buf_size, local->hw.max_tx_aggregation_subframes);

	txq = sta->sta.txq[tid];
	if (!amsdu && txq)
		set_bit(IEEE80211_TXQ_NO_AMSDU, &to_txq_info(txq)->flags);

	mutex_lock(&sta->ampdu_mlme.mtx);

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
	if (!tid_tx)
		goto out;

	if (mgmt->u.action.u.addba_resp.dialog_token != tid_tx->dialog_token) {
		ht_dbg(sta->sdata, "wrong addBA response token, %pM tid %d\n",
		       sta->sta.addr, tid);
		goto out;
	}

	del_timer_sync(&tid_tx->addba_resp_timer);

	ht_dbg(sta->sdata, "switched off addBA timer for %pM tid %d\n",
	       sta->sta.addr, tid);

	/*
	 * addba_resp_timer may have fired before we got here, and
	 * caused WANT_STOP to be set. If that stop was then processed
	 * further, STOPPING might be set.
	 */
	if (test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state) ||
	    test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		ht_dbg(sta->sdata,
		       "got addBA resp for %pM tid %d but we already gave up\n",
		       sta->sta.addr, tid);
		goto out;
	}

	/*
	 * IEEE 802.11-2007 7.3.1.14:
	 * In an ADDBA Response frame, when the Status Code field
	 * is set to 0, the Buffer Size subfield is set to a value
	 * of at least 1.
	 */
	if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
			== WLAN_STATUS_SUCCESS && buf_size) {
		if (test_and_set_bit(HT_AGG_STATE_RESPONSE_RECEIVED,
				     &tid_tx->state)) {
			/* ignore duplicate response */
			goto out;
		}

		tid_tx->buf_size = buf_size;
		tid_tx->amsdu = amsdu;

		if (test_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state))
			ieee80211_agg_tx_operational(local, sta, tid);

		sta->ampdu_mlme.addba_req_num[tid] = 0;

		tid_tx->timeout =
			le16_to_cpu(mgmt->u.action.u.addba_resp.timeout);

		if (tid_tx->timeout) {
			mod_timer(&tid_tx->session_timer,
				  TU_TO_EXP_TIME(tid_tx->timeout));
			tid_tx->last_tx = jiffies;
		}

	} else {
		___ieee80211_stop_tx_ba_session(sta, tid, AGG_STOP_DECLINED);
	}

 out:
	mutex_unlock(&sta->ampdu_mlme.mtx);
}