/*
 * HT handling
 *
 * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi>
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2005-2006, Devicescape Software, Inc.
 * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
 * Copyright 2007-2010, Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/ieee80211.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
#include "wme.h"

/**
 * DOC: TX A-MPDU aggregation
 *
 * Aggregation on the TX side requires setting the hardware flag
 * %IEEE80211_HW_AMPDU_AGGREGATION. The driver will then be handed
 * packets with a flag indicating A-MPDU aggregation. The driver
 * or device is responsible for actually aggregating the frames,
 * as well as deciding how many and which to aggregate.
 *
 * When TX aggregation is started by some subsystem (usually the rate
 * control algorithm would be appropriate) by calling the
 * ieee80211_start_tx_ba_session() function, the driver will be
 * notified via its @ampdu_action function, with the
 * %IEEE80211_AMPDU_TX_START action.
 *
 * In response to that, the driver is later required to call the
 * ieee80211_start_tx_ba_cb_irqsafe() function, which will really
 * start the aggregation session after the peer has also responded.
 * If the peer responds negatively, the session will be stopped
 * again right away. Note that it is possible for the aggregation
 * session to be stopped before the driver has indicated that it
 * is done setting it up, in which case it must not indicate the
 * setup completion.
 *
 * Also note that, since we also need to wait for a response from
 * the peer, the driver is notified of the completion of the
 * handshake by the %IEEE80211_AMPDU_TX_OPERATIONAL action to the
 * @ampdu_action callback.
 *
 * Similarly, when the aggregation session is stopped by the peer
 * or something calling ieee80211_stop_tx_ba_session(), the driver's
 * @ampdu_action function will be called with the action
 * %IEEE80211_AMPDU_TX_STOP. In this case, the call must not fail,
 * and the driver must later call ieee80211_stop_tx_ba_cb_irqsafe().
 * Note that the sta can get destroyed before the BA tear down is
 * complete.
 */
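
/*
 * Editorial example (not part of mac80211): a driver's @ampdu_action
 * handler for the TX side might be shaped roughly as below. The
 * my_hw_*() helpers are hypothetical stand-ins for device-specific
 * code; this is a sketch, not a reference implementation.
 *
 *	static int my_ampdu_action(struct ieee80211_hw *hw,
 *				   struct ieee80211_vif *vif,
 *				   enum ieee80211_ampdu_mlme_action action,
 *				   struct ieee80211_sta *sta, u16 tid,
 *				   u16 *ssn, u8 buf_size)
 *	{
 *		switch (action) {
 *		case IEEE80211_AMPDU_TX_START:
 *			my_hw_prepare_tx_agg(hw, sta, tid, ssn);
 *			ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
 *			return 0;
 *		case IEEE80211_AMPDU_TX_OPERATIONAL:
 *			return my_hw_enable_tx_agg(hw, sta, tid, buf_size);
 *		case IEEE80211_AMPDU_TX_STOP:
 *			my_hw_disable_tx_agg(hw, sta, tid);
 *			ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
 *			return 0;
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 *
 * TX_START must eventually be followed by the start _cb_irqsafe() call
 * (done synchronously above for brevity), and TX_STOP must not fail,
 * as described in the DOC section above.
 */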

static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
					 const u8 *da, u16 tid,
					 u8 dialog_token, u16 start_seq_num,
					 u16 agg_size, u16 timeout)
{
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_mgmt *mgmt;
	u16 capab;

	skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);

	if (!skb)
		return;

	skb_reserve(skb, local->hw.extra_tx_headroom);
	mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
	memset(mgmt, 0, 24);
	memcpy(mgmt->da, da, ETH_ALEN);
	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
	if (sdata->vif.type == NL80211_IFTYPE_AP ||
	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
	    sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
		memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
	else if (sdata->vif.type == NL80211_IFTYPE_STATION)
		memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
	else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
		memcpy(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN);

	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_ACTION);

	skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_req));

	mgmt->u.action.category = WLAN_CATEGORY_BACK;
	mgmt->u.action.u.addba_req.action_code = WLAN_ACTION_ADDBA_REQ;

	mgmt->u.action.u.addba_req.dialog_token = dialog_token;
	capab = (u16)(1 << 1);		/* bit 1: aggregation policy */
	capab |= (u16)(tid << 2);	/* bits 5:2: TID number */
	capab |= (u16)(agg_size << 6);	/* bits 15:6: max aggregation size */

	mgmt->u.action.u.addba_req.capab = cpu_to_le16(capab);

	mgmt->u.action.u.addba_req.timeout = cpu_to_le16(timeout);
	mgmt->u.action.u.addba_req.start_seq_num =
					cpu_to_le16(start_seq_num << 4);

	ieee80211_tx_skb_tid(sdata, skb, tid);
}
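
/*
 * Worked example (editorial note): with tid = 5 and agg_size = 64, the
 * ADDBA Parameter Set assembled above becomes
 *
 *	capab = (1 << 1) | (5 << 2) | (64 << 6)
 *	      = 0x0002   | 0x0014   | 0x1000
 *	      = 0x1016
 *
 * i.e. immediate Block Ack policy, TID 5, and a requested buffer size
 * of 64 subframes.
 */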

void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_bar *bar;
	u16 bar_control = 0;

	skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom);
	if (!skb)
		return;

	skb_reserve(skb, local->hw.extra_tx_headroom);
	bar = (struct ieee80211_bar *)skb_put(skb, sizeof(*bar));
	memset(bar, 0, sizeof(*bar));
	bar->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
					 IEEE80211_STYPE_BACK_REQ);
	memcpy(bar->ra, ra, ETH_ALEN);
	memcpy(bar->ta, sdata->vif.addr, ETH_ALEN);
	bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL;
	bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA;
	bar_control |= (u16)(tid << IEEE80211_BAR_CTRL_TID_INFO_SHIFT);
	bar->control = cpu_to_le16(bar_control);
	bar->start_seq_num = cpu_to_le16(ssn);

	IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
					IEEE80211_TX_CTL_REQ_TX_STATUS;
	ieee80211_tx_skb_tid(sdata, skb, tid);
}
EXPORT_SYMBOL(ieee80211_send_bar);

void ieee80211_assign_tid_tx(struct sta_info *sta, int tid,
			     struct tid_ampdu_tx *tid_tx)
{
	lockdep_assert_held(&sta->ampdu_mlme.mtx);
	lockdep_assert_held(&sta->lock);
	rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], tid_tx);
}

int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
				    enum ieee80211_back_parties initiator,
				    bool tx)
{
	struct ieee80211_local *local = sta->local;
	struct tid_ampdu_tx *tid_tx;
	int ret;

	lockdep_assert_held(&sta->ampdu_mlme.mtx);

	spin_lock_bh(&sta->lock);

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
	if (!tid_tx) {
		spin_unlock_bh(&sta->lock);
		return -ENOENT;
	}

	/* if we're already stopping, ignore any new requests to stop */
	if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		spin_unlock_bh(&sta->lock);
		return -EALREADY;
	}

	if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
		/* not even started yet! */
		ieee80211_assign_tid_tx(sta, tid, NULL);
		spin_unlock_bh(&sta->lock);
		kfree_rcu(tid_tx, rcu_head);
		return 0;
	}

	set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);

	spin_unlock_bh(&sta->lock);

	ht_dbg(sta->sdata, "Tx BA session stop requested for %pM tid %u\n",
	       sta->sta.addr, tid);

	del_timer_sync(&tid_tx->addba_resp_timer);
	del_timer_sync(&tid_tx->session_timer);

	/*
	 * After this, packets are no longer handed right through
	 * to the driver but are put onto tid_tx->pending instead,
	 * with locking to ensure proper access.
	 */
	clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);

	/*
	 * There might be a few packets being processed right now (on
	 * another CPU) that have already gotten past the aggregation
	 * check when it was still OPERATIONAL and consequently have
	 * IEEE80211_TX_CTL_AMPDU set. In that case, this code might
	 * call into the driver at the same time or even before the
	 * TX path calls into it, which could confuse the driver.
	 *
	 * Wait for all currently running TX paths to finish before
	 * telling the driver. New packets will not go through since
	 * the aggregation session is no longer OPERATIONAL.
	 */
	synchronize_net();

	tid_tx->stop_initiator = initiator;
	tid_tx->tx_stop = tx;

	ret = drv_ampdu_action(local, sta->sdata,
			       IEEE80211_AMPDU_TX_STOP,
			       &sta->sta, tid, NULL, 0);

	/* HW shall not deny going back to legacy */
	if (WARN_ON(ret)) {
		/*
		 * We may have pending packets get stuck in this case...
		 * Not bothering with a workaround for now.
		 */
	}

	return ret;
}

/*
 * After sending an ADDBA Request, we activate a timer that waits for
 * the ADDBA Response to arrive from the recipient. If this timer
 * expires, sta_addba_resp_timer_expired() is executed.
 */
static void sta_addba_resp_timer_expired(unsigned long data)
{
	/* not an elegant detour, but there is no choice as the timer passes
	 * only one argument, and both sta_info and TID are needed, so init
	 * flow in sta_info_create gives the TID as data, while the
	 * timer_to_tid array gives the sta through container_of */
	u16 tid = *(u8 *)data;
	struct sta_info *sta = container_of((void *)data,
					    struct sta_info, timer_to_tid[tid]);
	struct tid_ampdu_tx *tid_tx;

	/* check if the TID waits for addBA response */
	rcu_read_lock();
	tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
	if (!tid_tx ||
	    test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) {
		rcu_read_unlock();
		ht_dbg(sta->sdata,
		       "timer expired on tid %d but we are not (or no longer) expecting addBA response there\n",
		       tid);
		return;
	}

	ht_dbg(sta->sdata, "addBA response timer expired on tid %d\n", tid);

	ieee80211_stop_tx_ba_session(&sta->sta, tid);
	rcu_read_unlock();
}

static inline int ieee80211_ac_from_tid(int tid)
{
	return ieee802_1d_to_ac[tid & 7];
}

/*
 * When multiple aggregation sessions on multiple stations
 * are being created/destroyed simultaneously, we need to
 * refcount the global queue stop caused by that in order
 * to not get into a situation where one of the aggregation
 * setup or teardown re-enables queues before the other is
 * ready to handle that.
 *
 * These two functions take care of this issue by keeping
 * a global "agg_queue_stop" refcount.
 */
static void __acquires(agg_queue)
ieee80211_stop_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
{
	int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];

	if (atomic_inc_return(&sdata->local->agg_queue_stop[queue]) == 1)
		ieee80211_stop_queue_by_reason(
			&sdata->local->hw, queue,
			IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
	__acquire(agg_queue);
}

static void __releases(agg_queue)
ieee80211_wake_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
{
	int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];

	if (atomic_dec_return(&sdata->local->agg_queue_stop[queue]) == 0)
		ieee80211_wake_queue_by_reason(
			&sdata->local->hw, queue,
			IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
	__release(agg_queue);
}

/*
 * splice packets from the STA's pending queue to the local pending queue;
 * requires a call to ieee80211_agg_splice_finish later
 */
static void __acquires(agg_queue)
ieee80211_agg_splice_packets(struct ieee80211_sub_if_data *sdata,
			     struct tid_ampdu_tx *tid_tx, u16 tid)
{
	struct ieee80211_local *local = sdata->local;
	int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
	unsigned long flags;

	ieee80211_stop_queue_agg(sdata, tid);

	if (WARN(!tid_tx,
		 "TID %d gone but expected when splicing aggregates from the pending queue\n",
		 tid))
		return;

	if (!skb_queue_empty(&tid_tx->pending)) {
		spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
		/* copy over remaining packets */
		skb_queue_splice_tail_init(&tid_tx->pending,
					   &local->pending[queue]);
		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
	}
}

static void __releases(agg_queue)
ieee80211_agg_splice_finish(struct ieee80211_sub_if_data *sdata, u16 tid)
{
	ieee80211_wake_queue_agg(sdata, tid);
}
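
/*
 * Editorial note: the two splice helpers above are always used as a
 * bracketed sequence under the STA spinlock, as in the (illustrative)
 * pattern below, which mirrors their use further down in this file:
 *
 *	spin_lock_bh(&sta->lock);
 *	ieee80211_agg_splice_packets(sdata, tid_tx, tid);
 *	... update session state, e.g. via ieee80211_assign_tid_tx() ...
 *	ieee80211_agg_splice_finish(sdata, tid);
 *	spin_unlock_bh(&sta->lock);
 *
 * The queue stays stopped between the two calls, so no new frames can
 * race onto tid_tx->pending while the session state changes.
 */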

void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
{
	struct tid_ampdu_tx *tid_tx;
	struct ieee80211_local *local = sta->local;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	u16 start_seq_num;
	int ret;

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	/*
	 * Start queuing up packets for this aggregation session.
	 * We're going to release them once the driver is OK with
	 * that.
	 */
	clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);

	/*
	 * Make sure no packets are being processed. This ensures that
	 * we have a valid starting sequence number and that in-flight
	 * packets have been flushed out and no packets for this TID
	 * will go into the driver during the ampdu_action call.
	 */
	synchronize_net();

	start_seq_num = sta->tid_seq[tid] >> 4;

	ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START,
			       &sta->sta, tid, &start_seq_num, 0);
	if (ret) {
		ht_dbg(sdata,
		       "BA request denied - HW unavailable for tid %d\n", tid);
		spin_lock_bh(&sta->lock);
		ieee80211_agg_splice_packets(sdata, tid_tx, tid);
		ieee80211_assign_tid_tx(sta, tid, NULL);
		ieee80211_agg_splice_finish(sdata, tid);
		spin_unlock_bh(&sta->lock);

		kfree_rcu(tid_tx, rcu_head);
		return;
	}

	/* activate the timer for the recipient's addBA response */
	mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
	ht_dbg(sdata, "activated addBA response timer on tid %d\n", tid);

	spin_lock_bh(&sta->lock);
	sta->ampdu_mlme.last_addba_req_time[tid] = jiffies;
	sta->ampdu_mlme.addba_req_num[tid]++;
	spin_unlock_bh(&sta->lock);

	/* send AddBA request */
	ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
				     tid_tx->dialog_token, start_seq_num,
				     local->hw.max_tx_aggregation_subframes,
				     tid_tx->timeout);
}

/*
 * After accepting the ADDBA Response, we activate a session timer,
 * resetting it after each frame that we send.
 */
static void sta_tx_agg_session_timer_expired(unsigned long data)
{
	/* not an elegant detour, but there is no choice as the timer passes
	 * only one argument, and both sta_info and TID are needed, so init
	 * flow in sta_info_create gives the TID as data, while the
	 * timer_to_tid array gives the sta through container_of */
	u8 *ptid = (u8 *)data;
	u8 *timer_to_id = ptid - *ptid;
	struct sta_info *sta = container_of(timer_to_id, struct sta_info,
					    timer_to_tid[0]);
	struct tid_ampdu_tx *tid_tx;
	unsigned long timeout;

	rcu_read_lock();
	tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[*ptid]);
	if (!tid_tx || test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		rcu_read_unlock();
		return;
	}

	timeout = tid_tx->last_tx + TU_TO_JIFFIES(tid_tx->timeout);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&tid_tx->session_timer, timeout);
		rcu_read_unlock();
		return;
	}

	rcu_read_unlock();

	ht_dbg(sta->sdata, "tx session timer expired on tid %d\n", (u16)*ptid);

	ieee80211_stop_tx_ba_session(&sta->sta, *ptid);
}

int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
				  u16 timeout)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct tid_ampdu_tx *tid_tx;
	int ret = 0;

	trace_api_start_tx_ba_session(pubsta, tid);

	if (WARN_ON_ONCE(!local->ops->ampdu_action))
		return -EINVAL;

	if ((tid >= IEEE80211_NUM_TIDS) ||
	    !(local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION) ||
	    (local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW))
		return -EINVAL;

	ht_dbg(sdata, "Open BA session requested for %pM tid %u\n",
	       pubsta->addr, tid);

	if (sdata->vif.type != NL80211_IFTYPE_STATION &&
	    sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
	    sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
	    sdata->vif.type != NL80211_IFTYPE_AP &&
	    sdata->vif.type != NL80211_IFTYPE_ADHOC)
		return -EINVAL;

	if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
		ht_dbg(sdata,
		       "BA sessions blocked - Denying BA session request\n");
		return -EINVAL;
	}

	/*
	 * 802.11n-2009 11.5.1.1: If the initiating STA is an HT STA, is a
	 * member of an IBSS, and has no other existing Block Ack agreement
	 * with the recipient STA, then the initiating STA shall transmit a
	 * Probe Request frame to the recipient STA and shall not transmit an
	 * ADDBA Request frame unless it receives a Probe Response frame
	 * from the recipient within dot11ADDBAFailureTimeout.
	 *
	 * The probe request mechanism for ADDBA is currently not implemented,
	 * but we only build up Block Ack sessions with HT STAs. This
	 * information is set when we receive a bss info from a probe response
	 * or a beacon.
	 */
	if (sta->sdata->vif.type == NL80211_IFTYPE_ADHOC &&
	    !sta->sta.ht_cap.ht_supported) {
		ht_dbg(sdata,
		       "BA request denied - IBSS STA %pM does not advertise HT support\n",
		       pubsta->addr);
		return -EINVAL;
	}

	spin_lock_bh(&sta->lock);

	/* we have tried too many times, receiver does not want A-MPDU */
	if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
		ret = -EBUSY;
		goto err_unlock_sta;
	}

	/*
	 * if we have tried more than HT_AGG_BURST_RETRIES times we
	 * will spread our requests in time to avoid stalling connection
	 * for too long
	 */
	if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_BURST_RETRIES &&
	    time_before(jiffies, sta->ampdu_mlme.last_addba_req_time[tid] +
			HT_AGG_RETRIES_PERIOD)) {
		ht_dbg(sdata,
		       "BA request denied - waiting a grace period after %d failed requests on tid %u\n",
		       sta->ampdu_mlme.addba_req_num[tid], tid);
		ret = -EBUSY;
		goto err_unlock_sta;
	}

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
	/* check that the TID is not already in aggregation flow */
	if (tid_tx || sta->ampdu_mlme.tid_start_tx[tid]) {
		ht_dbg(sdata,
		       "BA request denied - session is not idle on tid %u\n",
		       tid);
		ret = -EAGAIN;
		goto err_unlock_sta;
	}

	/* prepare A-MPDU MLME for Tx aggregation */
	tid_tx = kzalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
	if (!tid_tx) {
		ret = -ENOMEM;
		goto err_unlock_sta;
	}

	skb_queue_head_init(&tid_tx->pending);
	__set_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);

	tid_tx->timeout = timeout;

	/* response timer */
	tid_tx->addba_resp_timer.function = sta_addba_resp_timer_expired;
	tid_tx->addba_resp_timer.data = (unsigned long)&sta->timer_to_tid[tid];
	init_timer(&tid_tx->addba_resp_timer);

	/* tx timer */
	tid_tx->session_timer.function = sta_tx_agg_session_timer_expired;
	tid_tx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid];
	init_timer_deferrable(&tid_tx->session_timer);

	/* assign a dialog token */
	sta->ampdu_mlme.dialog_token_allocator++;
	tid_tx->dialog_token = sta->ampdu_mlme.dialog_token_allocator;

	/*
	 * Finally, assign it to the start array; the work item will
	 * collect it and move it to the normal array.
	 */
	sta->ampdu_mlme.tid_start_tx[tid] = tid_tx;

	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);

	/* this flow continues off the work */
 err_unlock_sta:
	spin_unlock_bh(&sta->lock);
	return ret;
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_session);

static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
					 struct sta_info *sta, u16 tid)
{
	struct tid_ampdu_tx *tid_tx;

	lockdep_assert_held(&sta->ampdu_mlme.mtx);

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	ht_dbg(sta->sdata, "Aggregation is on for tid %d\n", tid);

	drv_ampdu_action(local, sta->sdata,
			 IEEE80211_AMPDU_TX_OPERATIONAL,
			 &sta->sta, tid, NULL, tid_tx->buf_size);

	/*
	 * synchronize with TX path; while we are splicing, the TX path
	 * should block so it won't put more packets onto pending
	 */
	spin_lock_bh(&sta->lock);

	ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid);
	/*
	 * Now mark as operational. This will be visible
	 * in the TX path, and lets it go lock-free in
	 * the common case.
	 */
	set_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
	ieee80211_agg_splice_finish(sta->sdata, tid);

	spin_unlock_bh(&sta->lock);
}

void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;
	struct tid_ampdu_tx *tid_tx;

	trace_api_start_tx_ba_cb(sdata, ra, tid);

	if (tid >= IEEE80211_NUM_TIDS) {
		ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
		       tid, IEEE80211_NUM_TIDS);
		return;
	}

	mutex_lock(&local->sta_mtx);
	sta = sta_info_get_bss(sdata, ra);
	if (!sta) {
		mutex_unlock(&local->sta_mtx);
		ht_dbg(sdata, "Could not find station: %pM\n", ra);
		return;
	}

	mutex_lock(&sta->ampdu_mlme.mtx);
	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	if (WARN_ON(!tid_tx)) {
		ht_dbg(sdata, "addBA was not requested!\n");
		goto unlock;
	}

	if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
		goto unlock;

	if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
		ieee80211_agg_tx_operational(local, sta, tid);

 unlock:
	mutex_unlock(&sta->ampdu_mlme.mtx);
	mutex_unlock(&local->sta_mtx);
}

void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
				      const u8 *ra, u16 tid)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_ra_tid *ra_tid;
	struct sk_buff *skb = dev_alloc_skb(0);

	if (unlikely(!skb))
		return;

	ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
	memcpy(&ra_tid->ra, ra, ETH_ALEN);
	ra_tid->tid = tid;

	skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_START;
	skb_queue_tail(&sdata->skb_queue, skb);
	ieee80211_queue_work(&local->hw, &sdata->work);
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);

int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
				   enum ieee80211_back_parties initiator,
				   bool tx)
{
	int ret;

	mutex_lock(&sta->ampdu_mlme.mtx);

	ret = ___ieee80211_stop_tx_ba_session(sta, tid, initiator, tx);

	mutex_unlock(&sta->ampdu_mlme.mtx);

	return ret;
}

int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct tid_ampdu_tx *tid_tx;
	int ret = 0;

	trace_api_stop_tx_ba_session(pubsta, tid);

	if (!local->ops->ampdu_action)
		return -EINVAL;

	if (tid >= IEEE80211_NUM_TIDS)
		return -EINVAL;

	spin_lock_bh(&sta->lock);
	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	if (!tid_tx) {
		ret = -ENOENT;
		goto unlock;
	}

	if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		/* already in the process of stopping it */
		ret = 0;
		goto unlock;
	}

	set_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state);
	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);

 unlock:
	spin_unlock_bh(&sta->lock);
	return ret;
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);
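
/*
 * Editorial note: a typical driver, having received
 * %IEEE80211_AMPDU_TX_STOP via @ampdu_action, finishes the teardown
 * from its own context along these (illustrative) lines, where
 * my_drv and my_drv_agg_flush_done are hypothetical:
 *
 *	static void my_drv_agg_flush_done(struct my_drv *drv,
 *					  struct ieee80211_sta *sta, u16 tid)
 *	{
 *		ieee80211_stop_tx_ba_cb_irqsafe(drv->vif, sta->addr, tid);
 *	}
 *
 * The _irqsafe variant may be called from the driver's interrupt
 * handler: it only queues a zero-length skb carrying the RA/TID pair
 * in its cb, and the actual teardown (splicing pending frames back,
 * freeing the tid_tx struct) runs later from the interface work in
 * ieee80211_stop_tx_ba_cb() below.
 */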

void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;
	struct tid_ampdu_tx *tid_tx;

	trace_api_stop_tx_ba_cb(sdata, ra, tid);

	if (tid >= IEEE80211_NUM_TIDS) {
		ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
		       tid, IEEE80211_NUM_TIDS);
		return;
	}

	ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n", ra, tid);

	mutex_lock(&local->sta_mtx);

	sta = sta_info_get_bss(sdata, ra);
	if (!sta) {
		ht_dbg(sdata, "Could not find station: %pM\n", ra);
		goto unlock;
	}

	mutex_lock(&sta->ampdu_mlme.mtx);
	spin_lock_bh(&sta->lock);
	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	if (!tid_tx || !test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		ht_dbg(sdata, "unexpected callback to A-MPDU stop\n");
		goto unlock_sta;
	}

	if (tid_tx->stop_initiator == WLAN_BACK_INITIATOR && tid_tx->tx_stop)
		ieee80211_send_delba(sta->sdata, ra, tid,
			WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);

	/*
	 * When we get here, the TX path will not be lockless any more wrt.
	 * aggregation, since the OPERATIONAL bit has long been cleared.
	 * Thus it will block on getting the lock, if it occurs. So if we
	 * stop the queue now, we will not get any more packets, and any
	 * that might be being processed will wait for us here, thereby
	 * guaranteeing that no packets go to the tid_tx pending queue any
	 * more.
	 */

	ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid);

	/* future packets must not find the tid_tx struct any more */
	ieee80211_assign_tid_tx(sta, tid, NULL);

	ieee80211_agg_splice_finish(sta->sdata, tid);

	kfree_rcu(tid_tx, rcu_head);

 unlock_sta:
	spin_unlock_bh(&sta->lock);
	mutex_unlock(&sta->ampdu_mlme.mtx);
 unlock:
	mutex_unlock(&local->sta_mtx);
}

void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
				     const u8 *ra, u16 tid)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_ra_tid *ra_tid;
	struct sk_buff *skb = dev_alloc_skb(0);

	if (unlikely(!skb))
		return;

	ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
	memcpy(&ra_tid->ra, ra, ETH_ALEN);
	ra_tid->tid = tid;

	skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_STOP;
	skb_queue_tail(&sdata->skb_queue, skb);
	ieee80211_queue_work(&local->hw, &sdata->work);
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);

void ieee80211_process_addba_resp(struct ieee80211_local *local,
				  struct sta_info *sta,
				  struct ieee80211_mgmt *mgmt,
				  size_t len)
{
	struct tid_ampdu_tx *tid_tx;
	u16 capab, tid;
	u8 buf_size;

	capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
	tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
	buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;

	mutex_lock(&sta->ampdu_mlme.mtx);

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
	if (!tid_tx)
		goto out;

	if (mgmt->u.action.u.addba_resp.dialog_token != tid_tx->dialog_token) {
		ht_dbg(sta->sdata, "wrong addBA response token, tid %d\n", tid);
		goto out;
	}

	del_timer_sync(&tid_tx->addba_resp_timer);

	ht_dbg(sta->sdata, "switched off addBA timer for tid %d\n", tid);

	/*
	 * addba_resp_timer may have fired before we got here, and
	 * caused WANT_STOP to be set. If the stop then was already
	 * processed further, STOPPING might be set.
	 */
	if (test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state) ||
	    test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		ht_dbg(sta->sdata,
		       "got addBA resp for tid %d but we already gave up\n",
		       tid);
		goto out;
	}

	/*
	 * IEEE 802.11-2007 7.3.1.14:
	 * In an ADDBA Response frame, when the Status Code field
	 * is set to 0, the Buffer Size subfield is set to a value
	 * of at least 1.
	 */
	if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
			== WLAN_STATUS_SUCCESS && buf_size) {
		if (test_and_set_bit(HT_AGG_STATE_RESPONSE_RECEIVED,
				     &tid_tx->state)) {
			/* ignore duplicate response */
			goto out;
		}

		tid_tx->buf_size = buf_size;

		if (test_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state))
			ieee80211_agg_tx_operational(local, sta, tid);

		sta->ampdu_mlme.addba_req_num[tid] = 0;

		if (tid_tx->timeout) {
			mod_timer(&tid_tx->session_timer,
				  TU_TO_EXP_TIME(tid_tx->timeout));
			tid_tx->last_tx = jiffies;
		}

	} else {
		___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR,
						false);
	}

 out:
	mutex_unlock(&sta->ampdu_mlme.mtx);
}
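
/*
 * Editorial note on the timeout handling above: tid_tx->timeout is
 * expressed in time units (TU; 1 TU = 1024 usec). For example, a
 * negotiated timeout of 5000 TU re-arms the session timer roughly
 * 5.12 seconds after the last transmitted frame, while a timeout of 0
 * leaves the session timer unarmed so the session never expires on
 * its own.
 */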