/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"

/*
 * New version of ADD_STA_sta command added new fields at the end of the
 * structure, so sending the size of the relevant API's structure is enough to
 * support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
	if (iwl_mvm_has_new_rx_api(mvm) ||
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return sizeof(struct iwl_mvm_add_sta_cmd);
	else
		return sizeof(struct iwl_mvm_add_sta_cmd_v7);
}

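/* Find the first station ID that is not already mapped in fw_id_to_mac_id,
 * skipping any IDs that are reserved for the given interface type.
 */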
static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
				    enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

	BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	return IWL_MVM_INVALID_STA;
}

/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		add_sta_cmd.station_type = mvm_sta->sta_type;

	if (!update || (flags & STA_MODIFY_QUEUES)) {
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		if (!iwl_mvm_has_new_tx_api(mvm)) {
			add_sta_cmd.tfd_queue_msk =
				cpu_to_le32(mvm_sta->tfd_queue_msk);

			if (flags & STA_MODIFY_QUEUES)
				add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
		} else {
			WARN_ON(flags & STA_MODIFY_QUEUES);
		}
	}

	switch (sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_20:
		if (sta->ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	switch (sta->rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->ht_cap.ht_supported) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = sta->ht_cap.ampdu_density;
	}

	if (sta->vht_cap.vht_supported) {
		agg_size = sta->vht_cap.cap &
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
		agg_size >>=
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
	} else if (sta->ht_cap.ht_supported) {
		agg_size = sta->ht_cap.ampdu_factor;
	}

	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
	if (mvm_sta->associated)
		add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);

	if (sta->wme) {
		add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;

		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
			add_sta_cmd.uapsd_acs |= BIT(AC_BK);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
			add_sta_cmd.uapsd_acs |= BIT(AC_BE);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
			add_sta_cmd.uapsd_acs |= BIT(AC_VI);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
			add_sta_cmd.uapsd_acs |= BIT(AC_VO);
		add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
		add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
	}

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}

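/* RX BA session inactivity timer: if frames were received recently the timer
 * is re-armed, otherwise the offloaded BA session is torn down.
 */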
static void iwl_mvm_rx_agg_session_expired(unsigned long data)
{
	struct iwl_mvm_baid_data __rcu **rcu_ptr = (void *)data;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	if (!ba_data->timeout)
		goto unlock;

	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);
	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_stop_rx_ba_session_offl(mvm_sta->vif,
					  sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}

static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
				 struct ieee80211_sta *sta)
{
	unsigned long used_hw_queues;
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, NULL, true, false);
	u32 ac;

	lockdep_assert_held(&mvm->mutex);

	used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, NULL);

	/* Find available queues, and allocate them to the ACs */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		u8 queue = find_first_zero_bit(&used_hw_queues,
					       mvm->first_agg_queue);

		if (queue >= mvm->first_agg_queue) {
			IWL_ERR(mvm, "Failed to allocate STA queue\n");
			return -EBUSY;
		}

		__set_bit(queue, &used_hw_queues);
		mvmsta->hw_queue[ac] = queue;
	}

	/* Found a place for all queues - enable them */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac],
				      mvmsta->hw_queue[ac],
				      iwl_mvm_ac_to_tx_fifo[ac], 0,
				      wdg_timeout);
		mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]);
	}

	return 0;
}

static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
				    struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned long sta_msk;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* disable the TDLS STA-specific queues */
	sta_msk = mvmsta->tfd_queue_msk;
	for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE)
		iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0);
}

/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
					unsigned long disable_agg_tids,
					bool remove_queue)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u32 status;
	u8 sta_id;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return -EINVAL;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	mvmsta->tid_disable_agg |= disable_agg_tids;

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES;
	if (disable_agg_tids)
		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	if (remove_queue)
		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

	rcu_read_unlock();

	/* Notify FW of queue removal from the STA queues */
	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);

	return ret;
}

static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long agg_tids = 0;
	s8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return -EINVAL;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			agg_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	return agg_tids;
}

/*
 * Remove a queue from a station's resources.
 * Note that this only marks as free. It DOESN'T delete a BA agreement, and
 * doesn't disable the queue.
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long disable_agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return 0;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	/* Unmap MAC queues and TIDs from this queue */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			disable_agg_tids |= BIT(tid);
		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
	}

	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
	spin_unlock_bh(&mvmsta->lock);

	rcu_read_unlock();

	return disable_agg_tids;
}

static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
				       bool same_sta)
{
	struct iwl_mvm_sta *mvmsta;
	u8 txq_curr_ac, sta_id, tid;
	unsigned long disable_agg_tids = 0;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid = mvm->queue_info[queue].txq_tid;
	spin_unlock_bh(&mvm->queue_info_lock);

	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
	if (WARN_ON(!mvmsta))
		return -EINVAL;

	disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
	/* Disable the queue */
	if (disable_agg_tids)
		iwl_mvm_invalidate_sta_queue(mvm, queue,
					     disable_agg_tids, false);

	ret = iwl_mvm_disable_txq(mvm, queue,
				  mvmsta->vif->hw_queue[txq_curr_ac],
				  tid, 0);
	if (ret) {
		/* Re-mark the inactive queue as inactive */
		spin_lock_bh(&mvm->queue_info_lock);
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
		spin_unlock_bh(&mvm->queue_info_lock);
		IWL_ERR(mvm,
			"Failed to free inactive queue %d (ret=%d)\n",
			queue, ret);

		return ret;
	}

	/* If TXQ is allocated to another STA, update removal in FW */
	if (!same_sta)
		iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);

	return 0;
}

static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
				    unsigned long tfd_queue_mask, u8 ac)
{
	int queue = 0;
	u8 ac_to_queue[IEEE80211_NUM_ACS];
	int i;

	lockdep_assert_held(&mvm->queue_info_lock);
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

	/* See what ACs the existing queues for this STA have */
	for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
		/* Only DATA queues can be shared */
		if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
			continue;

		/* Don't try and take queues being reconfigured */
		if (mvm->queue_info[i].status ==
		    IWL_MVM_QUEUE_RECONFIGURING)
			continue;

		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
	}

	/*
	 * The queue to share is chosen only from DATA queues as follows (in
	 * descending priority):
	 * 1. An AC_BE queue
	 * 2. Same AC queue
	 * 3. Highest AC queue that is lower than new AC
	 * 4. Any existing AC (there always is at least 1 DATA queue)
	 */

	/* Priority 1: An AC_BE queue */
	if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BE];
	/* Priority 2: Same AC queue */
	else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[ac];
	/* Priority 3a: If new AC is VO and VI exists - use VI */
	else if (ac == IEEE80211_AC_VO &&
		 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 3b: No BE so only AC less than the new one is BK */
	else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BK];
	/* Priority 4a: No BE nor BK - use VI if exists */
	else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 4b: No BE, BK nor VI - use VO if exists */
	else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VO];

	/* Make sure queue found (or not) is legal */
	if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
	    !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
	    (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
		IWL_ERR(mvm, "No DATA queues available to share\n");
		return -ENOSPC;
	}

	/* Make sure the queue isn't in the middle of being reconfigured */
	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) {
		IWL_ERR(mvm,
			"TXQ %d is in the middle of re-config - try again\n",
			queue);
		return -EBUSY;
	}

	return queue;
}

/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does that
 * in such a case, otherwise - if no redirection required - it does nothing,
 * unless the %force param is true.
 */
int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
			       int ac, int ssn, unsigned int wdg_timeout,
			       bool force)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool shared_queue;
	unsigned long mq;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	/*
	 * If the AC is lower than current one - FIFO needs to be redirected to
	 * the lowest one of the streams in the queue. Check if this is needed
	 * here.
	 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
	 * value 3 and VO with value 0, so to check if ac X is lower than ac Y
	 * we need to check if the numerical value of X is LARGER than of Y.
	 */
	spin_lock_bh(&mvm->queue_info_lock);
	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
		spin_unlock_bh(&mvm->queue_info_lock);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "No redirection needed on TXQ #%d\n",
				    queue);
		return 0;
	}

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
	cmd.tid = mvm->queue_info[queue].txq_tid;
	mq = mvm->hw_queue_to_mac80211[queue];
	shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
	spin_unlock_bh(&mvm->queue_info_lock);

	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
			    queue, iwl_mvm_ac_to_tx_fifo[ac]);

	/* Stop MAC queues and wait for this queue to empty */
	iwl_mvm_stop_mac_queues(mvm, mq);
	ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
	if (ret) {
		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
			queue);
		ret = -EIO;
		goto out;
	}

	/* Before redirecting the queue we need to de-activate it */
	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
			ret);

	/* Make sure the SCD wrptr is correctly set before reconfiguring */
	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

	/* Update the TID "owner" of the queue */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].txq_tid = tid;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

	/* Redirect to lower AC */
	iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
			     cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF,
			     ssn);

	/* Update AC marking of the queue */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].mac80211_ac = ac;
	spin_unlock_bh(&mvm->queue_info_lock);

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
	/* Continue using the MAC queues */
	iwl_mvm_start_mac_queues(mvm, mq);

	return ret;
}

static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
					struct ieee80211_sta *sta, u8 ac,
					int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	u8 mac_queue = mvmsta->vif->hw_queue[ac];
	int queue = -1;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating queue for sta %d on tid %d\n",
			    mvmsta->sta_id, tid);
	queue = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, mvmsta->sta_id, tid,
					wdg_timeout);
	if (queue < 0)
		return queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);

	spin_lock_bh(&mvmsta->lock);
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tid_data[tid].is_tid_active = true;
	mvmsta->tfd_queue_msk |= BIT(queue);
	spin_unlock_bh(&mvmsta->lock);

	return 0;
}

static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta, u8 ac, int tid,
				   struct ieee80211_hdr *hdr)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = iwl_mvm_ac_to_tx_fifo[ac],
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	u8 mac_queue = mvmsta->vif->hw_queue[ac];
	int queue = -1;
	bool using_inactive_queue = false, same_sta = false;
	unsigned long disable_agg_tids = 0;
	enum iwl_mvm_agg_state queue_state;
	bool shared_queue = false;
	int ssn;
	unsigned long tfd_queue_mask;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);

	spin_lock_bh(&mvmsta->lock);
	tfd_queue_mask = mvmsta->tfd_queue_msk;
	spin_unlock_bh(&mvmsta->lock);

	spin_lock_bh(&mvm->queue_info_lock);

	/*
	 * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
	 * exists
	 */
	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_MGMT_QUEUE,
						IWL_MVM_DQA_MAX_MGMT_QUEUE);
		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
					    queue);

		/* If no such queue is found, we'll use a DATA queue instead */
	}

	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
	    (mvm->queue_info[mvmsta->reserved_queue].status ==
	     IWL_MVM_QUEUE_RESERVED ||
	     mvm->queue_info[mvmsta->reserved_queue].status ==
	     IWL_MVM_QUEUE_INACTIVE)) {
		queue = mvmsta->reserved_queue;
		mvm->queue_info[queue].reserved = true;
		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
	}

	if (queue < 0)
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);

	/*
	 * Check if this queue is already allocated but inactive.
	 * In such a case, we'll need to first free this queue before enabling
	 * it again, so we'll mark it as reserved to make sure no new traffic
	 * arrives on it
	 */
	if (queue > 0 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
		using_inactive_queue = true;
		same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
				    queue, mvmsta->sta_id, tid);
	}

	/* No free queue - we'll have to share */
	if (queue <= 0) {
		queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
		if (queue > 0) {
			shared_queue = true;
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
		}
	}

	/*
	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
	 * to make sure no one else takes it.
	 * This will allow avoiding re-acquiring the lock at the end of the
	 * configuration. On error we'll mark it back as free.
	 */
	if ((queue > 0) && !shared_queue)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

	spin_unlock_bh(&mvm->queue_info_lock);

	/* This shouldn't happen - out of queues */
	if (WARN_ON(queue <= 0)) {
		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
			tid, cfg.sta_id);
		return queue;
	}

	/*
	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
	 * as aggregatable.
	 * Mark all DATA queues as allowing to be aggregated at some point
	 */
	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

	/*
	 * If this queue was previously inactive (idle) - we need to free it
	 * first
	 */
	if (using_inactive_queue) {
		ret = iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
		if (ret)
			return ret;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating %squeue #%d to sta %d on tid %d\n",
			    shared_queue ? "shared " : "", queue,
			    mvmsta->sta_id, tid);

	if (shared_queue) {
		/* Disable any open aggs on this queue */
		disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);

		if (disable_agg_tids) {
			IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
					    queue);
			iwl_mvm_invalidate_sta_queue(mvm, queue,
						     disable_agg_tids, false);
		}
	}

	ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	iwl_mvm_enable_txq(mvm, queue, mac_queue, ssn, &cfg,
			   wdg_timeout);

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

	spin_lock_bh(&mvmsta->lock);
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tid_data[tid].is_tid_active = true;
	mvmsta->tfd_queue_msk |= BIT(queue);
	queue_state = mvmsta->tid_data[tid].state;

	if (mvmsta->reserved_queue == queue)
		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
	spin_unlock_bh(&mvmsta->lock);

	if (!shared_queue) {
		ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
		if (ret)
			goto out_err;

		/* If we need to re-enable aggregations... */
		if (queue_state == IWL_AGG_ON) {
			ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
			if (ret)
				goto out_err;
		}
	} else {
		/* Redirect queue, if needed */
		ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
						 wdg_timeout, false);
		if (ret)
			goto out_err;
	}

	return 0;

out_err:
	iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);

	return ret;
}

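/* The TID that "owned" this queue is no longer mapped to it; pick any
 * remaining TID on the queue, tell the SCD about the new owner and update
 * the driver's bookkeeping accordingly.
 */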
static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_UPDATE_QUEUE_TID,
	};
	int tid;
	unsigned long tid_bitmap;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	spin_lock_bh(&mvm->queue_info_lock);
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
		return;

	/* Find any TID for queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	cmd.tid = tid;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
			queue, ret);
		return;
	}

	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].txq_tid = tid;
	spin_unlock_bh(&mvm->queue_info_lock);
	IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
			    queue, tid);
}

static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	s8 sta_id;
	int tid = -1;
	unsigned long tid_bitmap;
	unsigned int wdg_timeout;
	int ssn;
	int ret = true;

	/* queue sharing is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* Find TID for queue, and make sure it is the only one on the queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	if (tid_bitmap != BIT(tid)) {
		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
			queue, tid_bitmap);
		return;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
			    tid);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);

	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

	ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
					 tid_to_mac80211_ac[tid], ssn,
					 wdg_timeout, true);
	if (ret) {
		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
		return;
	}

	/* If aggs should be turned back on - do it */
	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
		struct iwl_mvm_add_sta_cmd cmd = {0};

		mvmsta->tid_disable_agg &= ~BIT(tid);

		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
		cmd.sta_id = mvmsta->sta_id;
		cmd.add_modify = STA_MODE_MODIFY;
		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
		if (!ret) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "TXQ #%d is now aggregated again\n",
					    queue);

			/* Mark queue internally as aggregating again */
			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
		}
	}

	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
	spin_unlock_bh(&mvm->queue_info_lock);
}

static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
{
	if (tid == IWL_MAX_TID_COUNT)
		return IEEE80211_AC_VO; /* MGMT */

	return tid_to_mac80211_ac[tid];
}

static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta, int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head deferred_tx;
	u8 mac_queue;
	bool no_queue = false; /* Marks if there is a problem with the queue */
	u8 ac;

	lockdep_assert_held(&mvm->mutex);

	skb = skb_peek(&tid_data->deferred_tx_frames);
	if (!skb)
		return;
	hdr = (void *)skb->data;

	ac = iwl_mvm_tid_to_ac_queue(tid);
	mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;

	if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE &&
	    iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
		IWL_ERR(mvm,
			"Can't alloc TXQ for sta %d tid %d - dropping frame\n",
			mvmsta->sta_id, tid);

		/*
		 * Mark queue as problematic so later the deferred traffic is
		 * freed, as we can do nothing with it
		 */
		no_queue = true;
	}

	__skb_queue_head_init(&deferred_tx);

	/* Disable bottom-halves when entering TX path */
	local_bh_disable();
	spin_lock(&mvmsta->lock);
	skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
	mvmsta->deferred_traffic_tid_map &= ~BIT(tid);
	spin_unlock(&mvmsta->lock);

	while ((skb = __skb_dequeue(&deferred_tx)))
		if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
			ieee80211_free_txskb(mvm->hw, skb);
	local_bh_enable();

	/* Wake queue */
	iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
}

void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
					   add_stream_wk);
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long deferred_tid_traffic;
	int queue, sta_id, tid;

	/* Check inactivity of queues */
	iwl_mvm_inactivity_check(mvm);

	mutex_lock(&mvm->mutex);

	/* No queue reconfiguration in TVQM mode */
	if (iwl_mvm_has_new_tx_api(mvm))
		goto alloc_queues;

	/* Reconfigure queues requiring reconfiguration */
	for (queue = 0; queue < ARRAY_SIZE(mvm->queue_info); queue++) {
		bool reconfig;
		bool change_owner;

		spin_lock_bh(&mvm->queue_info_lock);
		reconfig = (mvm->queue_info[queue].status ==
			    IWL_MVM_QUEUE_RECONFIGURING);

		/*
		 * We need to take into account a situation in which a TXQ was
		 * allocated to TID x, and then turned shared by adding TIDs y
		 * and z.
		 * If TID x becomes inactive and is removed from the TXQ,
		 * ownership must be given to one of the remaining TIDs.
		 * This is mainly because if TID x continues - a new queue can't
		 * be allocated for it as long as it is an owner of another TXQ.
		 */
		change_owner = !(mvm->queue_info[queue].tid_bitmap &
				 BIT(mvm->queue_info[queue].txq_tid)) &&
			       (mvm->queue_info[queue].status ==
				IWL_MVM_QUEUE_SHARED);
		spin_unlock_bh(&mvm->queue_info_lock);

		if (reconfig)
			iwl_mvm_unshare_queue(mvm, queue);
		else if (change_owner)
			iwl_mvm_change_queue_owner(mvm, queue);
	}

alloc_queues:
	/* Go over all stations with deferred traffic */
	for_each_set_bit(sta_id, mvm->sta_deferred_frames,
			 IWL_MVM_STATION_COUNT) {
		clear_bit(sta_id, mvm->sta_deferred_frames);
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);
		deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;

		for_each_set_bit(tid, &deferred_tid_traffic,
				 IWL_MAX_TID_COUNT + 1)
			iwl_mvm_tx_deferred_stream(mvm, sta, tid);
	}

	mutex_unlock(&mvm->mutex);
}

static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      enum nl80211_iftype vif_type)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	int queue;
	bool using_inactive_queue = false, same_sta = false;

	/* queue reserving is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return 0;

	/*
	 * Check for inactive queues, so we don't reach a situation where we
	 * can't add a STA due to a shortage in queues that doesn't really exist
	 */
	iwl_mvm_inactivity_check(mvm);

	spin_lock_bh(&mvm->queue_info_lock);

	/* Make sure we have free resources for this STA */
	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
	    (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
	     IWL_MVM_QUEUE_FREE))
		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
	else
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		spin_unlock_bh(&mvm->queue_info_lock);
		IWL_ERR(mvm, "No available queues for new station\n");
		return -ENOSPC;
	} else if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
		/*
		 * If this queue is already allocated but inactive we'll need to
		 * first free this queue before enabling it again, we'll mark
		 * it as reserved to make sure no new traffic arrives on it
		 */
		using_inactive_queue = true;
		same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
	}
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

	spin_unlock_bh(&mvm->queue_info_lock);

	mvmsta->reserved_queue = queue;

	if (using_inactive_queue)
		iwl_mvm_free_inactive_queue(mvm, queue, same_sta);

	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
			    queue, mvmsta->sta_id);

	return 0;
}

/*
 * In DQA mode, after a HW restart the queues should be allocated as before, in
 * order to avoid race conditions when there are shared queues. This function
 * does the re-mapping and queue allocation.
 *
 * Note that re-enabling aggregations isn't done in this function.
 */
static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
						 struct iwl_mvm_sta *mvm_sta)
{
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
	int i;
	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvm_sta->sta_id,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	/* Make sure reserved queue is still marked as such (if allocated) */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
		mvm->queue_info[mvm_sta->reserved_queue].status =
			IWL_MVM_QUEUE_RESERVED;

	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
		int txq_id = tid_data->txq_id;
		int ac;
		u8 mac_queue;

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		skb_queue_head_init(&tid_data->deferred_tx_frames);

		ac = tid_to_mac80211_ac[i];
		mac_queue = mvm_sta->vif->hw_queue[ac];

		if (iwl_mvm_has_new_tx_api(mvm)) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d\n",
					    mvm_sta->sta_id, i);
			txq_id = iwl_mvm_tvqm_enable_txq(mvm, mac_queue,
							 mvm_sta->sta_id,
							 i, wdg_timeout);
			tid_data->txq_id = txq_id;
		} else {
			u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

			cfg.tid = i;
			cfg.fifo = iwl_mvm_ac_to_tx_fifo[ac];
			cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
					 txq_id ==
					 IWL_MVM_DQA_BSS_CLIENT_QUEUE);

			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d to queue %d\n",
					    mvm_sta->sta_id, i, txq_id);

			iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg,
					   wdg_timeout);
			mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
		}
	}

	atomic_set(&mvm->pending_frames[mvm_sta->sta_id], 0);
}

int iwl_mvm_add_sta(struct iwl_mvm *mvm,
		    struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_rxq_dup_data *dup_data;
	int i, ret, sta_id;

	lockdep_assert_held(&mvm->mutex);

	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		sta_id = iwl_mvm_find_free_sta_id(mvm,
						  ieee80211_vif_type_p2p(vif));
	else
		sta_id = mvm_sta->sta_id;

	if (sta_id == IWL_MVM_INVALID_STA)
		return -ENOSPC;

	spin_lock_init(&mvm_sta->lock);

	/* In DQA mode, if this is a HW restart, re-alloc existing queues */
	if (iwl_mvm_is_dqa_supported(mvm) &&
	    test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
		goto update_fw;
	}

	mvm_sta->sta_id = sta_id;
	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
						      mvmvif->color);
	mvm_sta->vif = vif;
	mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	mvm_sta->tx_protection = 0;
	mvm_sta->tt_tx_protection = false;
	mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;

	/* HW restart, don't assume the memory has been zeroed */
	atomic_set(&mvm->pending_frames[sta_id], 0);
	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
	mvm_sta->tfd_queue_msk = 0;

	/*
	 * Allocate new queues for a TDLS station, unless we're in DQA mode,
	 * and then they'll be allocated dynamically
	 */
	if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls) {
		ret = iwl_mvm_tdls_sta_init(mvm, sta);
		if (ret)
			return ret;
	} else if (!iwl_mvm_is_dqa_supported(mvm)) {
		for (i = 0; i < IEEE80211_NUM_ACS; i++)
			if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
				mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
	}

	/* for HW restart - reset everything but the sequence number */
	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		u16 seq = mvm_sta->tid_data[i].seq_number;
		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
		mvm_sta->tid_data[i].seq_number = seq;

		if (!iwl_mvm_is_dqa_supported(mvm))
			continue;

		/*
		 * Mark all queues for this STA as unallocated and defer TX
		 * frames until the queue is allocated
		 */
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
		skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
	}
	mvm_sta->deferred_traffic_tid_map = 0;
	mvm_sta->agg_tids = 0;

	if (iwl_mvm_has_new_rx_api(mvm) &&
	    !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		dup_data = kcalloc(mvm->trans->num_rx_queues,
				   sizeof(*dup_data),
				   GFP_KERNEL);
		if (!dup_data)
			return -ENOMEM;
		mvm_sta->dup_data = dup_data;
	}

	if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
						 ieee80211_vif_type_p2p(vif));
		if (ret)
			goto err;
	}

update_fw:
	ret = iwl_mvm_sta_send_to_fw(mvm, sta, false, 0);
	if (ret)
		goto err;

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!sta->tdls) {
			WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
			mvmvif->ap_sta_id = sta_id;
		} else {
			WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
		}
	}

	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);

	return 0;

err:
	if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
		iwl_mvm_tdls_sta_deinit(mvm, sta);
	return ret;
}

int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
		      bool drain)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
	cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
			       mvmsta->sta_id);
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
			mvmsta->sta_id);
		break;
	}

	return ret;
}

/*
 * Remove a station from the FW table. Before sending the command to remove
 * the station validate that the station is indeed known to the driver (sanity
 * only).
 */
static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
		.sta_id = sta_id,
	};
	int ret;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	/* Note: internal stations are marked as error values */
	if (!sta) {
		IWL_ERR(mvm, "Invalid station id\n");
		return -EINVAL;
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
				   sizeof(rm_sta_cmd), &rm_sta_cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
		return ret;
	}

	return 0;
}

void iwl_mvm_sta_drained_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, sta_drained_wk);
	u8 sta_id;

	/*
	 * The mutex is needed because of the SYNC cmd, but not only: if the
	 * work would run concurrently with iwl_mvm_rm_sta, it would run before
	 * iwl_mvm_rm_sta sets the station as busy, and exit. Then
	 * iwl_mvm_rm_sta would set the station as busy, and nobody will clean
	 * that later.
	 */
	mutex_lock(&mvm->mutex);

	for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) {
		int ret;
		struct ieee80211_sta *sta =
			rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						  lockdep_is_held(&mvm->mutex));

		/*
		 * This station is in use or RCU-removed; the latter happens in
		 * managed mode, where mac80211 removes the station before we
		 * can remove it from firmware (we can only do that after the
		 * MAC is marked unassociated), and possibly while the deauth
		 * frame to disconnect from the AP is still queued. Then, the
		 * station pointer is -ENOENT when the last skb is reclaimed.
		 */
		if (!IS_ERR(sta) || PTR_ERR(sta) == -ENOENT)
			continue;

		if (PTR_ERR(sta) == -EINVAL) {
			IWL_ERR(mvm, "Drained sta %d, but it is internal?\n",
				sta_id);
			continue;
		}

		if (!sta) {
			IWL_ERR(mvm, "Drained sta %d, but it was NULL?\n",
				sta_id);
			continue;
		}

		WARN_ON(PTR_ERR(sta) != -EBUSY);
		/* This station was removed and we waited until it got drained,
		 * we can now proceed and remove it.
		 */
		ret = iwl_mvm_rm_sta_common(mvm, sta_id);
		if (ret) {
			IWL_ERR(mvm,
				"Couldn't remove sta %d after it was drained\n",
				sta_id);
			continue;
		}
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
		clear_bit(sta_id, mvm->sta_drained);

		if (mvm->tfd_drained[sta_id]) {
			unsigned long i, msk = mvm->tfd_drained[sta_id];

			for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE)
				iwl_mvm_disable_txq(mvm, i, i,
						    IWL_MAX_TID_COUNT, 0);

			mvm->tfd_drained[sta_id] = 0;
			IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n",
				       sta_id, msk);
		}
	}

	mutex_unlock(&mvm->mutex);
}

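/* Disable and unmap every TX queue that was allocated to this station's TIDs,
 * and mark them as invalid in the driver's per-TID bookkeeping.
 */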
static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct iwl_mvm_sta *mvm_sta)
{
	int ac;
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ac = iwl_mvm_tid_to_ac_queue(i);
		iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
				    vif->hw_queue[ac], i, 0);
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
	}
}

int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
		   struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	u8 sta_id = mvm_sta->sta_id;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_rx_api(mvm))
		kfree(mvm_sta->dup_data);

	if ((vif->type == NL80211_IFTYPE_STATION &&
	     mvmvif->ap_sta_id == sta_id) ||
	    iwl_mvm_is_dqa_supported(mvm)) {
		ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
		if (ret)
			return ret;
		/* flush its queues here since we are freeing mvm_sta */
		ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0);
		if (ret)
			return ret;
		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
						     mvm_sta->tfd_queue_msk);
		if (ret)
			return ret;
		ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);

		/* If DQA is supported - the queues can be disabled now */
		if (iwl_mvm_is_dqa_supported(mvm)) {
			iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
			/*
			 * If pending_frames is set at this point - it must be
			 * a driver internal logic error, since the queues are
			 * empty and were removed successfully.
			 * Warn on it but set it to 0 anyway to avoid the
			 * station not being removed later in the function.
			 */
			WARN_ON(atomic_xchg(&mvm->pending_frames[sta_id], 0));
		}

		/* If there is a TXQ still marked as reserved - free it */
		if (iwl_mvm_is_dqa_supported(mvm) &&
		    mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
			u8 reserved_txq = mvm_sta->reserved_queue;
			enum iwl_mvm_queue_status *status;

			/*
			 * If no traffic has gone through the reserved TXQ - it
			 * is still marked as IWL_MVM_QUEUE_RESERVED, and
			 * should be manually marked as free again
			 */
			spin_lock_bh(&mvm->queue_info_lock);
			status = &mvm->queue_info[reserved_txq].status;
			if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
				 (*status != IWL_MVM_QUEUE_FREE),
				 "sta_id %d reserved txq %d status %d",
				 sta_id, reserved_txq, *status)) {
				spin_unlock_bh(&mvm->queue_info_lock);
				return -EINVAL;
			}

			*status = IWL_MVM_QUEUE_FREE;
			spin_unlock_bh(&mvm->queue_info_lock);
		}

		if (vif->type == NL80211_IFTYPE_STATION &&
		    mvmvif->ap_sta_id == sta_id) {
			/* if associated - we can't remove the AP STA now */
			if (vif->bss_conf.assoc)
				return ret;

			/* unassoc - go ahead - remove the AP STA now */
			mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;

			/* clear d0i3_ap_sta_id if no longer relevant */
			if (mvm->d0i3_ap_sta_id == sta_id)
				mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
		}
	}

	/*
	 * This shouldn't happen - the TDLS channel switch should be canceled
	 * before the STA is removed.
	 */
	if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
		mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
		cancel_delayed_work(&mvm->tdls_cs.dwork);
	}

	/*
	 * Make sure that the tx response code sees the station as -EBUSY and
	 * calls the drain worker.
	 */
	spin_lock_bh(&mvm_sta->lock);

	/*
	 * There are frames pending on the AC queues for this station.
	 * We need to wait until all the frames are drained...
	 */
	if (atomic_read(&mvm->pending_frames[sta_id])) {
		rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id],
				   ERR_PTR(-EBUSY));
		spin_unlock_bh(&mvm_sta->lock);

		/* disable TDLS sta queues on drain complete */
		if (sta->tdls) {
			mvm->tfd_drained[sta_id] = mvm_sta->tfd_queue_msk;
			IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n", sta_id);
		}

		ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
	} else {
		spin_unlock_bh(&mvm_sta->lock);

		if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
			iwl_mvm_tdls_sta_deinit(mvm, sta);

		ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
	}

	return ret;
}

int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
		      struct ieee80211_vif *vif,
		      u8 sta_id)
{
	int ret = iwl_mvm_rm_sta_common(mvm, sta_id);

	lockdep_assert_held(&mvm->mutex);

	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
	return ret;
}

int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
			     struct iwl_mvm_int_sta *sta,
			     u32 qmask, enum nl80211_iftype iftype,
			     enum iwl_sta_type type)
{
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
		if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
			return -ENOSPC;
	}

	sta->tfd_queue_msk = qmask;
	sta->type = type;

	/* put a non-NULL value so iterating over the stations won't stop */
	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
	return 0;
}

void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
{
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
	memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
	sta->sta_id = IWL_MVM_INVALID_STA;
}

static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
				      struct iwl_mvm_int_sta *sta,
				      const u8 *addr,
				      u16 mac_id, u16 color)
{
	struct iwl_mvm_add_sta_cmd cmd;
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = sta->sta_id;
	cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
							     color));
	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		cmd.station_type = sta->type;

	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(0xffff);

	if (addr)
		memcpy(cmd.addr, addr, ETH_ALEN);

	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Internal station added.\n");
		return 0;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
			status);
		break;
	}
	return ret;
}

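/* Enable the auxiliary station's queue, using the allocation scheme that
 * matches the firmware generation (TVQM, DQA or legacy AC queues).
 */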
1805 mvm->cfg->base_params->wd_timeout : 1806 IWL_WATCHDOG_DISABLED; 1807 1808 if (iwl_mvm_has_new_tx_api(mvm)) { 1809 int queue = iwl_mvm_tvqm_enable_txq(mvm, mvm->aux_queue, 1810 mvm->aux_sta.sta_id, 1811 IWL_MAX_TID_COUNT, 1812 wdg_timeout); 1813 mvm->aux_queue = queue; 1814 } else if (iwl_mvm_is_dqa_supported(mvm)) { 1815 struct iwl_trans_txq_scd_cfg cfg = { 1816 .fifo = IWL_MVM_TX_FIFO_MCAST, 1817 .sta_id = mvm->aux_sta.sta_id, 1818 .tid = IWL_MAX_TID_COUNT, 1819 .aggregate = false, 1820 .frame_limit = IWL_FRAME_LIMIT, 1821 }; 1822 1823 iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg, 1824 wdg_timeout); 1825 } else { 1826 iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue, 1827 IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout); 1828 } 1829 } 1830 1831 int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm) 1832 { 1833 int ret; 1834 1835 lockdep_assert_held(&mvm->mutex); 1836 1837 /* Allocate aux station and assign to it the aux queue */ 1838 ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue), 1839 NL80211_IFTYPE_UNSPECIFIED, 1840 IWL_STA_AUX_ACTIVITY); 1841 if (ret) 1842 return ret; 1843 1844 /* Map Aux queue to fifo - needs to happen before adding Aux station */ 1845 if (!iwl_mvm_has_new_tx_api(mvm)) 1846 iwl_mvm_enable_aux_queue(mvm); 1847 1848 ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL, 1849 MAC_INDEX_AUX, 0); 1850 if (ret) { 1851 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta); 1852 return ret; 1853 } 1854 1855 /* 1856 * For a000 firmware and on we cannot add queue to a station unknown 1857 * to firmware so enable queue here - after the station was added 1858 */ 1859 if (iwl_mvm_has_new_tx_api(mvm)) 1860 iwl_mvm_enable_aux_queue(mvm); 1861 1862 return 0; 1863 } 1864 1865 int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 1866 { 1867 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1868 1869 lockdep_assert_held(&mvm->mutex); 1870 return iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr, 1871 mvmvif->id, 0); 1872 } 1873 1874 int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 1875 { 1876 int ret; 1877 1878 lockdep_assert_held(&mvm->mutex); 1879 1880 ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id); 1881 if (ret) 1882 IWL_WARN(mvm, "Failed sending remove station\n"); 1883 1884 return ret; 1885 } 1886 1887 void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm) 1888 { 1889 iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta); 1890 } 1891 1892 void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm) 1893 { 1894 lockdep_assert_held(&mvm->mutex); 1895 1896 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta); 1897 } 1898 1899 /* 1900 * Send the add station command for the vif's broadcast station. 1901 * Assumes that the station was already allocated. 1902 * 1903 * @mvm: the mvm component 1904 * @vif: the interface to which the broadcast station is added 1905 * @bsta: the broadcast station to add. 
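 *        (not an explicit parameter - the station used is the vif's own
 *        mvmvif->bcast_sta entry)
 *
 * A minimal usage sketch, as done by iwl_mvm_add_bcast_sta() below:
 *
 *	ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
 *	if (!ret)
 *		ret = iwl_mvm_send_add_bcast_sta(mvm, vif);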
1906 */ 1907 int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 1908 { 1909 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1910 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta; 1911 static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; 1912 const u8 *baddr = _baddr; 1913 int queue; 1914 int ret; 1915 unsigned int wdg_timeout = 1916 iwl_mvm_get_wd_timeout(mvm, vif, false, false); 1917 struct iwl_trans_txq_scd_cfg cfg = { 1918 .fifo = IWL_MVM_TX_FIFO_VO, 1919 .sta_id = mvmvif->bcast_sta.sta_id, 1920 .tid = IWL_MAX_TID_COUNT, 1921 .aggregate = false, 1922 .frame_limit = IWL_FRAME_LIMIT, 1923 }; 1924 1925 lockdep_assert_held(&mvm->mutex); 1926 1927 if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) { 1928 if (vif->type == NL80211_IFTYPE_AP || 1929 vif->type == NL80211_IFTYPE_ADHOC) 1930 queue = mvm->probe_queue; 1931 else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) 1932 queue = mvm->p2p_dev_queue; 1933 else if (WARN(1, "Missing required TXQ for adding bcast STA\n")) 1934 return -EINVAL; 1935 1936 bsta->tfd_queue_msk |= BIT(queue); 1937 1938 iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0, 1939 &cfg, wdg_timeout); 1940 } 1941 1942 if (vif->type == NL80211_IFTYPE_ADHOC) 1943 baddr = vif->bss_conf.bssid; 1944 1945 if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA)) 1946 return -ENOSPC; 1947 1948 ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr, 1949 mvmvif->id, mvmvif->color); 1950 if (ret) 1951 return ret; 1952 1953 /* 1954 * For a000 firmware and on we cannot add queue to a station unknown 1955 * to firmware so enable queue here - after the station was added 1956 */ 1957 if (iwl_mvm_has_new_tx_api(mvm)) { 1958 queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0], 1959 bsta->sta_id, 1960 IWL_MAX_TID_COUNT, 1961 wdg_timeout); 1962 1963 if (vif->type == NL80211_IFTYPE_AP) 1964 mvm->probe_queue = queue; 1965 else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) 1966 mvm->p2p_dev_queue = queue; 1967 1968 bsta->tfd_queue_msk |= BIT(queue); 1969 } 1970 1971 return 0; 1972 } 1973 1974 static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm, 1975 struct ieee80211_vif *vif) 1976 { 1977 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1978 1979 lockdep_assert_held(&mvm->mutex); 1980 1981 if (vif->type == NL80211_IFTYPE_AP || 1982 vif->type == NL80211_IFTYPE_ADHOC) 1983 iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue, 1984 IWL_MAX_TID_COUNT, 0); 1985 1986 if (mvmvif->bcast_sta.tfd_queue_msk & BIT(mvm->probe_queue)) { 1987 iwl_mvm_disable_txq(mvm, mvm->probe_queue, 1988 vif->hw_queue[0], IWL_MAX_TID_COUNT, 1989 0); 1990 mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(mvm->probe_queue); 1991 } 1992 1993 if (mvmvif->bcast_sta.tfd_queue_msk & BIT(mvm->p2p_dev_queue)) { 1994 iwl_mvm_disable_txq(mvm, mvm->p2p_dev_queue, 1995 vif->hw_queue[0], IWL_MAX_TID_COUNT, 1996 0); 1997 mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(mvm->p2p_dev_queue); 1998 } 1999 } 2000 2001 /* Send the FW a request to remove the station from it's internal data 2002 * structures, but DO NOT remove the entry from the local data structures. 
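 * The caller is expected to release the local entry separately, e.g. as
 * iwl_mvm_rm_bcast_sta() below does:
 *
 *	ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);
 *	iwl_mvm_dealloc_bcast_sta(mvm, vif);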
*/ 2003 int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 2004 { 2005 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2006 int ret; 2007 2008 lockdep_assert_held(&mvm->mutex); 2009 2010 if (iwl_mvm_is_dqa_supported(mvm)) 2011 iwl_mvm_free_bcast_sta_queues(mvm, vif); 2012 2013 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id); 2014 if (ret) 2015 IWL_WARN(mvm, "Failed sending remove station\n"); 2016 return ret; 2017 } 2018 2019 int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 2020 { 2021 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2022 u32 qmask = 0; 2023 2024 lockdep_assert_held(&mvm->mutex); 2025 2026 if (!iwl_mvm_is_dqa_supported(mvm)) { 2027 qmask = iwl_mvm_mac_get_queues_mask(vif); 2028 2029 /* 2030 * The firmware defines the TFD queue mask to only be relevant 2031 * for *unicast* queues, so the multicast (CAB) queue shouldn't 2032 * be included. This only happens in NL80211_IFTYPE_AP vif type, 2033 * so the next line will only have an effect there. 2034 */ 2035 qmask &= ~BIT(vif->cab_queue); 2036 } 2037 2038 return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask, 2039 ieee80211_vif_type_p2p(vif), 2040 IWL_STA_GENERAL_PURPOSE); 2041 } 2042 2043 /* Allocate a new station entry for the broadcast station to the given vif, 2044 * and send it to the FW. 2045 * Note that each P2P mac should have its own broadcast station. 2046 * 2047 * @mvm: the mvm component 2048 * @vif: the interface to which the broadcast station is added 2049 * @bsta: the broadcast station to add. */ 2050 int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 2051 { 2052 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2053 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta; 2054 int ret; 2055 2056 lockdep_assert_held(&mvm->mutex); 2057 2058 ret = iwl_mvm_alloc_bcast_sta(mvm, vif); 2059 if (ret) 2060 return ret; 2061 2062 ret = iwl_mvm_send_add_bcast_sta(mvm, vif); 2063 2064 if (ret) 2065 iwl_mvm_dealloc_int_sta(mvm, bsta); 2066 2067 return ret; 2068 } 2069 2070 void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 2071 { 2072 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2073 2074 iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta); 2075 } 2076 2077 /* 2078 * Send the FW a request to remove the station from it's internal data 2079 * structures, and in addition remove it from the local data structure. 2080 */ 2081 int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 2082 { 2083 int ret; 2084 2085 lockdep_assert_held(&mvm->mutex); 2086 2087 ret = iwl_mvm_send_rm_bcast_sta(mvm, vif); 2088 2089 iwl_mvm_dealloc_bcast_sta(mvm, vif); 2090 2091 return ret; 2092 } 2093 2094 /* 2095 * Allocate a new station entry for the multicast station to the given vif, 2096 * and send it to the FW. 2097 * Note that each AP/GO mac should have its own multicast station. 
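 * This is only relevant in DQA mode (the function is a no-op otherwise) and
 * is currently supported for AP interfaces only.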
2098 * 2099 * @mvm: the mvm component 2100 * @vif: the interface to which the multicast station is added 2101 */ 2102 int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 2103 { 2104 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2105 struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta; 2106 static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00}; 2107 const u8 *maddr = _maddr; 2108 struct iwl_trans_txq_scd_cfg cfg = { 2109 .fifo = IWL_MVM_TX_FIFO_MCAST, 2110 .sta_id = msta->sta_id, 2111 .tid = IWL_MAX_TID_COUNT, 2112 .aggregate = false, 2113 .frame_limit = IWL_FRAME_LIMIT, 2114 }; 2115 unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false); 2116 int ret; 2117 2118 lockdep_assert_held(&mvm->mutex); 2119 2120 if (!iwl_mvm_is_dqa_supported(mvm)) 2121 return 0; 2122 2123 if (WARN_ON(vif->type != NL80211_IFTYPE_AP)) 2124 return -ENOTSUPP; 2125 2126 /* 2127 * While in previous FWs we had to exclude cab queue from TFD queue 2128 * mask, now it is needed as any other queue. 2129 */ 2130 if (!iwl_mvm_has_new_tx_api(mvm) && 2131 fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) { 2132 iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0, 2133 &cfg, timeout); 2134 msta->tfd_queue_msk |= BIT(vif->cab_queue); 2135 } 2136 ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr, 2137 mvmvif->id, mvmvif->color); 2138 if (ret) { 2139 iwl_mvm_dealloc_int_sta(mvm, msta); 2140 return ret; 2141 } 2142 2143 /* 2144 * Enable cab queue after the ADD_STA command is sent. 2145 * This is needed for a000 firmware which won't accept SCD_QUEUE_CFG 2146 * command with unknown station id, and for FW that doesn't support 2147 * station API since the cab queue is not included in the 2148 * tfd_queue_mask. 2149 */ 2150 if (iwl_mvm_has_new_tx_api(mvm)) { 2151 int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue, 2152 msta->sta_id, 2153 IWL_MAX_TID_COUNT, 2154 timeout); 2155 mvmvif->cab_queue = queue; 2156 } else if (!fw_has_api(&mvm->fw->ucode_capa, 2157 IWL_UCODE_TLV_API_STA_TYPE)) { 2158 iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0, 2159 &cfg, timeout); 2160 } 2161 2162 return 0; 2163 } 2164 2165 /* 2166 * Send the FW a request to remove the station from it's internal data 2167 * structures, and in addition remove it from the local data structure. 
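 * The cab (content-after-beacon) queue that was enabled for this station is
 * disabled first.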
2168 */
2169 int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2170 {
2171 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2172 int ret;
2173
2174 lockdep_assert_held(&mvm->mutex);
2175
2176 if (!iwl_mvm_is_dqa_supported(mvm))
2177 return 0;
2178
2179 iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
2180 IWL_MAX_TID_COUNT, 0);
2181
2182 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
2183 if (ret)
2184 IWL_WARN(mvm, "Failed sending remove station\n");
2185
2186 return ret;
2187 }
2188
2189 #define IWL_MAX_RX_BA_SESSIONS 16
2190
2191 static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
2192 {
2193 struct iwl_mvm_delba_notif notif = {
2194 .metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
2195 .metadata.sync = 1,
2196 .delba.baid = baid,
2197 };
2198 iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
2199 }
2200
2201 static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
2202 struct iwl_mvm_baid_data *data)
2203 {
2204 int i;
2205
2206 iwl_mvm_sync_rxq_del_ba(mvm, data->baid);
2207
2208 for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2209 int j;
2210 struct iwl_mvm_reorder_buffer *reorder_buf =
2211 &data->reorder_buf[i];
2212
2213 spin_lock_bh(&reorder_buf->lock);
2214 if (likely(!reorder_buf->num_stored)) {
2215 spin_unlock_bh(&reorder_buf->lock);
2216 continue;
2217 }
2218
2219 /*
2220 * This shouldn't happen in regular DELBA since the internal
2221 * delBA notification should trigger a release of all frames in
2222 * the reorder buffer.
2223 */
2224 WARN_ON(1);
2225
2226 for (j = 0; j < reorder_buf->buf_size; j++)
2227 __skb_queue_purge(&reorder_buf->entries[j]);
2228 /*
2229 * Prevent timer re-arm. This prevents a very far-fetched case
2230 * where we timed out on the notification. There may be prior
2231 * RX frames pending in the RX queue before the notification
2232 * that might get processed between now and the actual deletion
2233 * and we would re-arm the timer although we are deleting the
2234 * reorder buffer.
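 * Setting 'removed' below makes the reorder path skip re-arming; the timer
 * itself is then stopped with del_timer_sync() after dropping the lock.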
2235 */ 2236 reorder_buf->removed = true; 2237 spin_unlock_bh(&reorder_buf->lock); 2238 del_timer_sync(&reorder_buf->reorder_timer); 2239 } 2240 } 2241 2242 static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm, 2243 u32 sta_id, 2244 struct iwl_mvm_baid_data *data, 2245 u16 ssn, u8 buf_size) 2246 { 2247 int i; 2248 2249 for (i = 0; i < mvm->trans->num_rx_queues; i++) { 2250 struct iwl_mvm_reorder_buffer *reorder_buf = 2251 &data->reorder_buf[i]; 2252 int j; 2253 2254 reorder_buf->num_stored = 0; 2255 reorder_buf->head_sn = ssn; 2256 reorder_buf->buf_size = buf_size; 2257 /* rx reorder timer */ 2258 reorder_buf->reorder_timer.function = 2259 iwl_mvm_reorder_timer_expired; 2260 reorder_buf->reorder_timer.data = (unsigned long)reorder_buf; 2261 init_timer(&reorder_buf->reorder_timer); 2262 spin_lock_init(&reorder_buf->lock); 2263 reorder_buf->mvm = mvm; 2264 reorder_buf->queue = i; 2265 reorder_buf->sta_id = sta_id; 2266 reorder_buf->valid = false; 2267 for (j = 0; j < reorder_buf->buf_size; j++) 2268 __skb_queue_head_init(&reorder_buf->entries[j]); 2269 } 2270 } 2271 2272 int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, 2273 int tid, u16 ssn, bool start, u8 buf_size, u16 timeout) 2274 { 2275 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 2276 struct iwl_mvm_add_sta_cmd cmd = {}; 2277 struct iwl_mvm_baid_data *baid_data = NULL; 2278 int ret; 2279 u32 status; 2280 2281 lockdep_assert_held(&mvm->mutex); 2282 2283 if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) { 2284 IWL_WARN(mvm, "Not enough RX BA SESSIONS\n"); 2285 return -ENOSPC; 2286 } 2287 2288 if (iwl_mvm_has_new_rx_api(mvm) && start) { 2289 /* 2290 * Allocate here so if allocation fails we can bail out early 2291 * before starting the BA session in the firmware 2292 */ 2293 baid_data = kzalloc(sizeof(*baid_data) + 2294 mvm->trans->num_rx_queues * 2295 sizeof(baid_data->reorder_buf[0]), 2296 GFP_KERNEL); 2297 if (!baid_data) 2298 return -ENOMEM; 2299 } 2300 2301 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color); 2302 cmd.sta_id = mvm_sta->sta_id; 2303 cmd.add_modify = STA_MODE_MODIFY; 2304 if (start) { 2305 cmd.add_immediate_ba_tid = (u8) tid; 2306 cmd.add_immediate_ba_ssn = cpu_to_le16(ssn); 2307 cmd.rx_ba_window = cpu_to_le16((u16)buf_size); 2308 } else { 2309 cmd.remove_immediate_ba_tid = (u8) tid; 2310 } 2311 cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID : 2312 STA_MODIFY_REMOVE_BA_TID; 2313 2314 status = ADD_STA_SUCCESS; 2315 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, 2316 iwl_mvm_add_sta_cmd_size(mvm), 2317 &cmd, &status); 2318 if (ret) 2319 goto out_free; 2320 2321 switch (status & IWL_ADD_STA_STATUS_MASK) { 2322 case ADD_STA_SUCCESS: 2323 IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n", 2324 start ? "start" : "stopp"); 2325 break; 2326 case ADD_STA_IMMEDIATE_BA_FAILURE: 2327 IWL_WARN(mvm, "RX BA Session refused by fw\n"); 2328 ret = -ENOSPC; 2329 break; 2330 default: 2331 ret = -EIO; 2332 IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n", 2333 start ? 
"start" : "stopp", status); 2334 break; 2335 } 2336 2337 if (ret) 2338 goto out_free; 2339 2340 if (start) { 2341 u8 baid; 2342 2343 mvm->rx_ba_sessions++; 2344 2345 if (!iwl_mvm_has_new_rx_api(mvm)) 2346 return 0; 2347 2348 if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) { 2349 ret = -EINVAL; 2350 goto out_free; 2351 } 2352 baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >> 2353 IWL_ADD_STA_BAID_SHIFT); 2354 baid_data->baid = baid; 2355 baid_data->timeout = timeout; 2356 baid_data->last_rx = jiffies; 2357 setup_timer(&baid_data->session_timer, 2358 iwl_mvm_rx_agg_session_expired, 2359 (unsigned long)&mvm->baid_map[baid]); 2360 baid_data->mvm = mvm; 2361 baid_data->tid = tid; 2362 baid_data->sta_id = mvm_sta->sta_id; 2363 2364 mvm_sta->tid_to_baid[tid] = baid; 2365 if (timeout) 2366 mod_timer(&baid_data->session_timer, 2367 TU_TO_EXP_TIME(timeout * 2)); 2368 2369 iwl_mvm_init_reorder_buffer(mvm, mvm_sta->sta_id, 2370 baid_data, ssn, buf_size); 2371 /* 2372 * protect the BA data with RCU to cover a case where our 2373 * internal RX sync mechanism will timeout (not that it's 2374 * supposed to happen) and we will free the session data while 2375 * RX is being processed in parallel 2376 */ 2377 IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n", 2378 mvm_sta->sta_id, tid, baid); 2379 WARN_ON(rcu_access_pointer(mvm->baid_map[baid])); 2380 rcu_assign_pointer(mvm->baid_map[baid], baid_data); 2381 } else { 2382 u8 baid = mvm_sta->tid_to_baid[tid]; 2383 2384 if (mvm->rx_ba_sessions > 0) 2385 /* check that restart flow didn't zero the counter */ 2386 mvm->rx_ba_sessions--; 2387 if (!iwl_mvm_has_new_rx_api(mvm)) 2388 return 0; 2389 2390 if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID)) 2391 return -EINVAL; 2392 2393 baid_data = rcu_access_pointer(mvm->baid_map[baid]); 2394 if (WARN_ON(!baid_data)) 2395 return -EINVAL; 2396 2397 /* synchronize all rx queues so we can safely delete */ 2398 iwl_mvm_free_reorder(mvm, baid_data); 2399 del_timer_sync(&baid_data->session_timer); 2400 RCU_INIT_POINTER(mvm->baid_map[baid], NULL); 2401 kfree_rcu(baid_data, rcu_head); 2402 IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid); 2403 } 2404 return 0; 2405 2406 out_free: 2407 kfree(baid_data); 2408 return ret; 2409 } 2410 2411 int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, 2412 int tid, u8 queue, bool start) 2413 { 2414 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 2415 struct iwl_mvm_add_sta_cmd cmd = {}; 2416 int ret; 2417 u32 status; 2418 2419 lockdep_assert_held(&mvm->mutex); 2420 2421 if (start) { 2422 mvm_sta->tfd_queue_msk |= BIT(queue); 2423 mvm_sta->tid_disable_agg &= ~BIT(tid); 2424 } else { 2425 /* In DQA-mode the queue isn't removed on agg termination */ 2426 if (!iwl_mvm_is_dqa_supported(mvm)) 2427 mvm_sta->tfd_queue_msk &= ~BIT(queue); 2428 mvm_sta->tid_disable_agg |= BIT(tid); 2429 } 2430 2431 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color); 2432 cmd.sta_id = mvm_sta->sta_id; 2433 cmd.add_modify = STA_MODE_MODIFY; 2434 if (!iwl_mvm_has_new_tx_api(mvm)) 2435 cmd.modify_mask = STA_MODIFY_QUEUES; 2436 cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX; 2437 cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk); 2438 cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg); 2439 2440 status = ADD_STA_SUCCESS; 2441 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, 2442 iwl_mvm_add_sta_cmd_size(mvm), 2443 &cmd, &status); 2444 if (ret) 2445 return ret; 2446 2447 switch (status & IWL_ADD_STA_STATUS_MASK) { 2448 case ADD_STA_SUCCESS: 2449 break; 2450 
default: 2451 ret = -EIO; 2452 IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n", 2453 start ? "start" : "stopp", status); 2454 break; 2455 } 2456 2457 return ret; 2458 } 2459 2460 const u8 tid_to_mac80211_ac[] = { 2461 IEEE80211_AC_BE, 2462 IEEE80211_AC_BK, 2463 IEEE80211_AC_BK, 2464 IEEE80211_AC_BE, 2465 IEEE80211_AC_VI, 2466 IEEE80211_AC_VI, 2467 IEEE80211_AC_VO, 2468 IEEE80211_AC_VO, 2469 IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */ 2470 }; 2471 2472 static const u8 tid_to_ucode_ac[] = { 2473 AC_BE, 2474 AC_BK, 2475 AC_BK, 2476 AC_BE, 2477 AC_VI, 2478 AC_VI, 2479 AC_VO, 2480 AC_VO, 2481 }; 2482 2483 int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 2484 struct ieee80211_sta *sta, u16 tid, u16 *ssn) 2485 { 2486 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 2487 struct iwl_mvm_tid_data *tid_data; 2488 int txq_id; 2489 int ret; 2490 2491 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) 2492 return -EINVAL; 2493 2494 if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) { 2495 IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_OFF %d!\n", 2496 mvmsta->tid_data[tid].state); 2497 return -ENXIO; 2498 } 2499 2500 lockdep_assert_held(&mvm->mutex); 2501 2502 spin_lock_bh(&mvmsta->lock); 2503 2504 /* possible race condition - we entered D0i3 while starting agg */ 2505 if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) { 2506 spin_unlock_bh(&mvmsta->lock); 2507 IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n"); 2508 return -EIO; 2509 } 2510 2511 spin_lock(&mvm->queue_info_lock); 2512 2513 /* 2514 * Note the possible cases: 2515 * 1. In DQA mode with an enabled TXQ - TXQ needs to become agg'ed 2516 * 2. Non-DQA mode: the TXQ hasn't yet been enabled, so find a free 2517 * one and mark it as reserved 2518 * 3. 
In DQA mode, but no traffic yet on this TID: same treatment as in 2519 * non-DQA mode, since the TXQ hasn't yet been allocated 2520 * Don't support case 3 for new TX path as it is not expected to happen 2521 * and aggregation will be offloaded soon anyway 2522 */ 2523 txq_id = mvmsta->tid_data[tid].txq_id; 2524 if (iwl_mvm_has_new_tx_api(mvm)) { 2525 if (txq_id == IWL_MVM_INVALID_QUEUE) { 2526 ret = -ENXIO; 2527 goto release_locks; 2528 } 2529 } else if (iwl_mvm_is_dqa_supported(mvm) && 2530 unlikely(mvm->queue_info[txq_id].status == 2531 IWL_MVM_QUEUE_SHARED)) { 2532 ret = -ENXIO; 2533 IWL_DEBUG_TX_QUEUES(mvm, 2534 "Can't start tid %d agg on shared queue!\n", 2535 tid); 2536 goto release_locks; 2537 } else if (!iwl_mvm_is_dqa_supported(mvm) || 2538 mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) { 2539 txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, 2540 mvm->first_agg_queue, 2541 mvm->last_agg_queue); 2542 if (txq_id < 0) { 2543 ret = txq_id; 2544 IWL_ERR(mvm, "Failed to allocate agg queue\n"); 2545 goto release_locks; 2546 } 2547 /* 2548 * TXQ shouldn't be in inactive mode for non-DQA, so getting 2549 * an inactive queue from iwl_mvm_find_free_queue() is 2550 * certainly a bug 2551 */ 2552 WARN_ON(mvm->queue_info[txq_id].status == 2553 IWL_MVM_QUEUE_INACTIVE); 2554 2555 /* TXQ hasn't yet been enabled, so mark it only as reserved */ 2556 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED; 2557 } 2558 2559 spin_unlock(&mvm->queue_info_lock); 2560 2561 IWL_DEBUG_TX_QUEUES(mvm, 2562 "AGG for tid %d will be on queue #%d\n", 2563 tid, txq_id); 2564 2565 tid_data = &mvmsta->tid_data[tid]; 2566 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); 2567 tid_data->txq_id = txq_id; 2568 *ssn = tid_data->ssn; 2569 2570 IWL_DEBUG_TX_QUEUES(mvm, 2571 "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n", 2572 mvmsta->sta_id, tid, txq_id, tid_data->ssn, 2573 tid_data->next_reclaimed); 2574 2575 if (tid_data->ssn == tid_data->next_reclaimed) { 2576 tid_data->state = IWL_AGG_STARTING; 2577 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); 2578 } else { 2579 tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA; 2580 } 2581 2582 ret = 0; 2583 goto out; 2584 2585 release_locks: 2586 spin_unlock(&mvm->queue_info_lock); 2587 out: 2588 spin_unlock_bh(&mvmsta->lock); 2589 2590 return ret; 2591 } 2592 2593 int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 2594 struct ieee80211_sta *sta, u16 tid, u8 buf_size, 2595 bool amsdu) 2596 { 2597 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 2598 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; 2599 unsigned int wdg_timeout = 2600 iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false); 2601 int queue, ret; 2602 bool alloc_queue = true; 2603 enum iwl_mvm_queue_status queue_status; 2604 u16 ssn; 2605 2606 struct iwl_trans_txq_scd_cfg cfg = { 2607 .sta_id = mvmsta->sta_id, 2608 .tid = tid, 2609 .frame_limit = buf_size, 2610 .aggregate = true, 2611 }; 2612 2613 BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE) 2614 != IWL_MAX_TID_COUNT); 2615 2616 buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF); 2617 2618 spin_lock_bh(&mvmsta->lock); 2619 ssn = tid_data->ssn; 2620 queue = tid_data->txq_id; 2621 tid_data->state = IWL_AGG_ON; 2622 mvmsta->agg_tids |= BIT(tid); 2623 tid_data->ssn = 0xffff; 2624 tid_data->amsdu_in_ampdu_allowed = amsdu; 2625 spin_unlock_bh(&mvmsta->lock); 2626 2627 if (iwl_mvm_has_new_tx_api(mvm)) { 2628 /* 2629 * If no queue iwl_mvm_sta_tx_agg_start() would have 
failed so 2630 * no need to check queue's status 2631 */ 2632 if (buf_size < mvmsta->max_agg_bufsize) 2633 return -ENOTSUPP; 2634 2635 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true); 2636 if (ret) 2637 return -EIO; 2638 goto out; 2639 } 2640 2641 cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]]; 2642 2643 spin_lock_bh(&mvm->queue_info_lock); 2644 queue_status = mvm->queue_info[queue].status; 2645 spin_unlock_bh(&mvm->queue_info_lock); 2646 2647 /* In DQA mode, the existing queue might need to be reconfigured */ 2648 if (iwl_mvm_is_dqa_supported(mvm)) { 2649 /* Maybe there is no need to even alloc a queue... */ 2650 if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY) 2651 alloc_queue = false; 2652 2653 /* 2654 * Only reconfig the SCD for the queue if the window size has 2655 * changed from current (become smaller) 2656 */ 2657 if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) { 2658 /* 2659 * If reconfiguring an existing queue, it first must be 2660 * drained 2661 */ 2662 ret = iwl_trans_wait_tx_queues_empty(mvm->trans, 2663 BIT(queue)); 2664 if (ret) { 2665 IWL_ERR(mvm, 2666 "Error draining queue before reconfig\n"); 2667 return ret; 2668 } 2669 2670 ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo, 2671 mvmsta->sta_id, tid, 2672 buf_size, ssn); 2673 if (ret) { 2674 IWL_ERR(mvm, 2675 "Error reconfiguring TXQ #%d\n", queue); 2676 return ret; 2677 } 2678 } 2679 } 2680 2681 if (alloc_queue) 2682 iwl_mvm_enable_txq(mvm, queue, 2683 vif->hw_queue[tid_to_mac80211_ac[tid]], ssn, 2684 &cfg, wdg_timeout); 2685 2686 /* Send ADD_STA command to enable aggs only if the queue isn't shared */ 2687 if (queue_status != IWL_MVM_QUEUE_SHARED) { 2688 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true); 2689 if (ret) 2690 return -EIO; 2691 } 2692 2693 /* No need to mark as reserved */ 2694 spin_lock_bh(&mvm->queue_info_lock); 2695 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; 2696 spin_unlock_bh(&mvm->queue_info_lock); 2697 2698 out: 2699 /* 2700 * Even though in theory the peer could have different 2701 * aggregation reorder buffer sizes for different sessions, 2702 * our ucode doesn't allow for that and has a global limit 2703 * for each station. Therefore, use the minimum of all the 2704 * aggregation sessions and our default value. 2705 */ 2706 mvmsta->max_agg_bufsize = 2707 min(mvmsta->max_agg_bufsize, buf_size); 2708 mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize; 2709 2710 IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n", 2711 sta->addr, tid); 2712 2713 return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false); 2714 } 2715 2716 static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm, 2717 struct iwl_mvm_sta *mvmsta, 2718 u16 txq_id) 2719 { 2720 if (iwl_mvm_has_new_tx_api(mvm)) 2721 return; 2722 2723 spin_lock_bh(&mvm->queue_info_lock); 2724 /* 2725 * The TXQ is marked as reserved only if no traffic came through yet 2726 * This means no traffic has been sent on this TID (agg'd or not), so 2727 * we no longer have use for the queue. Since it hasn't even been 2728 * allocated through iwl_mvm_enable_txq, so we can just mark it back as 2729 * free. 
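 * A queue in any other state (e.g. READY or SHARED) is left untouched here.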
2730 */ 2731 if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) 2732 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE; 2733 2734 spin_unlock_bh(&mvm->queue_info_lock); 2735 } 2736 2737 int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 2738 struct ieee80211_sta *sta, u16 tid) 2739 { 2740 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 2741 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; 2742 u16 txq_id; 2743 int err; 2744 2745 /* 2746 * If mac80211 is cleaning its state, then say that we finished since 2747 * our state has been cleared anyway. 2748 */ 2749 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { 2750 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); 2751 return 0; 2752 } 2753 2754 spin_lock_bh(&mvmsta->lock); 2755 2756 txq_id = tid_data->txq_id; 2757 2758 IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n", 2759 mvmsta->sta_id, tid, txq_id, tid_data->state); 2760 2761 mvmsta->agg_tids &= ~BIT(tid); 2762 2763 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, txq_id); 2764 2765 switch (tid_data->state) { 2766 case IWL_AGG_ON: 2767 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); 2768 2769 IWL_DEBUG_TX_QUEUES(mvm, 2770 "ssn = %d, next_recl = %d\n", 2771 tid_data->ssn, tid_data->next_reclaimed); 2772 2773 /* There are still packets for this RA / TID in the HW */ 2774 if (tid_data->ssn != tid_data->next_reclaimed) { 2775 tid_data->state = IWL_EMPTYING_HW_QUEUE_DELBA; 2776 err = 0; 2777 break; 2778 } 2779 2780 tid_data->ssn = 0xffff; 2781 tid_data->state = IWL_AGG_OFF; 2782 spin_unlock_bh(&mvmsta->lock); 2783 2784 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); 2785 2786 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false); 2787 2788 if (!iwl_mvm_is_dqa_supported(mvm)) { 2789 int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]]; 2790 2791 iwl_mvm_disable_txq(mvm, txq_id, mac_queue, tid, 0); 2792 } 2793 return 0; 2794 case IWL_AGG_STARTING: 2795 case IWL_EMPTYING_HW_QUEUE_ADDBA: 2796 /* 2797 * The agg session has been stopped before it was set up. This 2798 * can happen when the AddBA timer times out for example. 2799 */ 2800 2801 /* No barriers since we are under mutex */ 2802 lockdep_assert_held(&mvm->mutex); 2803 2804 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); 2805 tid_data->state = IWL_AGG_OFF; 2806 err = 0; 2807 break; 2808 default: 2809 IWL_ERR(mvm, 2810 "Stopping AGG while state not ON or starting for %d on %d (%d)\n", 2811 mvmsta->sta_id, tid, tid_data->state); 2812 IWL_ERR(mvm, 2813 "\ttid_data->txq_id = %d\n", tid_data->txq_id); 2814 err = -EINVAL; 2815 } 2816 2817 spin_unlock_bh(&mvmsta->lock); 2818 2819 return err; 2820 } 2821 2822 int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 2823 struct ieee80211_sta *sta, u16 tid) 2824 { 2825 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 2826 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; 2827 u16 txq_id; 2828 enum iwl_mvm_agg_state old_state; 2829 2830 /* 2831 * First set the agg state to OFF to avoid calling 2832 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty. 
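 * If a session was actually running (old_state >= IWL_AGG_ON), the station
 * is drained and its queue flushed before the agg-stop ADD_STA is sent.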
2833 */ 2834 spin_lock_bh(&mvmsta->lock); 2835 txq_id = tid_data->txq_id; 2836 IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n", 2837 mvmsta->sta_id, tid, txq_id, tid_data->state); 2838 old_state = tid_data->state; 2839 tid_data->state = IWL_AGG_OFF; 2840 mvmsta->agg_tids &= ~BIT(tid); 2841 spin_unlock_bh(&mvmsta->lock); 2842 2843 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, txq_id); 2844 2845 if (old_state >= IWL_AGG_ON) { 2846 iwl_mvm_drain_sta(mvm, mvmsta, true); 2847 if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0)) 2848 IWL_ERR(mvm, "Couldn't flush the AGG queue\n"); 2849 iwl_trans_wait_tx_queues_empty(mvm->trans, 2850 mvmsta->tfd_queue_msk); 2851 iwl_mvm_drain_sta(mvm, mvmsta, false); 2852 2853 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false); 2854 2855 if (!iwl_mvm_is_dqa_supported(mvm)) { 2856 int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]]; 2857 2858 iwl_mvm_disable_txq(mvm, tid_data->txq_id, mac_queue, 2859 tid, 0); 2860 } 2861 } 2862 2863 return 0; 2864 } 2865 2866 static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm) 2867 { 2868 int i, max = -1, max_offs = -1; 2869 2870 lockdep_assert_held(&mvm->mutex); 2871 2872 /* Pick the unused key offset with the highest 'deleted' 2873 * counter. Every time a key is deleted, all the counters 2874 * are incremented and the one that was just deleted is 2875 * reset to zero. Thus, the highest counter is the one 2876 * that was deleted longest ago. Pick that one. 2877 */ 2878 for (i = 0; i < STA_KEY_MAX_NUM; i++) { 2879 if (test_bit(i, mvm->fw_key_table)) 2880 continue; 2881 if (mvm->fw_key_deleted[i] > max) { 2882 max = mvm->fw_key_deleted[i]; 2883 max_offs = i; 2884 } 2885 } 2886 2887 if (max_offs < 0) 2888 return STA_KEY_IDX_INVALID; 2889 2890 return max_offs; 2891 } 2892 2893 static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm, 2894 struct ieee80211_vif *vif, 2895 struct ieee80211_sta *sta) 2896 { 2897 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2898 2899 if (sta) 2900 return iwl_mvm_sta_from_mac80211(sta); 2901 2902 /* 2903 * The device expects GTKs for station interfaces to be 2904 * installed as GTKs for the AP station. If we have no 2905 * station ID, then use AP's station ID. 2906 */ 2907 if (vif->type == NL80211_IFTYPE_STATION && 2908 mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) { 2909 u8 sta_id = mvmvif->ap_sta_id; 2910 2911 sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id], 2912 lockdep_is_held(&mvm->mutex)); 2913 2914 /* 2915 * It is possible that the 'sta' parameter is NULL, 2916 * for example when a GTK is removed - the sta_id will then 2917 * be the AP ID, and no station was passed by mac80211. 
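 * The AP station may itself already be gone (or marked with an error
 * pointer while draining), hence the IS_ERR_OR_NULL() check below.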
2918 */ 2919 if (IS_ERR_OR_NULL(sta)) 2920 return NULL; 2921 2922 return iwl_mvm_sta_from_mac80211(sta); 2923 } 2924 2925 return NULL; 2926 } 2927 2928 static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, 2929 struct iwl_mvm_sta *mvm_sta, 2930 struct ieee80211_key_conf *key, bool mcast, 2931 u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags, 2932 u8 key_offset) 2933 { 2934 union { 2935 struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1; 2936 struct iwl_mvm_add_sta_key_cmd cmd; 2937 } u = {}; 2938 __le16 key_flags; 2939 int ret; 2940 u32 status; 2941 u16 keyidx; 2942 u64 pn = 0; 2943 int i, size; 2944 bool new_api = fw_has_api(&mvm->fw->ucode_capa, 2945 IWL_UCODE_TLV_API_TKIP_MIC_KEYS); 2946 2947 keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) & 2948 STA_KEY_FLG_KEYID_MSK; 2949 key_flags = cpu_to_le16(keyidx); 2950 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP); 2951 2952 switch (key->cipher) { 2953 case WLAN_CIPHER_SUITE_TKIP: 2954 key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP); 2955 if (new_api) { 2956 memcpy((void *)&u.cmd.tx_mic_key, 2957 &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY], 2958 IWL_MIC_KEY_SIZE); 2959 2960 memcpy((void *)&u.cmd.rx_mic_key, 2961 &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY], 2962 IWL_MIC_KEY_SIZE); 2963 pn = atomic64_read(&key->tx_pn); 2964 2965 } else { 2966 u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32; 2967 for (i = 0; i < 5; i++) 2968 u.cmd_v1.tkip_rx_ttak[i] = 2969 cpu_to_le16(tkip_p1k[i]); 2970 } 2971 memcpy(u.cmd.common.key, key->key, key->keylen); 2972 break; 2973 case WLAN_CIPHER_SUITE_CCMP: 2974 key_flags |= cpu_to_le16(STA_KEY_FLG_CCM); 2975 memcpy(u.cmd.common.key, key->key, key->keylen); 2976 if (new_api) 2977 pn = atomic64_read(&key->tx_pn); 2978 break; 2979 case WLAN_CIPHER_SUITE_WEP104: 2980 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES); 2981 /* fall through */ 2982 case WLAN_CIPHER_SUITE_WEP40: 2983 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP); 2984 memcpy(u.cmd.common.key + 3, key->key, key->keylen); 2985 break; 2986 case WLAN_CIPHER_SUITE_GCMP_256: 2987 key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES); 2988 /* fall through */ 2989 case WLAN_CIPHER_SUITE_GCMP: 2990 key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP); 2991 memcpy(u.cmd.common.key, key->key, key->keylen); 2992 if (new_api) 2993 pn = atomic64_read(&key->tx_pn); 2994 break; 2995 default: 2996 key_flags |= cpu_to_le16(STA_KEY_FLG_EXT); 2997 memcpy(u.cmd.common.key, key->key, key->keylen); 2998 } 2999 3000 if (mcast) 3001 key_flags |= cpu_to_le16(STA_KEY_MULTICAST); 3002 3003 u.cmd.common.key_offset = key_offset; 3004 u.cmd.common.key_flags = key_flags; 3005 u.cmd.common.sta_id = mvm_sta->sta_id; 3006 3007 if (new_api) { 3008 u.cmd.transmit_seq_cnt = cpu_to_le64(pn); 3009 size = sizeof(u.cmd); 3010 } else { 3011 size = sizeof(u.cmd_v1); 3012 } 3013 3014 status = ADD_STA_SUCCESS; 3015 if (cmd_flags & CMD_ASYNC) 3016 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size, 3017 &u.cmd); 3018 else 3019 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, 3020 &u.cmd, &status); 3021 3022 switch (status) { 3023 case ADD_STA_SUCCESS: 3024 IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n"); 3025 break; 3026 default: 3027 ret = -EIO; 3028 IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n"); 3029 break; 3030 } 3031 3032 return ret; 3033 } 3034 3035 static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm, 3036 struct ieee80211_key_conf *keyconf, 3037 u8 sta_id, bool remove_key) 3038 { 3039 struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {}; 3040 3041 /* verify the key details match the required command's 
expectations */ 3042 if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) || 3043 (keyconf->keyidx != 4 && keyconf->keyidx != 5) || 3044 (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC && 3045 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 && 3046 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256))) 3047 return -EINVAL; 3048 3049 if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) && 3050 keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC)) 3051 return -EINVAL; 3052 3053 igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx); 3054 igtk_cmd.sta_id = cpu_to_le32(sta_id); 3055 3056 if (remove_key) { 3057 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID); 3058 } else { 3059 struct ieee80211_key_seq seq; 3060 const u8 *pn; 3061 3062 switch (keyconf->cipher) { 3063 case WLAN_CIPHER_SUITE_AES_CMAC: 3064 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM); 3065 break; 3066 case WLAN_CIPHER_SUITE_BIP_GMAC_128: 3067 case WLAN_CIPHER_SUITE_BIP_GMAC_256: 3068 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP); 3069 break; 3070 default: 3071 return -EINVAL; 3072 } 3073 3074 memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen); 3075 if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) 3076 igtk_cmd.ctrl_flags |= 3077 cpu_to_le32(STA_KEY_FLG_KEY_32BYTES); 3078 ieee80211_get_key_rx_seq(keyconf, 0, &seq); 3079 pn = seq.aes_cmac.pn; 3080 igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) | 3081 ((u64) pn[4] << 8) | 3082 ((u64) pn[3] << 16) | 3083 ((u64) pn[2] << 24) | 3084 ((u64) pn[1] << 32) | 3085 ((u64) pn[0] << 40)); 3086 } 3087 3088 IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n", 3089 remove_key ? "removing" : "installing", 3090 igtk_cmd.sta_id); 3091 3092 if (!iwl_mvm_has_new_rx_api(mvm)) { 3093 struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = { 3094 .ctrl_flags = igtk_cmd.ctrl_flags, 3095 .key_id = igtk_cmd.key_id, 3096 .sta_id = igtk_cmd.sta_id, 3097 .receive_seq_cnt = igtk_cmd.receive_seq_cnt 3098 }; 3099 3100 memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk, 3101 ARRAY_SIZE(igtk_cmd_v1.igtk)); 3102 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0, 3103 sizeof(igtk_cmd_v1), &igtk_cmd_v1); 3104 } 3105 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0, 3106 sizeof(igtk_cmd), &igtk_cmd); 3107 } 3108 3109 3110 static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm, 3111 struct ieee80211_vif *vif, 3112 struct ieee80211_sta *sta) 3113 { 3114 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3115 3116 if (sta) 3117 return sta->addr; 3118 3119 if (vif->type == NL80211_IFTYPE_STATION && 3120 mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) { 3121 u8 sta_id = mvmvif->ap_sta_id; 3122 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], 3123 lockdep_is_held(&mvm->mutex)); 3124 return sta->addr; 3125 } 3126 3127 3128 return NULL; 3129 } 3130 3131 static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm, 3132 struct ieee80211_vif *vif, 3133 struct ieee80211_sta *sta, 3134 struct ieee80211_key_conf *keyconf, 3135 u8 key_offset, 3136 bool mcast) 3137 { 3138 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 3139 int ret; 3140 const u8 *addr; 3141 struct ieee80211_key_seq seq; 3142 u16 p1k[5]; 3143 3144 switch (keyconf->cipher) { 3145 case WLAN_CIPHER_SUITE_TKIP: 3146 addr = iwl_mvm_get_mac_addr(mvm, vif, sta); 3147 /* get phase 1 key from mac80211 */ 3148 ieee80211_get_key_rx_seq(keyconf, 0, &seq); 3149 ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k); 3150 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, 3151 seq.tkip.iv32, p1k, 0, key_offset); 3152 break; 3153 case 
WLAN_CIPHER_SUITE_CCMP: 3154 case WLAN_CIPHER_SUITE_WEP40: 3155 case WLAN_CIPHER_SUITE_WEP104: 3156 case WLAN_CIPHER_SUITE_GCMP: 3157 case WLAN_CIPHER_SUITE_GCMP_256: 3158 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, 3159 0, NULL, 0, key_offset); 3160 break; 3161 default: 3162 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, 3163 0, NULL, 0, key_offset); 3164 } 3165 3166 return ret; 3167 } 3168 3169 static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id, 3170 struct ieee80211_key_conf *keyconf, 3171 bool mcast) 3172 { 3173 union { 3174 struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1; 3175 struct iwl_mvm_add_sta_key_cmd cmd; 3176 } u = {}; 3177 bool new_api = fw_has_api(&mvm->fw->ucode_capa, 3178 IWL_UCODE_TLV_API_TKIP_MIC_KEYS); 3179 __le16 key_flags; 3180 int ret, size; 3181 u32 status; 3182 3183 key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) & 3184 STA_KEY_FLG_KEYID_MSK); 3185 key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP); 3186 key_flags |= cpu_to_le16(STA_KEY_NOT_VALID); 3187 3188 if (mcast) 3189 key_flags |= cpu_to_le16(STA_KEY_MULTICAST); 3190 3191 /* 3192 * The fields assigned here are in the same location at the start 3193 * of the command, so we can do this union trick. 3194 */ 3195 u.cmd.common.key_flags = key_flags; 3196 u.cmd.common.key_offset = keyconf->hw_key_idx; 3197 u.cmd.common.sta_id = sta_id; 3198 3199 size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1); 3200 3201 status = ADD_STA_SUCCESS; 3202 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd, 3203 &status); 3204 3205 switch (status) { 3206 case ADD_STA_SUCCESS: 3207 IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n"); 3208 break; 3209 default: 3210 ret = -EIO; 3211 IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n"); 3212 break; 3213 } 3214 3215 return ret; 3216 } 3217 3218 int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, 3219 struct ieee80211_vif *vif, 3220 struct ieee80211_sta *sta, 3221 struct ieee80211_key_conf *keyconf, 3222 u8 key_offset) 3223 { 3224 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); 3225 struct iwl_mvm_sta *mvm_sta; 3226 u8 sta_id; 3227 int ret; 3228 static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0}; 3229 3230 lockdep_assert_held(&mvm->mutex); 3231 3232 /* Get the station id from the mvm local station table */ 3233 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta); 3234 if (!mvm_sta) { 3235 IWL_ERR(mvm, "Failed to find station\n"); 3236 return -EINVAL; 3237 } 3238 sta_id = mvm_sta->sta_id; 3239 3240 if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC || 3241 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || 3242 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) { 3243 ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false); 3244 goto end; 3245 } 3246 3247 /* 3248 * It is possible that the 'sta' parameter is NULL, and thus 3249 * there is a need to retrieve the sta from the local station table. 3250 */ 3251 if (!sta) { 3252 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], 3253 lockdep_is_held(&mvm->mutex)); 3254 if (IS_ERR_OR_NULL(sta)) { 3255 IWL_ERR(mvm, "Invalid station id\n"); 3256 return -EINVAL; 3257 } 3258 } 3259 3260 if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif)) 3261 return -EINVAL; 3262 3263 /* If the key_offset is not pre-assigned, we need to find a 3264 * new offset to use. In normal cases, the offset is not 3265 * pre-assigned, but during HW_RESTART we want to reuse the 3266 * same indices, so we pass them when this function is called. 
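 * (iwl_mvm_set_fw_key_idx() picks the free offset with the highest
 * 'deleted' counter, i.e. the slot that has been unused the longest.)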
3267 * 3268 * In D3 entry, we need to hardcoded the indices (because the 3269 * firmware hardcodes the PTK offset to 0). In this case, we 3270 * need to make sure we don't overwrite the hw_key_idx in the 3271 * keyconf structure, because otherwise we cannot configure 3272 * the original ones back when resuming. 3273 */ 3274 if (key_offset == STA_KEY_IDX_INVALID) { 3275 key_offset = iwl_mvm_set_fw_key_idx(mvm); 3276 if (key_offset == STA_KEY_IDX_INVALID) 3277 return -ENOSPC; 3278 keyconf->hw_key_idx = key_offset; 3279 } 3280 3281 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast); 3282 if (ret) 3283 goto end; 3284 3285 /* 3286 * For WEP, the same key is used for multicast and unicast. Upload it 3287 * again, using the same key offset, and now pointing the other one 3288 * to the same key slot (offset). 3289 * If this fails, remove the original as well. 3290 */ 3291 if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 || 3292 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) { 3293 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, 3294 key_offset, !mcast); 3295 if (ret) { 3296 __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast); 3297 goto end; 3298 } 3299 } 3300 3301 __set_bit(key_offset, mvm->fw_key_table); 3302 3303 end: 3304 IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n", 3305 keyconf->cipher, keyconf->keylen, keyconf->keyidx, 3306 sta ? sta->addr : zero_addr, ret); 3307 return ret; 3308 } 3309 3310 int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, 3311 struct ieee80211_vif *vif, 3312 struct ieee80211_sta *sta, 3313 struct ieee80211_key_conf *keyconf) 3314 { 3315 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); 3316 struct iwl_mvm_sta *mvm_sta; 3317 u8 sta_id = IWL_MVM_INVALID_STA; 3318 int ret, i; 3319 3320 lockdep_assert_held(&mvm->mutex); 3321 3322 /* Get the station from the mvm local station table */ 3323 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta); 3324 if (!mvm_sta) { 3325 IWL_ERR(mvm, "Failed to find station\n"); 3326 return -EINVAL; 3327 } 3328 sta_id = mvm_sta->sta_id; 3329 3330 IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n", 3331 keyconf->keyidx, sta_id); 3332 3333 if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC || 3334 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || 3335 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) 3336 return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true); 3337 3338 if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) { 3339 IWL_ERR(mvm, "offset %d not used in fw key table.\n", 3340 keyconf->hw_key_idx); 3341 return -ENOENT; 3342 } 3343 3344 /* track which key was deleted last */ 3345 for (i = 0; i < STA_KEY_MAX_NUM; i++) { 3346 if (mvm->fw_key_deleted[i] < U8_MAX) 3347 mvm->fw_key_deleted[i]++; 3348 } 3349 mvm->fw_key_deleted[keyconf->hw_key_idx] = 0; 3350 3351 if (!mvm_sta) { 3352 IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n"); 3353 return 0; 3354 } 3355 3356 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast); 3357 if (ret) 3358 return ret; 3359 3360 /* delete WEP key twice to get rid of (now useless) offset */ 3361 if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 || 3362 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) 3363 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast); 3364 3365 return ret; 3366 } 3367 3368 void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm, 3369 struct ieee80211_vif *vif, 3370 struct ieee80211_key_conf *keyconf, 3371 struct ieee80211_sta *sta, u32 iv32, 3372 u16 *phase1key) 3373 { 3374 struct iwl_mvm_sta *mvm_sta; 3375 
bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); 3376 3377 rcu_read_lock(); 3378 3379 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta); 3380 if (WARN_ON_ONCE(!mvm_sta)) 3381 goto unlock; 3382 iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, 3383 iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx); 3384 3385 unlock: 3386 rcu_read_unlock(); 3387 } 3388 3389 void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm, 3390 struct ieee80211_sta *sta) 3391 { 3392 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 3393 struct iwl_mvm_add_sta_cmd cmd = { 3394 .add_modify = STA_MODE_MODIFY, 3395 .sta_id = mvmsta->sta_id, 3396 .station_flags_msk = cpu_to_le32(STA_FLG_PS), 3397 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color), 3398 }; 3399 int ret; 3400 3401 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, 3402 iwl_mvm_add_sta_cmd_size(mvm), &cmd); 3403 if (ret) 3404 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); 3405 } 3406 3407 void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, 3408 struct ieee80211_sta *sta, 3409 enum ieee80211_frame_release_type reason, 3410 u16 cnt, u16 tids, bool more_data, 3411 bool single_sta_queue) 3412 { 3413 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 3414 struct iwl_mvm_add_sta_cmd cmd = { 3415 .add_modify = STA_MODE_MODIFY, 3416 .sta_id = mvmsta->sta_id, 3417 .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT, 3418 .sleep_tx_count = cpu_to_le16(cnt), 3419 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color), 3420 }; 3421 int tid, ret; 3422 unsigned long _tids = tids; 3423 3424 /* convert TIDs to ACs - we don't support TSPEC so that's OK 3425 * Note that this field is reserved and unused by firmware not 3426 * supporting GO uAPSD, so it's safe to always do this. 3427 */ 3428 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) 3429 cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]); 3430 3431 /* If we're releasing frames from aggregation or dqa queues then check 3432 * if all the queues that we're releasing frames from, combined, have: 3433 * - more frames than the service period, in which case more_data 3434 * needs to be set 3435 * - fewer than 'cnt' frames, in which case we need to adjust the 3436 * firmware command (but do that unconditionally) 3437 */ 3438 if (single_sta_queue) { 3439 int remaining = cnt; 3440 int sleep_tx_count; 3441 3442 spin_lock_bh(&mvmsta->lock); 3443 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) { 3444 struct iwl_mvm_tid_data *tid_data; 3445 u16 n_queued; 3446 3447 tid_data = &mvmsta->tid_data[tid]; 3448 if (WARN(!iwl_mvm_is_dqa_supported(mvm) && 3449 tid_data->state != IWL_AGG_ON && 3450 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA, 3451 "TID %d state is %d\n", 3452 tid, tid_data->state)) { 3453 spin_unlock_bh(&mvmsta->lock); 3454 ieee80211_sta_eosp(sta); 3455 return; 3456 } 3457 3458 n_queued = iwl_mvm_tid_queued(tid_data); 3459 if (n_queued > remaining) { 3460 more_data = true; 3461 remaining = 0; 3462 break; 3463 } 3464 remaining -= n_queued; 3465 } 3466 sleep_tx_count = cnt - remaining; 3467 if (reason == IEEE80211_FRAME_RELEASE_UAPSD) 3468 mvmsta->sleep_tx_count = sleep_tx_count; 3469 spin_unlock_bh(&mvmsta->lock); 3470 3471 cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count); 3472 if (WARN_ON(cnt - remaining == 0)) { 3473 ieee80211_sta_eosp(sta); 3474 return; 3475 } 3476 } 3477 3478 /* Note: this is ignored by firmware not supporting GO uAPSD */ 3479 if (more_data) 3480 cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA; 3481 3482 if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) { 3483 
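/*
 * For a PS-Poll release the EOSP is reported from the Tx status of the
 * released frame, so only flag the station here (see next_status_eosp).
 */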
mvmsta->next_status_eosp = true; 3484 cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL; 3485 } else { 3486 cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD; 3487 } 3488 3489 /* block the Tx queues until the FW updated the sleep Tx count */ 3490 iwl_trans_block_txq_ptrs(mvm->trans, true); 3491 3492 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 3493 CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK, 3494 iwl_mvm_add_sta_cmd_size(mvm), &cmd); 3495 if (ret) 3496 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); 3497 } 3498 3499 void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm, 3500 struct iwl_rx_cmd_buffer *rxb) 3501 { 3502 struct iwl_rx_packet *pkt = rxb_addr(rxb); 3503 struct iwl_mvm_eosp_notification *notif = (void *)pkt->data; 3504 struct ieee80211_sta *sta; 3505 u32 sta_id = le32_to_cpu(notif->sta_id); 3506 3507 if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT)) 3508 return; 3509 3510 rcu_read_lock(); 3511 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); 3512 if (!IS_ERR_OR_NULL(sta)) 3513 ieee80211_sta_eosp(sta); 3514 rcu_read_unlock(); 3515 } 3516 3517 void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm, 3518 struct iwl_mvm_sta *mvmsta, bool disable) 3519 { 3520 struct iwl_mvm_add_sta_cmd cmd = { 3521 .add_modify = STA_MODE_MODIFY, 3522 .sta_id = mvmsta->sta_id, 3523 .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0, 3524 .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX), 3525 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color), 3526 }; 3527 int ret; 3528 3529 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, 3530 iwl_mvm_add_sta_cmd_size(mvm), &cmd); 3531 if (ret) 3532 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); 3533 } 3534 3535 void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm, 3536 struct ieee80211_sta *sta, 3537 bool disable) 3538 { 3539 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 3540 3541 spin_lock_bh(&mvm_sta->lock); 3542 3543 if (mvm_sta->disable_tx == disable) { 3544 spin_unlock_bh(&mvm_sta->lock); 3545 return; 3546 } 3547 3548 mvm_sta->disable_tx = disable; 3549 3550 /* 3551 * Tell mac80211 to start/stop queuing tx for this station, 3552 * but don't stop queuing if there are still pending frames 3553 * for this station. 3554 */ 3555 if (disable || !atomic_read(&mvm->pending_frames[mvm_sta->sta_id])) 3556 ieee80211_sta_block_awake(mvm->hw, sta, disable); 3557 3558 iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable); 3559 3560 spin_unlock_bh(&mvm_sta->lock); 3561 } 3562 3563 static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm, 3564 struct iwl_mvm_vif *mvmvif, 3565 struct iwl_mvm_int_sta *sta, 3566 bool disable) 3567 { 3568 u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color); 3569 struct iwl_mvm_add_sta_cmd cmd = { 3570 .add_modify = STA_MODE_MODIFY, 3571 .sta_id = sta->sta_id, 3572 .station_flags = disable ? 
cpu_to_le32(STA_FLG_DISABLE_TX) : 0, 3573 .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX), 3574 .mac_id_n_color = cpu_to_le32(id), 3575 }; 3576 int ret; 3577 3578 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0, 3579 iwl_mvm_add_sta_cmd_size(mvm), &cmd); 3580 if (ret) 3581 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); 3582 } 3583 3584 void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm, 3585 struct iwl_mvm_vif *mvmvif, 3586 bool disable) 3587 { 3588 struct ieee80211_sta *sta; 3589 struct iwl_mvm_sta *mvm_sta; 3590 int i; 3591 3592 lockdep_assert_held(&mvm->mutex); 3593 3594 /* Block/unblock all the stations of the given mvmvif */ 3595 for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) { 3596 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], 3597 lockdep_is_held(&mvm->mutex)); 3598 if (IS_ERR_OR_NULL(sta)) 3599 continue; 3600 3601 mvm_sta = iwl_mvm_sta_from_mac80211(sta); 3602 if (mvm_sta->mac_id_n_color != 3603 FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)) 3604 continue; 3605 3606 iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable); 3607 } 3608 3609 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) 3610 return; 3611 3612 /* Need to block/unblock also multicast station */ 3613 if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA) 3614 iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif, 3615 &mvmvif->mcast_sta, disable); 3616 3617 /* 3618 * Only unblock the broadcast station (FW blocks it for immediate 3619 * quiet, not the driver) 3620 */ 3621 if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA) 3622 iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif, 3623 &mvmvif->bcast_sta, disable); 3624 } 3625 3626 void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 3627 { 3628 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3629 struct iwl_mvm_sta *mvmsta; 3630 3631 rcu_read_lock(); 3632 3633 mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id); 3634 3635 if (!WARN_ON(!mvmsta)) 3636 iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true); 3637 3638 rcu_read_unlock(); 3639 } 3640