/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"

/*
 * New version of ADD_STA_sta command added new fields at the end of the
 * structure, so sending the size of the relevant API's structure is enough to
 * support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
	if (iwl_mvm_has_new_rx_api(mvm) ||
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return sizeof(struct iwl_mvm_add_sta_cmd);
	else
		return sizeof(struct iwl_mvm_add_sta_cmd_v7);
}

static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
				    enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

	BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	return IWL_MVM_INVALID_STA;
}

/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK |
						 STA_FLG_RTS_MIMO_PROT),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		add_sta_cmd.station_type = mvm_sta->sta_type;

	if (!update || (flags & STA_MODIFY_QUEUES)) {
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		if (!iwl_mvm_has_new_tx_api(mvm)) {
			add_sta_cmd.tfd_queue_msk =
				cpu_to_le32(mvm_sta->tfd_queue_msk);

			if (flags & STA_MODIFY_QUEUES)
				add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
		} else {
			WARN_ON(flags & STA_MODIFY_QUEUES);
		}
	}

	switch (sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_20:
		if (sta->ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}
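	/*
	 * Note that the fall-throughs above are deliberate: an 80 MHz capable
	 * station, for example, ends up with the 80, 40 and (if HT is
	 * supported) 20 MHz FAT enable bits all set, so every supported
	 * width is flagged rather than only the widest one.
	 */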
	switch (sta->rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->ht_cap.ht_supported) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = sta->ht_cap.ampdu_density;
	}

	if (sta->vht_cap.vht_supported) {
		agg_size = sta->vht_cap.cap &
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
		agg_size >>=
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
	} else if (sta->ht_cap.ht_supported) {
		agg_size = sta->ht_cap.ampdu_factor;
	}

	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
	if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC)
		add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);

	if (sta->wme) {
		add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;

		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
			add_sta_cmd.uapsd_acs |= BIT(AC_BK);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
			add_sta_cmd.uapsd_acs |= BIT(AC_BE);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
			add_sta_cmd.uapsd_acs |= BIT(AC_VI);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
			add_sta_cmd.uapsd_acs |= BIT(AC_VO);
		add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
		add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
	}

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}

static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
{
	struct iwl_mvm_baid_data *data =
		from_timer(data, t, session_timer);
	struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	if (!ba_data->timeout)
		goto unlock;

	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);

	/*
	 * sta should be valid unless the following happens:
	 * The firmware asserts which triggers a reconfig flow, but
	 * the reconfig fails before we set the pointer to sta into
	 * the fw_id_to_mac_id pointer table. Mac80211 can't stop
	 * A-MPDU and hence the timer continues to run.
	 * Then, the timer expires and sta is NULL.
	 */
	if (!sta)
		goto unlock;

	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_rx_ba_timer_expired(mvm_sta->vif,
				      sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}

/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
					unsigned long disable_agg_tids,
					bool remove_queue)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u32 status;
	u8 sta_id;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return -EINVAL;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	mvmsta->tid_disable_agg |= disable_agg_tids;

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES;
	if (disable_agg_tids)
		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	if (remove_queue)
		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

	rcu_read_unlock();

	/* Notify FW of queue removal from the STA queues */
	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);

	return ret;
}

static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return -EINVAL;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			agg_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	return agg_tids;
}
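/*
 * The TID bitmap returned above is typically fed straight into
 * iwl_mvm_invalidate_sta_queue(), which sets STA_MODIFY_TID_DISABLE_TX for
 * those TIDs so the firmware stops aggregating on them while the queue is
 * being reassigned (see the shared-queue path in iwl_mvm_sta_alloc_queue()).
 */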
/*
 * Remove a queue from a station's resources.
 * Note that this only marks as free. It DOESN'T delete a BA agreement, and
 * doesn't disable the queue
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long disable_agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return 0;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	/* Unmap MAC queues and TIDs from this queue */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			disable_agg_tids |= BIT(tid);
		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
	}

	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
	spin_unlock_bh(&mvmsta->lock);

	rcu_read_unlock();

	return disable_agg_tids;
}

static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
				       bool same_sta)
{
	struct iwl_mvm_sta *mvmsta;
	u8 txq_curr_ac, sta_id, tid;
	unsigned long disable_agg_tids = 0;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid = mvm->queue_info[queue].txq_tid;
	spin_unlock_bh(&mvm->queue_info_lock);

	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
	if (WARN_ON(!mvmsta))
		return -EINVAL;

	disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
	/* Disable the queue */
	if (disable_agg_tids)
		iwl_mvm_invalidate_sta_queue(mvm, queue,
					     disable_agg_tids, false);

	ret = iwl_mvm_disable_txq(mvm, queue,
				  mvmsta->vif->hw_queue[txq_curr_ac],
				  tid, 0);
	if (ret) {
		/* Re-mark the inactive queue as inactive */
		spin_lock_bh(&mvm->queue_info_lock);
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
		spin_unlock_bh(&mvm->queue_info_lock);
		IWL_ERR(mvm,
			"Failed to free inactive queue %d (ret=%d)\n",
			queue, ret);

		return ret;
	}

	/* If TXQ is allocated to another STA, update removal in FW */
	if (!same_sta)
		iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);

	return 0;
}
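/*
 * To summarize the flow above: the DQA TXQ pool is small, so queues that the
 * inactivity check has marked IWL_MVM_QUEUE_INACTIVE can be reclaimed.
 * Before such a queue is handed to a new user, the old owner's aggregations
 * on it are invalidated and the queue itself is disabled, which is what
 * iwl_mvm_free_inactive_queue() takes care of.
 */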
static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
				    unsigned long tfd_queue_mask, u8 ac)
{
	int queue = 0;
	u8 ac_to_queue[IEEE80211_NUM_ACS];
	int i;

	lockdep_assert_held(&mvm->queue_info_lock);
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

	/* See what ACs the existing queues for this STA have */
	for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
		/* Only DATA queues can be shared */
		if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
			continue;

		/* Don't try and take queues being reconfigured */
		if (mvm->queue_info[queue].status ==
		    IWL_MVM_QUEUE_RECONFIGURING)
			continue;

		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
	}

	/*
	 * The queue to share is chosen only from DATA queues as follows (in
	 * descending priority):
	 * 1. An AC_BE queue
	 * 2. Same AC queue
	 * 3. Highest AC queue that is lower than new AC
	 * 4. Any existing AC (there always is at least 1 DATA queue)
	 */

	/* Priority 1: An AC_BE queue */
	if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BE];
	/* Priority 2: Same AC queue */
	else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[ac];
	/* Priority 3a: If new AC is VO and VI exists - use VI */
	else if (ac == IEEE80211_AC_VO &&
		 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 3b: No BE so only AC less than the new one is BK */
	else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BK];
	/* Priority 4a: No BE nor BK - use VI if exists */
	else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 4b: No BE, BK nor VI - use VO if exists */
	else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VO];

	/* Make sure queue found (or not) is legal */
	if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
	    !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
	    (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
		IWL_ERR(mvm, "No DATA queues available to share\n");
		return -ENOSPC;
	}

	/* Make sure the queue isn't in the middle of being reconfigured */
	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) {
		IWL_ERR(mvm,
			"TXQ %d is in the middle of re-config - try again\n",
			queue);
		return -EBUSY;
	}

	return queue;
}

/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does that
 * in such a case, otherwise - if no redirection required - it does nothing,
 * unless the %force param is true.
 */
int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
			       int ac, int ssn, unsigned int wdg_timeout,
			       bool force)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool shared_queue;
	unsigned long mq;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	/*
	 * If the AC is lower than current one - FIFO needs to be redirected to
	 * the lowest one of the streams in the queue. Check if this is needed
	 * here.
	 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
	 * value 3 and VO with value 0, so to check if ac X is lower than ac Y
	 * we need to check if the numerical value of X is LARGER than of Y.
	 */
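	/*
	 * For reference, mac80211's enum ieee80211_ac_numbers is:
	 *   IEEE80211_AC_VO = 0, IEEE80211_AC_VI = 1,
	 *   IEEE80211_AC_BE = 2, IEEE80211_AC_BK = 3,
	 * so "ac <= current ac" below really means "the new AC is the same or
	 * higher priority than the AC the queue is currently mapped to".
	 */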
	spin_lock_bh(&mvm->queue_info_lock);
	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
		spin_unlock_bh(&mvm->queue_info_lock);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "No redirection needed on TXQ #%d\n",
				    queue);
		return 0;
	}

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
	cmd.tid = mvm->queue_info[queue].txq_tid;
	mq = mvm->hw_queue_to_mac80211[queue];
	shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
	spin_unlock_bh(&mvm->queue_info_lock);

	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
			    queue, iwl_mvm_ac_to_tx_fifo[ac]);

	/* Stop MAC queues and wait for this queue to empty */
	iwl_mvm_stop_mac_queues(mvm, mq);
	ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
	if (ret) {
		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
			queue);
		ret = -EIO;
		goto out;
	}

	/* Before redirecting the queue we need to de-activate it */
	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
			ret);

	/* Make sure the SCD wrptr is correctly set before reconfiguring */
	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

	/* Update the TID "owner" of the queue */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].txq_tid = tid;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

	/* Redirect to lower AC */
	iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
			     cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);

	/* Update AC marking of the queue */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].mac80211_ac = ac;
	spin_unlock_bh(&mvm->queue_info_lock);

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there of shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
	/* Continue using the MAC queues */
	iwl_mvm_start_mac_queues(mvm, mq);

	return ret;
}

static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
					struct ieee80211_sta *sta, u8 ac,
					int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	u8 mac_queue = mvmsta->vif->hw_queue[ac];
	int queue = -1;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating queue for sta %d on tid %d\n",
			    mvmsta->sta_id, tid);
	queue = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, mvmsta->sta_id, tid,
					wdg_timeout);
	if (queue < 0)
		return queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);

	spin_lock_bh(&mvmsta->lock);
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tid_data[tid].is_tid_active = true;
	spin_unlock_bh(&mvmsta->lock);

	return 0;
}
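/*
 * With the new TX API (TVQM) the firmware owns queue allocation, so the
 * helper above only records the queue id it got back.  On older (DQA)
 * devices the driver itself has to find, reserve and possibly share TXQs,
 * which is what the considerably longer function below deals with.
 */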
static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta, u8 ac, int tid,
				   struct ieee80211_hdr *hdr)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	u8 mac_queue = mvmsta->vif->hw_queue[ac];
	int queue = -1;
	bool using_inactive_queue = false, same_sta = false;
	unsigned long disable_agg_tids = 0;
	enum iwl_mvm_agg_state queue_state;
	bool shared_queue = false, inc_ssn;
	int ssn;
	unsigned long tfd_queue_mask;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);

	spin_lock_bh(&mvmsta->lock);
	tfd_queue_mask = mvmsta->tfd_queue_msk;
	spin_unlock_bh(&mvmsta->lock);

	spin_lock_bh(&mvm->queue_info_lock);

	/*
	 * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
	 * exists
	 */
	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_MGMT_QUEUE,
						IWL_MVM_DQA_MAX_MGMT_QUEUE);
		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
					    queue);

		/* If no such queue is found, we'll use a DATA queue instead */
	}

	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
	    (mvm->queue_info[mvmsta->reserved_queue].status ==
	     IWL_MVM_QUEUE_RESERVED ||
	     mvm->queue_info[mvmsta->reserved_queue].status ==
	     IWL_MVM_QUEUE_INACTIVE)) {
		queue = mvmsta->reserved_queue;
		mvm->queue_info[queue].reserved = true;
		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
	}

	if (queue < 0)
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);

	/*
	 * Check if this queue is already allocated but inactive.
	 * In such a case, we'll need to first free this queue before enabling
	 * it again, so we'll mark it as reserved to make sure no new traffic
	 * arrives on it
	 */
	if (queue > 0 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
		using_inactive_queue = true;
		same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
				    queue, mvmsta->sta_id, tid);
	}

	/* No free queue - we'll have to share */
	if (queue <= 0) {
		queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
		if (queue > 0) {
			shared_queue = true;
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
		}
	}

	/*
	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
	 * to make sure no one else takes it.
	 * This will allow avoiding re-acquiring the lock at the end of the
	 * configuration. On error we'll mark it back as free.
	 */
	if ((queue > 0) && !shared_queue)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

	spin_unlock_bh(&mvm->queue_info_lock);

	/* This shouldn't happen - out of queues */
	if (WARN_ON(queue <= 0)) {
		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
			tid, cfg.sta_id);
		return queue;
	}

	/*
	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
	 * as aggregatable.
816 * Mark all DATA queues as allowing to be aggregated at some point 817 */ 818 cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE || 819 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE); 820 821 /* 822 * If this queue was previously inactive (idle) - we need to free it 823 * first 824 */ 825 if (using_inactive_queue) { 826 ret = iwl_mvm_free_inactive_queue(mvm, queue, same_sta); 827 if (ret) 828 return ret; 829 } 830 831 IWL_DEBUG_TX_QUEUES(mvm, 832 "Allocating %squeue #%d to sta %d on tid %d\n", 833 shared_queue ? "shared " : "", queue, 834 mvmsta->sta_id, tid); 835 836 if (shared_queue) { 837 /* Disable any open aggs on this queue */ 838 disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue); 839 840 if (disable_agg_tids) { 841 IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n", 842 queue); 843 iwl_mvm_invalidate_sta_queue(mvm, queue, 844 disable_agg_tids, false); 845 } 846 } 847 848 ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); 849 inc_ssn = iwl_mvm_enable_txq(mvm, queue, mac_queue, 850 ssn, &cfg, wdg_timeout); 851 if (inc_ssn) { 852 ssn = (ssn + 1) & IEEE80211_SCTL_SEQ; 853 le16_add_cpu(&hdr->seq_ctrl, 0x10); 854 } 855 856 /* 857 * Mark queue as shared in transport if shared 858 * Note this has to be done after queue enablement because enablement 859 * can also set this value, and there is no indication there to shared 860 * queues 861 */ 862 if (shared_queue) 863 iwl_trans_txq_set_shared_mode(mvm->trans, queue, true); 864 865 spin_lock_bh(&mvmsta->lock); 866 /* 867 * This looks racy, but it is not. We have only one packet for 868 * this ra/tid in our Tx path since we stop the Qdisc when we 869 * need to allocate a new TFD queue. 870 */ 871 if (inc_ssn) 872 mvmsta->tid_data[tid].seq_number += 0x10; 873 mvmsta->tid_data[tid].txq_id = queue; 874 mvmsta->tid_data[tid].is_tid_active = true; 875 mvmsta->tfd_queue_msk |= BIT(queue); 876 queue_state = mvmsta->tid_data[tid].state; 877 878 if (mvmsta->reserved_queue == queue) 879 mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE; 880 spin_unlock_bh(&mvmsta->lock); 881 882 if (!shared_queue) { 883 ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES); 884 if (ret) 885 goto out_err; 886 887 /* If we need to re-enable aggregations... 
		if (queue_state == IWL_AGG_ON) {
			ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
			if (ret)
				goto out_err;
		}
	} else {
		/* Redirect queue, if needed */
		ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
						 wdg_timeout, false);
		if (ret)
			goto out_err;
	}

	return 0;

out_err:
	iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);

	return ret;
}

static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_UPDATE_QUEUE_TID,
	};
	int tid;
	unsigned long tid_bitmap;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	spin_lock_bh(&mvm->queue_info_lock);
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
		return;

	/* Find any TID for queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	cmd.tid = tid;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
			queue, ret);
		return;
	}

	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].txq_tid = tid;
	spin_unlock_bh(&mvm->queue_info_lock);
	IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
			    queue, tid);
}

static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id;
	int tid = -1;
	unsigned long tid_bitmap;
	unsigned int wdg_timeout;
	int ssn;
	int ret = true;

	/* queue sharing is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* Find TID for queue, and make sure it is the only one on the queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	if (tid_bitmap != BIT(tid)) {
		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
			queue, tid_bitmap);
		return;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
			    tid);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);

	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

	ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
					 tid_to_mac80211_ac[tid], ssn,
					 wdg_timeout, true);
	if (ret) {
		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
		return;
	}

	/* If aggs should be turned back on - do it */
	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
		struct iwl_mvm_add_sta_cmd cmd = {0};

		mvmsta->tid_disable_agg &= ~BIT(tid);

		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
		cmd.sta_id = mvmsta->sta_id;
		cmd.add_modify = STA_MODE_MODIFY;
		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
		if (!ret) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "TXQ #%d is now aggregated again\n",
					    queue);

			/* Mark queue internally as aggregating again */
			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
		}
	}

	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
	spin_unlock_bh(&mvm->queue_info_lock);
}

static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
{
	if (tid == IWL_MAX_TID_COUNT)
		return IEEE80211_AC_VO; /* MGMT */

	return tid_to_mac80211_ac[tid];
}

static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta, int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head deferred_tx;
	u8 mac_queue;
	bool no_queue = false; /* Marks if there is a problem with the queue */
	u8 ac;

	lockdep_assert_held(&mvm->mutex);

	skb = skb_peek(&tid_data->deferred_tx_frames);
	if (!skb)
		return;
	hdr = (void *)skb->data;

	ac = iwl_mvm_tid_to_ac_queue(tid);
	mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;

	if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE &&
	    iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
		IWL_ERR(mvm,
			"Can't alloc TXQ for sta %d tid %d - dropping frame\n",
			mvmsta->sta_id, tid);

		/*
		 * Mark queue as problematic so later the deferred traffic is
		 * freed, as we can do nothing with it
		 */
		no_queue = true;
	}

	__skb_queue_head_init(&deferred_tx);

	/* Disable bottom-halves when entering TX path */
	local_bh_disable();
	spin_lock(&mvmsta->lock);
	skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
	mvmsta->deferred_traffic_tid_map &= ~BIT(tid);
	spin_unlock(&mvmsta->lock);

	while ((skb = __skb_dequeue(&deferred_tx)))
		if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
			ieee80211_free_txskb(mvm->hw, skb);
	local_bh_enable();

	/* Wake queue */
	iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
}

void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
					   add_stream_wk);
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long deferred_tid_traffic;
	int queue, sta_id, tid;

	/* Check inactivity of queues */
	iwl_mvm_inactivity_check(mvm);

	mutex_lock(&mvm->mutex);

	/* No queue reconfiguration in TVQM mode */
	if (iwl_mvm_has_new_tx_api(mvm))
		goto alloc_queues;

	/* Reconfigure queues requiring reconfiguration */
	for (queue = 0; queue < ARRAY_SIZE(mvm->queue_info); queue++) {
		bool reconfig;
		bool change_owner;

		spin_lock_bh(&mvm->queue_info_lock);
		reconfig = (mvm->queue_info[queue].status ==
			    IWL_MVM_QUEUE_RECONFIGURING);

		/*
		 * We need to take into account a situation in which a TXQ was
		 * allocated to TID x, and then turned shared by adding TIDs y
		 * and z.
		 * If TID x becomes inactive and is removed from the TXQ,
		 * ownership must be given to one of the remaining TIDs.
		 * This is mainly because if TID x continues - a new queue can't
		 * be allocated for it as long as it is an owner of another TXQ.
		 */
		change_owner = !(mvm->queue_info[queue].tid_bitmap &
				 BIT(mvm->queue_info[queue].txq_tid)) &&
			       (mvm->queue_info[queue].status ==
				IWL_MVM_QUEUE_SHARED);
		spin_unlock_bh(&mvm->queue_info_lock);

		if (reconfig)
			iwl_mvm_unshare_queue(mvm, queue);
		else if (change_owner)
			iwl_mvm_change_queue_owner(mvm, queue);
	}

alloc_queues:
	/* Go over all stations with deferred traffic */
	for_each_set_bit(sta_id, mvm->sta_deferred_frames,
			 IWL_MVM_STATION_COUNT) {
		clear_bit(sta_id, mvm->sta_deferred_frames);
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);
		deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;

		for_each_set_bit(tid, &deferred_tid_traffic,
				 IWL_MAX_TID_COUNT + 1)
			iwl_mvm_tx_deferred_stream(mvm, sta, tid);
	}

	mutex_unlock(&mvm->mutex);
}

static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      enum nl80211_iftype vif_type)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	int queue;
	bool using_inactive_queue = false, same_sta = false;

	/* queue reserving is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return 0;

	/*
	 * Check for inactive queues, so we don't reach a situation where we
	 * can't add a STA due to a shortage in queues that doesn't really exist
	 */
	iwl_mvm_inactivity_check(mvm);

	spin_lock_bh(&mvm->queue_info_lock);

	/* Make sure we have free resources for this STA */
	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
	    (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
	     IWL_MVM_QUEUE_FREE))
		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
	else
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		spin_unlock_bh(&mvm->queue_info_lock);
		IWL_ERR(mvm, "No available queues for new station\n");
		return -ENOSPC;
	} else if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
		/*
		 * If this queue is already allocated but inactive we'll need to
		 * first free this queue before enabling it again, we'll mark
		 * it as reserved to make sure no new traffic arrives on it
		 */
		using_inactive_queue = true;
		same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
	}
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

	spin_unlock_bh(&mvm->queue_info_lock);

	mvmsta->reserved_queue = queue;

	if (using_inactive_queue)
		iwl_mvm_free_inactive_queue(mvm, queue, same_sta);

	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
			    queue, mvmsta->sta_id);

	return 0;
}
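/*
 * Reserving, as done above, only marks the TXQ as IWL_MVM_QUEUE_RESERVED in
 * mvm->queue_info and remembers it in mvmsta->reserved_queue; the queue is
 * actually enabled later, when iwl_mvm_sta_alloc_queue() claims it for the
 * station's first TID that carries traffic.
 */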
/*
 * In DQA mode, after a HW restart the queues should be allocated as before, in
 * order to avoid race conditions when there are shared queues. This function
 * does the re-mapping and queue allocation.
 *
 * Note that re-enabling aggregations isn't done in this function.
 */
static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
						 struct iwl_mvm_sta *mvm_sta)
{
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
	int i;
	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvm_sta->sta_id,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	/* Make sure reserved queue is still marked as such (if allocated) */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
		mvm->queue_info[mvm_sta->reserved_queue].status =
			IWL_MVM_QUEUE_RESERVED;

	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
		int txq_id = tid_data->txq_id;
		int ac;
		u8 mac_queue;

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		skb_queue_head_init(&tid_data->deferred_tx_frames);

		ac = tid_to_mac80211_ac[i];
		mac_queue = mvm_sta->vif->hw_queue[ac];

		if (iwl_mvm_has_new_tx_api(mvm)) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d\n",
					    mvm_sta->sta_id, i);
			txq_id = iwl_mvm_tvqm_enable_txq(mvm, mac_queue,
							 mvm_sta->sta_id,
							 i, wdg_timeout);
			tid_data->txq_id = txq_id;

			/*
			 * Since we don't set the seq number after reset, and HW
			 * sets it now, FW reset will cause the seq num to start
			 * at 0 again, so driver will need to update it
			 * internally as well, so it keeps in sync with real val
			 */
			tid_data->seq_number = 0;
		} else {
			u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

			cfg.tid = i;
			cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
			cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
					 txq_id ==
					 IWL_MVM_DQA_BSS_CLIENT_QUEUE);

			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d to queue %d\n",
					    mvm_sta->sta_id, i, txq_id);

			iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg,
					   wdg_timeout);
			mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
		}
	}
}

static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
				      struct iwl_mvm_int_sta *sta,
				      const u8 *addr,
				      u16 mac_id, u16 color)
{
	struct iwl_mvm_add_sta_cmd cmd;
	int ret;
	u32 status = ADD_STA_SUCCESS;

	lockdep_assert_held(&mvm->mutex);

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = sta->sta_id;
	cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
							     color));
	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		cmd.station_type = sta->type;

	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(0xffff);

	if (addr)
		memcpy(cmd.addr, addr, ETH_ALEN);

	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Internal station added.\n");
		return 0;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
			status);
		break;
	}
	return ret;
}
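/*
 * iwl_mvm_add_int_sta_common() is the common path for the driver-internal
 * stations (aux, sniffer, broadcast and multicast below, and the temporary
 * station used during HW restart); note that it sets tid_disable_tx to
 * 0xffff, i.e. TX aggregation stays disabled on every TID for these entries.
 */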
int iwl_mvm_add_sta(struct iwl_mvm *mvm,
		    struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_rxq_dup_data *dup_data;
	int i, ret, sta_id;
	bool sta_update = false;
	unsigned int sta_flags = 0;

	lockdep_assert_held(&mvm->mutex);

	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		sta_id = iwl_mvm_find_free_sta_id(mvm,
						  ieee80211_vif_type_p2p(vif));
	else
		sta_id = mvm_sta->sta_id;

	if (sta_id == IWL_MVM_INVALID_STA)
		return -ENOSPC;

	spin_lock_init(&mvm_sta->lock);

	/* if this is a HW restart re-alloc existing queues */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		struct iwl_mvm_int_sta tmp_sta = {
			.sta_id = sta_id,
			.type = mvm_sta->sta_type,
		};

		/*
		 * First add an empty station since allocating
		 * a queue requires a valid station
		 */
		ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
						 mvmvif->id, mvmvif->color);
		if (ret)
			goto err;

		iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
		sta_update = true;
		sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
		goto update_fw;
	}

	mvm_sta->sta_id = sta_id;
	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
						      mvmvif->color);
	mvm_sta->vif = vif;
	if (!mvm->trans->cfg->gen2)
		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	else
		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
	mvm_sta->tx_protection = 0;
	mvm_sta->tt_tx_protection = false;
	mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;

	/* HW restart, don't assume the memory has been zeroed */
	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
	mvm_sta->tfd_queue_msk = 0;

	/* for HW restart - reset everything but the sequence number */
	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		u16 seq = mvm_sta->tid_data[i].seq_number;
		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
		mvm_sta->tid_data[i].seq_number = seq;

		/*
		 * Mark all queues for this STA as unallocated and defer TX
		 * frames until the queue is allocated
		 */
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
		skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
	}
	mvm_sta->deferred_traffic_tid_map = 0;
	mvm_sta->agg_tids = 0;

	if (iwl_mvm_has_new_rx_api(mvm) &&
	    !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		int q;

		dup_data = kcalloc(mvm->trans->num_rx_queues,
				   sizeof(*dup_data), GFP_KERNEL);
		if (!dup_data)
			return -ENOMEM;
		/*
		 * Initialize all the last_seq values to 0xffff which can never
		 * compare equal to the frame's seq_ctrl in the check in
		 * iwl_mvm_is_dup() since the lower 4 bits are the fragment
		 * number and fragmented packets don't reach that function.
		 *
		 * This thus allows receiving a packet with seqno 0 and the
		 * retry bit set as the very first packet on a new TID.
		 */
		for (q = 0; q < mvm->trans->num_rx_queues; q++)
			memset(dup_data[q].last_seq, 0xff,
			       sizeof(dup_data[q].last_seq));
		mvm_sta->dup_data = dup_data;
	}

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
						 ieee80211_vif_type_p2p(vif));
		if (ret)
			goto err;
	}

	/*
	 * if rs is registered with mac80211, then "add station" will be handled
	 * via the corresponding ops, otherwise need to notify rate scaling here
	 */
	if (iwl_mvm_has_tlc_offload(mvm))
		iwl_mvm_rs_add_sta(mvm, mvm_sta);

update_fw:
	ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
	if (ret)
		goto err;

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!sta->tdls) {
			WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
			mvmvif->ap_sta_id = sta_id;
		} else {
			WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
		}
	}

	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);

	return 0;

err:
	return ret;
}

int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
		      bool drain)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
	cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
			       mvmsta->sta_id);
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
			mvmsta->sta_id);
		break;
	}

	return ret;
}

/*
 * Remove a station from the FW table. Before sending the command to remove
 * the station validate that the station is indeed known to the driver (sanity
 * only).
 */
static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
		.sta_id = sta_id,
	};
	int ret;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	/* Note: internal stations are marked as error values */
	if (!sta) {
		IWL_ERR(mvm, "Invalid station id\n");
		return -EINVAL;
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
				   sizeof(rm_sta_cmd), &rm_sta_cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
		return ret;
	}

	return 0;
}

static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct iwl_mvm_sta *mvm_sta)
{
	int ac;
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ac = iwl_mvm_tid_to_ac_queue(i);
		iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
				    vif->hw_queue[ac], i, 0);
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
	}
}

int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
				  struct iwl_mvm_sta *mvm_sta)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		u16 txq_id;
		int ret;

		spin_lock_bh(&mvm_sta->lock);
		txq_id = mvm_sta->tid_data[i].txq_id;
		spin_unlock_bh(&mvm_sta->lock);

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
		if (ret)
			return ret;
	}

	return 0;
}

int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
		   struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	u8 sta_id = mvm_sta->sta_id;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_rx_api(mvm))
		kfree(mvm_sta->dup_data);

	ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
	if (ret)
		return ret;

	/* flush its queues here since we are freeing mvm_sta */
	ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
	if (ret)
		return ret;
	if (iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
	} else {
		u32 q_mask = mvm_sta->tfd_queue_msk;

		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
						     q_mask);
	}
	if (ret)
		return ret;

	ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);

	iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);

	/* If there is a TXQ still marked as reserved - free it */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
		u8 reserved_txq = mvm_sta->reserved_queue;
		enum iwl_mvm_queue_status *status;

		/*
		 * If no traffic has gone through the reserved TXQ - it
		 * is still marked as IWL_MVM_QUEUE_RESERVED, and
		 * should be manually marked as free again
		 */
		spin_lock_bh(&mvm->queue_info_lock);
		status = &mvm->queue_info[reserved_txq].status;
		if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
			 (*status != IWL_MVM_QUEUE_FREE),
			 "sta_id %d reserved txq %d status %d",
			 sta_id, reserved_txq, *status)) {
			spin_unlock_bh(&mvm->queue_info_lock);
			return -EINVAL;
		}

		*status = IWL_MVM_QUEUE_FREE;
		spin_unlock_bh(&mvm->queue_info_lock);
	}

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id == sta_id) {
		/* if associated - we can't remove the AP STA now */
		if (vif->bss_conf.assoc)
			return ret;

		/* unassoc - go ahead - remove the AP STA now */
		mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;

		/* clear d0i3_ap_sta_id if no longer relevant */
		if (mvm->d0i3_ap_sta_id == sta_id)
			mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
	}

	/*
	 * This shouldn't happen - the TDLS channel switch should be canceled
	 * before the STA is removed.
	 */
	if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
		mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
		cancel_delayed_work(&mvm->tdls_cs.dwork);
	}

	/*
	 * Make sure that the tx response code sees the station as -EBUSY and
	 * calls the drain worker.
	 */
	spin_lock_bh(&mvm_sta->lock);
	spin_unlock_bh(&mvm_sta->lock);

	ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);

	return ret;
}

int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
		      struct ieee80211_vif *vif,
		      u8 sta_id)
{
	int ret = iwl_mvm_rm_sta_common(mvm, sta_id);

	lockdep_assert_held(&mvm->mutex);

	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
	return ret;
}

int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
			     struct iwl_mvm_int_sta *sta,
			     u32 qmask, enum nl80211_iftype iftype,
			     enum iwl_sta_type type)
{
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
	    sta->sta_id == IWL_MVM_INVALID_STA) {
		sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
		if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
			return -ENOSPC;
	}

	sta->tfd_queue_msk = qmask;
	sta->type = type;

	/* put a non-NULL value so iterating over the stations won't stop */
	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
	return 0;
}

void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
{
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
	memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
	sta->sta_id = IWL_MVM_INVALID_STA;
}

static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue,
					  u8 sta_id, u8 fifo)
{
	unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
				   mvm->cfg->base_params->wd_timeout :
				   IWL_WATCHDOG_DISABLED;

	if (iwl_mvm_has_new_tx_api(mvm)) {
		int tvqm_queue =
			iwl_mvm_tvqm_enable_txq(mvm, *queue, sta_id,
						IWL_MAX_TID_COUNT,
						wdg_timeout);
		*queue = tvqm_queue;
	} else {
		struct iwl_trans_txq_scd_cfg cfg = {
			.fifo = fifo,
			.sta_id = sta_id,
			.tid = IWL_MAX_TID_COUNT,
			.aggregate = false,
			.frame_limit = IWL_FRAME_LIMIT,
		};

		iwl_mvm_enable_txq(mvm, *queue, *queue, 0, &cfg, wdg_timeout);
	}
}

int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Allocate aux station and assign to it the aux queue */
	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
				       NL80211_IFTYPE_UNSPECIFIED,
				       IWL_STA_AUX_ACTIVITY);
	if (ret)
		return ret;

	/* Map Aux queue to fifo - needs to happen before adding Aux station */
	if (!iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
					      mvm->aux_sta.sta_id,
					      IWL_MVM_TX_FIFO_MCAST);

	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
					 MAC_INDEX_AUX, 0);
	if (ret) {
		iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
		return ret;
	}

	/*
	 * For 22000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
					      mvm->aux_sta.sta_id,
					      IWL_MVM_TX_FIFO_MCAST);

	return 0;
}

int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Map snif queue to fifo - must happen before adding snif station */
	if (!iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
					      mvm->snif_sta.sta_id,
					      IWL_MVM_TX_FIFO_BE);

	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
					 mvmvif->id, 0);
	if (ret)
		return ret;

	/*
	 * For 22000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
					      mvm->snif_sta.sta_id,
					      IWL_MVM_TX_FIFO_BE);

	return 0;
}

int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_disable_txq(mvm, mvm->snif_queue, mvm->snif_queue,
			    IWL_MAX_TID_COUNT, 0);
	ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}

void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
{
	iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
}

void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
{
	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
}
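/*
 * The broadcast station handled below is a per-vif internal station used for
 * frames that are not addressed to a known peer (e.g. probe responses and,
 * for P2P Device, management frames); depending on the interface type they
 * go out on mvm->probe_queue or mvm->p2p_dev_queue, as set up in
 * iwl_mvm_send_add_bcast_sta().
 */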
1850 */ 1851 int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 1852 { 1853 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1854 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta; 1855 static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; 1856 const u8 *baddr = _baddr; 1857 int queue; 1858 int ret; 1859 unsigned int wdg_timeout = 1860 iwl_mvm_get_wd_timeout(mvm, vif, false, false); 1861 struct iwl_trans_txq_scd_cfg cfg = { 1862 .fifo = IWL_MVM_TX_FIFO_VO, 1863 .sta_id = mvmvif->bcast_sta.sta_id, 1864 .tid = IWL_MAX_TID_COUNT, 1865 .aggregate = false, 1866 .frame_limit = IWL_FRAME_LIMIT, 1867 }; 1868 1869 lockdep_assert_held(&mvm->mutex); 1870 1871 if (!iwl_mvm_has_new_tx_api(mvm)) { 1872 if (vif->type == NL80211_IFTYPE_AP || 1873 vif->type == NL80211_IFTYPE_ADHOC) 1874 queue = mvm->probe_queue; 1875 else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) 1876 queue = mvm->p2p_dev_queue; 1877 else if (WARN(1, "Missing required TXQ for adding bcast STA\n")) 1878 return -EINVAL; 1879 1880 bsta->tfd_queue_msk |= BIT(queue); 1881 1882 iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0, 1883 &cfg, wdg_timeout); 1884 } 1885 1886 if (vif->type == NL80211_IFTYPE_ADHOC) 1887 baddr = vif->bss_conf.bssid; 1888 1889 if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA)) 1890 return -ENOSPC; 1891 1892 ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr, 1893 mvmvif->id, mvmvif->color); 1894 if (ret) 1895 return ret; 1896 1897 /* 1898 * For 22000 firmware and on we cannot add queue to a station unknown 1899 * to firmware so enable queue here - after the station was added 1900 */ 1901 if (iwl_mvm_has_new_tx_api(mvm)) { 1902 queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0], 1903 bsta->sta_id, 1904 IWL_MAX_TID_COUNT, 1905 wdg_timeout); 1906 1907 if (vif->type == NL80211_IFTYPE_AP || 1908 vif->type == NL80211_IFTYPE_ADHOC) 1909 mvm->probe_queue = queue; 1910 else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) 1911 mvm->p2p_dev_queue = queue; 1912 } 1913 1914 return 0; 1915 } 1916 1917 static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm, 1918 struct ieee80211_vif *vif) 1919 { 1920 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1921 int queue; 1922 1923 lockdep_assert_held(&mvm->mutex); 1924 1925 iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, 0); 1926 1927 switch (vif->type) { 1928 case NL80211_IFTYPE_AP: 1929 case NL80211_IFTYPE_ADHOC: 1930 queue = mvm->probe_queue; 1931 break; 1932 case NL80211_IFTYPE_P2P_DEVICE: 1933 queue = mvm->p2p_dev_queue; 1934 break; 1935 default: 1936 WARN(1, "Can't free bcast queue on vif type %d\n", 1937 vif->type); 1938 return; 1939 } 1940 1941 iwl_mvm_disable_txq(mvm, queue, vif->hw_queue[0], IWL_MAX_TID_COUNT, 0); 1942 if (iwl_mvm_has_new_tx_api(mvm)) 1943 return; 1944 1945 WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue))); 1946 mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue); 1947 } 1948 1949 /* Send the FW a request to remove the station from it's internal data 1950 * structures, but DO NOT remove the entry from the local data structures. 
 */
int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        int ret;

        lockdep_assert_held(&mvm->mutex);

        iwl_mvm_free_bcast_sta_queues(mvm, vif);

        ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
        if (ret)
                IWL_WARN(mvm, "Failed sending remove station\n");
        return ret;
}

int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

        lockdep_assert_held(&mvm->mutex);

        return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0,
                                        ieee80211_vif_type_p2p(vif),
                                        IWL_STA_GENERAL_PURPOSE);
}

/* Allocate a new station entry for the broadcast station of the given vif,
 * and send it to the FW.
 * Note that each P2P mac should have its own broadcast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 */
int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
        int ret;

        lockdep_assert_held(&mvm->mutex);

        ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
        if (ret)
                return ret;

        ret = iwl_mvm_send_add_bcast_sta(mvm, vif);

        if (ret)
                iwl_mvm_dealloc_int_sta(mvm, bsta);

        return ret;
}

void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

        iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
}

/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
        int ret;

        lockdep_assert_held(&mvm->mutex);

        ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);

        iwl_mvm_dealloc_bcast_sta(mvm, vif);

        return ret;
}

/*
 * Allocate a new station entry for the multicast station of the given vif,
 * and send it to the FW.
 * Note that each AP/GO mac should have its own multicast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the multicast station is added
 */
int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
        static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
        const u8 *maddr = _maddr;
        struct iwl_trans_txq_scd_cfg cfg = {
                .fifo = IWL_MVM_TX_FIFO_MCAST,
                .sta_id = msta->sta_id,
                .tid = 0,
                .aggregate = false,
                .frame_limit = IWL_FRAME_LIMIT,
        };
        unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
        int ret;

        lockdep_assert_held(&mvm->mutex);

        if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
                    vif->type != NL80211_IFTYPE_ADHOC))
                return -ENOTSUPP;

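        /*
         * Background note (added for clarity): the cab queue is mac80211's
         * "content after beacon" queue, i.e. the queue carrying multicast and
         * broadcast data that is held back and transmitted right after the
         * DTIM beacon, which is why the multicast station below is tied to it.
         */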
        /*
         * In IBSS, ieee80211_check_queues() sets the cab_queue to be
         * invalid, so make sure we use the queue we want.
         * Note that this is done here as we want to avoid making DQA
         * changes in mac80211 layer.
         */
        if (vif->type == NL80211_IFTYPE_ADHOC) {
                vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
                mvmvif->cab_queue = vif->cab_queue;
        }

        /*
         * While in previous FWs we had to exclude the cab queue from the
         * TFD queue mask, now it is needed like any other queue.
         */
        if (!iwl_mvm_has_new_tx_api(mvm) &&
            fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
                iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
                                   &cfg, timeout);
                msta->tfd_queue_msk |= BIT(vif->cab_queue);
        }
        ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
                                         mvmvif->id, mvmvif->color);
        if (ret) {
                iwl_mvm_dealloc_int_sta(mvm, msta);
                return ret;
        }

        /*
         * Enable cab queue after the ADD_STA command is sent.
         * This is needed for 22000 firmware which won't accept SCD_QUEUE_CFG
         * command with unknown station id, and for FW that doesn't support
         * station API since the cab queue is not included in the
         * tfd_queue_mask.
         */
        if (iwl_mvm_has_new_tx_api(mvm)) {
                int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue,
                                                    msta->sta_id,
                                                    0,
                                                    timeout);
                mvmvif->cab_queue = queue;
        } else if (!fw_has_api(&mvm->fw->ucode_capa,
                               IWL_UCODE_TLV_API_STA_TYPE))
                iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
                                   &cfg, timeout);

        return 0;
}

/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        int ret;

        lockdep_assert_held(&mvm->mutex);

        iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);

        iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
                            0, 0);

        ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
        if (ret)
                IWL_WARN(mvm, "Failed sending remove station\n");

        return ret;
}

#define IWL_MAX_RX_BA_SESSIONS 16

static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
{
        struct iwl_mvm_delba_notif notif = {
                .metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
                .metadata.sync = 1,
                .delba.baid = baid,
        };
        iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
}

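/*
 * Tear down the per-RX-queue reorder state of a BAID (comment added for
 * clarity): the RX queues are synced first via iwl_mvm_sync_rxq_del_ba()
 * so no queue is still processing frames for this BAID, then any frames
 * left in a reorder buffer are dropped and the per-queue reorder timer is
 * prevented from re-arming and disarmed.
 */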
static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
                                 struct iwl_mvm_baid_data *data)
{
        int i;

        iwl_mvm_sync_rxq_del_ba(mvm, data->baid);

        for (i = 0; i < mvm->trans->num_rx_queues; i++) {
                int j;
                struct iwl_mvm_reorder_buffer *reorder_buf =
                        &data->reorder_buf[i];
                struct iwl_mvm_reorder_buf_entry *entries =
                        &data->entries[i * data->entries_per_queue];

                spin_lock_bh(&reorder_buf->lock);
                if (likely(!reorder_buf->num_stored)) {
                        spin_unlock_bh(&reorder_buf->lock);
                        continue;
                }

                /*
                 * This shouldn't happen in regular DELBA since the internal
                 * delBA notification should trigger a release of all frames in
                 * the reorder buffer.
                 */
                WARN_ON(1);

                for (j = 0; j < reorder_buf->buf_size; j++)
                        __skb_queue_purge(&entries[j].e.frames);
                /*
                 * Prevent timer re-arm. This prevents a very far-fetched case
                 * where we timed out on the notification. There may be prior
                 * RX frames pending in the RX queue before the notification
                 * that might get processed between now and the actual deletion
                 * and we would re-arm the timer although we are deleting the
                 * reorder buffer.
                 */
                reorder_buf->removed = true;
                spin_unlock_bh(&reorder_buf->lock);
                del_timer_sync(&reorder_buf->reorder_timer);
        }
}

static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
                                        struct iwl_mvm_baid_data *data,
                                        u16 ssn, u8 buf_size)
{
        int i;

        for (i = 0; i < mvm->trans->num_rx_queues; i++) {
                struct iwl_mvm_reorder_buffer *reorder_buf =
                        &data->reorder_buf[i];
                struct iwl_mvm_reorder_buf_entry *entries =
                        &data->entries[i * data->entries_per_queue];
                int j;

                reorder_buf->num_stored = 0;
                reorder_buf->head_sn = ssn;
                reorder_buf->buf_size = buf_size;
                /* rx reorder timer */
                timer_setup(&reorder_buf->reorder_timer,
                            iwl_mvm_reorder_timer_expired, 0);
                spin_lock_init(&reorder_buf->lock);
                reorder_buf->mvm = mvm;
                reorder_buf->queue = i;
                reorder_buf->valid = false;
                for (j = 0; j < reorder_buf->buf_size; j++)
                        __skb_queue_head_init(&entries[j].e.frames);
        }
}

int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                       int tid, u16 ssn, bool start, u8 buf_size, u16 timeout)
{
        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_add_sta_cmd cmd = {};
        struct iwl_mvm_baid_data *baid_data = NULL;
        int ret;
        u32 status;

        lockdep_assert_held(&mvm->mutex);

        if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
                IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
                return -ENOSPC;
        }

        if (iwl_mvm_has_new_rx_api(mvm) && start) {
                u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);

                /* sparse doesn't like the __align() so don't check */
#ifndef __CHECKER__
                /*
                 * The division below will be OK if either the cache line size
                 * can be divided by the entry size (ALIGN will round up) or
                 * if the entry size can be divided by the cache line size, in
                 * which case the ALIGN() will do nothing.
                 */
                BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
                             sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
#endif

                /*
                 * Upward align the reorder buffer size to fill an entire cache
                 * line for each queue, to avoid sharing cache lines between
                 * different queues.
                 */
                reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);

                /*
                 * Allocate here so if allocation fails we can bail out early
                 * before starting the BA session in the firmware
                 */
                baid_data = kzalloc(sizeof(*baid_data) +
                                    mvm->trans->num_rx_queues *
                                    reorder_buf_size,
                                    GFP_KERNEL);
                if (!baid_data)
                        return -ENOMEM;

                /*
                 * This division is why we need the above BUILD_BUG_ON(),
                 * if that doesn't hold then this will not be right.
2265 */ 2266 baid_data->entries_per_queue = 2267 reorder_buf_size / sizeof(baid_data->entries[0]); 2268 } 2269 2270 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color); 2271 cmd.sta_id = mvm_sta->sta_id; 2272 cmd.add_modify = STA_MODE_MODIFY; 2273 if (start) { 2274 cmd.add_immediate_ba_tid = (u8) tid; 2275 cmd.add_immediate_ba_ssn = cpu_to_le16(ssn); 2276 cmd.rx_ba_window = cpu_to_le16((u16)buf_size); 2277 } else { 2278 cmd.remove_immediate_ba_tid = (u8) tid; 2279 } 2280 cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID : 2281 STA_MODIFY_REMOVE_BA_TID; 2282 2283 status = ADD_STA_SUCCESS; 2284 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, 2285 iwl_mvm_add_sta_cmd_size(mvm), 2286 &cmd, &status); 2287 if (ret) 2288 goto out_free; 2289 2290 switch (status & IWL_ADD_STA_STATUS_MASK) { 2291 case ADD_STA_SUCCESS: 2292 IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n", 2293 start ? "start" : "stopp"); 2294 break; 2295 case ADD_STA_IMMEDIATE_BA_FAILURE: 2296 IWL_WARN(mvm, "RX BA Session refused by fw\n"); 2297 ret = -ENOSPC; 2298 break; 2299 default: 2300 ret = -EIO; 2301 IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n", 2302 start ? "start" : "stopp", status); 2303 break; 2304 } 2305 2306 if (ret) 2307 goto out_free; 2308 2309 if (start) { 2310 u8 baid; 2311 2312 mvm->rx_ba_sessions++; 2313 2314 if (!iwl_mvm_has_new_rx_api(mvm)) 2315 return 0; 2316 2317 if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) { 2318 ret = -EINVAL; 2319 goto out_free; 2320 } 2321 baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >> 2322 IWL_ADD_STA_BAID_SHIFT); 2323 baid_data->baid = baid; 2324 baid_data->timeout = timeout; 2325 baid_data->last_rx = jiffies; 2326 baid_data->rcu_ptr = &mvm->baid_map[baid]; 2327 timer_setup(&baid_data->session_timer, 2328 iwl_mvm_rx_agg_session_expired, 0); 2329 baid_data->mvm = mvm; 2330 baid_data->tid = tid; 2331 baid_data->sta_id = mvm_sta->sta_id; 2332 2333 mvm_sta->tid_to_baid[tid] = baid; 2334 if (timeout) 2335 mod_timer(&baid_data->session_timer, 2336 TU_TO_EXP_TIME(timeout * 2)); 2337 2338 iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size); 2339 /* 2340 * protect the BA data with RCU to cover a case where our 2341 * internal RX sync mechanism will timeout (not that it's 2342 * supposed to happen) and we will free the session data while 2343 * RX is being processed in parallel 2344 */ 2345 IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n", 2346 mvm_sta->sta_id, tid, baid); 2347 WARN_ON(rcu_access_pointer(mvm->baid_map[baid])); 2348 rcu_assign_pointer(mvm->baid_map[baid], baid_data); 2349 } else { 2350 u8 baid = mvm_sta->tid_to_baid[tid]; 2351 2352 if (mvm->rx_ba_sessions > 0) 2353 /* check that restart flow didn't zero the counter */ 2354 mvm->rx_ba_sessions--; 2355 if (!iwl_mvm_has_new_rx_api(mvm)) 2356 return 0; 2357 2358 if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID)) 2359 return -EINVAL; 2360 2361 baid_data = rcu_access_pointer(mvm->baid_map[baid]); 2362 if (WARN_ON(!baid_data)) 2363 return -EINVAL; 2364 2365 /* synchronize all rx queues so we can safely delete */ 2366 iwl_mvm_free_reorder(mvm, baid_data); 2367 del_timer_sync(&baid_data->session_timer); 2368 RCU_INIT_POINTER(mvm->baid_map[baid], NULL); 2369 kfree_rcu(baid_data, rcu_head); 2370 IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid); 2371 } 2372 return 0; 2373 2374 out_free: 2375 kfree(baid_data); 2376 return ret; 2377 } 2378 2379 int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, 2380 int tid, u8 queue, bool start) 2381 { 2382 struct iwl_mvm_sta *mvm_sta = 
iwl_mvm_sta_from_mac80211(sta); 2383 struct iwl_mvm_add_sta_cmd cmd = {}; 2384 int ret; 2385 u32 status; 2386 2387 lockdep_assert_held(&mvm->mutex); 2388 2389 if (start) { 2390 mvm_sta->tfd_queue_msk |= BIT(queue); 2391 mvm_sta->tid_disable_agg &= ~BIT(tid); 2392 } else { 2393 /* In DQA-mode the queue isn't removed on agg termination */ 2394 mvm_sta->tid_disable_agg |= BIT(tid); 2395 } 2396 2397 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color); 2398 cmd.sta_id = mvm_sta->sta_id; 2399 cmd.add_modify = STA_MODE_MODIFY; 2400 if (!iwl_mvm_has_new_tx_api(mvm)) 2401 cmd.modify_mask = STA_MODIFY_QUEUES; 2402 cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX; 2403 cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk); 2404 cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg); 2405 2406 status = ADD_STA_SUCCESS; 2407 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, 2408 iwl_mvm_add_sta_cmd_size(mvm), 2409 &cmd, &status); 2410 if (ret) 2411 return ret; 2412 2413 switch (status & IWL_ADD_STA_STATUS_MASK) { 2414 case ADD_STA_SUCCESS: 2415 break; 2416 default: 2417 ret = -EIO; 2418 IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n", 2419 start ? "start" : "stopp", status); 2420 break; 2421 } 2422 2423 return ret; 2424 } 2425 2426 const u8 tid_to_mac80211_ac[] = { 2427 IEEE80211_AC_BE, 2428 IEEE80211_AC_BK, 2429 IEEE80211_AC_BK, 2430 IEEE80211_AC_BE, 2431 IEEE80211_AC_VI, 2432 IEEE80211_AC_VI, 2433 IEEE80211_AC_VO, 2434 IEEE80211_AC_VO, 2435 IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */ 2436 }; 2437 2438 static const u8 tid_to_ucode_ac[] = { 2439 AC_BE, 2440 AC_BK, 2441 AC_BK, 2442 AC_BE, 2443 AC_VI, 2444 AC_VI, 2445 AC_VO, 2446 AC_VO, 2447 }; 2448 2449 int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 2450 struct ieee80211_sta *sta, u16 tid, u16 *ssn) 2451 { 2452 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 2453 struct iwl_mvm_tid_data *tid_data; 2454 u16 normalized_ssn; 2455 int txq_id; 2456 int ret; 2457 2458 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) 2459 return -EINVAL; 2460 2461 if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED && 2462 mvmsta->tid_data[tid].state != IWL_AGG_OFF) { 2463 IWL_ERR(mvm, 2464 "Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n", 2465 mvmsta->tid_data[tid].state); 2466 return -ENXIO; 2467 } 2468 2469 lockdep_assert_held(&mvm->mutex); 2470 2471 if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE && 2472 iwl_mvm_has_new_tx_api(mvm)) { 2473 u8 ac = tid_to_mac80211_ac[tid]; 2474 2475 ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid); 2476 if (ret) 2477 return ret; 2478 } 2479 2480 spin_lock_bh(&mvmsta->lock); 2481 2482 /* possible race condition - we entered D0i3 while starting agg */ 2483 if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) { 2484 spin_unlock_bh(&mvmsta->lock); 2485 IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n"); 2486 return -EIO; 2487 } 2488 2489 spin_lock(&mvm->queue_info_lock); 2490 2491 /* 2492 * Note the possible cases: 2493 * 1. An enabled TXQ - TXQ needs to become agg'ed 2494 * 2. 
The TXQ hasn't yet been enabled, so find a free one and mark 2495 * it as reserved 2496 */ 2497 txq_id = mvmsta->tid_data[tid].txq_id; 2498 if (txq_id == IWL_MVM_INVALID_QUEUE) { 2499 txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, 2500 IWL_MVM_DQA_MIN_DATA_QUEUE, 2501 IWL_MVM_DQA_MAX_DATA_QUEUE); 2502 if (txq_id < 0) { 2503 ret = txq_id; 2504 IWL_ERR(mvm, "Failed to allocate agg queue\n"); 2505 goto release_locks; 2506 } 2507 2508 /* TXQ hasn't yet been enabled, so mark it only as reserved */ 2509 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED; 2510 } else if (unlikely(mvm->queue_info[txq_id].status == 2511 IWL_MVM_QUEUE_SHARED)) { 2512 ret = -ENXIO; 2513 IWL_DEBUG_TX_QUEUES(mvm, 2514 "Can't start tid %d agg on shared queue!\n", 2515 tid); 2516 goto release_locks; 2517 } 2518 2519 spin_unlock(&mvm->queue_info_lock); 2520 2521 IWL_DEBUG_TX_QUEUES(mvm, 2522 "AGG for tid %d will be on queue #%d\n", 2523 tid, txq_id); 2524 2525 tid_data = &mvmsta->tid_data[tid]; 2526 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); 2527 tid_data->txq_id = txq_id; 2528 *ssn = tid_data->ssn; 2529 2530 IWL_DEBUG_TX_QUEUES(mvm, 2531 "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n", 2532 mvmsta->sta_id, tid, txq_id, tid_data->ssn, 2533 tid_data->next_reclaimed); 2534 2535 /* 2536 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need 2537 * to align the wrap around of ssn so we compare relevant values. 2538 */ 2539 normalized_ssn = tid_data->ssn; 2540 if (mvm->trans->cfg->gen2) 2541 normalized_ssn &= 0xff; 2542 2543 if (normalized_ssn == tid_data->next_reclaimed) { 2544 tid_data->state = IWL_AGG_STARTING; 2545 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); 2546 } else { 2547 tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA; 2548 } 2549 2550 ret = 0; 2551 goto out; 2552 2553 release_locks: 2554 spin_unlock(&mvm->queue_info_lock); 2555 out: 2556 spin_unlock_bh(&mvmsta->lock); 2557 2558 return ret; 2559 } 2560 2561 int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 2562 struct ieee80211_sta *sta, u16 tid, u8 buf_size, 2563 bool amsdu) 2564 { 2565 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 2566 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; 2567 unsigned int wdg_timeout = 2568 iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false); 2569 int queue, ret; 2570 bool alloc_queue = true; 2571 enum iwl_mvm_queue_status queue_status; 2572 u16 ssn; 2573 2574 struct iwl_trans_txq_scd_cfg cfg = { 2575 .sta_id = mvmsta->sta_id, 2576 .tid = tid, 2577 .frame_limit = buf_size, 2578 .aggregate = true, 2579 }; 2580 2581 /* 2582 * When FW supports TLC_OFFLOAD, it also implements Tx aggregation 2583 * manager, so this function should never be called in this case. 2584 */ 2585 if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm))) 2586 return -EINVAL; 2587 2588 BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE) 2589 != IWL_MAX_TID_COUNT); 2590 2591 spin_lock_bh(&mvmsta->lock); 2592 ssn = tid_data->ssn; 2593 queue = tid_data->txq_id; 2594 tid_data->state = IWL_AGG_ON; 2595 mvmsta->agg_tids |= BIT(tid); 2596 tid_data->ssn = 0xffff; 2597 tid_data->amsdu_in_ampdu_allowed = amsdu; 2598 spin_unlock_bh(&mvmsta->lock); 2599 2600 if (iwl_mvm_has_new_tx_api(mvm)) { 2601 /* 2602 * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start() 2603 * would have failed, so if we are here there is no need to 2604 * allocate a queue. 
                 * However, if the aggregation size is different from the
                 * default size, the scheduler should be reconfigured.
                 * We cannot do this with the new TX API, so return unsupported
                 * for now, until it is offloaded to firmware.
                 * Note that if the SCD default value changes, this condition
                 * should be updated as well.
                 */
                if (buf_size < IWL_FRAME_LIMIT)
                        return -ENOTSUPP;

                ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
                if (ret)
                        return -EIO;
                goto out;
        }

        cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

        spin_lock_bh(&mvm->queue_info_lock);
        queue_status = mvm->queue_info[queue].status;
        spin_unlock_bh(&mvm->queue_info_lock);

        /* Maybe there is no need to even alloc a queue... */
        if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
                alloc_queue = false;

        /*
         * Only reconfig the SCD for the queue if the window size has
         * changed from current (become smaller)
         */
        if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
                /*
                 * If reconfiguring an existing queue, it first must be
                 * drained
                 */
                ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
                                                     BIT(queue));
                if (ret) {
                        IWL_ERR(mvm,
                                "Error draining queue before reconfig\n");
                        return ret;
                }

                ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
                                           mvmsta->sta_id, tid,
                                           buf_size, ssn);
                if (ret) {
                        IWL_ERR(mvm,
                                "Error reconfiguring TXQ #%d\n", queue);
                        return ret;
                }
        }

        if (alloc_queue)
                iwl_mvm_enable_txq(mvm, queue,
                                   vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
                                   &cfg, wdg_timeout);

        /* Send ADD_STA command to enable aggs only if the queue isn't shared */
        if (queue_status != IWL_MVM_QUEUE_SHARED) {
                ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
                if (ret)
                        return -EIO;
        }

        /* No need to mark as reserved */
        spin_lock_bh(&mvm->queue_info_lock);
        mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
        spin_unlock_bh(&mvm->queue_info_lock);

out:
        /*
         * Even though in theory the peer could have different
         * aggregation reorder buffer sizes for different sessions,
         * our ucode doesn't allow for that and has a global limit
         * for each station. Therefore, use the minimum of all the
         * aggregation sessions and our default value.
         */
        mvmsta->max_agg_bufsize =
                min(mvmsta->max_agg_bufsize, buf_size);
        mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;

        IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
                     sta->addr, tid);

        return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq, false);
}

static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
                                        struct iwl_mvm_sta *mvmsta,
                                        struct iwl_mvm_tid_data *tid_data)
{
        u16 txq_id = tid_data->txq_id;

        if (iwl_mvm_has_new_tx_api(mvm))
                return;

        spin_lock_bh(&mvm->queue_info_lock);
        /*
         * The TXQ is marked as reserved only if no traffic came through yet.
         * This means no traffic has been sent on this TID (agg'd or not), so
         * we no longer have use for the queue. It hasn't even been
         * allocated through iwl_mvm_enable_txq, so we can just mark it back as
         * free.
2709 */ 2710 if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) { 2711 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE; 2712 tid_data->txq_id = IWL_MVM_INVALID_QUEUE; 2713 } 2714 2715 spin_unlock_bh(&mvm->queue_info_lock); 2716 } 2717 2718 int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 2719 struct ieee80211_sta *sta, u16 tid) 2720 { 2721 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 2722 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; 2723 u16 txq_id; 2724 int err; 2725 2726 /* 2727 * If mac80211 is cleaning its state, then say that we finished since 2728 * our state has been cleared anyway. 2729 */ 2730 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { 2731 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); 2732 return 0; 2733 } 2734 2735 spin_lock_bh(&mvmsta->lock); 2736 2737 txq_id = tid_data->txq_id; 2738 2739 IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n", 2740 mvmsta->sta_id, tid, txq_id, tid_data->state); 2741 2742 mvmsta->agg_tids &= ~BIT(tid); 2743 2744 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data); 2745 2746 switch (tid_data->state) { 2747 case IWL_AGG_ON: 2748 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); 2749 2750 IWL_DEBUG_TX_QUEUES(mvm, 2751 "ssn = %d, next_recl = %d\n", 2752 tid_data->ssn, tid_data->next_reclaimed); 2753 2754 tid_data->ssn = 0xffff; 2755 tid_data->state = IWL_AGG_OFF; 2756 spin_unlock_bh(&mvmsta->lock); 2757 2758 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); 2759 2760 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false); 2761 return 0; 2762 case IWL_AGG_STARTING: 2763 case IWL_EMPTYING_HW_QUEUE_ADDBA: 2764 /* 2765 * The agg session has been stopped before it was set up. This 2766 * can happen when the AddBA timer times out for example. 2767 */ 2768 2769 /* No barriers since we are under mutex */ 2770 lockdep_assert_held(&mvm->mutex); 2771 2772 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); 2773 tid_data->state = IWL_AGG_OFF; 2774 err = 0; 2775 break; 2776 default: 2777 IWL_ERR(mvm, 2778 "Stopping AGG while state not ON or starting for %d on %d (%d)\n", 2779 mvmsta->sta_id, tid, tid_data->state); 2780 IWL_ERR(mvm, 2781 "\ttid_data->txq_id = %d\n", tid_data->txq_id); 2782 err = -EINVAL; 2783 } 2784 2785 spin_unlock_bh(&mvmsta->lock); 2786 2787 return err; 2788 } 2789 2790 int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 2791 struct ieee80211_sta *sta, u16 tid) 2792 { 2793 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 2794 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; 2795 u16 txq_id; 2796 enum iwl_mvm_agg_state old_state; 2797 2798 /* 2799 * First set the agg state to OFF to avoid calling 2800 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty. 
2801 */ 2802 spin_lock_bh(&mvmsta->lock); 2803 txq_id = tid_data->txq_id; 2804 IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n", 2805 mvmsta->sta_id, tid, txq_id, tid_data->state); 2806 old_state = tid_data->state; 2807 tid_data->state = IWL_AGG_OFF; 2808 mvmsta->agg_tids &= ~BIT(tid); 2809 spin_unlock_bh(&mvmsta->lock); 2810 2811 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data); 2812 2813 if (old_state >= IWL_AGG_ON) { 2814 iwl_mvm_drain_sta(mvm, mvmsta, true); 2815 2816 if (iwl_mvm_has_new_tx_api(mvm)) { 2817 if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id, 2818 BIT(tid), 0)) 2819 IWL_ERR(mvm, "Couldn't flush the AGG queue\n"); 2820 iwl_trans_wait_txq_empty(mvm->trans, txq_id); 2821 } else { 2822 if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0)) 2823 IWL_ERR(mvm, "Couldn't flush the AGG queue\n"); 2824 iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id)); 2825 } 2826 2827 iwl_mvm_drain_sta(mvm, mvmsta, false); 2828 2829 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false); 2830 } 2831 2832 return 0; 2833 } 2834 2835 static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm) 2836 { 2837 int i, max = -1, max_offs = -1; 2838 2839 lockdep_assert_held(&mvm->mutex); 2840 2841 /* Pick the unused key offset with the highest 'deleted' 2842 * counter. Every time a key is deleted, all the counters 2843 * are incremented and the one that was just deleted is 2844 * reset to zero. Thus, the highest counter is the one 2845 * that was deleted longest ago. Pick that one. 2846 */ 2847 for (i = 0; i < STA_KEY_MAX_NUM; i++) { 2848 if (test_bit(i, mvm->fw_key_table)) 2849 continue; 2850 if (mvm->fw_key_deleted[i] > max) { 2851 max = mvm->fw_key_deleted[i]; 2852 max_offs = i; 2853 } 2854 } 2855 2856 if (max_offs < 0) 2857 return STA_KEY_IDX_INVALID; 2858 2859 return max_offs; 2860 } 2861 2862 static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm, 2863 struct ieee80211_vif *vif, 2864 struct ieee80211_sta *sta) 2865 { 2866 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2867 2868 if (sta) 2869 return iwl_mvm_sta_from_mac80211(sta); 2870 2871 /* 2872 * The device expects GTKs for station interfaces to be 2873 * installed as GTKs for the AP station. If we have no 2874 * station ID, then use AP's station ID. 2875 */ 2876 if (vif->type == NL80211_IFTYPE_STATION && 2877 mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) { 2878 u8 sta_id = mvmvif->ap_sta_id; 2879 2880 sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id], 2881 lockdep_is_held(&mvm->mutex)); 2882 2883 /* 2884 * It is possible that the 'sta' parameter is NULL, 2885 * for example when a GTK is removed - the sta_id will then 2886 * be the AP ID, and no station was passed by mac80211. 
2887 */ 2888 if (IS_ERR_OR_NULL(sta)) 2889 return NULL; 2890 2891 return iwl_mvm_sta_from_mac80211(sta); 2892 } 2893 2894 return NULL; 2895 } 2896 2897 static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, 2898 u32 sta_id, 2899 struct ieee80211_key_conf *key, bool mcast, 2900 u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags, 2901 u8 key_offset, bool mfp) 2902 { 2903 union { 2904 struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1; 2905 struct iwl_mvm_add_sta_key_cmd cmd; 2906 } u = {}; 2907 __le16 key_flags; 2908 int ret; 2909 u32 status; 2910 u16 keyidx; 2911 u64 pn = 0; 2912 int i, size; 2913 bool new_api = fw_has_api(&mvm->fw->ucode_capa, 2914 IWL_UCODE_TLV_API_TKIP_MIC_KEYS); 2915 2916 if (sta_id == IWL_MVM_INVALID_STA) 2917 return -EINVAL; 2918 2919 keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) & 2920 STA_KEY_FLG_KEYID_MSK; 2921 key_flags = cpu_to_le16(keyidx); 2922 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP); 2923 2924 switch (key->cipher) { 2925 case WLAN_CIPHER_SUITE_TKIP: 2926 key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP); 2927 if (new_api) { 2928 memcpy((void *)&u.cmd.tx_mic_key, 2929 &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY], 2930 IWL_MIC_KEY_SIZE); 2931 2932 memcpy((void *)&u.cmd.rx_mic_key, 2933 &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY], 2934 IWL_MIC_KEY_SIZE); 2935 pn = atomic64_read(&key->tx_pn); 2936 2937 } else { 2938 u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32; 2939 for (i = 0; i < 5; i++) 2940 u.cmd_v1.tkip_rx_ttak[i] = 2941 cpu_to_le16(tkip_p1k[i]); 2942 } 2943 memcpy(u.cmd.common.key, key->key, key->keylen); 2944 break; 2945 case WLAN_CIPHER_SUITE_CCMP: 2946 key_flags |= cpu_to_le16(STA_KEY_FLG_CCM); 2947 memcpy(u.cmd.common.key, key->key, key->keylen); 2948 if (new_api) 2949 pn = atomic64_read(&key->tx_pn); 2950 break; 2951 case WLAN_CIPHER_SUITE_WEP104: 2952 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES); 2953 /* fall through */ 2954 case WLAN_CIPHER_SUITE_WEP40: 2955 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP); 2956 memcpy(u.cmd.common.key + 3, key->key, key->keylen); 2957 break; 2958 case WLAN_CIPHER_SUITE_GCMP_256: 2959 key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES); 2960 /* fall through */ 2961 case WLAN_CIPHER_SUITE_GCMP: 2962 key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP); 2963 memcpy(u.cmd.common.key, key->key, key->keylen); 2964 if (new_api) 2965 pn = atomic64_read(&key->tx_pn); 2966 break; 2967 default: 2968 key_flags |= cpu_to_le16(STA_KEY_FLG_EXT); 2969 memcpy(u.cmd.common.key, key->key, key->keylen); 2970 } 2971 2972 if (mcast) 2973 key_flags |= cpu_to_le16(STA_KEY_MULTICAST); 2974 if (mfp) 2975 key_flags |= cpu_to_le16(STA_KEY_MFP); 2976 2977 u.cmd.common.key_offset = key_offset; 2978 u.cmd.common.key_flags = key_flags; 2979 u.cmd.common.sta_id = sta_id; 2980 2981 if (new_api) { 2982 u.cmd.transmit_seq_cnt = cpu_to_le64(pn); 2983 size = sizeof(u.cmd); 2984 } else { 2985 size = sizeof(u.cmd_v1); 2986 } 2987 2988 status = ADD_STA_SUCCESS; 2989 if (cmd_flags & CMD_ASYNC) 2990 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size, 2991 &u.cmd); 2992 else 2993 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, 2994 &u.cmd, &status); 2995 2996 switch (status) { 2997 case ADD_STA_SUCCESS: 2998 IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n"); 2999 break; 3000 default: 3001 ret = -EIO; 3002 IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n"); 3003 break; 3004 } 3005 3006 return ret; 3007 } 3008 3009 static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm, 3010 struct ieee80211_key_conf *keyconf, 3011 u8 sta_id, bool remove_key) 3012 { 3013 struct 
iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {}; 3014 3015 /* verify the key details match the required command's expectations */ 3016 if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) || 3017 (keyconf->keyidx != 4 && keyconf->keyidx != 5) || 3018 (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC && 3019 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 && 3020 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256))) 3021 return -EINVAL; 3022 3023 if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) && 3024 keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC)) 3025 return -EINVAL; 3026 3027 igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx); 3028 igtk_cmd.sta_id = cpu_to_le32(sta_id); 3029 3030 if (remove_key) { 3031 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID); 3032 } else { 3033 struct ieee80211_key_seq seq; 3034 const u8 *pn; 3035 3036 switch (keyconf->cipher) { 3037 case WLAN_CIPHER_SUITE_AES_CMAC: 3038 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM); 3039 break; 3040 case WLAN_CIPHER_SUITE_BIP_GMAC_128: 3041 case WLAN_CIPHER_SUITE_BIP_GMAC_256: 3042 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP); 3043 break; 3044 default: 3045 return -EINVAL; 3046 } 3047 3048 memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen); 3049 if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) 3050 igtk_cmd.ctrl_flags |= 3051 cpu_to_le32(STA_KEY_FLG_KEY_32BYTES); 3052 ieee80211_get_key_rx_seq(keyconf, 0, &seq); 3053 pn = seq.aes_cmac.pn; 3054 igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) | 3055 ((u64) pn[4] << 8) | 3056 ((u64) pn[3] << 16) | 3057 ((u64) pn[2] << 24) | 3058 ((u64) pn[1] << 32) | 3059 ((u64) pn[0] << 40)); 3060 } 3061 3062 IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n", 3063 remove_key ? "removing" : "installing", 3064 igtk_cmd.sta_id); 3065 3066 if (!iwl_mvm_has_new_rx_api(mvm)) { 3067 struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = { 3068 .ctrl_flags = igtk_cmd.ctrl_flags, 3069 .key_id = igtk_cmd.key_id, 3070 .sta_id = igtk_cmd.sta_id, 3071 .receive_seq_cnt = igtk_cmd.receive_seq_cnt 3072 }; 3073 3074 memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk, 3075 ARRAY_SIZE(igtk_cmd_v1.igtk)); 3076 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0, 3077 sizeof(igtk_cmd_v1), &igtk_cmd_v1); 3078 } 3079 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0, 3080 sizeof(igtk_cmd), &igtk_cmd); 3081 } 3082 3083 3084 static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm, 3085 struct ieee80211_vif *vif, 3086 struct ieee80211_sta *sta) 3087 { 3088 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3089 3090 if (sta) 3091 return sta->addr; 3092 3093 if (vif->type == NL80211_IFTYPE_STATION && 3094 mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) { 3095 u8 sta_id = mvmvif->ap_sta_id; 3096 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], 3097 lockdep_is_held(&mvm->mutex)); 3098 return sta->addr; 3099 } 3100 3101 3102 return NULL; 3103 } 3104 3105 static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm, 3106 struct ieee80211_vif *vif, 3107 struct ieee80211_sta *sta, 3108 struct ieee80211_key_conf *keyconf, 3109 u8 key_offset, 3110 bool mcast) 3111 { 3112 int ret; 3113 const u8 *addr; 3114 struct ieee80211_key_seq seq; 3115 u16 p1k[5]; 3116 u32 sta_id; 3117 bool mfp = false; 3118 3119 if (sta) { 3120 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 3121 3122 sta_id = mvm_sta->sta_id; 3123 mfp = sta->mfp; 3124 } else if (vif->type == NL80211_IFTYPE_AP && 3125 !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) { 3126 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3127 3128 sta_id = 
mvmvif->mcast_sta.sta_id; 3129 } else { 3130 IWL_ERR(mvm, "Failed to find station id\n"); 3131 return -EINVAL; 3132 } 3133 3134 switch (keyconf->cipher) { 3135 case WLAN_CIPHER_SUITE_TKIP: 3136 if (vif->type == NL80211_IFTYPE_AP) { 3137 ret = -EINVAL; 3138 break; 3139 } 3140 addr = iwl_mvm_get_mac_addr(mvm, vif, sta); 3141 /* get phase 1 key from mac80211 */ 3142 ieee80211_get_key_rx_seq(keyconf, 0, &seq); 3143 ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k); 3144 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast, 3145 seq.tkip.iv32, p1k, 0, key_offset, 3146 mfp); 3147 break; 3148 case WLAN_CIPHER_SUITE_CCMP: 3149 case WLAN_CIPHER_SUITE_WEP40: 3150 case WLAN_CIPHER_SUITE_WEP104: 3151 case WLAN_CIPHER_SUITE_GCMP: 3152 case WLAN_CIPHER_SUITE_GCMP_256: 3153 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast, 3154 0, NULL, 0, key_offset, mfp); 3155 break; 3156 default: 3157 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast, 3158 0, NULL, 0, key_offset, mfp); 3159 } 3160 3161 return ret; 3162 } 3163 3164 static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id, 3165 struct ieee80211_key_conf *keyconf, 3166 bool mcast) 3167 { 3168 union { 3169 struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1; 3170 struct iwl_mvm_add_sta_key_cmd cmd; 3171 } u = {}; 3172 bool new_api = fw_has_api(&mvm->fw->ucode_capa, 3173 IWL_UCODE_TLV_API_TKIP_MIC_KEYS); 3174 __le16 key_flags; 3175 int ret, size; 3176 u32 status; 3177 3178 /* This is a valid situation for GTK removal */ 3179 if (sta_id == IWL_MVM_INVALID_STA) 3180 return 0; 3181 3182 key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) & 3183 STA_KEY_FLG_KEYID_MSK); 3184 key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP); 3185 key_flags |= cpu_to_le16(STA_KEY_NOT_VALID); 3186 3187 if (mcast) 3188 key_flags |= cpu_to_le16(STA_KEY_MULTICAST); 3189 3190 /* 3191 * The fields assigned here are in the same location at the start 3192 * of the command, so we can do this union trick. 3193 */ 3194 u.cmd.common.key_flags = key_flags; 3195 u.cmd.common.key_offset = keyconf->hw_key_idx; 3196 u.cmd.common.sta_id = sta_id; 3197 3198 size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1); 3199 3200 status = ADD_STA_SUCCESS; 3201 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd, 3202 &status); 3203 3204 switch (status) { 3205 case ADD_STA_SUCCESS: 3206 IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n"); 3207 break; 3208 default: 3209 ret = -EIO; 3210 IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n"); 3211 break; 3212 } 3213 3214 return ret; 3215 } 3216 3217 int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, 3218 struct ieee80211_vif *vif, 3219 struct ieee80211_sta *sta, 3220 struct ieee80211_key_conf *keyconf, 3221 u8 key_offset) 3222 { 3223 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); 3224 struct iwl_mvm_sta *mvm_sta; 3225 u8 sta_id = IWL_MVM_INVALID_STA; 3226 int ret; 3227 static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0}; 3228 3229 lockdep_assert_held(&mvm->mutex); 3230 3231 if (vif->type != NL80211_IFTYPE_AP || 3232 keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) { 3233 /* Get the station id from the mvm local station table */ 3234 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta); 3235 if (!mvm_sta) { 3236 IWL_ERR(mvm, "Failed to find station\n"); 3237 return -EINVAL; 3238 } 3239 sta_id = mvm_sta->sta_id; 3240 3241 /* 3242 * It is possible that the 'sta' parameter is NULL, and thus 3243 * there is a need to retrieve the sta from the local station 3244 * table. 
                 */
                if (!sta) {
                        sta = rcu_dereference_protected(
                                mvm->fw_id_to_mac_id[sta_id],
                                lockdep_is_held(&mvm->mutex));
                        if (IS_ERR_OR_NULL(sta)) {
                                IWL_ERR(mvm, "Invalid station id\n");
                                return -EINVAL;
                        }
                }

                if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
                        return -EINVAL;
        } else {
                struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

                sta_id = mvmvif->mcast_sta.sta_id;
        }

        if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
            keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
            keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
                ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
                goto end;
        }

        /* If the key_offset is not pre-assigned, we need to find a
         * new offset to use.  In normal cases, the offset is not
         * pre-assigned, but during HW_RESTART we want to reuse the
         * same indices, so we pass them when this function is called.
         *
         * In D3 entry, we need to hardcode the indices (because the
         * firmware hardcodes the PTK offset to 0).  In this case, we
         * need to make sure we don't overwrite the hw_key_idx in the
         * keyconf structure, because otherwise we cannot configure
         * the original ones back when resuming.
         */
        if (key_offset == STA_KEY_IDX_INVALID) {
                key_offset = iwl_mvm_set_fw_key_idx(mvm);
                if (key_offset == STA_KEY_IDX_INVALID)
                        return -ENOSPC;
                keyconf->hw_key_idx = key_offset;
        }

        ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
        if (ret)
                goto end;

        /*
         * For WEP, the same key is used for multicast and unicast. Upload it
         * again, using the same key offset, and now pointing the other one
         * to the same key slot (offset).
         * If this fails, remove the original as well.
         */
        if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
             keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
            sta) {
                ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
                                            key_offset, !mcast);
                if (ret) {
                        __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
                        goto end;
                }
        }

        __set_bit(key_offset, mvm->fw_key_table);

end:
        IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
                      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
                      sta ?
sta->addr : zero_addr, ret); 3316 return ret; 3317 } 3318 3319 int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, 3320 struct ieee80211_vif *vif, 3321 struct ieee80211_sta *sta, 3322 struct ieee80211_key_conf *keyconf) 3323 { 3324 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); 3325 struct iwl_mvm_sta *mvm_sta; 3326 u8 sta_id = IWL_MVM_INVALID_STA; 3327 int ret, i; 3328 3329 lockdep_assert_held(&mvm->mutex); 3330 3331 /* Get the station from the mvm local station table */ 3332 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta); 3333 if (mvm_sta) 3334 sta_id = mvm_sta->sta_id; 3335 else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast) 3336 sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id; 3337 3338 3339 IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n", 3340 keyconf->keyidx, sta_id); 3341 3342 if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC || 3343 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || 3344 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)) 3345 return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true); 3346 3347 if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) { 3348 IWL_ERR(mvm, "offset %d not used in fw key table.\n", 3349 keyconf->hw_key_idx); 3350 return -ENOENT; 3351 } 3352 3353 /* track which key was deleted last */ 3354 for (i = 0; i < STA_KEY_MAX_NUM; i++) { 3355 if (mvm->fw_key_deleted[i] < U8_MAX) 3356 mvm->fw_key_deleted[i]++; 3357 } 3358 mvm->fw_key_deleted[keyconf->hw_key_idx] = 0; 3359 3360 if (sta && !mvm_sta) { 3361 IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n"); 3362 return 0; 3363 } 3364 3365 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast); 3366 if (ret) 3367 return ret; 3368 3369 /* delete WEP key twice to get rid of (now useless) offset */ 3370 if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 || 3371 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) 3372 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast); 3373 3374 return ret; 3375 } 3376 3377 void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm, 3378 struct ieee80211_vif *vif, 3379 struct ieee80211_key_conf *keyconf, 3380 struct ieee80211_sta *sta, u32 iv32, 3381 u16 *phase1key) 3382 { 3383 struct iwl_mvm_sta *mvm_sta; 3384 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); 3385 bool mfp = sta ? 
sta->mfp : false; 3386 3387 rcu_read_lock(); 3388 3389 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta); 3390 if (WARN_ON_ONCE(!mvm_sta)) 3391 goto unlock; 3392 iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast, 3393 iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx, 3394 mfp); 3395 3396 unlock: 3397 rcu_read_unlock(); 3398 } 3399 3400 void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm, 3401 struct ieee80211_sta *sta) 3402 { 3403 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 3404 struct iwl_mvm_add_sta_cmd cmd = { 3405 .add_modify = STA_MODE_MODIFY, 3406 .sta_id = mvmsta->sta_id, 3407 .station_flags_msk = cpu_to_le32(STA_FLG_PS), 3408 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color), 3409 }; 3410 int ret; 3411 3412 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, 3413 iwl_mvm_add_sta_cmd_size(mvm), &cmd); 3414 if (ret) 3415 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); 3416 } 3417 3418 void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, 3419 struct ieee80211_sta *sta, 3420 enum ieee80211_frame_release_type reason, 3421 u16 cnt, u16 tids, bool more_data, 3422 bool single_sta_queue) 3423 { 3424 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 3425 struct iwl_mvm_add_sta_cmd cmd = { 3426 .add_modify = STA_MODE_MODIFY, 3427 .sta_id = mvmsta->sta_id, 3428 .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT, 3429 .sleep_tx_count = cpu_to_le16(cnt), 3430 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color), 3431 }; 3432 int tid, ret; 3433 unsigned long _tids = tids; 3434 3435 /* convert TIDs to ACs - we don't support TSPEC so that's OK 3436 * Note that this field is reserved and unused by firmware not 3437 * supporting GO uAPSD, so it's safe to always do this. 3438 */ 3439 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) 3440 cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]); 3441 3442 /* If we're releasing frames from aggregation or dqa queues then check 3443 * if all the queues that we're releasing frames from, combined, have: 3444 * - more frames than the service period, in which case more_data 3445 * needs to be set 3446 * - fewer than 'cnt' frames, in which case we need to adjust the 3447 * firmware command (but do that unconditionally) 3448 */ 3449 if (single_sta_queue) { 3450 int remaining = cnt; 3451 int sleep_tx_count; 3452 3453 spin_lock_bh(&mvmsta->lock); 3454 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) { 3455 struct iwl_mvm_tid_data *tid_data; 3456 u16 n_queued; 3457 3458 tid_data = &mvmsta->tid_data[tid]; 3459 3460 n_queued = iwl_mvm_tid_queued(mvm, tid_data); 3461 if (n_queued > remaining) { 3462 more_data = true; 3463 remaining = 0; 3464 break; 3465 } 3466 remaining -= n_queued; 3467 } 3468 sleep_tx_count = cnt - remaining; 3469 if (reason == IEEE80211_FRAME_RELEASE_UAPSD) 3470 mvmsta->sleep_tx_count = sleep_tx_count; 3471 spin_unlock_bh(&mvmsta->lock); 3472 3473 cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count); 3474 if (WARN_ON(cnt - remaining == 0)) { 3475 ieee80211_sta_eosp(sta); 3476 return; 3477 } 3478 } 3479 3480 /* Note: this is ignored by firmware not supporting GO uAPSD */ 3481 if (more_data) 3482 cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA; 3483 3484 if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) { 3485 mvmsta->next_status_eosp = true; 3486 cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL; 3487 } else { 3488 cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD; 3489 } 3490 3491 /* block the Tx queues until the FW updated the sleep Tx count */ 3492 iwl_trans_block_txq_ptrs(mvm->trans, true); 3493 3494 ret = 
iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 3495 CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK, 3496 iwl_mvm_add_sta_cmd_size(mvm), &cmd); 3497 if (ret) 3498 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); 3499 } 3500 3501 void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm, 3502 struct iwl_rx_cmd_buffer *rxb) 3503 { 3504 struct iwl_rx_packet *pkt = rxb_addr(rxb); 3505 struct iwl_mvm_eosp_notification *notif = (void *)pkt->data; 3506 struct ieee80211_sta *sta; 3507 u32 sta_id = le32_to_cpu(notif->sta_id); 3508 3509 if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT)) 3510 return; 3511 3512 rcu_read_lock(); 3513 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); 3514 if (!IS_ERR_OR_NULL(sta)) 3515 ieee80211_sta_eosp(sta); 3516 rcu_read_unlock(); 3517 } 3518 3519 void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm, 3520 struct iwl_mvm_sta *mvmsta, bool disable) 3521 { 3522 struct iwl_mvm_add_sta_cmd cmd = { 3523 .add_modify = STA_MODE_MODIFY, 3524 .sta_id = mvmsta->sta_id, 3525 .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0, 3526 .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX), 3527 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color), 3528 }; 3529 int ret; 3530 3531 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, 3532 iwl_mvm_add_sta_cmd_size(mvm), &cmd); 3533 if (ret) 3534 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); 3535 } 3536 3537 void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm, 3538 struct ieee80211_sta *sta, 3539 bool disable) 3540 { 3541 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 3542 3543 spin_lock_bh(&mvm_sta->lock); 3544 3545 if (mvm_sta->disable_tx == disable) { 3546 spin_unlock_bh(&mvm_sta->lock); 3547 return; 3548 } 3549 3550 mvm_sta->disable_tx = disable; 3551 3552 /* Tell mac80211 to start/stop queuing tx for this station */ 3553 ieee80211_sta_block_awake(mvm->hw, sta, disable); 3554 3555 iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable); 3556 3557 spin_unlock_bh(&mvm_sta->lock); 3558 } 3559 3560 static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm, 3561 struct iwl_mvm_vif *mvmvif, 3562 struct iwl_mvm_int_sta *sta, 3563 bool disable) 3564 { 3565 u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color); 3566 struct iwl_mvm_add_sta_cmd cmd = { 3567 .add_modify = STA_MODE_MODIFY, 3568 .sta_id = sta->sta_id, 3569 .station_flags = disable ? 
cpu_to_le32(STA_FLG_DISABLE_TX) : 0, 3570 .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX), 3571 .mac_id_n_color = cpu_to_le32(id), 3572 }; 3573 int ret; 3574 3575 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0, 3576 iwl_mvm_add_sta_cmd_size(mvm), &cmd); 3577 if (ret) 3578 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); 3579 } 3580 3581 void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm, 3582 struct iwl_mvm_vif *mvmvif, 3583 bool disable) 3584 { 3585 struct ieee80211_sta *sta; 3586 struct iwl_mvm_sta *mvm_sta; 3587 int i; 3588 3589 lockdep_assert_held(&mvm->mutex); 3590 3591 /* Block/unblock all the stations of the given mvmvif */ 3592 for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) { 3593 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], 3594 lockdep_is_held(&mvm->mutex)); 3595 if (IS_ERR_OR_NULL(sta)) 3596 continue; 3597 3598 mvm_sta = iwl_mvm_sta_from_mac80211(sta); 3599 if (mvm_sta->mac_id_n_color != 3600 FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)) 3601 continue; 3602 3603 iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable); 3604 } 3605 3606 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) 3607 return; 3608 3609 /* Need to block/unblock also multicast station */ 3610 if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA) 3611 iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif, 3612 &mvmvif->mcast_sta, disable); 3613 3614 /* 3615 * Only unblock the broadcast station (FW blocks it for immediate 3616 * quiet, not the driver) 3617 */ 3618 if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA) 3619 iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif, 3620 &mvmvif->bcast_sta, disable); 3621 } 3622 3623 void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 3624 { 3625 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3626 struct iwl_mvm_sta *mvmsta; 3627 3628 rcu_read_lock(); 3629 3630 mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id); 3631 3632 if (!WARN_ON(!mvmsta)) 3633 iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true); 3634 3635 rcu_read_unlock(); 3636 } 3637 3638 u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data) 3639 { 3640 u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); 3641 3642 /* 3643 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need 3644 * to align the wrap around of ssn so we compare relevant values. 3645 */ 3646 if (mvm->trans->cfg->gen2) 3647 sn &= 0xff; 3648 3649 return ieee80211_sn_sub(sn, tid_data->next_reclaimed); 3650 } 3651
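
/*
 * Illustrative example of the gen2 normalization in iwl_mvm_tid_queued()
 * above (example values only, not from the original sources): with a
 * driver-side sn of e.g. 0x105 and an 8-bit next_reclaimed that has wrapped
 * to 0x02, masking sn with 0xff yields 0x05, so ieee80211_sn_sub(0x05, 0x02)
 * reports 3 queued frames instead of a bogus distance of 0x103.
 */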