/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"

static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm);

static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
				u32 sta_id,
				struct ieee80211_key_conf *key, bool mcast,
				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
				u8 key_offset, bool mfp);

/*
 * The new version of the ADD_STA command added new fields at the end of the
 * structure, so sending the size of the relevant API's structure is enough to
 * support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
	if (iwl_mvm_has_new_rx_api(mvm) ||
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return sizeof(struct iwl_mvm_add_sta_cmd);
	else
		return sizeof(struct iwl_mvm_add_sta_cmd_v7);
}

static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
				    enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

	BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	return IWL_MVM_INVALID_STA;
}

/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ?
			      1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK |
						 STA_FLG_RTS_MIMO_PROT),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		add_sta_cmd.station_type = mvm_sta->sta_type;

	if (!update || (flags & STA_MODIFY_QUEUES)) {
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		if (!iwl_mvm_has_new_tx_api(mvm)) {
			add_sta_cmd.tfd_queue_msk =
				cpu_to_le32(mvm_sta->tfd_queue_msk);

			if (flags & STA_MODIFY_QUEUES)
				add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
		} else {
			WARN_ON(flags & STA_MODIFY_QUEUES);
		}
	}

	switch (sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_20:
		if (sta->ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	switch (sta->rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->ht_cap.ht_supported) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = sta->ht_cap.ampdu_density;
	}

	if (sta->vht_cap.vht_supported) {
		agg_size = sta->vht_cap.cap &
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
		agg_size >>=
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
	} else if (sta->ht_cap.ht_supported) {
		agg_size = sta->ht_cap.ampdu_factor;
	}

	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
	if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC)
		add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);

	if (sta->wme) {
		add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;

		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
			add_sta_cmd.uapsd_acs |= BIT(AC_BK);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
			add_sta_cmd.uapsd_acs |= BIT(AC_BE);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
			add_sta_cmd.uapsd_acs |= BIT(AC_VI);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
			add_sta_cmd.uapsd_acs |= BIT(AC_VO);
		add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
		add_sta_cmd.sp_length = sta->max_sp ?
					sta->max_sp * 2 : 128;
	}

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}

static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
{
	struct iwl_mvm_baid_data *data =
		from_timer(data, t, session_timer);
	struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	if (!ba_data->timeout)
		goto unlock;

	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);

	/*
	 * sta should be valid unless the following happens:
	 * The firmware asserts which triggers a reconfig flow, but
	 * the reconfig fails before we set the pointer to sta into
	 * the fw_id_to_mac_id pointer table. Mac80211 can't stop
	 * A-MPDU and hence the timer continues to run. Then, the
	 * timer expires and sta is NULL.
	 */
	if (!sta)
		goto unlock;

	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_rx_ba_timer_expired(mvm_sta->vif,
				      sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}

/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
					unsigned long disable_agg_tids,
					bool remove_queue)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u32 status;
	u8 sta_id;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return -EINVAL;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	mvmsta->tid_disable_agg |= disable_agg_tids;

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES;
	if (disable_agg_tids)
		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	if (remove_queue)
		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

	rcu_read_unlock();

	/* Notify FW of queue removal from the STA queues */
	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);

	return ret;
}

static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue,
			       int mac80211_queue, u8 tid, u8 flags)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool remove_mac_queue =
mac80211_queue != IEEE80211_INVAL_HW_QUEUE; 367 int ret; 368 369 if (WARN_ON(remove_mac_queue && mac80211_queue >= IEEE80211_MAX_QUEUES)) 370 return -EINVAL; 371 372 if (iwl_mvm_has_new_tx_api(mvm)) { 373 if (remove_mac_queue) 374 mvm->hw_queue_to_mac80211[queue] &= 375 ~BIT(mac80211_queue); 376 377 iwl_trans_txq_free(mvm->trans, queue); 378 379 return 0; 380 } 381 382 if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0)) 383 return 0; 384 385 mvm->queue_info[queue].tid_bitmap &= ~BIT(tid); 386 387 /* 388 * If there is another TID with the same AC - don't remove the MAC queue 389 * from the mapping 390 */ 391 if (tid < IWL_MAX_TID_COUNT) { 392 unsigned long tid_bitmap = 393 mvm->queue_info[queue].tid_bitmap; 394 int ac = tid_to_mac80211_ac[tid]; 395 int i; 396 397 for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) { 398 if (tid_to_mac80211_ac[i] == ac) 399 remove_mac_queue = false; 400 } 401 } 402 403 if (remove_mac_queue) 404 mvm->hw_queue_to_mac80211[queue] &= 405 ~BIT(mac80211_queue); 406 407 cmd.action = mvm->queue_info[queue].tid_bitmap ? 408 SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE; 409 if (cmd.action == SCD_CFG_DISABLE_QUEUE) 410 mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE; 411 412 IWL_DEBUG_TX_QUEUES(mvm, 413 "Disabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n", 414 queue, 415 mvm->queue_info[queue].tid_bitmap, 416 mvm->hw_queue_to_mac80211[queue]); 417 418 /* If the queue is still enabled - nothing left to do in this func */ 419 if (cmd.action == SCD_CFG_ENABLE_QUEUE) 420 return 0; 421 422 cmd.sta_id = mvm->queue_info[queue].ra_sta_id; 423 cmd.tid = mvm->queue_info[queue].txq_tid; 424 425 /* Make sure queue info is correct even though we overwrite it */ 426 WARN(mvm->queue_info[queue].tid_bitmap || 427 mvm->hw_queue_to_mac80211[queue], 428 "TXQ #%d info out-of-sync - mac map=0x%x, tids=0x%x\n", 429 queue, mvm->hw_queue_to_mac80211[queue], 430 mvm->queue_info[queue].tid_bitmap); 431 432 /* If we are here - the queue is freed and we can zero out these vals */ 433 mvm->queue_info[queue].tid_bitmap = 0; 434 mvm->hw_queue_to_mac80211[queue] = 0; 435 436 /* Regardless if this is a reserved TXQ for a STA - mark it as false */ 437 mvm->queue_info[queue].reserved = false; 438 439 iwl_trans_txq_disable(mvm->trans, queue, false); 440 ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags, 441 sizeof(struct iwl_scd_txq_cfg_cmd), &cmd); 442 443 if (ret) 444 IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n", 445 queue, ret); 446 return ret; 447 } 448 449 static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue) 450 { 451 struct ieee80211_sta *sta; 452 struct iwl_mvm_sta *mvmsta; 453 unsigned long tid_bitmap; 454 unsigned long agg_tids = 0; 455 u8 sta_id; 456 int tid; 457 458 lockdep_assert_held(&mvm->mutex); 459 460 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) 461 return -EINVAL; 462 463 sta_id = mvm->queue_info[queue].ra_sta_id; 464 tid_bitmap = mvm->queue_info[queue].tid_bitmap; 465 466 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], 467 lockdep_is_held(&mvm->mutex)); 468 469 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) 470 return -EINVAL; 471 472 mvmsta = iwl_mvm_sta_from_mac80211(sta); 473 474 spin_lock_bh(&mvmsta->lock); 475 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { 476 if (mvmsta->tid_data[tid].state == IWL_AGG_ON) 477 agg_tids |= BIT(tid); 478 } 479 spin_unlock_bh(&mvmsta->lock); 480 481 return agg_tids; 482 } 483 484 /* 485 * Remove a queue from a station's resources. 486 * Note that this only marks as free. 
It DOESN'T delete a BA agreement, and 487 * doesn't disable the queue 488 */ 489 static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue) 490 { 491 struct ieee80211_sta *sta; 492 struct iwl_mvm_sta *mvmsta; 493 unsigned long tid_bitmap; 494 unsigned long disable_agg_tids = 0; 495 u8 sta_id; 496 int tid; 497 498 lockdep_assert_held(&mvm->mutex); 499 500 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) 501 return -EINVAL; 502 503 sta_id = mvm->queue_info[queue].ra_sta_id; 504 tid_bitmap = mvm->queue_info[queue].tid_bitmap; 505 506 rcu_read_lock(); 507 508 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); 509 510 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) { 511 rcu_read_unlock(); 512 return 0; 513 } 514 515 mvmsta = iwl_mvm_sta_from_mac80211(sta); 516 517 spin_lock_bh(&mvmsta->lock); 518 /* Unmap MAC queues and TIDs from this queue */ 519 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { 520 if (mvmsta->tid_data[tid].state == IWL_AGG_ON) 521 disable_agg_tids |= BIT(tid); 522 mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE; 523 } 524 525 mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */ 526 spin_unlock_bh(&mvmsta->lock); 527 528 rcu_read_unlock(); 529 530 /* 531 * The TX path may have been using this TXQ_ID from the tid_data, 532 * so make sure it's no longer running so that we can safely reuse 533 * this TXQ later. We've set all the TIDs to IWL_MVM_INVALID_QUEUE 534 * above, but nothing guarantees we've stopped using them. Thus, 535 * without this, we could get to iwl_mvm_disable_txq() and remove 536 * the queue while still sending frames to it. 537 */ 538 synchronize_net(); 539 540 return disable_agg_tids; 541 } 542 543 static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue, 544 u8 new_sta_id) 545 { 546 struct iwl_mvm_sta *mvmsta; 547 u8 txq_curr_ac, sta_id, tid; 548 unsigned long disable_agg_tids = 0; 549 bool same_sta; 550 int ret; 551 552 lockdep_assert_held(&mvm->mutex); 553 554 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) 555 return -EINVAL; 556 557 txq_curr_ac = mvm->queue_info[queue].mac80211_ac; 558 sta_id = mvm->queue_info[queue].ra_sta_id; 559 tid = mvm->queue_info[queue].txq_tid; 560 561 same_sta = sta_id == new_sta_id; 562 563 mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id); 564 if (WARN_ON(!mvmsta)) 565 return -EINVAL; 566 567 disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue); 568 /* Disable the queue */ 569 if (disable_agg_tids) 570 iwl_mvm_invalidate_sta_queue(mvm, queue, 571 disable_agg_tids, false); 572 573 ret = iwl_mvm_disable_txq(mvm, queue, 574 mvmsta->vif->hw_queue[txq_curr_ac], 575 tid, 0); 576 if (ret) { 577 IWL_ERR(mvm, 578 "Failed to free inactive queue %d (ret=%d)\n", 579 queue, ret); 580 581 return ret; 582 } 583 584 /* If TXQ is allocated to another STA, update removal in FW */ 585 if (!same_sta) 586 iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true); 587 588 return 0; 589 } 590 591 static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm, 592 unsigned long tfd_queue_mask, u8 ac) 593 { 594 int queue = 0; 595 u8 ac_to_queue[IEEE80211_NUM_ACS]; 596 int i; 597 598 /* 599 * This protects us against grabbing a queue that's being reconfigured 600 * by the inactivity checker. 
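	 * (Both this allocation path and iwl_mvm_inactivity_check() run with
	 * mvm->mutex held, so the queue mappings can't change underneath us.)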
601 */ 602 lockdep_assert_held(&mvm->mutex); 603 604 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) 605 return -EINVAL; 606 607 memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue)); 608 609 /* See what ACs the existing queues for this STA have */ 610 for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) { 611 /* Only DATA queues can be shared */ 612 if (i < IWL_MVM_DQA_MIN_DATA_QUEUE && 613 i != IWL_MVM_DQA_BSS_CLIENT_QUEUE) 614 continue; 615 616 ac_to_queue[mvm->queue_info[i].mac80211_ac] = i; 617 } 618 619 /* 620 * The queue to share is chosen only from DATA queues as follows (in 621 * descending priority): 622 * 1. An AC_BE queue 623 * 2. Same AC queue 624 * 3. Highest AC queue that is lower than new AC 625 * 4. Any existing AC (there always is at least 1 DATA queue) 626 */ 627 628 /* Priority 1: An AC_BE queue */ 629 if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE) 630 queue = ac_to_queue[IEEE80211_AC_BE]; 631 /* Priority 2: Same AC queue */ 632 else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE) 633 queue = ac_to_queue[ac]; 634 /* Priority 3a: If new AC is VO and VI exists - use VI */ 635 else if (ac == IEEE80211_AC_VO && 636 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE) 637 queue = ac_to_queue[IEEE80211_AC_VI]; 638 /* Priority 3b: No BE so only AC less than the new one is BK */ 639 else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE) 640 queue = ac_to_queue[IEEE80211_AC_BK]; 641 /* Priority 4a: No BE nor BK - use VI if exists */ 642 else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE) 643 queue = ac_to_queue[IEEE80211_AC_VI]; 644 /* Priority 4b: No BE, BK nor VI - use VO if exists */ 645 else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE) 646 queue = ac_to_queue[IEEE80211_AC_VO]; 647 648 /* Make sure queue found (or not) is legal */ 649 if (!iwl_mvm_is_dqa_data_queue(mvm, queue) && 650 !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) && 651 (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) { 652 IWL_ERR(mvm, "No DATA queues available to share\n"); 653 return -ENOSPC; 654 } 655 656 return queue; 657 } 658 659 /* 660 * If a given queue has a higher AC than the TID stream that is being compared 661 * to, the queue needs to be redirected to the lower AC. This function does that 662 * in such a case, otherwise - if no redirection required - it does nothing, 663 * unless the %force param is true. 664 */ 665 static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid, 666 int ac, int ssn, unsigned int wdg_timeout, 667 bool force) 668 { 669 struct iwl_scd_txq_cfg_cmd cmd = { 670 .scd_queue = queue, 671 .action = SCD_CFG_DISABLE_QUEUE, 672 }; 673 bool shared_queue; 674 unsigned long mq; 675 int ret; 676 677 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) 678 return -EINVAL; 679 680 /* 681 * If the AC is lower than current one - FIFO needs to be redirected to 682 * the lowest one of the streams in the queue. Check if this is needed 683 * here. 684 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with 685 * value 3 and VO with value 0, so to check if ac X is lower than ac Y 686 * we need to check if the numerical value of X is LARGER than of Y. 
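	 * For example, in enum ieee80211_ac_numbers VO=0, VI=1, BE=2 and BK=3,
	 * so BK (3) is a lower-priority AC than BE (2) even though 3 > 2.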
687 */ 688 if (ac <= mvm->queue_info[queue].mac80211_ac && !force) { 689 IWL_DEBUG_TX_QUEUES(mvm, 690 "No redirection needed on TXQ #%d\n", 691 queue); 692 return 0; 693 } 694 695 cmd.sta_id = mvm->queue_info[queue].ra_sta_id; 696 cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac]; 697 cmd.tid = mvm->queue_info[queue].txq_tid; 698 mq = mvm->hw_queue_to_mac80211[queue]; 699 shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1; 700 701 IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n", 702 queue, iwl_mvm_ac_to_tx_fifo[ac]); 703 704 /* Stop MAC queues and wait for this queue to empty */ 705 iwl_mvm_stop_mac_queues(mvm, mq); 706 ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue)); 707 if (ret) { 708 IWL_ERR(mvm, "Error draining queue %d before reconfig\n", 709 queue); 710 ret = -EIO; 711 goto out; 712 } 713 714 /* Before redirecting the queue we need to de-activate it */ 715 iwl_trans_txq_disable(mvm->trans, queue, false); 716 ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd); 717 if (ret) 718 IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue, 719 ret); 720 721 /* Make sure the SCD wrptr is correctly set before reconfiguring */ 722 iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout); 723 724 /* Update the TID "owner" of the queue */ 725 mvm->queue_info[queue].txq_tid = tid; 726 727 /* TODO: Work-around SCD bug when moving back by multiples of 0x40 */ 728 729 /* Redirect to lower AC */ 730 iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac], 731 cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn); 732 733 /* Update AC marking of the queue */ 734 mvm->queue_info[queue].mac80211_ac = ac; 735 736 /* 737 * Mark queue as shared in transport if shared 738 * Note this has to be done after queue enablement because enablement 739 * can also set this value, and there is no indication there to shared 740 * queues 741 */ 742 if (shared_queue) 743 iwl_trans_txq_set_shared_mode(mvm->trans, queue, true); 744 745 out: 746 /* Continue using the MAC queues */ 747 iwl_mvm_start_mac_queues(mvm, mq); 748 749 return ret; 750 } 751 752 static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, 753 u8 minq, u8 maxq) 754 { 755 int i; 756 757 lockdep_assert_held(&mvm->mutex); 758 759 /* This should not be hit with new TX path */ 760 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) 761 return -ENOSPC; 762 763 /* Start by looking for a free queue */ 764 for (i = minq; i <= maxq; i++) 765 if (mvm->queue_info[i].tid_bitmap == 0 && 766 mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE) 767 return i; 768 769 return -ENOSPC; 770 } 771 772 static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue, 773 u8 sta_id, u8 tid, unsigned int timeout) 774 { 775 int queue, size = IWL_DEFAULT_QUEUE_SIZE; 776 777 if (tid == IWL_MAX_TID_COUNT) { 778 tid = IWL_MGMT_TID; 779 size = IWL_MGMT_QUEUE_SIZE; 780 } 781 queue = iwl_trans_txq_alloc(mvm->trans, 782 cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE), 783 sta_id, tid, SCD_QUEUE_CFG, size, timeout); 784 785 if (queue < 0) { 786 IWL_DEBUG_TX_QUEUES(mvm, 787 "Failed allocating TXQ for sta %d tid %d, ret: %d\n", 788 sta_id, tid, queue); 789 return queue; 790 } 791 792 IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n", 793 queue, sta_id, tid); 794 795 mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue); 796 IWL_DEBUG_TX_QUEUES(mvm, 797 "Enabling TXQ #%d (mac80211 map:0x%x)\n", 798 queue, mvm->hw_queue_to_mac80211[queue]); 799 800 return queue; 801 } 802 803 static int 
iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm, 804 struct ieee80211_sta *sta, u8 ac, 805 int tid) 806 { 807 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 808 unsigned int wdg_timeout = 809 iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); 810 u8 mac_queue = mvmsta->vif->hw_queue[ac]; 811 int queue = -1; 812 813 lockdep_assert_held(&mvm->mutex); 814 815 IWL_DEBUG_TX_QUEUES(mvm, 816 "Allocating queue for sta %d on tid %d\n", 817 mvmsta->sta_id, tid); 818 queue = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, mvmsta->sta_id, tid, 819 wdg_timeout); 820 if (queue < 0) 821 return queue; 822 823 IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue); 824 825 spin_lock_bh(&mvmsta->lock); 826 mvmsta->tid_data[tid].txq_id = queue; 827 spin_unlock_bh(&mvmsta->lock); 828 829 return 0; 830 } 831 832 static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue, 833 int mac80211_queue, u8 sta_id, u8 tid) 834 { 835 bool enable_queue = true; 836 837 /* Make sure this TID isn't already enabled */ 838 if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) { 839 IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n", 840 queue, tid); 841 return false; 842 } 843 844 /* Update mappings and refcounts */ 845 if (mvm->queue_info[queue].tid_bitmap) 846 enable_queue = false; 847 848 if (mac80211_queue != IEEE80211_INVAL_HW_QUEUE) { 849 WARN(mac80211_queue >= 850 BITS_PER_BYTE * sizeof(mvm->hw_queue_to_mac80211[0]), 851 "cannot track mac80211 queue %d (queue %d, sta %d, tid %d)\n", 852 mac80211_queue, queue, sta_id, tid); 853 mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue); 854 } 855 856 mvm->queue_info[queue].tid_bitmap |= BIT(tid); 857 mvm->queue_info[queue].ra_sta_id = sta_id; 858 859 if (enable_queue) { 860 if (tid != IWL_MAX_TID_COUNT) 861 mvm->queue_info[queue].mac80211_ac = 862 tid_to_mac80211_ac[tid]; 863 else 864 mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO; 865 866 mvm->queue_info[queue].txq_tid = tid; 867 } 868 869 IWL_DEBUG_TX_QUEUES(mvm, 870 "Enabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n", 871 queue, mvm->queue_info[queue].tid_bitmap, 872 mvm->hw_queue_to_mac80211[queue]); 873 874 return enable_queue; 875 } 876 877 static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, 878 int mac80211_queue, u16 ssn, 879 const struct iwl_trans_txq_scd_cfg *cfg, 880 unsigned int wdg_timeout) 881 { 882 struct iwl_scd_txq_cfg_cmd cmd = { 883 .scd_queue = queue, 884 .action = SCD_CFG_ENABLE_QUEUE, 885 .window = cfg->frame_limit, 886 .sta_id = cfg->sta_id, 887 .ssn = cpu_to_le16(ssn), 888 .tx_fifo = cfg->fifo, 889 .aggregate = cfg->aggregate, 890 .tid = cfg->tid, 891 }; 892 bool inc_ssn; 893 894 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) 895 return false; 896 897 /* Send the enabling command if we need to */ 898 if (!iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue, 899 cfg->sta_id, cfg->tid)) 900 return false; 901 902 inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, 903 NULL, wdg_timeout); 904 if (inc_ssn) 905 le16_add_cpu(&cmd.ssn, 1); 906 907 WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd), 908 "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo); 909 910 return inc_ssn; 911 } 912 913 static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue) 914 { 915 struct iwl_scd_txq_cfg_cmd cmd = { 916 .scd_queue = queue, 917 .action = SCD_CFG_UPDATE_QUEUE_TID, 918 }; 919 int tid; 920 unsigned long tid_bitmap; 921 int ret; 922 923 lockdep_assert_held(&mvm->mutex); 924 925 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) 926 
		return;

	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
		return;

	/* Find any TID for queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	cmd.tid = tid;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
			queue, ret);
		return;
	}

	mvm->queue_info[queue].txq_tid = tid;
	IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
			    queue, tid);
}

static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id;
	int tid = -1;
	unsigned long tid_bitmap;
	unsigned int wdg_timeout;
	int ssn;
	int ret = true;

	/* queue sharing is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	lockdep_assert_held(&mvm->mutex);

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	/* Find TID for queue, and make sure it is the only one on the queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	if (tid_bitmap != BIT(tid)) {
		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
			queue, tid_bitmap);
		return;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
			    tid);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);

	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

	ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
					 tid_to_mac80211_ac[tid], ssn,
					 wdg_timeout, true);
	if (ret) {
		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
		return;
	}

	/* If aggs should be turned back on - do it */
	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
		struct iwl_mvm_add_sta_cmd cmd = {0};

		mvmsta->tid_disable_agg &= ~BIT(tid);

		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
		cmd.sta_id = mvmsta->sta_id;
		cmd.add_modify = STA_MODE_MODIFY;
		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
		if (!ret) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "TXQ #%d is now aggregated again\n",
					    queue);

			/* Mark queue internally as aggregating again */
			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
		}
	}

	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
}

/*
 * Remove inactive TIDs of a given queue.
 * If all queue TIDs are inactive - mark the queue as inactive
 * If only some of the queue TIDs are inactive - unmap them from the queue
 *
 * Returns %true if all TIDs were removed and the queue could be reused.
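 *
 * Both mvm->mutex and the station's mvmsta->lock must be held by the
 * caller (see the lockdep assertions at the top of the function).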
1034 */ 1035 static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm, 1036 struct iwl_mvm_sta *mvmsta, int queue, 1037 unsigned long tid_bitmap, 1038 unsigned long *unshare_queues, 1039 unsigned long *changetid_queues) 1040 { 1041 int tid; 1042 1043 lockdep_assert_held(&mvmsta->lock); 1044 lockdep_assert_held(&mvm->mutex); 1045 1046 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) 1047 return false; 1048 1049 /* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */ 1050 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { 1051 /* If some TFDs are still queued - don't mark TID as inactive */ 1052 if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid])) 1053 tid_bitmap &= ~BIT(tid); 1054 1055 /* Don't mark as inactive any TID that has an active BA */ 1056 if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) 1057 tid_bitmap &= ~BIT(tid); 1058 } 1059 1060 /* If all TIDs in the queue are inactive - return it can be reused */ 1061 if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) { 1062 IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue); 1063 return true; 1064 } 1065 1066 /* 1067 * If we are here, this is a shared queue and not all TIDs timed-out. 1068 * Remove the ones that did. 1069 */ 1070 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { 1071 int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]; 1072 u16 tid_bitmap; 1073 1074 mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE; 1075 mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue); 1076 mvm->queue_info[queue].tid_bitmap &= ~BIT(tid); 1077 1078 tid_bitmap = mvm->queue_info[queue].tid_bitmap; 1079 1080 /* 1081 * We need to take into account a situation in which a TXQ was 1082 * allocated to TID x, and then turned shared by adding TIDs y 1083 * and z. If TID x becomes inactive and is removed from the TXQ, 1084 * ownership must be given to one of the remaining TIDs. 1085 * This is mainly because if TID x continues - a new queue can't 1086 * be allocated for it as long as it is an owner of another TXQ. 1087 * 1088 * Mark this queue in the right bitmap, we'll send the command 1089 * to the firmware later. 1090 */ 1091 if (!(tid_bitmap & BIT(mvm->queue_info[queue].txq_tid))) 1092 set_bit(queue, changetid_queues); 1093 1094 IWL_DEBUG_TX_QUEUES(mvm, 1095 "Removing inactive TID %d from shared Q:%d\n", 1096 tid, queue); 1097 } 1098 1099 IWL_DEBUG_TX_QUEUES(mvm, 1100 "TXQ #%d left with tid bitmap 0x%x\n", queue, 1101 mvm->queue_info[queue].tid_bitmap); 1102 1103 /* 1104 * There may be different TIDs with the same mac queues, so make 1105 * sure all TIDs have existing corresponding mac queues enabled 1106 */ 1107 tid_bitmap = mvm->queue_info[queue].tid_bitmap; 1108 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { 1109 mvm->hw_queue_to_mac80211[queue] |= 1110 BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]); 1111 } 1112 1113 /* If the queue is marked as shared - "unshare" it */ 1114 if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 && 1115 mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) { 1116 IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n", 1117 queue); 1118 set_bit(queue, unshare_queues); 1119 } 1120 1121 return false; 1122 } 1123 1124 /* 1125 * Check for inactivity - this includes checking if any queue 1126 * can be unshared and finding one (and only one) that can be 1127 * reused. 1128 * This function is also invoked as a sort of clean-up task, 1129 * in which case @alloc_for_sta is IWL_MVM_INVALID_STA. 1130 * 1131 * Returns the queue number, or -ENOSPC. 
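 * Note that the reusable queue is only actually freed, via
 * iwl_mvm_free_inactive_queue(), when @alloc_for_sta is a valid station id.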
1132 */ 1133 static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta) 1134 { 1135 unsigned long now = jiffies; 1136 unsigned long unshare_queues = 0; 1137 unsigned long changetid_queues = 0; 1138 int i, ret, free_queue = -ENOSPC; 1139 1140 lockdep_assert_held(&mvm->mutex); 1141 1142 if (iwl_mvm_has_new_tx_api(mvm)) 1143 return -ENOSPC; 1144 1145 rcu_read_lock(); 1146 1147 /* we skip the CMD queue below by starting at 1 */ 1148 BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0); 1149 1150 for (i = 1; i < IWL_MAX_HW_QUEUES; i++) { 1151 struct ieee80211_sta *sta; 1152 struct iwl_mvm_sta *mvmsta; 1153 u8 sta_id; 1154 int tid; 1155 unsigned long inactive_tid_bitmap = 0; 1156 unsigned long queue_tid_bitmap; 1157 1158 queue_tid_bitmap = mvm->queue_info[i].tid_bitmap; 1159 if (!queue_tid_bitmap) 1160 continue; 1161 1162 /* If TXQ isn't in active use anyway - nothing to do here... */ 1163 if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY && 1164 mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED) 1165 continue; 1166 1167 /* Check to see if there are inactive TIDs on this queue */ 1168 for_each_set_bit(tid, &queue_tid_bitmap, 1169 IWL_MAX_TID_COUNT + 1) { 1170 if (time_after(mvm->queue_info[i].last_frame_time[tid] + 1171 IWL_MVM_DQA_QUEUE_TIMEOUT, now)) 1172 continue; 1173 1174 inactive_tid_bitmap |= BIT(tid); 1175 } 1176 1177 /* If all TIDs are active - finish check on this queue */ 1178 if (!inactive_tid_bitmap) 1179 continue; 1180 1181 /* 1182 * If we are here - the queue hadn't been served recently and is 1183 * in use 1184 */ 1185 1186 sta_id = mvm->queue_info[i].ra_sta_id; 1187 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); 1188 1189 /* 1190 * If the STA doesn't exist anymore, it isn't an error. It could 1191 * be that it was removed since getting the queues, and in this 1192 * case it should've inactivated its queues anyway. 
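		 * (When a station is removed its queues are torn down
		 * explicitly, see iwl_mvm_disable_sta_queues().)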
		 */
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);

		spin_lock_bh(&mvmsta->lock);
		ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
						   inactive_tid_bitmap,
						   &unshare_queues,
						   &changetid_queues);
		if (ret >= 0 && free_queue < 0)
			free_queue = ret;
		/* only unlock sta lock - we still need the queue info lock */
		spin_unlock_bh(&mvmsta->lock);
	}

	rcu_read_unlock();

	/* Reconfigure queues requiring reconfiguration */
	for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
		iwl_mvm_unshare_queue(mvm, i);
	for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES)
		iwl_mvm_change_queue_tid(mvm, i);

	if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
		ret = iwl_mvm_free_inactive_queue(mvm, free_queue,
						  alloc_for_sta);
		if (ret)
			return ret;
	}

	return free_queue;
}

static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta, u8 ac, int tid,
				   struct ieee80211_hdr *hdr)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	u8 mac_queue = mvmsta->vif->hw_queue[ac];
	int queue = -1;
	unsigned long disable_agg_tids = 0;
	enum iwl_mvm_agg_state queue_state;
	bool shared_queue = false, inc_ssn;
	int ssn;
	unsigned long tfd_queue_mask;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);

	spin_lock_bh(&mvmsta->lock);
	tfd_queue_mask = mvmsta->tfd_queue_msk;
	spin_unlock_bh(&mvmsta->lock);

	/*
	 * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
	 * exists
	 */
	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_MGMT_QUEUE,
						IWL_MVM_DQA_MAX_MGMT_QUEUE);
		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
					    queue);

		/* If no such queue is found, we'll use a DATA queue instead */
	}

	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
	    (mvm->queue_info[mvmsta->reserved_queue].status ==
			IWL_MVM_QUEUE_RESERVED)) {
		queue = mvmsta->reserved_queue;
		mvm->queue_info[queue].reserved = true;
		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
	}

	if (queue < 0)
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		/* try harder - perhaps kill an inactive queue */
		queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
	}

	/* No free queue - we'll have to share */
	if (queue <= 0) {
		queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
		if (queue > 0) {
			shared_queue = true;
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
		}
	}

	/*
	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
	 * to make sure no one else takes it.
1304 * This will allow avoiding re-acquiring the lock at the end of the 1305 * configuration. On error we'll mark it back as free. 1306 */ 1307 if (queue > 0 && !shared_queue) 1308 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; 1309 1310 /* This shouldn't happen - out of queues */ 1311 if (WARN_ON(queue <= 0)) { 1312 IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n", 1313 tid, cfg.sta_id); 1314 return queue; 1315 } 1316 1317 /* 1318 * Actual en/disablement of aggregations is through the ADD_STA HCMD, 1319 * but for configuring the SCD to send A-MPDUs we need to mark the queue 1320 * as aggregatable. 1321 * Mark all DATA queues as allowing to be aggregated at some point 1322 */ 1323 cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE || 1324 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE); 1325 1326 IWL_DEBUG_TX_QUEUES(mvm, 1327 "Allocating %squeue #%d to sta %d on tid %d\n", 1328 shared_queue ? "shared " : "", queue, 1329 mvmsta->sta_id, tid); 1330 1331 if (shared_queue) { 1332 /* Disable any open aggs on this queue */ 1333 disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue); 1334 1335 if (disable_agg_tids) { 1336 IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n", 1337 queue); 1338 iwl_mvm_invalidate_sta_queue(mvm, queue, 1339 disable_agg_tids, false); 1340 } 1341 } 1342 1343 ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); 1344 inc_ssn = iwl_mvm_enable_txq(mvm, queue, mac_queue, 1345 ssn, &cfg, wdg_timeout); 1346 if (inc_ssn) { 1347 ssn = (ssn + 1) & IEEE80211_SCTL_SEQ; 1348 le16_add_cpu(&hdr->seq_ctrl, 0x10); 1349 } 1350 1351 /* 1352 * Mark queue as shared in transport if shared 1353 * Note this has to be done after queue enablement because enablement 1354 * can also set this value, and there is no indication there to shared 1355 * queues 1356 */ 1357 if (shared_queue) 1358 iwl_trans_txq_set_shared_mode(mvm->trans, queue, true); 1359 1360 spin_lock_bh(&mvmsta->lock); 1361 /* 1362 * This looks racy, but it is not. We have only one packet for 1363 * this ra/tid in our Tx path since we stop the Qdisc when we 1364 * need to allocate a new TFD queue. 1365 */ 1366 if (inc_ssn) 1367 mvmsta->tid_data[tid].seq_number += 0x10; 1368 mvmsta->tid_data[tid].txq_id = queue; 1369 mvmsta->tfd_queue_msk |= BIT(queue); 1370 queue_state = mvmsta->tid_data[tid].state; 1371 1372 if (mvmsta->reserved_queue == queue) 1373 mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE; 1374 spin_unlock_bh(&mvmsta->lock); 1375 1376 if (!shared_queue) { 1377 ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES); 1378 if (ret) 1379 goto out_err; 1380 1381 /* If we need to re-enable aggregations... 
*/ 1382 if (queue_state == IWL_AGG_ON) { 1383 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true); 1384 if (ret) 1385 goto out_err; 1386 } 1387 } else { 1388 /* Redirect queue, if needed */ 1389 ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn, 1390 wdg_timeout, false); 1391 if (ret) 1392 goto out_err; 1393 } 1394 1395 return 0; 1396 1397 out_err: 1398 iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0); 1399 1400 return ret; 1401 } 1402 1403 static inline u8 iwl_mvm_tid_to_ac_queue(int tid) 1404 { 1405 if (tid == IWL_MAX_TID_COUNT) 1406 return IEEE80211_AC_VO; /* MGMT */ 1407 1408 return tid_to_mac80211_ac[tid]; 1409 } 1410 1411 static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm, 1412 struct ieee80211_sta *sta, int tid) 1413 { 1414 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 1415 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; 1416 struct sk_buff *skb; 1417 struct ieee80211_hdr *hdr; 1418 struct sk_buff_head deferred_tx; 1419 u8 mac_queue; 1420 bool no_queue = false; /* Marks if there is a problem with the queue */ 1421 u8 ac; 1422 1423 lockdep_assert_held(&mvm->mutex); 1424 1425 skb = skb_peek(&tid_data->deferred_tx_frames); 1426 if (!skb) 1427 return; 1428 hdr = (void *)skb->data; 1429 1430 ac = iwl_mvm_tid_to_ac_queue(tid); 1431 mac_queue = IEEE80211_SKB_CB(skb)->hw_queue; 1432 1433 if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE && 1434 iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) { 1435 IWL_ERR(mvm, 1436 "Can't alloc TXQ for sta %d tid %d - dropping frame\n", 1437 mvmsta->sta_id, tid); 1438 1439 /* 1440 * Mark queue as problematic so later the deferred traffic is 1441 * freed, as we can do nothing with it 1442 */ 1443 no_queue = true; 1444 } 1445 1446 __skb_queue_head_init(&deferred_tx); 1447 1448 /* Disable bottom-halves when entering TX path */ 1449 local_bh_disable(); 1450 spin_lock(&mvmsta->lock); 1451 skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx); 1452 mvmsta->deferred_traffic_tid_map &= ~BIT(tid); 1453 spin_unlock(&mvmsta->lock); 1454 1455 while ((skb = __skb_dequeue(&deferred_tx))) 1456 if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta)) 1457 ieee80211_free_txskb(mvm->hw, skb); 1458 local_bh_enable(); 1459 1460 /* Wake queue */ 1461 iwl_mvm_start_mac_queues(mvm, BIT(mac_queue)); 1462 } 1463 1464 void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk) 1465 { 1466 struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, 1467 add_stream_wk); 1468 struct ieee80211_sta *sta; 1469 struct iwl_mvm_sta *mvmsta; 1470 unsigned long deferred_tid_traffic; 1471 int sta_id, tid; 1472 1473 mutex_lock(&mvm->mutex); 1474 1475 iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA); 1476 1477 /* Go over all stations with deferred traffic */ 1478 for_each_set_bit(sta_id, mvm->sta_deferred_frames, 1479 IWL_MVM_STATION_COUNT) { 1480 clear_bit(sta_id, mvm->sta_deferred_frames); 1481 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], 1482 lockdep_is_held(&mvm->mutex)); 1483 if (IS_ERR_OR_NULL(sta)) 1484 continue; 1485 1486 mvmsta = iwl_mvm_sta_from_mac80211(sta); 1487 deferred_tid_traffic = mvmsta->deferred_traffic_tid_map; 1488 1489 for_each_set_bit(tid, &deferred_tid_traffic, 1490 IWL_MAX_TID_COUNT + 1) 1491 iwl_mvm_tx_deferred_stream(mvm, sta, tid); 1492 } 1493 1494 mutex_unlock(&mvm->mutex); 1495 } 1496 1497 static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm, 1498 struct ieee80211_sta *sta, 1499 enum nl80211_iftype vif_type) 1500 { 1501 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 1502 int queue; 1503 
1504 /* queue reserving is disabled on new TX path */ 1505 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) 1506 return 0; 1507 1508 /* run the general cleanup/unsharing of queues */ 1509 iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA); 1510 1511 /* Make sure we have free resources for this STA */ 1512 if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls && 1513 !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap && 1514 (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status == 1515 IWL_MVM_QUEUE_FREE)) 1516 queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE; 1517 else 1518 queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, 1519 IWL_MVM_DQA_MIN_DATA_QUEUE, 1520 IWL_MVM_DQA_MAX_DATA_QUEUE); 1521 if (queue < 0) { 1522 /* try again - this time kick out a queue if needed */ 1523 queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id); 1524 if (queue < 0) { 1525 IWL_ERR(mvm, "No available queues for new station\n"); 1526 return -ENOSPC; 1527 } 1528 } 1529 mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED; 1530 1531 mvmsta->reserved_queue = queue; 1532 1533 IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n", 1534 queue, mvmsta->sta_id); 1535 1536 return 0; 1537 } 1538 1539 /* 1540 * In DQA mode, after a HW restart the queues should be allocated as before, in 1541 * order to avoid race conditions when there are shared queues. This function 1542 * does the re-mapping and queue allocation. 1543 * 1544 * Note that re-enabling aggregations isn't done in this function. 1545 */ 1546 static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm, 1547 struct iwl_mvm_sta *mvm_sta) 1548 { 1549 unsigned int wdg_timeout = 1550 iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false); 1551 int i; 1552 struct iwl_trans_txq_scd_cfg cfg = { 1553 .sta_id = mvm_sta->sta_id, 1554 .frame_limit = IWL_FRAME_LIMIT, 1555 }; 1556 1557 /* Make sure reserved queue is still marked as such (if allocated) */ 1558 if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) 1559 mvm->queue_info[mvm_sta->reserved_queue].status = 1560 IWL_MVM_QUEUE_RESERVED; 1561 1562 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) { 1563 struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i]; 1564 int txq_id = tid_data->txq_id; 1565 int ac; 1566 u8 mac_queue; 1567 1568 if (txq_id == IWL_MVM_INVALID_QUEUE) 1569 continue; 1570 1571 skb_queue_head_init(&tid_data->deferred_tx_frames); 1572 1573 ac = tid_to_mac80211_ac[i]; 1574 mac_queue = mvm_sta->vif->hw_queue[ac]; 1575 1576 if (iwl_mvm_has_new_tx_api(mvm)) { 1577 IWL_DEBUG_TX_QUEUES(mvm, 1578 "Re-mapping sta %d tid %d\n", 1579 mvm_sta->sta_id, i); 1580 txq_id = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, 1581 mvm_sta->sta_id, 1582 i, wdg_timeout); 1583 tid_data->txq_id = txq_id; 1584 1585 /* 1586 * Since we don't set the seq number after reset, and HW 1587 * sets it now, FW reset will cause the seq num to start 1588 * at 0 again, so driver will need to update it 1589 * internally as well, so it keeps in sync with real val 1590 */ 1591 tid_data->seq_number = 0; 1592 } else { 1593 u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number); 1594 1595 cfg.tid = i; 1596 cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac); 1597 cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE || 1598 txq_id == 1599 IWL_MVM_DQA_BSS_CLIENT_QUEUE); 1600 1601 IWL_DEBUG_TX_QUEUES(mvm, 1602 "Re-mapping sta %d tid %d to queue %d\n", 1603 mvm_sta->sta_id, i, txq_id); 1604 1605 iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg, 1606 wdg_timeout); 1607 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY; 1608 } 1609 } 1610 } 1611 
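/*
 * Send ADD_STA for a station that has no corresponding mac80211
 * ieee80211_sta, typically the internal (e.g. auxiliary and broadcast)
 * stations, or the "empty" placeholder station added first when
 * re-adding a station after a HW restart (see iwl_mvm_add_sta() below).
 */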
1612 static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm, 1613 struct iwl_mvm_int_sta *sta, 1614 const u8 *addr, 1615 u16 mac_id, u16 color) 1616 { 1617 struct iwl_mvm_add_sta_cmd cmd; 1618 int ret; 1619 u32 status = ADD_STA_SUCCESS; 1620 1621 lockdep_assert_held(&mvm->mutex); 1622 1623 memset(&cmd, 0, sizeof(cmd)); 1624 cmd.sta_id = sta->sta_id; 1625 cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id, 1626 color)); 1627 if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) 1628 cmd.station_type = sta->type; 1629 1630 if (!iwl_mvm_has_new_tx_api(mvm)) 1631 cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk); 1632 cmd.tid_disable_tx = cpu_to_le16(0xffff); 1633 1634 if (addr) 1635 memcpy(cmd.addr, addr, ETH_ALEN); 1636 1637 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, 1638 iwl_mvm_add_sta_cmd_size(mvm), 1639 &cmd, &status); 1640 if (ret) 1641 return ret; 1642 1643 switch (status & IWL_ADD_STA_STATUS_MASK) { 1644 case ADD_STA_SUCCESS: 1645 IWL_DEBUG_INFO(mvm, "Internal station added.\n"); 1646 return 0; 1647 default: 1648 ret = -EIO; 1649 IWL_ERR(mvm, "Add internal station failed, status=0x%x\n", 1650 status); 1651 break; 1652 } 1653 return ret; 1654 } 1655 1656 int iwl_mvm_add_sta(struct iwl_mvm *mvm, 1657 struct ieee80211_vif *vif, 1658 struct ieee80211_sta *sta) 1659 { 1660 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1661 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 1662 struct iwl_mvm_rxq_dup_data *dup_data; 1663 int i, ret, sta_id; 1664 bool sta_update = false; 1665 unsigned int sta_flags = 0; 1666 1667 lockdep_assert_held(&mvm->mutex); 1668 1669 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) 1670 sta_id = iwl_mvm_find_free_sta_id(mvm, 1671 ieee80211_vif_type_p2p(vif)); 1672 else 1673 sta_id = mvm_sta->sta_id; 1674 1675 if (sta_id == IWL_MVM_INVALID_STA) 1676 return -ENOSPC; 1677 1678 spin_lock_init(&mvm_sta->lock); 1679 1680 /* if this is a HW restart re-alloc existing queues */ 1681 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { 1682 struct iwl_mvm_int_sta tmp_sta = { 1683 .sta_id = sta_id, 1684 .type = mvm_sta->sta_type, 1685 }; 1686 1687 /* 1688 * First add an empty station since allocating 1689 * a queue requires a valid station 1690 */ 1691 ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr, 1692 mvmvif->id, mvmvif->color); 1693 if (ret) 1694 goto err; 1695 1696 iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta); 1697 sta_update = true; 1698 sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES; 1699 goto update_fw; 1700 } 1701 1702 mvm_sta->sta_id = sta_id; 1703 mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id, 1704 mvmvif->color); 1705 mvm_sta->vif = vif; 1706 if (!mvm->trans->cfg->gen2) 1707 mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF; 1708 else 1709 mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF; 1710 mvm_sta->tx_protection = 0; 1711 mvm_sta->tt_tx_protection = false; 1712 mvm_sta->sta_type = sta->tdls ? 
IWL_STA_TDLS_LINK : IWL_STA_LINK; 1713 1714 /* HW restart, don't assume the memory has been zeroed */ 1715 mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */ 1716 mvm_sta->tfd_queue_msk = 0; 1717 1718 /* for HW restart - reset everything but the sequence number */ 1719 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) { 1720 u16 seq = mvm_sta->tid_data[i].seq_number; 1721 memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i])); 1722 mvm_sta->tid_data[i].seq_number = seq; 1723 1724 /* 1725 * Mark all queues for this STA as unallocated and defer TX 1726 * frames until the queue is allocated 1727 */ 1728 mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE; 1729 skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames); 1730 } 1731 mvm_sta->deferred_traffic_tid_map = 0; 1732 mvm_sta->agg_tids = 0; 1733 1734 if (iwl_mvm_has_new_rx_api(mvm) && 1735 !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { 1736 int q; 1737 1738 dup_data = kcalloc(mvm->trans->num_rx_queues, 1739 sizeof(*dup_data), GFP_KERNEL); 1740 if (!dup_data) 1741 return -ENOMEM; 1742 /* 1743 * Initialize all the last_seq values to 0xffff which can never 1744 * compare equal to the frame's seq_ctrl in the check in 1745 * iwl_mvm_is_dup() since the lower 4 bits are the fragment 1746 * number and fragmented packets don't reach that function. 1747 * 1748 * This thus allows receiving a packet with seqno 0 and the 1749 * retry bit set as the very first packet on a new TID. 1750 */ 1751 for (q = 0; q < mvm->trans->num_rx_queues; q++) 1752 memset(dup_data[q].last_seq, 0xff, 1753 sizeof(dup_data[q].last_seq)); 1754 mvm_sta->dup_data = dup_data; 1755 } 1756 1757 if (!iwl_mvm_has_new_tx_api(mvm)) { 1758 ret = iwl_mvm_reserve_sta_stream(mvm, sta, 1759 ieee80211_vif_type_p2p(vif)); 1760 if (ret) 1761 goto err; 1762 } 1763 1764 /* 1765 * if rs is registered with mac80211, then "add station" will be handled 1766 * via the corresponding ops, otherwise need to notify rate scaling here 1767 */ 1768 if (iwl_mvm_has_tlc_offload(mvm)) 1769 iwl_mvm_rs_add_sta(mvm, mvm_sta); 1770 1771 iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant); 1772 1773 update_fw: 1774 ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags); 1775 if (ret) 1776 goto err; 1777 1778 if (vif->type == NL80211_IFTYPE_STATION) { 1779 if (!sta->tdls) { 1780 WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA); 1781 mvmvif->ap_sta_id = sta_id; 1782 } else { 1783 WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA); 1784 } 1785 } 1786 1787 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta); 1788 1789 return 0; 1790 1791 err: 1792 return ret; 1793 } 1794 1795 int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, 1796 bool drain) 1797 { 1798 struct iwl_mvm_add_sta_cmd cmd = {}; 1799 int ret; 1800 u32 status; 1801 1802 lockdep_assert_held(&mvm->mutex); 1803 1804 cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color); 1805 cmd.sta_id = mvmsta->sta_id; 1806 cmd.add_modify = STA_MODE_MODIFY; 1807 cmd.station_flags = drain ? 
cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0; 1808 cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW); 1809 1810 status = ADD_STA_SUCCESS; 1811 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, 1812 iwl_mvm_add_sta_cmd_size(mvm), 1813 &cmd, &status); 1814 if (ret) 1815 return ret; 1816 1817 switch (status & IWL_ADD_STA_STATUS_MASK) { 1818 case ADD_STA_SUCCESS: 1819 IWL_DEBUG_INFO(mvm, "Frames for staid %d will drained in fw\n", 1820 mvmsta->sta_id); 1821 break; 1822 default: 1823 ret = -EIO; 1824 IWL_ERR(mvm, "Couldn't drain frames for staid %d\n", 1825 mvmsta->sta_id); 1826 break; 1827 } 1828 1829 return ret; 1830 } 1831 1832 /* 1833 * Remove a station from the FW table. Before sending the command to remove 1834 * the station validate that the station is indeed known to the driver (sanity 1835 * only). 1836 */ 1837 static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id) 1838 { 1839 struct ieee80211_sta *sta; 1840 struct iwl_mvm_rm_sta_cmd rm_sta_cmd = { 1841 .sta_id = sta_id, 1842 }; 1843 int ret; 1844 1845 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], 1846 lockdep_is_held(&mvm->mutex)); 1847 1848 /* Note: internal stations are marked as error values */ 1849 if (!sta) { 1850 IWL_ERR(mvm, "Invalid station id\n"); 1851 return -EINVAL; 1852 } 1853 1854 ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0, 1855 sizeof(rm_sta_cmd), &rm_sta_cmd); 1856 if (ret) { 1857 IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id); 1858 return ret; 1859 } 1860 1861 return 0; 1862 } 1863 1864 static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm, 1865 struct ieee80211_vif *vif, 1866 struct iwl_mvm_sta *mvm_sta) 1867 { 1868 int ac; 1869 int i; 1870 1871 lockdep_assert_held(&mvm->mutex); 1872 1873 for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) { 1874 if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE) 1875 continue; 1876 1877 ac = iwl_mvm_tid_to_ac_queue(i); 1878 iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id, 1879 vif->hw_queue[ac], i, 0); 1880 mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE; 1881 } 1882 } 1883 1884 int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm, 1885 struct iwl_mvm_sta *mvm_sta) 1886 { 1887 int i; 1888 1889 for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) { 1890 u16 txq_id; 1891 int ret; 1892 1893 spin_lock_bh(&mvm_sta->lock); 1894 txq_id = mvm_sta->tid_data[i].txq_id; 1895 spin_unlock_bh(&mvm_sta->lock); 1896 1897 if (txq_id == IWL_MVM_INVALID_QUEUE) 1898 continue; 1899 1900 ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id); 1901 if (ret) 1902 return ret; 1903 } 1904 1905 return 0; 1906 } 1907 1908 int iwl_mvm_rm_sta(struct iwl_mvm *mvm, 1909 struct ieee80211_vif *vif, 1910 struct ieee80211_sta *sta) 1911 { 1912 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1913 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 1914 u8 sta_id = mvm_sta->sta_id; 1915 int ret; 1916 1917 lockdep_assert_held(&mvm->mutex); 1918 1919 if (iwl_mvm_has_new_rx_api(mvm)) 1920 kfree(mvm_sta->dup_data); 1921 1922 ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); 1923 if (ret) 1924 return ret; 1925 1926 /* flush its queues here since we are freeing mvm_sta */ 1927 ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0); 1928 if (ret) 1929 return ret; 1930 if (iwl_mvm_has_new_tx_api(mvm)) { 1931 ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta); 1932 } else { 1933 u32 q_mask = mvm_sta->tfd_queue_msk; 1934 1935 ret = iwl_trans_wait_tx_queues_empty(mvm->trans, 1936 q_mask); 1937 } 1938 if (ret) 1939 return ret; 1940 1941 ret = iwl_mvm_drain_sta(mvm, 
mvm_sta, false); 1942 1943 iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta); 1944 1945 /* If there is a TXQ still marked as reserved - free it */ 1946 if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) { 1947 u8 reserved_txq = mvm_sta->reserved_queue; 1948 enum iwl_mvm_queue_status *status; 1949 1950 /* 1951 * If no traffic has gone through the reserved TXQ - it 1952 * is still marked as IWL_MVM_QUEUE_RESERVED, and 1953 * should be manually marked as free again 1954 */ 1955 status = &mvm->queue_info[reserved_txq].status; 1956 if (WARN((*status != IWL_MVM_QUEUE_RESERVED) && 1957 (*status != IWL_MVM_QUEUE_FREE), 1958 "sta_id %d reserved txq %d status %d", 1959 sta_id, reserved_txq, *status)) 1960 return -EINVAL; 1961 1962 *status = IWL_MVM_QUEUE_FREE; 1963 } 1964 1965 if (vif->type == NL80211_IFTYPE_STATION && 1966 mvmvif->ap_sta_id == sta_id) { 1967 /* if associated - we can't remove the AP STA now */ 1968 if (vif->bss_conf.assoc) 1969 return ret; 1970 1971 /* unassoc - go ahead - remove the AP STA now */ 1972 mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; 1973 1974 /* clear d0i3_ap_sta_id if no longer relevant */ 1975 if (mvm->d0i3_ap_sta_id == sta_id) 1976 mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA; 1977 } 1978 1979 /* 1980 * This shouldn't happen - the TDLS channel switch should be canceled 1981 * before the STA is removed. 1982 */ 1983 if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) { 1984 mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA; 1985 cancel_delayed_work(&mvm->tdls_cs.dwork); 1986 } 1987 1988 /* 1989 * Make sure that the tx response code sees the station as -EBUSY and 1990 * calls the drain worker. 1991 */ 1992 spin_lock_bh(&mvm_sta->lock); 1993 spin_unlock_bh(&mvm_sta->lock); 1994 1995 ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id); 1996 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL); 1997 1998 return ret; 1999 } 2000 2001 int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm, 2002 struct ieee80211_vif *vif, 2003 u8 sta_id) 2004 { 2005 int ret = iwl_mvm_rm_sta_common(mvm, sta_id); 2006 2007 lockdep_assert_held(&mvm->mutex); 2008 2009 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL); 2010 return ret; 2011 } 2012 2013 int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, 2014 struct iwl_mvm_int_sta *sta, 2015 u32 qmask, enum nl80211_iftype iftype, 2016 enum iwl_sta_type type) 2017 { 2018 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) || 2019 sta->sta_id == IWL_MVM_INVALID_STA) { 2020 sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype); 2021 if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA)) 2022 return -ENOSPC; 2023 } 2024 2025 sta->tfd_queue_msk = qmask; 2026 sta->type = type; 2027 2028 /* put a non-NULL value so iterating over the stations won't stop */ 2029 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL)); 2030 return 0; 2031 } 2032 2033 void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta) 2034 { 2035 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL); 2036 memset(sta, 0, sizeof(struct iwl_mvm_int_sta)); 2037 sta->sta_id = IWL_MVM_INVALID_STA; 2038 } 2039 2040 static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue, 2041 u8 sta_id, u8 fifo) 2042 { 2043 unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ? 
2044 mvm->cfg->base_params->wd_timeout : 2045 IWL_WATCHDOG_DISABLED; 2046 2047 if (iwl_mvm_has_new_tx_api(mvm)) { 2048 int tvqm_queue = 2049 iwl_mvm_tvqm_enable_txq(mvm, *queue, sta_id, 2050 IWL_MAX_TID_COUNT, 2051 wdg_timeout); 2052 *queue = tvqm_queue; 2053 } else { 2054 struct iwl_trans_txq_scd_cfg cfg = { 2055 .fifo = fifo, 2056 .sta_id = sta_id, 2057 .tid = IWL_MAX_TID_COUNT, 2058 .aggregate = false, 2059 .frame_limit = IWL_FRAME_LIMIT, 2060 }; 2061 2062 iwl_mvm_enable_txq(mvm, *queue, *queue, 0, &cfg, wdg_timeout); 2063 } 2064 } 2065 2066 int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm) 2067 { 2068 int ret; 2069 2070 lockdep_assert_held(&mvm->mutex); 2071 2072 /* Allocate aux station and assign to it the aux queue */ 2073 ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue), 2074 NL80211_IFTYPE_UNSPECIFIED, 2075 IWL_STA_AUX_ACTIVITY); 2076 if (ret) 2077 return ret; 2078 2079 /* Map Aux queue to fifo - needs to happen before adding Aux station */ 2080 if (!iwl_mvm_has_new_tx_api(mvm)) 2081 iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue, 2082 mvm->aux_sta.sta_id, 2083 IWL_MVM_TX_FIFO_MCAST); 2084 2085 ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL, 2086 MAC_INDEX_AUX, 0); 2087 if (ret) { 2088 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta); 2089 return ret; 2090 } 2091 2092 /* 2093 * For 22000 firmware and on we cannot add queue to a station unknown 2094 * to firmware so enable queue here - after the station was added 2095 */ 2096 if (iwl_mvm_has_new_tx_api(mvm)) 2097 iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue, 2098 mvm->aux_sta.sta_id, 2099 IWL_MVM_TX_FIFO_MCAST); 2100 2101 return 0; 2102 } 2103 2104 int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 2105 { 2106 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2107 int ret; 2108 2109 lockdep_assert_held(&mvm->mutex); 2110 2111 /* Map snif queue to fifo - must happen before adding snif station */ 2112 if (!iwl_mvm_has_new_tx_api(mvm)) 2113 iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue, 2114 mvm->snif_sta.sta_id, 2115 IWL_MVM_TX_FIFO_BE); 2116 2117 ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr, 2118 mvmvif->id, 0); 2119 if (ret) 2120 return ret; 2121 2122 /* 2123 * For 22000 firmware and on we cannot add queue to a station unknown 2124 * to firmware so enable queue here - after the station was added 2125 */ 2126 if (iwl_mvm_has_new_tx_api(mvm)) 2127 iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue, 2128 mvm->snif_sta.sta_id, 2129 IWL_MVM_TX_FIFO_BE); 2130 2131 return 0; 2132 } 2133 2134 int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 2135 { 2136 int ret; 2137 2138 lockdep_assert_held(&mvm->mutex); 2139 2140 iwl_mvm_disable_txq(mvm, mvm->snif_queue, mvm->snif_queue, 2141 IWL_MAX_TID_COUNT, 0); 2142 ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id); 2143 if (ret) 2144 IWL_WARN(mvm, "Failed sending remove station\n"); 2145 2146 return ret; 2147 } 2148 2149 void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm) 2150 { 2151 iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta); 2152 } 2153 2154 void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm) 2155 { 2156 lockdep_assert_held(&mvm->mutex); 2157 2158 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta); 2159 } 2160 2161 /* 2162 * Send the add station command for the vif's broadcast station. 2163 * Assumes that the station was already allocated. 2164 * 2165 * @mvm: the mvm component 2166 * @vif: the interface to which the broadcast station is added 2167 * @bsta: the broadcast station to add. 
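 * For non-TVQM firmware the probe/P2P device queue is enabled here and
 * added to the station's tfd_queue_msk; with the new TX API the queue can
 * only be allocated after the station is known to the firmware.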
2168 */ 2169 int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 2170 { 2171 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2172 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta; 2173 static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; 2174 const u8 *baddr = _baddr; 2175 int queue; 2176 int ret; 2177 unsigned int wdg_timeout = 2178 iwl_mvm_get_wd_timeout(mvm, vif, false, false); 2179 struct iwl_trans_txq_scd_cfg cfg = { 2180 .fifo = IWL_MVM_TX_FIFO_VO, 2181 .sta_id = mvmvif->bcast_sta.sta_id, 2182 .tid = IWL_MAX_TID_COUNT, 2183 .aggregate = false, 2184 .frame_limit = IWL_FRAME_LIMIT, 2185 }; 2186 2187 lockdep_assert_held(&mvm->mutex); 2188 2189 if (!iwl_mvm_has_new_tx_api(mvm)) { 2190 if (vif->type == NL80211_IFTYPE_AP || 2191 vif->type == NL80211_IFTYPE_ADHOC) 2192 queue = mvm->probe_queue; 2193 else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) 2194 queue = mvm->p2p_dev_queue; 2195 else if (WARN(1, "Missing required TXQ for adding bcast STA\n")) 2196 return -EINVAL; 2197 2198 bsta->tfd_queue_msk |= BIT(queue); 2199 2200 iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0, 2201 &cfg, wdg_timeout); 2202 } 2203 2204 if (vif->type == NL80211_IFTYPE_ADHOC) 2205 baddr = vif->bss_conf.bssid; 2206 2207 if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA)) 2208 return -ENOSPC; 2209 2210 ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr, 2211 mvmvif->id, mvmvif->color); 2212 if (ret) 2213 return ret; 2214 2215 /* 2216 * For 22000 firmware and on we cannot add queue to a station unknown 2217 * to firmware so enable queue here - after the station was added 2218 */ 2219 if (iwl_mvm_has_new_tx_api(mvm)) { 2220 queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0], 2221 bsta->sta_id, 2222 IWL_MAX_TID_COUNT, 2223 wdg_timeout); 2224 2225 if (vif->type == NL80211_IFTYPE_AP || 2226 vif->type == NL80211_IFTYPE_ADHOC) 2227 mvm->probe_queue = queue; 2228 else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) 2229 mvm->p2p_dev_queue = queue; 2230 } 2231 2232 return 0; 2233 } 2234 2235 static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm, 2236 struct ieee80211_vif *vif) 2237 { 2238 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2239 int queue; 2240 2241 lockdep_assert_held(&mvm->mutex); 2242 2243 iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, 0); 2244 2245 switch (vif->type) { 2246 case NL80211_IFTYPE_AP: 2247 case NL80211_IFTYPE_ADHOC: 2248 queue = mvm->probe_queue; 2249 break; 2250 case NL80211_IFTYPE_P2P_DEVICE: 2251 queue = mvm->p2p_dev_queue; 2252 break; 2253 default: 2254 WARN(1, "Can't free bcast queue on vif type %d\n", 2255 vif->type); 2256 return; 2257 } 2258 2259 iwl_mvm_disable_txq(mvm, queue, vif->hw_queue[0], IWL_MAX_TID_COUNT, 0); 2260 if (iwl_mvm_has_new_tx_api(mvm)) 2261 return; 2262 2263 WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue))); 2264 mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue); 2265 } 2266 2267 /* Send the FW a request to remove the station from it's internal data 2268 * structures, but DO NOT remove the entry from the local data structures. 
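 * The broadcast station's queues are flushed and disabled first (see
 * iwl_mvm_free_bcast_sta_queues() above) before the removal command is sent.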
*/ 2269 int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 2270 { 2271 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2272 int ret; 2273 2274 lockdep_assert_held(&mvm->mutex); 2275 2276 iwl_mvm_free_bcast_sta_queues(mvm, vif); 2277 2278 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id); 2279 if (ret) 2280 IWL_WARN(mvm, "Failed sending remove station\n"); 2281 return ret; 2282 } 2283 2284 int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 2285 { 2286 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2287 2288 lockdep_assert_held(&mvm->mutex); 2289 2290 return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0, 2291 ieee80211_vif_type_p2p(vif), 2292 IWL_STA_GENERAL_PURPOSE); 2293 } 2294 2295 /* Allocate a new station entry for the broadcast station to the given vif, 2296 * and send it to the FW. 2297 * Note that each P2P mac should have its own broadcast station. 2298 * 2299 * @mvm: the mvm component 2300 * @vif: the interface to which the broadcast station is added 2301 * @bsta: the broadcast station to add. */ 2302 int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 2303 { 2304 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2305 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta; 2306 int ret; 2307 2308 lockdep_assert_held(&mvm->mutex); 2309 2310 ret = iwl_mvm_alloc_bcast_sta(mvm, vif); 2311 if (ret) 2312 return ret; 2313 2314 ret = iwl_mvm_send_add_bcast_sta(mvm, vif); 2315 2316 if (ret) 2317 iwl_mvm_dealloc_int_sta(mvm, bsta); 2318 2319 return ret; 2320 } 2321 2322 void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 2323 { 2324 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2325 2326 iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta); 2327 } 2328 2329 /* 2330 * Send the FW a request to remove the station from it's internal data 2331 * structures, and in addition remove it from the local data structure. 2332 */ 2333 int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 2334 { 2335 int ret; 2336 2337 lockdep_assert_held(&mvm->mutex); 2338 2339 ret = iwl_mvm_send_rm_bcast_sta(mvm, vif); 2340 2341 iwl_mvm_dealloc_bcast_sta(mvm, vif); 2342 2343 return ret; 2344 } 2345 2346 /* 2347 * Allocate a new station entry for the multicast station to the given vif, 2348 * and send it to the FW. 2349 * Note that each AP/GO mac should have its own multicast station. 2350 * 2351 * @mvm: the mvm component 2352 * @vif: the interface to which the multicast station is added 2353 */ 2354 int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 2355 { 2356 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2357 struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta; 2358 static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00}; 2359 const u8 *maddr = _maddr; 2360 struct iwl_trans_txq_scd_cfg cfg = { 2361 .fifo = IWL_MVM_TX_FIFO_MCAST, 2362 .sta_id = msta->sta_id, 2363 .tid = 0, 2364 .aggregate = false, 2365 .frame_limit = IWL_FRAME_LIMIT, 2366 }; 2367 unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false); 2368 int ret; 2369 2370 lockdep_assert_held(&mvm->mutex); 2371 2372 if (WARN_ON(vif->type != NL80211_IFTYPE_AP && 2373 vif->type != NL80211_IFTYPE_ADHOC)) 2374 return -ENOTSUPP; 2375 2376 /* 2377 * In IBSS, ieee80211_check_queues() sets the cab_queue to be 2378 * invalid, so make sure we use the queue we want. 
	 * Note that this is done here as we want to avoid making DQA
	 * changes in mac80211 layer.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC) {
		vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
		mvmvif->cab_queue = vif->cab_queue;
	}

	/*
	 * While in previous FWs we had to exclude cab queue from TFD queue
	 * mask, now it is needed as any other queue.
	 */
	if (!iwl_mvm_has_new_tx_api(mvm) &&
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
		iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
				   &cfg, timeout);
		msta->tfd_queue_msk |= BIT(vif->cab_queue);
	}
	ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
					 mvmvif->id, mvmvif->color);
	if (ret) {
		iwl_mvm_dealloc_int_sta(mvm, msta);
		return ret;
	}

	/*
	 * Enable cab queue after the ADD_STA command is sent.
	 * This is needed for 22000 firmware which won't accept SCD_QUEUE_CFG
	 * command with unknown station id, and for FW that doesn't support
	 * station API since the cab queue is not included in the
	 * tfd_queue_mask.
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue,
						    msta->sta_id,
						    0,
						    timeout);
		mvmvif->cab_queue = queue;
	} else if (!fw_has_api(&mvm->fw->ucode_capa,
			       IWL_UCODE_TLV_API_STA_TYPE))
		iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
				   &cfg, timeout);

	if (mvmvif->ap_wep_key) {
		u8 key_offset = iwl_mvm_set_fw_key_idx(mvm);

		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;

		ret = iwl_mvm_send_sta_key(mvm, mvmvif->mcast_sta.sta_id,
					   mvmvif->ap_wep_key, 1, 0, NULL, 0,
					   key_offset, 0);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
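 * The multicast station's frames are flushed and its cab queue is disabled
 * before the removal command is sent.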
2441 */ 2442 int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 2443 { 2444 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2445 int ret; 2446 2447 lockdep_assert_held(&mvm->mutex); 2448 2449 iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0); 2450 2451 iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue, 2452 0, 0); 2453 2454 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id); 2455 if (ret) 2456 IWL_WARN(mvm, "Failed sending remove station\n"); 2457 2458 return ret; 2459 } 2460 2461 #define IWL_MAX_RX_BA_SESSIONS 16 2462 2463 static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid) 2464 { 2465 struct iwl_mvm_delba_notif notif = { 2466 .metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA, 2467 .metadata.sync = 1, 2468 .delba.baid = baid, 2469 }; 2470 iwl_mvm_sync_rx_queues_internal(mvm, (void *)¬if, sizeof(notif)); 2471 }; 2472 2473 static void iwl_mvm_free_reorder(struct iwl_mvm *mvm, 2474 struct iwl_mvm_baid_data *data) 2475 { 2476 int i; 2477 2478 iwl_mvm_sync_rxq_del_ba(mvm, data->baid); 2479 2480 for (i = 0; i < mvm->trans->num_rx_queues; i++) { 2481 int j; 2482 struct iwl_mvm_reorder_buffer *reorder_buf = 2483 &data->reorder_buf[i]; 2484 struct iwl_mvm_reorder_buf_entry *entries = 2485 &data->entries[i * data->entries_per_queue]; 2486 2487 spin_lock_bh(&reorder_buf->lock); 2488 if (likely(!reorder_buf->num_stored)) { 2489 spin_unlock_bh(&reorder_buf->lock); 2490 continue; 2491 } 2492 2493 /* 2494 * This shouldn't happen in regular DELBA since the internal 2495 * delBA notification should trigger a release of all frames in 2496 * the reorder buffer. 2497 */ 2498 WARN_ON(1); 2499 2500 for (j = 0; j < reorder_buf->buf_size; j++) 2501 __skb_queue_purge(&entries[j].e.frames); 2502 /* 2503 * Prevent timer re-arm. This prevents a very far fetched case 2504 * where we timed out on the notification. There may be prior 2505 * RX frames pending in the RX queue before the notification 2506 * that might get processed between now and the actual deletion 2507 * and we would re-arm the timer although we are deleting the 2508 * reorder buffer. 
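		 * Marking the buffer as removed under its lock, before the
		 * del_timer_sync() below, is what closes that window.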
2509 */ 2510 reorder_buf->removed = true; 2511 spin_unlock_bh(&reorder_buf->lock); 2512 del_timer_sync(&reorder_buf->reorder_timer); 2513 } 2514 } 2515 2516 static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm, 2517 struct iwl_mvm_baid_data *data, 2518 u16 ssn, u16 buf_size) 2519 { 2520 int i; 2521 2522 for (i = 0; i < mvm->trans->num_rx_queues; i++) { 2523 struct iwl_mvm_reorder_buffer *reorder_buf = 2524 &data->reorder_buf[i]; 2525 struct iwl_mvm_reorder_buf_entry *entries = 2526 &data->entries[i * data->entries_per_queue]; 2527 int j; 2528 2529 reorder_buf->num_stored = 0; 2530 reorder_buf->head_sn = ssn; 2531 reorder_buf->buf_size = buf_size; 2532 /* rx reorder timer */ 2533 timer_setup(&reorder_buf->reorder_timer, 2534 iwl_mvm_reorder_timer_expired, 0); 2535 spin_lock_init(&reorder_buf->lock); 2536 reorder_buf->mvm = mvm; 2537 reorder_buf->queue = i; 2538 reorder_buf->valid = false; 2539 for (j = 0; j < reorder_buf->buf_size; j++) 2540 __skb_queue_head_init(&entries[j].e.frames); 2541 } 2542 } 2543 2544 int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, 2545 int tid, u16 ssn, bool start, u16 buf_size, u16 timeout) 2546 { 2547 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 2548 struct iwl_mvm_add_sta_cmd cmd = {}; 2549 struct iwl_mvm_baid_data *baid_data = NULL; 2550 int ret; 2551 u32 status; 2552 2553 lockdep_assert_held(&mvm->mutex); 2554 2555 if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) { 2556 IWL_WARN(mvm, "Not enough RX BA SESSIONS\n"); 2557 return -ENOSPC; 2558 } 2559 2560 if (iwl_mvm_has_new_rx_api(mvm) && start) { 2561 u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]); 2562 2563 /* sparse doesn't like the __align() so don't check */ 2564 #ifndef __CHECKER__ 2565 /* 2566 * The division below will be OK if either the cache line size 2567 * can be divided by the entry size (ALIGN will round up) or if 2568 * if the entry size can be divided by the cache line size, in 2569 * which case the ALIGN() will do nothing. 2570 */ 2571 BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) && 2572 sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES); 2573 #endif 2574 2575 /* 2576 * Upward align the reorder buffer size to fill an entire cache 2577 * line for each queue, to avoid sharing cache lines between 2578 * different queues. 2579 */ 2580 reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES); 2581 2582 /* 2583 * Allocate here so if allocation fails we can bail out early 2584 * before starting the BA session in the firmware 2585 */ 2586 baid_data = kzalloc(sizeof(*baid_data) + 2587 mvm->trans->num_rx_queues * 2588 reorder_buf_size, 2589 GFP_KERNEL); 2590 if (!baid_data) 2591 return -ENOMEM; 2592 2593 /* 2594 * This division is why we need the above BUILD_BUG_ON(), 2595 * if that doesn't hold then this will not be right. 2596 */ 2597 baid_data->entries_per_queue = 2598 reorder_buf_size / sizeof(baid_data->entries[0]); 2599 } 2600 2601 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color); 2602 cmd.sta_id = mvm_sta->sta_id; 2603 cmd.add_modify = STA_MODE_MODIFY; 2604 if (start) { 2605 cmd.add_immediate_ba_tid = (u8) tid; 2606 cmd.add_immediate_ba_ssn = cpu_to_le16(ssn); 2607 cmd.rx_ba_window = cpu_to_le16(buf_size); 2608 } else { 2609 cmd.remove_immediate_ba_tid = (u8) tid; 2610 } 2611 cmd.modify_mask = start ? 
STA_MODIFY_ADD_BA_TID : 2612 STA_MODIFY_REMOVE_BA_TID; 2613 2614 status = ADD_STA_SUCCESS; 2615 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, 2616 iwl_mvm_add_sta_cmd_size(mvm), 2617 &cmd, &status); 2618 if (ret) 2619 goto out_free; 2620 2621 switch (status & IWL_ADD_STA_STATUS_MASK) { 2622 case ADD_STA_SUCCESS: 2623 IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n", 2624 start ? "start" : "stopp"); 2625 break; 2626 case ADD_STA_IMMEDIATE_BA_FAILURE: 2627 IWL_WARN(mvm, "RX BA Session refused by fw\n"); 2628 ret = -ENOSPC; 2629 break; 2630 default: 2631 ret = -EIO; 2632 IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n", 2633 start ? "start" : "stopp", status); 2634 break; 2635 } 2636 2637 if (ret) 2638 goto out_free; 2639 2640 if (start) { 2641 u8 baid; 2642 2643 mvm->rx_ba_sessions++; 2644 2645 if (!iwl_mvm_has_new_rx_api(mvm)) 2646 return 0; 2647 2648 if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) { 2649 ret = -EINVAL; 2650 goto out_free; 2651 } 2652 baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >> 2653 IWL_ADD_STA_BAID_SHIFT); 2654 baid_data->baid = baid; 2655 baid_data->timeout = timeout; 2656 baid_data->last_rx = jiffies; 2657 baid_data->rcu_ptr = &mvm->baid_map[baid]; 2658 timer_setup(&baid_data->session_timer, 2659 iwl_mvm_rx_agg_session_expired, 0); 2660 baid_data->mvm = mvm; 2661 baid_data->tid = tid; 2662 baid_data->sta_id = mvm_sta->sta_id; 2663 2664 mvm_sta->tid_to_baid[tid] = baid; 2665 if (timeout) 2666 mod_timer(&baid_data->session_timer, 2667 TU_TO_EXP_TIME(timeout * 2)); 2668 2669 iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size); 2670 /* 2671 * protect the BA data with RCU to cover a case where our 2672 * internal RX sync mechanism will timeout (not that it's 2673 * supposed to happen) and we will free the session data while 2674 * RX is being processed in parallel 2675 */ 2676 IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n", 2677 mvm_sta->sta_id, tid, baid); 2678 WARN_ON(rcu_access_pointer(mvm->baid_map[baid])); 2679 rcu_assign_pointer(mvm->baid_map[baid], baid_data); 2680 } else { 2681 u8 baid = mvm_sta->tid_to_baid[tid]; 2682 2683 if (mvm->rx_ba_sessions > 0) 2684 /* check that restart flow didn't zero the counter */ 2685 mvm->rx_ba_sessions--; 2686 if (!iwl_mvm_has_new_rx_api(mvm)) 2687 return 0; 2688 2689 if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID)) 2690 return -EINVAL; 2691 2692 baid_data = rcu_access_pointer(mvm->baid_map[baid]); 2693 if (WARN_ON(!baid_data)) 2694 return -EINVAL; 2695 2696 /* synchronize all rx queues so we can safely delete */ 2697 iwl_mvm_free_reorder(mvm, baid_data); 2698 del_timer_sync(&baid_data->session_timer); 2699 RCU_INIT_POINTER(mvm->baid_map[baid], NULL); 2700 kfree_rcu(baid_data, rcu_head); 2701 IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid); 2702 } 2703 return 0; 2704 2705 out_free: 2706 kfree(baid_data); 2707 return ret; 2708 } 2709 2710 int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, 2711 int tid, u8 queue, bool start) 2712 { 2713 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 2714 struct iwl_mvm_add_sta_cmd cmd = {}; 2715 int ret; 2716 u32 status; 2717 2718 lockdep_assert_held(&mvm->mutex); 2719 2720 if (start) { 2721 mvm_sta->tfd_queue_msk |= BIT(queue); 2722 mvm_sta->tid_disable_agg &= ~BIT(tid); 2723 } else { 2724 /* In DQA-mode the queue isn't removed on agg termination */ 2725 mvm_sta->tid_disable_agg |= BIT(tid); 2726 } 2727 2728 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color); 2729 cmd.sta_id = mvm_sta->sta_id; 2730 cmd.add_modify = 
STA_MODE_MODIFY; 2731 if (!iwl_mvm_has_new_tx_api(mvm)) 2732 cmd.modify_mask = STA_MODIFY_QUEUES; 2733 cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX; 2734 cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk); 2735 cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg); 2736 2737 status = ADD_STA_SUCCESS; 2738 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, 2739 iwl_mvm_add_sta_cmd_size(mvm), 2740 &cmd, &status); 2741 if (ret) 2742 return ret; 2743 2744 switch (status & IWL_ADD_STA_STATUS_MASK) { 2745 case ADD_STA_SUCCESS: 2746 break; 2747 default: 2748 ret = -EIO; 2749 IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n", 2750 start ? "start" : "stopp", status); 2751 break; 2752 } 2753 2754 return ret; 2755 } 2756 2757 const u8 tid_to_mac80211_ac[] = { 2758 IEEE80211_AC_BE, 2759 IEEE80211_AC_BK, 2760 IEEE80211_AC_BK, 2761 IEEE80211_AC_BE, 2762 IEEE80211_AC_VI, 2763 IEEE80211_AC_VI, 2764 IEEE80211_AC_VO, 2765 IEEE80211_AC_VO, 2766 IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */ 2767 }; 2768 2769 static const u8 tid_to_ucode_ac[] = { 2770 AC_BE, 2771 AC_BK, 2772 AC_BK, 2773 AC_BE, 2774 AC_VI, 2775 AC_VI, 2776 AC_VO, 2777 AC_VO, 2778 }; 2779 2780 int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 2781 struct ieee80211_sta *sta, u16 tid, u16 *ssn) 2782 { 2783 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 2784 struct iwl_mvm_tid_data *tid_data; 2785 u16 normalized_ssn; 2786 int txq_id; 2787 int ret; 2788 2789 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) 2790 return -EINVAL; 2791 2792 if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED && 2793 mvmsta->tid_data[tid].state != IWL_AGG_OFF) { 2794 IWL_ERR(mvm, 2795 "Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n", 2796 mvmsta->tid_data[tid].state); 2797 return -ENXIO; 2798 } 2799 2800 lockdep_assert_held(&mvm->mutex); 2801 2802 if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE && 2803 iwl_mvm_has_new_tx_api(mvm)) { 2804 u8 ac = tid_to_mac80211_ac[tid]; 2805 2806 ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid); 2807 if (ret) 2808 return ret; 2809 } 2810 2811 spin_lock_bh(&mvmsta->lock); 2812 2813 /* possible race condition - we entered D0i3 while starting agg */ 2814 if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) { 2815 spin_unlock_bh(&mvmsta->lock); 2816 IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n"); 2817 return -EIO; 2818 } 2819 2820 /* 2821 * Note the possible cases: 2822 * 1. An enabled TXQ - TXQ needs to become agg'ed 2823 * 2. 
The TXQ hasn't yet been enabled, so find a free one and mark 2824 * it as reserved 2825 */ 2826 txq_id = mvmsta->tid_data[tid].txq_id; 2827 if (txq_id == IWL_MVM_INVALID_QUEUE) { 2828 txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, 2829 IWL_MVM_DQA_MIN_DATA_QUEUE, 2830 IWL_MVM_DQA_MAX_DATA_QUEUE); 2831 if (txq_id < 0) { 2832 ret = txq_id; 2833 IWL_ERR(mvm, "Failed to allocate agg queue\n"); 2834 goto out; 2835 } 2836 2837 /* TXQ hasn't yet been enabled, so mark it only as reserved */ 2838 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED; 2839 } else if (unlikely(mvm->queue_info[txq_id].status == 2840 IWL_MVM_QUEUE_SHARED)) { 2841 ret = -ENXIO; 2842 IWL_DEBUG_TX_QUEUES(mvm, 2843 "Can't start tid %d agg on shared queue!\n", 2844 tid); 2845 goto out; 2846 } 2847 2848 IWL_DEBUG_TX_QUEUES(mvm, 2849 "AGG for tid %d will be on queue #%d\n", 2850 tid, txq_id); 2851 2852 tid_data = &mvmsta->tid_data[tid]; 2853 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); 2854 tid_data->txq_id = txq_id; 2855 *ssn = tid_data->ssn; 2856 2857 IWL_DEBUG_TX_QUEUES(mvm, 2858 "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n", 2859 mvmsta->sta_id, tid, txq_id, tid_data->ssn, 2860 tid_data->next_reclaimed); 2861 2862 /* 2863 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need 2864 * to align the wrap around of ssn so we compare relevant values. 2865 */ 2866 normalized_ssn = tid_data->ssn; 2867 if (mvm->trans->cfg->gen2) 2868 normalized_ssn &= 0xff; 2869 2870 if (normalized_ssn == tid_data->next_reclaimed) { 2871 tid_data->state = IWL_AGG_STARTING; 2872 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); 2873 } else { 2874 tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA; 2875 } 2876 2877 ret = 0; 2878 2879 out: 2880 spin_unlock_bh(&mvmsta->lock); 2881 2882 return ret; 2883 } 2884 2885 int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 2886 struct ieee80211_sta *sta, u16 tid, u16 buf_size, 2887 bool amsdu) 2888 { 2889 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 2890 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; 2891 unsigned int wdg_timeout = 2892 iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false); 2893 int queue, ret; 2894 bool alloc_queue = true; 2895 enum iwl_mvm_queue_status queue_status; 2896 u16 ssn; 2897 2898 struct iwl_trans_txq_scd_cfg cfg = { 2899 .sta_id = mvmsta->sta_id, 2900 .tid = tid, 2901 .frame_limit = buf_size, 2902 .aggregate = true, 2903 }; 2904 2905 /* 2906 * When FW supports TLC_OFFLOAD, it also implements Tx aggregation 2907 * manager, so this function should never be called in this case. 2908 */ 2909 if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm))) 2910 return -EINVAL; 2911 2912 BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE) 2913 != IWL_MAX_TID_COUNT); 2914 2915 spin_lock_bh(&mvmsta->lock); 2916 ssn = tid_data->ssn; 2917 queue = tid_data->txq_id; 2918 tid_data->state = IWL_AGG_ON; 2919 mvmsta->agg_tids |= BIT(tid); 2920 tid_data->ssn = 0xffff; 2921 tid_data->amsdu_in_ampdu_allowed = amsdu; 2922 spin_unlock_bh(&mvmsta->lock); 2923 2924 if (iwl_mvm_has_new_tx_api(mvm)) { 2925 /* 2926 * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start() 2927 * would have failed, so if we are here there is no need to 2928 * allocate a queue. 2929 * However, if aggregation size is different than the default 2930 * size, the scheduler should be reconfigured. 2931 * We cannot do this with the new TX API, so return unsupported 2932 * for now, until it will be offloaded to firmware.. 
2933 * Note that if SCD default value changes - this condition 2934 * should be updated as well. 2935 */ 2936 if (buf_size < IWL_FRAME_LIMIT) 2937 return -ENOTSUPP; 2938 2939 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true); 2940 if (ret) 2941 return -EIO; 2942 goto out; 2943 } 2944 2945 cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]]; 2946 2947 queue_status = mvm->queue_info[queue].status; 2948 2949 /* Maybe there is no need to even alloc a queue... */ 2950 if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY) 2951 alloc_queue = false; 2952 2953 /* 2954 * Only reconfig the SCD for the queue if the window size has 2955 * changed from current (become smaller) 2956 */ 2957 if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) { 2958 /* 2959 * If reconfiguring an existing queue, it first must be 2960 * drained 2961 */ 2962 ret = iwl_trans_wait_tx_queues_empty(mvm->trans, 2963 BIT(queue)); 2964 if (ret) { 2965 IWL_ERR(mvm, 2966 "Error draining queue before reconfig\n"); 2967 return ret; 2968 } 2969 2970 ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo, 2971 mvmsta->sta_id, tid, 2972 buf_size, ssn); 2973 if (ret) { 2974 IWL_ERR(mvm, 2975 "Error reconfiguring TXQ #%d\n", queue); 2976 return ret; 2977 } 2978 } 2979 2980 if (alloc_queue) 2981 iwl_mvm_enable_txq(mvm, queue, 2982 vif->hw_queue[tid_to_mac80211_ac[tid]], ssn, 2983 &cfg, wdg_timeout); 2984 2985 /* Send ADD_STA command to enable aggs only if the queue isn't shared */ 2986 if (queue_status != IWL_MVM_QUEUE_SHARED) { 2987 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true); 2988 if (ret) 2989 return -EIO; 2990 } 2991 2992 /* No need to mark as reserved */ 2993 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; 2994 2995 out: 2996 /* 2997 * Even though in theory the peer could have different 2998 * aggregation reorder buffer sizes for different sessions, 2999 * our ucode doesn't allow for that and has a global limit 3000 * for each station. Therefore, use the minimum of all the 3001 * aggregation sessions and our default value. 3002 */ 3003 mvmsta->max_agg_bufsize = 3004 min(mvmsta->max_agg_bufsize, buf_size); 3005 mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize; 3006 3007 IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n", 3008 sta->addr, tid); 3009 3010 return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq, false); 3011 } 3012 3013 static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm, 3014 struct iwl_mvm_sta *mvmsta, 3015 struct iwl_mvm_tid_data *tid_data) 3016 { 3017 u16 txq_id = tid_data->txq_id; 3018 3019 lockdep_assert_held(&mvm->mutex); 3020 3021 if (iwl_mvm_has_new_tx_api(mvm)) 3022 return; 3023 3024 /* 3025 * The TXQ is marked as reserved only if no traffic came through yet 3026 * This means no traffic has been sent on this TID (agg'd or not), so 3027 * we no longer have use for the queue. Since it hasn't even been 3028 * allocated through iwl_mvm_enable_txq, so we can just mark it back as 3029 * free. 
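	 * If traffic did start on it, the queue is in use and must be left
	 * alone; only a queue that is still in the RESERVED state is
	 * released here.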
3030 */ 3031 if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) { 3032 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE; 3033 tid_data->txq_id = IWL_MVM_INVALID_QUEUE; 3034 } 3035 } 3036 3037 int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 3038 struct ieee80211_sta *sta, u16 tid) 3039 { 3040 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 3041 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; 3042 u16 txq_id; 3043 int err; 3044 3045 /* 3046 * If mac80211 is cleaning its state, then say that we finished since 3047 * our state has been cleared anyway. 3048 */ 3049 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { 3050 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); 3051 return 0; 3052 } 3053 3054 spin_lock_bh(&mvmsta->lock); 3055 3056 txq_id = tid_data->txq_id; 3057 3058 IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n", 3059 mvmsta->sta_id, tid, txq_id, tid_data->state); 3060 3061 mvmsta->agg_tids &= ~BIT(tid); 3062 3063 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data); 3064 3065 switch (tid_data->state) { 3066 case IWL_AGG_ON: 3067 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); 3068 3069 IWL_DEBUG_TX_QUEUES(mvm, 3070 "ssn = %d, next_recl = %d\n", 3071 tid_data->ssn, tid_data->next_reclaimed); 3072 3073 tid_data->ssn = 0xffff; 3074 tid_data->state = IWL_AGG_OFF; 3075 spin_unlock_bh(&mvmsta->lock); 3076 3077 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); 3078 3079 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false); 3080 return 0; 3081 case IWL_AGG_STARTING: 3082 case IWL_EMPTYING_HW_QUEUE_ADDBA: 3083 /* 3084 * The agg session has been stopped before it was set up. This 3085 * can happen when the AddBA timer times out for example. 3086 */ 3087 3088 /* No barriers since we are under mutex */ 3089 lockdep_assert_held(&mvm->mutex); 3090 3091 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); 3092 tid_data->state = IWL_AGG_OFF; 3093 err = 0; 3094 break; 3095 default: 3096 IWL_ERR(mvm, 3097 "Stopping AGG while state not ON or starting for %d on %d (%d)\n", 3098 mvmsta->sta_id, tid, tid_data->state); 3099 IWL_ERR(mvm, 3100 "\ttid_data->txq_id = %d\n", tid_data->txq_id); 3101 err = -EINVAL; 3102 } 3103 3104 spin_unlock_bh(&mvmsta->lock); 3105 3106 return err; 3107 } 3108 3109 int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 3110 struct ieee80211_sta *sta, u16 tid) 3111 { 3112 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 3113 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; 3114 u16 txq_id; 3115 enum iwl_mvm_agg_state old_state; 3116 3117 /* 3118 * First set the agg state to OFF to avoid calling 3119 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty. 
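	 * The state is changed under the station lock; the queues are
	 * drained and flushed afterwards, and only if a session was
	 * actually operational (old_state >= IWL_AGG_ON).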
3120 */ 3121 spin_lock_bh(&mvmsta->lock); 3122 txq_id = tid_data->txq_id; 3123 IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n", 3124 mvmsta->sta_id, tid, txq_id, tid_data->state); 3125 old_state = tid_data->state; 3126 tid_data->state = IWL_AGG_OFF; 3127 mvmsta->agg_tids &= ~BIT(tid); 3128 spin_unlock_bh(&mvmsta->lock); 3129 3130 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data); 3131 3132 if (old_state >= IWL_AGG_ON) { 3133 iwl_mvm_drain_sta(mvm, mvmsta, true); 3134 3135 if (iwl_mvm_has_new_tx_api(mvm)) { 3136 if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id, 3137 BIT(tid), 0)) 3138 IWL_ERR(mvm, "Couldn't flush the AGG queue\n"); 3139 iwl_trans_wait_txq_empty(mvm->trans, txq_id); 3140 } else { 3141 if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0)) 3142 IWL_ERR(mvm, "Couldn't flush the AGG queue\n"); 3143 iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id)); 3144 } 3145 3146 iwl_mvm_drain_sta(mvm, mvmsta, false); 3147 3148 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false); 3149 } 3150 3151 return 0; 3152 } 3153 3154 static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm) 3155 { 3156 int i, max = -1, max_offs = -1; 3157 3158 lockdep_assert_held(&mvm->mutex); 3159 3160 /* Pick the unused key offset with the highest 'deleted' 3161 * counter. Every time a key is deleted, all the counters 3162 * are incremented and the one that was just deleted is 3163 * reset to zero. Thus, the highest counter is the one 3164 * that was deleted longest ago. Pick that one. 3165 */ 3166 for (i = 0; i < STA_KEY_MAX_NUM; i++) { 3167 if (test_bit(i, mvm->fw_key_table)) 3168 continue; 3169 if (mvm->fw_key_deleted[i] > max) { 3170 max = mvm->fw_key_deleted[i]; 3171 max_offs = i; 3172 } 3173 } 3174 3175 if (max_offs < 0) 3176 return STA_KEY_IDX_INVALID; 3177 3178 return max_offs; 3179 } 3180 3181 static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm, 3182 struct ieee80211_vif *vif, 3183 struct ieee80211_sta *sta) 3184 { 3185 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3186 3187 if (sta) 3188 return iwl_mvm_sta_from_mac80211(sta); 3189 3190 /* 3191 * The device expects GTKs for station interfaces to be 3192 * installed as GTKs for the AP station. If we have no 3193 * station ID, then use AP's station ID. 3194 */ 3195 if (vif->type == NL80211_IFTYPE_STATION && 3196 mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) { 3197 u8 sta_id = mvmvif->ap_sta_id; 3198 3199 sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id], 3200 lockdep_is_held(&mvm->mutex)); 3201 3202 /* 3203 * It is possible that the 'sta' parameter is NULL, 3204 * for example when a GTK is removed - the sta_id will then 3205 * be the AP ID, and no station was passed by mac80211. 
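		 * Internal stations are stored as error pointers, so both
		 * NULL and IS_ERR() entries are treated as "no station" here.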
3206 */ 3207 if (IS_ERR_OR_NULL(sta)) 3208 return NULL; 3209 3210 return iwl_mvm_sta_from_mac80211(sta); 3211 } 3212 3213 return NULL; 3214 } 3215 3216 static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, 3217 u32 sta_id, 3218 struct ieee80211_key_conf *key, bool mcast, 3219 u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags, 3220 u8 key_offset, bool mfp) 3221 { 3222 union { 3223 struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1; 3224 struct iwl_mvm_add_sta_key_cmd cmd; 3225 } u = {}; 3226 __le16 key_flags; 3227 int ret; 3228 u32 status; 3229 u16 keyidx; 3230 u64 pn = 0; 3231 int i, size; 3232 bool new_api = fw_has_api(&mvm->fw->ucode_capa, 3233 IWL_UCODE_TLV_API_TKIP_MIC_KEYS); 3234 3235 if (sta_id == IWL_MVM_INVALID_STA) 3236 return -EINVAL; 3237 3238 keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) & 3239 STA_KEY_FLG_KEYID_MSK; 3240 key_flags = cpu_to_le16(keyidx); 3241 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP); 3242 3243 switch (key->cipher) { 3244 case WLAN_CIPHER_SUITE_TKIP: 3245 key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP); 3246 if (new_api) { 3247 memcpy((void *)&u.cmd.tx_mic_key, 3248 &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY], 3249 IWL_MIC_KEY_SIZE); 3250 3251 memcpy((void *)&u.cmd.rx_mic_key, 3252 &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY], 3253 IWL_MIC_KEY_SIZE); 3254 pn = atomic64_read(&key->tx_pn); 3255 3256 } else { 3257 u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32; 3258 for (i = 0; i < 5; i++) 3259 u.cmd_v1.tkip_rx_ttak[i] = 3260 cpu_to_le16(tkip_p1k[i]); 3261 } 3262 memcpy(u.cmd.common.key, key->key, key->keylen); 3263 break; 3264 case WLAN_CIPHER_SUITE_CCMP: 3265 key_flags |= cpu_to_le16(STA_KEY_FLG_CCM); 3266 memcpy(u.cmd.common.key, key->key, key->keylen); 3267 if (new_api) 3268 pn = atomic64_read(&key->tx_pn); 3269 break; 3270 case WLAN_CIPHER_SUITE_WEP104: 3271 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES); 3272 /* fall through */ 3273 case WLAN_CIPHER_SUITE_WEP40: 3274 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP); 3275 memcpy(u.cmd.common.key + 3, key->key, key->keylen); 3276 break; 3277 case WLAN_CIPHER_SUITE_GCMP_256: 3278 key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES); 3279 /* fall through */ 3280 case WLAN_CIPHER_SUITE_GCMP: 3281 key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP); 3282 memcpy(u.cmd.common.key, key->key, key->keylen); 3283 if (new_api) 3284 pn = atomic64_read(&key->tx_pn); 3285 break; 3286 default: 3287 key_flags |= cpu_to_le16(STA_KEY_FLG_EXT); 3288 memcpy(u.cmd.common.key, key->key, key->keylen); 3289 } 3290 3291 if (mcast) 3292 key_flags |= cpu_to_le16(STA_KEY_MULTICAST); 3293 if (mfp) 3294 key_flags |= cpu_to_le16(STA_KEY_MFP); 3295 3296 u.cmd.common.key_offset = key_offset; 3297 u.cmd.common.key_flags = key_flags; 3298 u.cmd.common.sta_id = sta_id; 3299 3300 if (new_api) { 3301 u.cmd.transmit_seq_cnt = cpu_to_le64(pn); 3302 size = sizeof(u.cmd); 3303 } else { 3304 size = sizeof(u.cmd_v1); 3305 } 3306 3307 status = ADD_STA_SUCCESS; 3308 if (cmd_flags & CMD_ASYNC) 3309 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size, 3310 &u.cmd); 3311 else 3312 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, 3313 &u.cmd, &status); 3314 3315 switch (status) { 3316 case ADD_STA_SUCCESS: 3317 IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n"); 3318 break; 3319 default: 3320 ret = -EIO; 3321 IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n"); 3322 break; 3323 } 3324 3325 return ret; 3326 } 3327 3328 static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm, 3329 struct ieee80211_key_conf *keyconf, 3330 u8 sta_id, bool remove_key) 3331 { 3332 struct 
iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {}; 3333 3334 /* verify the key details match the required command's expectations */ 3335 if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) || 3336 (keyconf->keyidx != 4 && keyconf->keyidx != 5) || 3337 (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC && 3338 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 && 3339 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256))) 3340 return -EINVAL; 3341 3342 if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) && 3343 keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC)) 3344 return -EINVAL; 3345 3346 igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx); 3347 igtk_cmd.sta_id = cpu_to_le32(sta_id); 3348 3349 if (remove_key) { 3350 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID); 3351 } else { 3352 struct ieee80211_key_seq seq; 3353 const u8 *pn; 3354 3355 switch (keyconf->cipher) { 3356 case WLAN_CIPHER_SUITE_AES_CMAC: 3357 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM); 3358 break; 3359 case WLAN_CIPHER_SUITE_BIP_GMAC_128: 3360 case WLAN_CIPHER_SUITE_BIP_GMAC_256: 3361 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP); 3362 break; 3363 default: 3364 return -EINVAL; 3365 } 3366 3367 memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen); 3368 if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) 3369 igtk_cmd.ctrl_flags |= 3370 cpu_to_le32(STA_KEY_FLG_KEY_32BYTES); 3371 ieee80211_get_key_rx_seq(keyconf, 0, &seq); 3372 pn = seq.aes_cmac.pn; 3373 igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) | 3374 ((u64) pn[4] << 8) | 3375 ((u64) pn[3] << 16) | 3376 ((u64) pn[2] << 24) | 3377 ((u64) pn[1] << 32) | 3378 ((u64) pn[0] << 40)); 3379 } 3380 3381 IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n", 3382 remove_key ? "removing" : "installing", 3383 igtk_cmd.sta_id); 3384 3385 if (!iwl_mvm_has_new_rx_api(mvm)) { 3386 struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = { 3387 .ctrl_flags = igtk_cmd.ctrl_flags, 3388 .key_id = igtk_cmd.key_id, 3389 .sta_id = igtk_cmd.sta_id, 3390 .receive_seq_cnt = igtk_cmd.receive_seq_cnt 3391 }; 3392 3393 memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk, 3394 ARRAY_SIZE(igtk_cmd_v1.igtk)); 3395 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0, 3396 sizeof(igtk_cmd_v1), &igtk_cmd_v1); 3397 } 3398 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0, 3399 sizeof(igtk_cmd), &igtk_cmd); 3400 } 3401 3402 3403 static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm, 3404 struct ieee80211_vif *vif, 3405 struct ieee80211_sta *sta) 3406 { 3407 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3408 3409 if (sta) 3410 return sta->addr; 3411 3412 if (vif->type == NL80211_IFTYPE_STATION && 3413 mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) { 3414 u8 sta_id = mvmvif->ap_sta_id; 3415 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], 3416 lockdep_is_held(&mvm->mutex)); 3417 return sta->addr; 3418 } 3419 3420 3421 return NULL; 3422 } 3423 3424 static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm, 3425 struct ieee80211_vif *vif, 3426 struct ieee80211_sta *sta, 3427 struct ieee80211_key_conf *keyconf, 3428 u8 key_offset, 3429 bool mcast) 3430 { 3431 int ret; 3432 const u8 *addr; 3433 struct ieee80211_key_seq seq; 3434 u16 p1k[5]; 3435 u32 sta_id; 3436 bool mfp = false; 3437 3438 if (sta) { 3439 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 3440 3441 sta_id = mvm_sta->sta_id; 3442 mfp = sta->mfp; 3443 } else if (vif->type == NL80211_IFTYPE_AP && 3444 !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) { 3445 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3446 3447 sta_id = 
mvmvif->mcast_sta.sta_id; 3448 } else { 3449 IWL_ERR(mvm, "Failed to find station id\n"); 3450 return -EINVAL; 3451 } 3452 3453 switch (keyconf->cipher) { 3454 case WLAN_CIPHER_SUITE_TKIP: 3455 addr = iwl_mvm_get_mac_addr(mvm, vif, sta); 3456 /* get phase 1 key from mac80211 */ 3457 ieee80211_get_key_rx_seq(keyconf, 0, &seq); 3458 ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k); 3459 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast, 3460 seq.tkip.iv32, p1k, 0, key_offset, 3461 mfp); 3462 break; 3463 case WLAN_CIPHER_SUITE_CCMP: 3464 case WLAN_CIPHER_SUITE_WEP40: 3465 case WLAN_CIPHER_SUITE_WEP104: 3466 case WLAN_CIPHER_SUITE_GCMP: 3467 case WLAN_CIPHER_SUITE_GCMP_256: 3468 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast, 3469 0, NULL, 0, key_offset, mfp); 3470 break; 3471 default: 3472 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast, 3473 0, NULL, 0, key_offset, mfp); 3474 } 3475 3476 return ret; 3477 } 3478 3479 static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id, 3480 struct ieee80211_key_conf *keyconf, 3481 bool mcast) 3482 { 3483 union { 3484 struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1; 3485 struct iwl_mvm_add_sta_key_cmd cmd; 3486 } u = {}; 3487 bool new_api = fw_has_api(&mvm->fw->ucode_capa, 3488 IWL_UCODE_TLV_API_TKIP_MIC_KEYS); 3489 __le16 key_flags; 3490 int ret, size; 3491 u32 status; 3492 3493 /* This is a valid situation for GTK removal */ 3494 if (sta_id == IWL_MVM_INVALID_STA) 3495 return 0; 3496 3497 key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) & 3498 STA_KEY_FLG_KEYID_MSK); 3499 key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP); 3500 key_flags |= cpu_to_le16(STA_KEY_NOT_VALID); 3501 3502 if (mcast) 3503 key_flags |= cpu_to_le16(STA_KEY_MULTICAST); 3504 3505 /* 3506 * The fields assigned here are in the same location at the start 3507 * of the command, so we can do this union trick. 3508 */ 3509 u.cmd.common.key_flags = key_flags; 3510 u.cmd.common.key_offset = keyconf->hw_key_idx; 3511 u.cmd.common.sta_id = sta_id; 3512 3513 size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1); 3514 3515 status = ADD_STA_SUCCESS; 3516 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd, 3517 &status); 3518 3519 switch (status) { 3520 case ADD_STA_SUCCESS: 3521 IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n"); 3522 break; 3523 default: 3524 ret = -EIO; 3525 IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n"); 3526 break; 3527 } 3528 3529 return ret; 3530 } 3531 3532 int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, 3533 struct ieee80211_vif *vif, 3534 struct ieee80211_sta *sta, 3535 struct ieee80211_key_conf *keyconf, 3536 u8 key_offset) 3537 { 3538 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); 3539 struct iwl_mvm_sta *mvm_sta; 3540 u8 sta_id = IWL_MVM_INVALID_STA; 3541 int ret; 3542 static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0}; 3543 3544 lockdep_assert_held(&mvm->mutex); 3545 3546 if (vif->type != NL80211_IFTYPE_AP || 3547 keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) { 3548 /* Get the station id from the mvm local station table */ 3549 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta); 3550 if (!mvm_sta) { 3551 IWL_ERR(mvm, "Failed to find station\n"); 3552 return -EINVAL; 3553 } 3554 sta_id = mvm_sta->sta_id; 3555 3556 /* 3557 * It is possible that the 'sta' parameter is NULL, and thus 3558 * there is a need to retrieve the sta from the local station 3559 * table. 
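		 * The lookup below is protected by mvm->mutex (asserted at
		 * the top of this function), so rcu_dereference_protected()
		 * is sufficient.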
		 */
		if (!sta) {
			sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[sta_id],
				lockdep_is_held(&mvm->mutex));
			if (IS_ERR_OR_NULL(sta)) {
				IWL_ERR(mvm, "Invalid station id\n");
				return -EINVAL;
			}
		}

		if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
			return -EINVAL;
	} else {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

		sta_id = mvmvif->mcast_sta.sta_id;
	}

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
		goto end;
	}

	/* If the key_offset is not pre-assigned, we need to find a
	 * new offset to use. In normal cases, the offset is not
	 * pre-assigned, but during HW_RESTART we want to reuse the
	 * same indices, so we pass them when this function is called.
	 *
	 * In D3 entry, we need to hardcode the indices (because the
	 * firmware hardcodes the PTK offset to 0). In this case, we
	 * need to make sure we don't overwrite the hw_key_idx in the
	 * keyconf structure, because otherwise we cannot configure
	 * the original ones back when resuming.
	 */
	if (key_offset == STA_KEY_IDX_INVALID) {
		key_offset = iwl_mvm_set_fw_key_idx(mvm);
		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;
		keyconf->hw_key_idx = key_offset;
	}

	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
	if (ret)
		goto end;

	/*
	 * For WEP, the same key is used for multicast and unicast. Upload it
	 * again, using the same key offset, and now pointing the other one
	 * to the same key slot (offset).
	 * If this fails, remove the original as well.
	 */
	if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	     keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
	    sta) {
		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
					    key_offset, !mcast);
		if (ret) {
			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
			goto end;
		}
	}

	__set_bit(key_offset, mvm->fw_key_table);

end:
	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		      sta ?
sta->addr : zero_addr, ret); 3631 return ret; 3632 } 3633 3634 int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, 3635 struct ieee80211_vif *vif, 3636 struct ieee80211_sta *sta, 3637 struct ieee80211_key_conf *keyconf) 3638 { 3639 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); 3640 struct iwl_mvm_sta *mvm_sta; 3641 u8 sta_id = IWL_MVM_INVALID_STA; 3642 int ret, i; 3643 3644 lockdep_assert_held(&mvm->mutex); 3645 3646 /* Get the station from the mvm local station table */ 3647 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta); 3648 if (mvm_sta) 3649 sta_id = mvm_sta->sta_id; 3650 else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast) 3651 sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id; 3652 3653 3654 IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n", 3655 keyconf->keyidx, sta_id); 3656 3657 if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC || 3658 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || 3659 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)) 3660 return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true); 3661 3662 if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) { 3663 IWL_ERR(mvm, "offset %d not used in fw key table.\n", 3664 keyconf->hw_key_idx); 3665 return -ENOENT; 3666 } 3667 3668 /* track which key was deleted last */ 3669 for (i = 0; i < STA_KEY_MAX_NUM; i++) { 3670 if (mvm->fw_key_deleted[i] < U8_MAX) 3671 mvm->fw_key_deleted[i]++; 3672 } 3673 mvm->fw_key_deleted[keyconf->hw_key_idx] = 0; 3674 3675 if (sta && !mvm_sta) { 3676 IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n"); 3677 return 0; 3678 } 3679 3680 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast); 3681 if (ret) 3682 return ret; 3683 3684 /* delete WEP key twice to get rid of (now useless) offset */ 3685 if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 || 3686 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) 3687 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast); 3688 3689 return ret; 3690 } 3691 3692 void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm, 3693 struct ieee80211_vif *vif, 3694 struct ieee80211_key_conf *keyconf, 3695 struct ieee80211_sta *sta, u32 iv32, 3696 u16 *phase1key) 3697 { 3698 struct iwl_mvm_sta *mvm_sta; 3699 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); 3700 bool mfp = sta ? 
void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     struct ieee80211_key_conf *keyconf,
			     struct ieee80211_sta *sta, u32 iv32,
			     u16 *phase1key)
{
	struct iwl_mvm_sta *mvm_sta;
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	bool mfp = sta ? sta->mfp : false;

	rcu_read_lock();

	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (WARN_ON_ONCE(!mvm_sta))
		goto unlock;
	iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast,
			     iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx,
			     mfp);

unlock:
	rcu_read_unlock();
}

void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
				struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       enum ieee80211_frame_release_type reason,
				       u16 cnt, u16 tids, bool more_data,
				       bool single_sta_queue)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
		.sleep_tx_count = cpu_to_le16(cnt),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int tid, ret;
	unsigned long _tids = tids;

	/* convert TIDs to ACs - we don't support TSPEC so that's OK
	 * Note that this field is reserved and unused by firmware not
	 * supporting GO uAPSD, so it's safe to always do this.
	 */
	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);

	/* If we're releasing frames from aggregation or dqa queues then check
	 * if all the queues that we're releasing frames from, combined, have:
	 *  - more frames than the service period, in which case more_data
	 *    needs to be set
	 *  - fewer than 'cnt' frames, in which case we need to adjust the
	 *    firmware command (but do that unconditionally)
	 */
	if (single_sta_queue) {
		int remaining = cnt;
		int sleep_tx_count;

		spin_lock_bh(&mvmsta->lock);
		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
			struct iwl_mvm_tid_data *tid_data;
			u16 n_queued;

			tid_data = &mvmsta->tid_data[tid];

			n_queued = iwl_mvm_tid_queued(mvm, tid_data);
			if (n_queued > remaining) {
				more_data = true;
				remaining = 0;
				break;
			}
			remaining -= n_queued;
		}
		sleep_tx_count = cnt - remaining;
		if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
			mvmsta->sleep_tx_count = sleep_tx_count;
		spin_unlock_bh(&mvmsta->lock);

		cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
		if (WARN_ON(cnt - remaining == 0)) {
			ieee80211_sta_eosp(sta);
			return;
		}
	}

	/* Note: this is ignored by firmware not supporting GO uAPSD */
	if (more_data)
		cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;

	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
		mvmsta->next_status_eosp = true;
		cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
	} else {
		cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
	}

	/* block the Tx queues until the FW updated the sleep Tx count */
	iwl_trans_block_txq_ptrs(mvm->trans, true);

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
				   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
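/*
 * Firmware EOSP notification handler: resolve the station from the
 * reported sta_id and tell mac80211 that the service period has ended.
 */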
void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
			   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
		return;

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (!IS_ERR_OR_NULL(sta))
		ieee80211_sta_eosp(sta);
	rcu_read_unlock();
}

void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
				   struct iwl_mvm_sta *mvmsta, bool disable)
{
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      bool disable)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvm_sta->lock);

	if (mvm_sta->disable_tx == disable) {
		spin_unlock_bh(&mvm_sta->lock);
		return;
	}

	mvm_sta->disable_tx = disable;

	/* Tell mac80211 to start/stop queuing tx for this station */
	ieee80211_sta_block_awake(mvm->hw, sta, disable);

	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);

	spin_unlock_bh(&mvm_sta->lock);
}

static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
					      struct iwl_mvm_vif *mvmvif,
					      struct iwl_mvm_int_sta *sta,
					      bool disable)
{
	u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = sta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(id),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
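/*
 * Block or unblock TX for every station that belongs to the given vif,
 * matched by its FW id/color.  When the firmware exposes the STA_TYPE API
 * the internal multicast station is handled as well, and the broadcast
 * station is touched only on unblock (the FW blocks it by itself).
 */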
void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif *mvmvif,
				       bool disable)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* Block/unblock all the stations of the given mvmvif */
	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
		if (mvm_sta->mac_id_n_color !=
		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
			continue;

		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
	}

	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return;

	/* Need to block/unblock also the multicast station */
	if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->mcast_sta, disable);

	/*
	 * Only unblock the broadcast station (FW blocks it for immediate
	 * quiet, not the driver)
	 */
	if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->bcast_sta, disable);
}

void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);

	if (!WARN_ON(!mvmsta))
		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);

	rcu_read_unlock();
}

u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
{
	u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

	/*
	 * In 22000 HW, the next_reclaimed index is only 8 bits wide, so we
	 * need to align the wrap-around of ssn so that we compare the
	 * relevant values.
	 */
	if (mvm->trans->cfg->gen2)
		sn &= 0xff;

	return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
}