1 /****************************************************************************** 2 * 3 * This file is provided under a dual BSD/GPLv2 license. When using or 4 * redistributing this file, you may do so under either license. 5 * 6 * GPL LICENSE SUMMARY 7 * 8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 11 * Copyright(c) 2018 - 2019 Intel Corporation 12 * 13 * This program is free software; you can redistribute it and/or modify 14 * it under the terms of version 2 of the GNU General Public License as 15 * published by the Free Software Foundation. 16 * 17 * This program is distributed in the hope that it will be useful, but 18 * WITHOUT ANY WARRANTY; without even the implied warranty of 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 20 * General Public License for more details. 21 * 22 * The full GNU General Public License is included in this distribution 23 * in the file called COPYING. 24 * 25 * Contact Information: 26 * Intel Linux Wireless <linuxwifi@intel.com> 27 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 28 * 29 * BSD LICENSE 30 * 31 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 32 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 33 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 34 * Copyright(c) 2018 - 2019 Intel Corporation 35 * All rights reserved. 36 * 37 * Redistribution and use in source and binary forms, with or without 38 * modification, are permitted provided that the following conditions 39 * are met: 40 * 41 * * Redistributions of source code must retain the above copyright 42 * notice, this list of conditions and the following disclaimer. 43 * * Redistributions in binary form must reproduce the above copyright 44 * notice, this list of conditions and the following disclaimer in 45 * the documentation and/or other materials provided with the 46 * distribution. 47 * * Neither the name Intel Corporation nor the names of its 48 * contributors may be used to endorse or promote products derived 49 * from this software without specific prior written permission. 50 * 51 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 52 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 53 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 54 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 55 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 56 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 57 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 58 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 59 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 60 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 61 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
62 * 63 *****************************************************************************/ 64 #include <linux/kernel.h> 65 #include <linux/slab.h> 66 #include <linux/skbuff.h> 67 #include <linux/netdevice.h> 68 #include <linux/etherdevice.h> 69 #include <linux/ip.h> 70 #include <linux/if_arp.h> 71 #include <linux/time.h> 72 #include <net/mac80211.h> 73 #include <net/ieee80211_radiotap.h> 74 #include <net/tcp.h> 75 76 #include "iwl-op-mode.h" 77 #include "iwl-io.h" 78 #include "mvm.h" 79 #include "sta.h" 80 #include "time-event.h" 81 #include "iwl-eeprom-parse.h" 82 #include "iwl-phy-db.h" 83 #include "testmode.h" 84 #include "fw/error-dump.h" 85 #include "iwl-prph.h" 86 #include "iwl-nvm-parse.h" 87 88 static const struct ieee80211_iface_limit iwl_mvm_limits[] = { 89 { 90 .max = 1, 91 .types = BIT(NL80211_IFTYPE_STATION), 92 }, 93 { 94 .max = 1, 95 .types = BIT(NL80211_IFTYPE_AP) | 96 BIT(NL80211_IFTYPE_P2P_CLIENT) | 97 BIT(NL80211_IFTYPE_P2P_GO), 98 }, 99 { 100 .max = 1, 101 .types = BIT(NL80211_IFTYPE_P2P_DEVICE), 102 }, 103 }; 104 105 static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = { 106 { 107 .num_different_channels = 2, 108 .max_interfaces = 3, 109 .limits = iwl_mvm_limits, 110 .n_limits = ARRAY_SIZE(iwl_mvm_limits), 111 }, 112 }; 113 114 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING 115 /* 116 * Use the reserved field to indicate magic values. 117 * these values will only be used internally by the driver, 118 * and won't make it to the fw (reserved will be 0). 119 * BC_FILTER_MAGIC_IP - configure the val of this attribute to 120 * be the vif's ip address. in case there is not a single 121 * ip address (0, or more than 1), this attribute will 122 * be skipped. 123 * BC_FILTER_MAGIC_MAC - set the val of this attribute to 124 * the LSB bytes of the vif's mac address 125 */ 126 enum { 127 BC_FILTER_MAGIC_NONE = 0, 128 BC_FILTER_MAGIC_IP, 129 BC_FILTER_MAGIC_MAC, 130 }; 131 132 static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = { 133 { 134 /* arp */ 135 .discard = 0, 136 .frame_type = BCAST_FILTER_FRAME_TYPE_ALL, 137 .attrs = { 138 { 139 /* frame type - arp, hw type - ethernet */ 140 .offset_type = 141 BCAST_FILTER_OFFSET_PAYLOAD_START, 142 .offset = sizeof(rfc1042_header), 143 .val = cpu_to_be32(0x08060001), 144 .mask = cpu_to_be32(0xffffffff), 145 }, 146 { 147 /* arp dest ip */ 148 .offset_type = 149 BCAST_FILTER_OFFSET_PAYLOAD_START, 150 .offset = sizeof(rfc1042_header) + 2 + 151 sizeof(struct arphdr) + 152 ETH_ALEN + sizeof(__be32) + 153 ETH_ALEN, 154 .mask = cpu_to_be32(0xffffffff), 155 /* mark it as special field */ 156 .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_IP), 157 }, 158 }, 159 }, 160 { 161 /* dhcp offer bcast */ 162 .discard = 0, 163 .frame_type = BCAST_FILTER_FRAME_TYPE_IPV4, 164 .attrs = { 165 { 166 /* udp dest port - 68 (bootp client)*/ 167 .offset_type = BCAST_FILTER_OFFSET_IP_END, 168 .offset = offsetof(struct udphdr, dest), 169 .val = cpu_to_be32(0x00440000), 170 .mask = cpu_to_be32(0xffff0000), 171 }, 172 { 173 /* dhcp - lsb bytes of client hw address */ 174 .offset_type = BCAST_FILTER_OFFSET_IP_END, 175 .offset = 38, 176 .mask = cpu_to_be32(0xffffffff), 177 /* mark it as special field */ 178 .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_MAC), 179 }, 180 }, 181 }, 182 /* last filter must be empty */ 183 {}, 184 }; 185 #endif 186 187 static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = { 188 .max_peers = IWL_MVM_TOF_MAX_APS, 189 .report_ap_tsf = 1, 190 .randomize_mac_addr = 1, 191 192 .ftm = { 193 .supported = 1, 194 
.asap = 1, 195 .non_asap = 1, 196 .request_lci = 1, 197 .request_civicloc = 1, 198 .max_bursts_exponent = -1, /* all supported */ 199 .max_ftms_per_burst = 0, /* no limits */ 200 .bandwidths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | 201 BIT(NL80211_CHAN_WIDTH_20) | 202 BIT(NL80211_CHAN_WIDTH_40) | 203 BIT(NL80211_CHAN_WIDTH_80), 204 .preambles = BIT(NL80211_PREAMBLE_LEGACY) | 205 BIT(NL80211_PREAMBLE_HT) | 206 BIT(NL80211_PREAMBLE_VHT), 207 }, 208 }; 209 210 void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type) 211 { 212 if (!iwl_mvm_is_d0i3_supported(mvm)) 213 return; 214 215 IWL_DEBUG_RPM(mvm, "Take mvm reference - type %d\n", ref_type); 216 spin_lock_bh(&mvm->refs_lock); 217 mvm->refs[ref_type]++; 218 spin_unlock_bh(&mvm->refs_lock); 219 iwl_trans_ref(mvm->trans); 220 } 221 222 void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type) 223 { 224 if (!iwl_mvm_is_d0i3_supported(mvm)) 225 return; 226 227 IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type); 228 spin_lock_bh(&mvm->refs_lock); 229 if (WARN_ON(!mvm->refs[ref_type])) { 230 spin_unlock_bh(&mvm->refs_lock); 231 return; 232 } 233 mvm->refs[ref_type]--; 234 spin_unlock_bh(&mvm->refs_lock); 235 iwl_trans_unref(mvm->trans); 236 } 237 238 static void iwl_mvm_unref_all_except(struct iwl_mvm *mvm, 239 enum iwl_mvm_ref_type except_ref) 240 { 241 int i, j; 242 243 if (!iwl_mvm_is_d0i3_supported(mvm)) 244 return; 245 246 spin_lock_bh(&mvm->refs_lock); 247 for (i = 0; i < IWL_MVM_REF_COUNT; i++) { 248 if (except_ref == i || !mvm->refs[i]) 249 continue; 250 251 IWL_DEBUG_RPM(mvm, "Cleanup: remove mvm ref type %d (%d)\n", 252 i, mvm->refs[i]); 253 for (j = 0; j < mvm->refs[i]; j++) 254 iwl_trans_unref(mvm->trans); 255 mvm->refs[i] = 0; 256 } 257 spin_unlock_bh(&mvm->refs_lock); 258 } 259 260 bool iwl_mvm_ref_taken(struct iwl_mvm *mvm) 261 { 262 int i; 263 bool taken = false; 264 265 if (!iwl_mvm_is_d0i3_supported(mvm)) 266 return true; 267 268 spin_lock_bh(&mvm->refs_lock); 269 for (i = 0; i < IWL_MVM_REF_COUNT; i++) { 270 if (mvm->refs[i]) { 271 taken = true; 272 break; 273 } 274 } 275 spin_unlock_bh(&mvm->refs_lock); 276 277 return taken; 278 } 279 280 int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type) 281 { 282 iwl_mvm_ref(mvm, ref_type); 283 284 if (!wait_event_timeout(mvm->d0i3_exit_waitq, 285 !test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status), 286 HZ)) { 287 WARN_ON_ONCE(1); 288 iwl_mvm_unref(mvm, ref_type); 289 return -EIO; 290 } 291 292 return 0; 293 } 294 295 static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm) 296 { 297 int i; 298 299 memset(mvm->phy_ctxts, 0, sizeof(mvm->phy_ctxts)); 300 for (i = 0; i < NUM_PHY_CTX; i++) { 301 mvm->phy_ctxts[i].id = i; 302 mvm->phy_ctxts[i].ref = 0; 303 } 304 } 305 306 struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, 307 const char *alpha2, 308 enum iwl_mcc_source src_id, 309 bool *changed) 310 { 311 struct ieee80211_regdomain *regd = NULL; 312 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); 313 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 314 struct iwl_mcc_update_resp *resp; 315 316 IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2); 317 318 lockdep_assert_held(&mvm->mutex); 319 320 resp = iwl_mvm_update_mcc(mvm, alpha2, src_id); 321 if (IS_ERR_OR_NULL(resp)) { 322 IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n", 323 PTR_ERR_OR_ZERO(resp)); 324 goto out; 325 } 326 327 if (changed) { 328 u32 status = le32_to_cpu(resp->status); 329 330 *changed = (status == MCC_RESP_NEW_CHAN_PROFILE 
|| 331 status == MCC_RESP_ILLEGAL); 332 } 333 334 regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg, 335 __le32_to_cpu(resp->n_channels), 336 resp->channels, 337 __le16_to_cpu(resp->mcc), 338 __le16_to_cpu(resp->geo_info)); 339 /* Store the return source id */ 340 src_id = resp->source_id; 341 kfree(resp); 342 if (IS_ERR_OR_NULL(regd)) { 343 IWL_DEBUG_LAR(mvm, "Could not get parse update from FW %d\n", 344 PTR_ERR_OR_ZERO(regd)); 345 goto out; 346 } 347 348 IWL_DEBUG_LAR(mvm, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n", 349 regd->alpha2, regd->alpha2[0], regd->alpha2[1], src_id); 350 mvm->lar_regdom_set = true; 351 mvm->mcc_src = src_id; 352 353 out: 354 return regd; 355 } 356 357 void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm) 358 { 359 bool changed; 360 struct ieee80211_regdomain *regd; 361 362 if (!iwl_mvm_is_lar_supported(mvm)) 363 return; 364 365 regd = iwl_mvm_get_current_regdomain(mvm, &changed); 366 if (!IS_ERR_OR_NULL(regd)) { 367 /* only update the regulatory core if changed */ 368 if (changed) 369 regulatory_set_wiphy_regd(mvm->hw->wiphy, regd); 370 371 kfree(regd); 372 } 373 } 374 375 struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm, 376 bool *changed) 377 { 378 return iwl_mvm_get_regdomain(mvm->hw->wiphy, "ZZ", 379 iwl_mvm_is_wifi_mcc_supported(mvm) ? 380 MCC_SOURCE_GET_CURRENT : 381 MCC_SOURCE_OLD_FW, changed); 382 } 383 384 int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm) 385 { 386 enum iwl_mcc_source used_src; 387 struct ieee80211_regdomain *regd; 388 int ret; 389 bool changed; 390 const struct ieee80211_regdomain *r = 391 rtnl_dereference(mvm->hw->wiphy->regd); 392 393 if (!r) 394 return -ENOENT; 395 396 /* save the last source in case we overwrite it below */ 397 used_src = mvm->mcc_src; 398 if (iwl_mvm_is_wifi_mcc_supported(mvm)) { 399 /* Notify the firmware we support wifi location updates */ 400 regd = iwl_mvm_get_current_regdomain(mvm, NULL); 401 if (!IS_ERR_OR_NULL(regd)) 402 kfree(regd); 403 } 404 405 /* Now set our last stored MCC and source */ 406 regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, r->alpha2, used_src, 407 &changed); 408 if (IS_ERR_OR_NULL(regd)) 409 return -EIO; 410 411 /* update cfg80211 if the regdomain was changed */ 412 if (changed) 413 ret = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd); 414 else 415 ret = 0; 416 417 kfree(regd); 418 return ret; 419 } 420 421 const static u8 he_if_types_ext_capa_sta[] = { 422 [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, 423 [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT, 424 [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, 425 [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT, 426 }; 427 428 const static struct wiphy_iftype_ext_capab he_iftypes_ext_capa[] = { 429 { 430 .iftype = NL80211_IFTYPE_STATION, 431 .extended_capabilities = he_if_types_ext_capa_sta, 432 .extended_capabilities_mask = he_if_types_ext_capa_sta, 433 .extended_capabilities_len = sizeof(he_if_types_ext_capa_sta), 434 }, 435 }; 436 437 int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) 438 { 439 struct ieee80211_hw *hw = mvm->hw; 440 int num_mac, ret, i; 441 static const u32 mvm_ciphers[] = { 442 WLAN_CIPHER_SUITE_WEP40, 443 WLAN_CIPHER_SUITE_WEP104, 444 WLAN_CIPHER_SUITE_TKIP, 445 WLAN_CIPHER_SUITE_CCMP, 446 }; 447 #ifdef CONFIG_PM_SLEEP 448 bool unified = fw_has_capa(&mvm->fw->ucode_capa, 449 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); 450 #endif 451 452 /* Tell mac80211 our characteristics */ 453 ieee80211_hw_set(hw, SIGNAL_DBM); 454 ieee80211_hw_set(hw, SPECTRUM_MGMT); 455 ieee80211_hw_set(hw, 
REPORTS_TX_ACK_STATUS); 456 ieee80211_hw_set(hw, WANT_MONITOR_VIF); 457 ieee80211_hw_set(hw, SUPPORTS_PS); 458 ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS); 459 ieee80211_hw_set(hw, AMPDU_AGGREGATION); 460 ieee80211_hw_set(hw, TIMING_BEACON_ONLY); 461 ieee80211_hw_set(hw, CONNECTION_MONITOR); 462 ieee80211_hw_set(hw, CHANCTX_STA_CSA); 463 ieee80211_hw_set(hw, SUPPORT_FAST_XMIT); 464 ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS); 465 ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU); 466 ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR); 467 ieee80211_hw_set(hw, DEAUTH_NEED_MGD_TX_PREP); 468 ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW); 469 ieee80211_hw_set(hw, BUFF_MMPDU_TXQ); 470 ieee80211_hw_set(hw, STA_MMPDU_TXQ); 471 ieee80211_hw_set(hw, TX_AMSDU); 472 ieee80211_hw_set(hw, TX_FRAG_LIST); 473 474 if (iwl_mvm_has_tlc_offload(mvm)) { 475 ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW); 476 ieee80211_hw_set(hw, HAS_RATE_CONTROL); 477 } 478 479 if (iwl_mvm_has_new_rx_api(mvm)) 480 ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER); 481 482 if (fw_has_capa(&mvm->fw->ucode_capa, 483 IWL_UCODE_TLV_CAPA_STA_PM_NOTIF)) { 484 ieee80211_hw_set(hw, AP_LINK_PS); 485 } else if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) { 486 /* 487 * we absolutely need this for the new TX API since that comes 488 * with many more queues than the current code can deal with 489 * for station powersave 490 */ 491 return -EINVAL; 492 } 493 494 if (mvm->trans->num_rx_queues > 1) 495 ieee80211_hw_set(hw, USES_RSS); 496 497 if (mvm->trans->max_skb_frags) 498 hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG; 499 500 hw->queues = IEEE80211_MAX_QUEUES; 501 hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE; 502 hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC | 503 IEEE80211_RADIOTAP_MCS_HAVE_STBC; 504 hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC | 505 IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED; 506 507 hw->radiotap_timestamp.units_pos = 508 IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US | 509 IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ; 510 /* this is the case for CCK frames, it's better (only 8) for OFDM */ 511 hw->radiotap_timestamp.accuracy = 22; 512 513 if (!iwl_mvm_has_tlc_offload(mvm)) 514 hw->rate_control_algorithm = RS_NAME; 515 516 hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES; 517 hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP; 518 hw->max_tx_fragments = mvm->trans->max_skb_frags; 519 520 BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 6); 521 memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers)); 522 hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers); 523 hw->wiphy->cipher_suites = mvm->ciphers; 524 525 if (iwl_mvm_has_new_rx_api(mvm)) { 526 mvm->ciphers[hw->wiphy->n_cipher_suites] = 527 WLAN_CIPHER_SUITE_GCMP; 528 hw->wiphy->n_cipher_suites++; 529 mvm->ciphers[hw->wiphy->n_cipher_suites] = 530 WLAN_CIPHER_SUITE_GCMP_256; 531 hw->wiphy->n_cipher_suites++; 532 } 533 534 /* Enable 11w if software crypto is not enabled (as the 535 * firmware will interpret some mgmt packets, so enabling it 536 * with software crypto isn't safe). 
537 */ 538 if (!iwlwifi_mod_params.swcrypto) { 539 ieee80211_hw_set(hw, MFP_CAPABLE); 540 mvm->ciphers[hw->wiphy->n_cipher_suites] = 541 WLAN_CIPHER_SUITE_AES_CMAC; 542 hw->wiphy->n_cipher_suites++; 543 if (iwl_mvm_has_new_rx_api(mvm)) { 544 mvm->ciphers[hw->wiphy->n_cipher_suites] = 545 WLAN_CIPHER_SUITE_BIP_GMAC_128; 546 hw->wiphy->n_cipher_suites++; 547 mvm->ciphers[hw->wiphy->n_cipher_suites] = 548 WLAN_CIPHER_SUITE_BIP_GMAC_256; 549 hw->wiphy->n_cipher_suites++; 550 } 551 } 552 553 /* currently FW API supports only one optional cipher scheme */ 554 if (mvm->fw->cs[0].cipher) { 555 const struct iwl_fw_cipher_scheme *fwcs = &mvm->fw->cs[0]; 556 struct ieee80211_cipher_scheme *cs = &mvm->cs[0]; 557 558 mvm->hw->n_cipher_schemes = 1; 559 560 cs->cipher = le32_to_cpu(fwcs->cipher); 561 cs->iftype = BIT(NL80211_IFTYPE_STATION); 562 cs->hdr_len = fwcs->hdr_len; 563 cs->pn_len = fwcs->pn_len; 564 cs->pn_off = fwcs->pn_off; 565 cs->key_idx_off = fwcs->key_idx_off; 566 cs->key_idx_mask = fwcs->key_idx_mask; 567 cs->key_idx_shift = fwcs->key_idx_shift; 568 cs->mic_len = fwcs->mic_len; 569 570 mvm->hw->cipher_schemes = mvm->cs; 571 mvm->ciphers[hw->wiphy->n_cipher_suites] = cs->cipher; 572 hw->wiphy->n_cipher_suites++; 573 } 574 575 if (fw_has_capa(&mvm->fw->ucode_capa, 576 IWL_UCODE_TLV_CAPA_FTM_CALIBRATED)) { 577 wiphy_ext_feature_set(hw->wiphy, 578 NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER); 579 hw->wiphy->pmsr_capa = &iwl_mvm_pmsr_capa; 580 } 581 582 ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS); 583 hw->wiphy->features |= 584 NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR | 585 NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR | 586 NL80211_FEATURE_ND_RANDOM_MAC_ADDR; 587 588 hw->sta_data_size = sizeof(struct iwl_mvm_sta); 589 hw->vif_data_size = sizeof(struct iwl_mvm_vif); 590 hw->chanctx_data_size = sizeof(u16); 591 hw->txq_data_size = sizeof(struct iwl_mvm_txq); 592 593 hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | 594 BIT(NL80211_IFTYPE_P2P_CLIENT) | 595 BIT(NL80211_IFTYPE_AP) | 596 BIT(NL80211_IFTYPE_P2P_GO) | 597 BIT(NL80211_IFTYPE_P2P_DEVICE) | 598 BIT(NL80211_IFTYPE_ADHOC); 599 600 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; 601 wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS); 602 hw->wiphy->features |= NL80211_FEATURE_HT_IBSS; 603 604 hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR; 605 if (iwl_mvm_is_lar_supported(mvm)) 606 hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED; 607 else 608 hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG | 609 REGULATORY_DISABLE_BEACON_HINTS; 610 611 hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; 612 hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; 613 614 hw->wiphy->iface_combinations = iwl_mvm_iface_combinations; 615 hw->wiphy->n_iface_combinations = 616 ARRAY_SIZE(iwl_mvm_iface_combinations); 617 618 hw->wiphy->max_remain_on_channel_duration = 10000; 619 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL; 620 621 /* Extract MAC address */ 622 memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN); 623 hw->wiphy->addresses = mvm->addresses; 624 hw->wiphy->n_addresses = 1; 625 626 /* Extract additional MAC addresses if available */ 627 num_mac = (mvm->nvm_data->n_hw_addrs > 1) ? 
628 min(IWL_MVM_MAX_ADDRESSES, mvm->nvm_data->n_hw_addrs) : 1; 629 630 for (i = 1; i < num_mac; i++) { 631 memcpy(mvm->addresses[i].addr, mvm->addresses[i-1].addr, 632 ETH_ALEN); 633 mvm->addresses[i].addr[5]++; 634 hw->wiphy->n_addresses++; 635 } 636 637 iwl_mvm_reset_phy_ctxts(mvm); 638 639 hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm); 640 641 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX; 642 643 BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK); 644 BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) || 645 IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK)); 646 647 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) 648 mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS; 649 else 650 mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS; 651 652 if (mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels) 653 hw->wiphy->bands[NL80211_BAND_2GHZ] = 654 &mvm->nvm_data->bands[NL80211_BAND_2GHZ]; 655 if (mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels) { 656 hw->wiphy->bands[NL80211_BAND_5GHZ] = 657 &mvm->nvm_data->bands[NL80211_BAND_5GHZ]; 658 659 if (fw_has_capa(&mvm->fw->ucode_capa, 660 IWL_UCODE_TLV_CAPA_BEAMFORMER) && 661 fw_has_api(&mvm->fw->ucode_capa, 662 IWL_UCODE_TLV_API_LQ_SS_PARAMS)) 663 hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap.cap |= 664 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE; 665 } 666 667 hw->wiphy->hw_version = mvm->trans->hw_id; 668 669 if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM) 670 hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; 671 else 672 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; 673 674 hw->wiphy->max_sched_scan_reqs = 1; 675 hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX; 676 hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES; 677 /* we create the 802.11 header and zero length SSID IE. */ 678 hw->wiphy->max_sched_scan_ie_len = 679 SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2; 680 hw->wiphy->max_sched_scan_plans = IWL_MAX_SCHED_SCAN_PLANS; 681 hw->wiphy->max_sched_scan_plan_interval = U16_MAX; 682 683 /* 684 * the firmware uses u8 for num of iterations, but 0xff is saved for 685 * infinite loop, so the maximum number of iterations is actually 254. 
686 */ 687 hw->wiphy->max_sched_scan_plan_iterations = 254; 688 689 hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN | 690 NL80211_FEATURE_LOW_PRIORITY_SCAN | 691 NL80211_FEATURE_P2P_GO_OPPPS | 692 NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE | 693 NL80211_FEATURE_DYNAMIC_SMPS | 694 NL80211_FEATURE_STATIC_SMPS | 695 NL80211_FEATURE_SUPPORTS_WMM_ADMISSION; 696 697 if (fw_has_capa(&mvm->fw->ucode_capa, 698 IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT)) 699 hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION; 700 if (fw_has_capa(&mvm->fw->ucode_capa, 701 IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT)) 702 hw->wiphy->features |= NL80211_FEATURE_QUIET; 703 704 if (fw_has_capa(&mvm->fw->ucode_capa, 705 IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) 706 hw->wiphy->features |= 707 NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES; 708 709 if (fw_has_capa(&mvm->fw->ucode_capa, 710 IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)) 711 hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES; 712 713 if (fw_has_api(&mvm->fw->ucode_capa, 714 IWL_UCODE_TLV_API_SCAN_TSF_REPORT)) { 715 wiphy_ext_feature_set(hw->wiphy, 716 NL80211_EXT_FEATURE_SCAN_START_TIME); 717 wiphy_ext_feature_set(hw->wiphy, 718 NL80211_EXT_FEATURE_BSS_PARENT_TSF); 719 wiphy_ext_feature_set(hw->wiphy, 720 NL80211_EXT_FEATURE_SET_SCAN_DWELL); 721 } 722 723 if (iwl_mvm_is_oce_supported(mvm)) { 724 wiphy_ext_feature_set(hw->wiphy, 725 NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP); 726 wiphy_ext_feature_set(hw->wiphy, 727 NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME); 728 wiphy_ext_feature_set(hw->wiphy, 729 NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION); 730 wiphy_ext_feature_set(hw->wiphy, 731 NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE); 732 } 733 734 if (mvm->nvm_data->sku_cap_11ax_enable && 735 !iwlwifi_mod_params.disable_11ax) { 736 hw->wiphy->iftype_ext_capab = he_iftypes_ext_capa; 737 hw->wiphy->num_iftype_ext_capab = 738 ARRAY_SIZE(he_iftypes_ext_capa); 739 740 ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID); 741 ieee80211_hw_set(hw, SUPPORTS_ONLY_HE_MULTI_BSSID); 742 } 743 744 mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; 745 746 #ifdef CONFIG_PM_SLEEP 747 if (iwl_mvm_is_d0i3_supported(mvm) && 748 device_can_wakeup(mvm->trans->dev)) { 749 mvm->wowlan.flags = WIPHY_WOWLAN_ANY; 750 hw->wiphy->wowlan = &mvm->wowlan; 751 } 752 753 if ((unified || mvm->fw->img[IWL_UCODE_WOWLAN].num_sec) && 754 mvm->trans->ops->d3_suspend && 755 mvm->trans->ops->d3_resume && 756 device_can_wakeup(mvm->trans->dev)) { 757 mvm->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT | 758 WIPHY_WOWLAN_DISCONNECT | 759 WIPHY_WOWLAN_EAP_IDENTITY_REQ | 760 WIPHY_WOWLAN_RFKILL_RELEASE | 761 WIPHY_WOWLAN_NET_DETECT; 762 if (!iwlwifi_mod_params.swcrypto) 763 mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | 764 WIPHY_WOWLAN_GTK_REKEY_FAILURE | 765 WIPHY_WOWLAN_4WAY_HANDSHAKE; 766 767 mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS; 768 mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN; 769 mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN; 770 mvm->wowlan.max_nd_match_sets = IWL_SCAN_MAX_PROFILES; 771 hw->wiphy->wowlan = &mvm->wowlan; 772 } 773 #endif 774 775 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING 776 /* assign default bcast filtering configuration */ 777 mvm->bcast_filters = iwl_mvm_default_bcast_filters; 778 #endif 779 780 ret = iwl_mvm_leds_init(mvm); 781 if (ret) 782 return ret; 783 784 if (fw_has_capa(&mvm->fw->ucode_capa, 785 IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) { 786 IWL_DEBUG_TDLS(mvm, "TDLS supported\n"); 787 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; 
788 ieee80211_hw_set(hw, TDLS_WIDER_BW); 789 } 790 791 if (fw_has_capa(&mvm->fw->ucode_capa, 792 IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) { 793 IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n"); 794 hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH; 795 } 796 797 hw->netdev_features |= mvm->cfg->features; 798 if (!iwl_mvm_is_csum_supported(mvm)) { 799 hw->netdev_features &= ~(IWL_TX_CSUM_NETIF_FLAGS | 800 NETIF_F_RXCSUM); 801 /* We may support SW TX CSUM */ 802 if (IWL_MVM_SW_TX_CSUM_OFFLOAD) 803 hw->netdev_features |= IWL_TX_CSUM_NETIF_FLAGS; 804 } 805 806 if (mvm->cfg->vht_mu_mimo_supported) 807 wiphy_ext_feature_set(hw->wiphy, 808 NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER); 809 810 ret = ieee80211_register_hw(mvm->hw); 811 if (ret) { 812 iwl_mvm_leds_exit(mvm); 813 } 814 815 return ret; 816 } 817 818 static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm, 819 struct ieee80211_sta *sta, 820 struct sk_buff *skb) 821 { 822 struct iwl_mvm_sta *mvmsta; 823 bool defer = false; 824 825 /* 826 * double check the IN_D0I3 flag both before and after 827 * taking the spinlock, in order to prevent taking 828 * the spinlock when not needed. 829 */ 830 if (likely(!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))) 831 return false; 832 833 spin_lock(&mvm->d0i3_tx_lock); 834 /* 835 * testing the flag again ensures the skb dequeue 836 * loop (on d0i3 exit) hasn't run yet. 837 */ 838 if (!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) 839 goto out; 840 841 mvmsta = iwl_mvm_sta_from_mac80211(sta); 842 if (mvmsta->sta_id == IWL_MVM_INVALID_STA || 843 mvmsta->sta_id != mvm->d0i3_ap_sta_id) 844 goto out; 845 846 __skb_queue_tail(&mvm->d0i3_tx, skb); 847 848 /* trigger wakeup */ 849 iwl_mvm_ref(mvm, IWL_MVM_REF_TX); 850 iwl_mvm_unref(mvm, IWL_MVM_REF_TX); 851 852 defer = true; 853 out: 854 spin_unlock(&mvm->d0i3_tx_lock); 855 return defer; 856 } 857 858 static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, 859 struct ieee80211_tx_control *control, 860 struct sk_buff *skb) 861 { 862 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 863 struct ieee80211_sta *sta = control->sta; 864 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 865 struct ieee80211_hdr *hdr = (void *)skb->data; 866 bool offchannel = IEEE80211_SKB_CB(skb)->flags & 867 IEEE80211_TX_CTL_TX_OFFCHAN; 868 869 if (iwl_mvm_is_radio_killed(mvm)) { 870 IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n"); 871 goto drop; 872 } 873 874 if (offchannel && 875 !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) && 876 !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) 877 goto drop; 878 879 /* treat non-bufferable MMPDUs on AP interfaces as broadcast */ 880 if ((info->control.vif->type == NL80211_IFTYPE_AP || 881 info->control.vif->type == NL80211_IFTYPE_ADHOC) && 882 ieee80211_is_mgmt(hdr->frame_control) && 883 !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) 884 sta = NULL; 885 886 /* If there is no sta, and it's not offchannel - send through AP */ 887 if (!sta && info->control.vif->type == NL80211_IFTYPE_STATION && 888 !offchannel) { 889 struct iwl_mvm_vif *mvmvif = 890 iwl_mvm_vif_from_mac80211(info->control.vif); 891 u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id); 892 893 if (ap_sta_id < IWL_MVM_STATION_COUNT) { 894 /* mac80211 holds rcu read lock */ 895 sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]); 896 if (IS_ERR_OR_NULL(sta)) 897 goto drop; 898 } 899 } 900 901 if (sta) { 902 if (iwl_mvm_defer_tx(mvm, sta, skb)) 903 return; 904 if (iwl_mvm_tx_skb(mvm, skb, sta)) 905 goto drop; 906 return; 907 } 908 909 if (iwl_mvm_tx_skb_non_sta(mvm, 
skb)) 910 goto drop; 911 return; 912 drop: 913 ieee80211_free_txskb(hw, skb); 914 } 915 916 void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq) 917 { 918 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 919 struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq); 920 struct sk_buff *skb = NULL; 921 922 /* 923 * No need for threads to be pending here, they can leave the first 924 * taker all the work. 925 * 926 * mvmtxq->tx_request logic: 927 * 928 * If 0, no one is currently TXing, set to 1 to indicate current thread 929 * will now start TX and other threads should quit. 930 * 931 * If 1, another thread is currently TXing, set to 2 to indicate to 932 * that thread that there was another request. Since that request may 933 * have raced with the check whether the queue is empty, the TXing 934 * thread should check the queue's status one more time before leaving. 935 * This check is done in order to not leave any TX hanging in the queue 936 * until the next TX invocation (which may not even happen). 937 * 938 * If 2, another thread is currently TXing, and it will already double 939 * check the queue, so do nothing. 940 */ 941 if (atomic_fetch_add_unless(&mvmtxq->tx_request, 1, 2)) 942 return; 943 944 rcu_read_lock(); 945 do { 946 while (likely(!mvmtxq->stopped && 947 (mvm->trans->system_pm_mode == 948 IWL_PLAT_PM_MODE_DISABLED))) { 949 skb = ieee80211_tx_dequeue(hw, txq); 950 951 if (!skb) { 952 if (txq->sta) 953 IWL_DEBUG_TX(mvm, 954 "TXQ of sta %pM tid %d is now empty\n", 955 txq->sta->addr, 956 txq->tid); 957 break; 958 } 959 960 if (!txq->sta) 961 iwl_mvm_tx_skb_non_sta(mvm, skb); 962 else 963 iwl_mvm_tx_skb(mvm, skb, txq->sta); 964 } 965 } while (atomic_dec_return(&mvmtxq->tx_request)); 966 rcu_read_unlock(); 967 } 968 969 static void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw *hw, 970 struct ieee80211_txq *txq) 971 { 972 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 973 struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq); 974 975 /* 976 * Please note that racing is handled very carefully here: 977 * mvmtxq->txq_id is updated during allocation, and mvmtxq->list is 978 * deleted afterwards. 979 * This means that if: 980 * mvmtxq->txq_id != INVALID_QUEUE && list_empty(&mvmtxq->list): 981 * queue is allocated and we can TX. 982 * mvmtxq->txq_id != INVALID_QUEUE && !list_empty(&mvmtxq->list): 983 * a race, should defer the frame. 984 * mvmtxq->txq_id == INVALID_QUEUE && list_empty(&mvmtxq->list): 985 * need to allocate the queue and defer the frame. 986 * mvmtxq->txq_id == INVALID_QUEUE && !list_empty(&mvmtxq->list): 987 * queue is already scheduled for allocation, no need to allocate, 988 * should defer the frame. 989 */ 990 991 /* If the queue is allocated TX and return. */ 992 if (!txq->sta || mvmtxq->txq_id != IWL_MVM_INVALID_QUEUE) { 993 /* 994 * Check that list is empty to avoid a race where txq_id is 995 * already updated, but the queue allocation work wasn't 996 * finished 997 */ 998 if (unlikely(txq->sta && !list_empty(&mvmtxq->list))) 999 return; 1000 1001 iwl_mvm_mac_itxq_xmit(hw, txq); 1002 return; 1003 } 1004 1005 /* The list is being deleted only after the queue is fully allocated. */ 1006 if (!list_empty(&mvmtxq->list)) 1007 return; 1008 1009 list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs); 1010 schedule_work(&mvm->add_stream_wk); 1011 } 1012 1013 #define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) 
\ 1014 do { \ 1015 if (!(le16_to_cpu(_tid_bm) & BIT(_tid))) \ 1016 break; \ 1017 iwl_fw_dbg_collect_trig(&(_mvm)->fwrt, _trig, _fmt); \ 1018 } while (0) 1019 1020 static void 1021 iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 1022 struct ieee80211_sta *sta, u16 tid, u16 rx_ba_ssn, 1023 enum ieee80211_ampdu_mlme_action action) 1024 { 1025 struct iwl_fw_dbg_trigger_tlv *trig; 1026 struct iwl_fw_dbg_trigger_ba *ba_trig; 1027 1028 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), 1029 FW_DBG_TRIGGER_BA); 1030 if (!trig) 1031 return; 1032 1033 ba_trig = (void *)trig->data; 1034 1035 switch (action) { 1036 case IEEE80211_AMPDU_TX_OPERATIONAL: { 1037 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 1038 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; 1039 1040 CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_start, tid, 1041 "TX AGG START: MAC %pM tid %d ssn %d\n", 1042 sta->addr, tid, tid_data->ssn); 1043 break; 1044 } 1045 case IEEE80211_AMPDU_TX_STOP_CONT: 1046 CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_stop, tid, 1047 "TX AGG STOP: MAC %pM tid %d\n", 1048 sta->addr, tid); 1049 break; 1050 case IEEE80211_AMPDU_RX_START: 1051 CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_start, tid, 1052 "RX AGG START: MAC %pM tid %d ssn %d\n", 1053 sta->addr, tid, rx_ba_ssn); 1054 break; 1055 case IEEE80211_AMPDU_RX_STOP: 1056 CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_stop, tid, 1057 "RX AGG STOP: MAC %pM tid %d\n", 1058 sta->addr, tid); 1059 break; 1060 default: 1061 break; 1062 } 1063 } 1064 1065 static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, 1066 struct ieee80211_vif *vif, 1067 struct ieee80211_ampdu_params *params) 1068 { 1069 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1070 int ret; 1071 bool tx_agg_ref = false; 1072 struct ieee80211_sta *sta = params->sta; 1073 enum ieee80211_ampdu_mlme_action action = params->action; 1074 u16 tid = params->tid; 1075 u16 *ssn = ¶ms->ssn; 1076 u16 buf_size = params->buf_size; 1077 bool amsdu = params->amsdu; 1078 u16 timeout = params->timeout; 1079 1080 IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n", 1081 sta->addr, tid, action); 1082 1083 if (!(mvm->nvm_data->sku_cap_11n_enable)) 1084 return -EACCES; 1085 1086 /* return from D0i3 before starting a new Tx aggregation */ 1087 switch (action) { 1088 case IEEE80211_AMPDU_TX_START: 1089 case IEEE80211_AMPDU_TX_STOP_CONT: 1090 case IEEE80211_AMPDU_TX_STOP_FLUSH: 1091 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: 1092 case IEEE80211_AMPDU_TX_OPERATIONAL: 1093 /* 1094 * for tx start, wait synchronously until D0i3 exit to 1095 * get the correct sequence number for the tid. 1096 * additionally, some other ampdu actions use direct 1097 * target access, which is not handled automatically 1098 * by the trans layer (unlike commands), so wait for 1099 * d0i3 exit in these cases as well. 
1100 */ 1101 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_TX_AGG); 1102 if (ret) 1103 return ret; 1104 1105 tx_agg_ref = true; 1106 break; 1107 default: 1108 break; 1109 } 1110 1111 mutex_lock(&mvm->mutex); 1112 1113 switch (action) { 1114 case IEEE80211_AMPDU_RX_START: 1115 if (iwl_mvm_vif_from_mac80211(vif)->ap_sta_id == 1116 iwl_mvm_sta_from_mac80211(sta)->sta_id) { 1117 struct iwl_mvm_vif *mvmvif; 1118 u16 macid = iwl_mvm_vif_from_mac80211(vif)->id; 1119 struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[macid]; 1120 1121 mdata->opened_rx_ba_sessions = true; 1122 mvmvif = iwl_mvm_vif_from_mac80211(vif); 1123 cancel_delayed_work(&mvmvif->uapsd_nonagg_detected_wk); 1124 } 1125 if (!iwl_enable_rx_ampdu()) { 1126 ret = -EINVAL; 1127 break; 1128 } 1129 ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true, buf_size, 1130 timeout); 1131 break; 1132 case IEEE80211_AMPDU_RX_STOP: 1133 ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, buf_size, 1134 timeout); 1135 break; 1136 case IEEE80211_AMPDU_TX_START: 1137 if (!iwl_enable_tx_ampdu()) { 1138 ret = -EINVAL; 1139 break; 1140 } 1141 ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn); 1142 break; 1143 case IEEE80211_AMPDU_TX_STOP_CONT: 1144 ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid); 1145 break; 1146 case IEEE80211_AMPDU_TX_STOP_FLUSH: 1147 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: 1148 ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid); 1149 break; 1150 case IEEE80211_AMPDU_TX_OPERATIONAL: 1151 ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid, 1152 buf_size, amsdu); 1153 break; 1154 default: 1155 WARN_ON_ONCE(1); 1156 ret = -EINVAL; 1157 break; 1158 } 1159 1160 if (!ret) { 1161 u16 rx_ba_ssn = 0; 1162 1163 if (action == IEEE80211_AMPDU_RX_START) 1164 rx_ba_ssn = *ssn; 1165 1166 iwl_mvm_ampdu_check_trigger(mvm, vif, sta, tid, 1167 rx_ba_ssn, action); 1168 } 1169 mutex_unlock(&mvm->mutex); 1170 1171 /* 1172 * If the tid is marked as started, we won't use it for offloaded 1173 * traffic on the next D0i3 entry. It's safe to unref. 
1174 */ 1175 if (tx_agg_ref) 1176 iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG); 1177 1178 return ret; 1179 } 1180 1181 static void iwl_mvm_cleanup_iterator(void *data, u8 *mac, 1182 struct ieee80211_vif *vif) 1183 { 1184 struct iwl_mvm *mvm = data; 1185 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1186 1187 mvmvif->uploaded = false; 1188 mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; 1189 1190 spin_lock_bh(&mvm->time_event_lock); 1191 iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data); 1192 spin_unlock_bh(&mvm->time_event_lock); 1193 1194 mvmvif->phy_ctxt = NULL; 1195 memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data)); 1196 memset(&mvmvif->probe_resp_data, 0, sizeof(mvmvif->probe_resp_data)); 1197 } 1198 1199 static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) 1200 { 1201 /* cleanup all stale references (scan, roc), but keep the 1202 * ucode_down ref until reconfig is complete 1203 */ 1204 iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN); 1205 1206 iwl_mvm_stop_device(mvm); 1207 1208 mvm->cur_aid = 0; 1209 1210 mvm->scan_status = 0; 1211 mvm->ps_disabled = false; 1212 mvm->calibrating = false; 1213 1214 /* just in case one was running */ 1215 iwl_mvm_cleanup_roc_te(mvm); 1216 ieee80211_remain_on_channel_expired(mvm->hw); 1217 1218 iwl_mvm_ftm_restart(mvm); 1219 1220 /* 1221 * cleanup all interfaces, even inactive ones, as some might have 1222 * gone down during the HW restart 1223 */ 1224 ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm); 1225 1226 mvm->p2p_device_vif = NULL; 1227 mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA; 1228 1229 iwl_mvm_reset_phy_ctxts(mvm); 1230 memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table)); 1231 memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif)); 1232 memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd)); 1233 1234 ieee80211_wake_queues(mvm->hw); 1235 1236 /* clear any stale d0i3 state */ 1237 clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status); 1238 1239 mvm->vif_count = 0; 1240 mvm->rx_ba_sessions = 0; 1241 mvm->fwrt.dump.conf = FW_DBG_INVALID; 1242 mvm->monitor_on = false; 1243 1244 /* keep statistics ticking */ 1245 iwl_mvm_accu_radio_stats(mvm); 1246 } 1247 1248 int __iwl_mvm_mac_start(struct iwl_mvm *mvm) 1249 { 1250 int ret; 1251 1252 lockdep_assert_held(&mvm->mutex); 1253 1254 if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) { 1255 /* 1256 * Now convert the HW_RESTART_REQUESTED flag to IN_HW_RESTART 1257 * so later code will - from now on - see that we're doing it. 1258 */ 1259 set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); 1260 clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status); 1261 /* Clean up some internal and mac80211 state on restart */ 1262 iwl_mvm_restart_cleanup(mvm); 1263 } else { 1264 /* Hold the reference to prevent runtime suspend while 1265 * the start procedure runs. It's a bit confusing 1266 * that the UCODE_DOWN reference is taken, but it just 1267 * means "UCODE is not UP yet". ( TODO: rename this 1268 * reference). 1269 */ 1270 iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); 1271 } 1272 ret = iwl_mvm_up(mvm); 1273 1274 iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_POST_INIT); 1275 1276 if (ret && test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { 1277 /* Something went wrong - we need to finish some cleanup 1278 * that normally iwl_mvm_mac_restart_complete() below 1279 * would do. 
1280 */ 1281 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); 1282 #ifdef CONFIG_PM 1283 iwl_mvm_d0i3_enable_tx(mvm, NULL); 1284 #endif 1285 } 1286 1287 return ret; 1288 } 1289 1290 static int iwl_mvm_mac_start(struct ieee80211_hw *hw) 1291 { 1292 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1293 int ret; 1294 1295 /* Some hw restart cleanups must not hold the mutex */ 1296 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { 1297 /* 1298 * Make sure we are out of d0i3. This is needed 1299 * to make sure the reference accounting is correct 1300 * (and there is no stale d0i3_exit_work). 1301 */ 1302 wait_event_timeout(mvm->d0i3_exit_waitq, 1303 !test_bit(IWL_MVM_STATUS_IN_D0I3, 1304 &mvm->status), 1305 HZ); 1306 } 1307 1308 mutex_lock(&mvm->mutex); 1309 ret = __iwl_mvm_mac_start(mvm); 1310 mutex_unlock(&mvm->mutex); 1311 1312 return ret; 1313 } 1314 1315 static void iwl_mvm_restart_complete(struct iwl_mvm *mvm) 1316 { 1317 int ret; 1318 1319 mutex_lock(&mvm->mutex); 1320 1321 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); 1322 #ifdef CONFIG_PM 1323 iwl_mvm_d0i3_enable_tx(mvm, NULL); 1324 #endif 1325 ret = iwl_mvm_update_quotas(mvm, true, NULL); 1326 if (ret) 1327 IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n", 1328 ret); 1329 1330 /* allow transport/FW low power modes */ 1331 iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN); 1332 1333 iwl_mvm_send_recovery_cmd(mvm, ERROR_RECOVERY_END_OF_RECOVERY); 1334 1335 /* 1336 * If we have TDLS peers, remove them. We don't know the last seqno/PN 1337 * of packets the FW sent out, so we must reconnect. 1338 */ 1339 iwl_mvm_teardown_tdls_peers(mvm); 1340 1341 mutex_unlock(&mvm->mutex); 1342 } 1343 1344 static void iwl_mvm_resume_complete(struct iwl_mvm *mvm) 1345 { 1346 if (iwl_mvm_is_d0i3_supported(mvm) && 1347 iwl_mvm_enter_d0i3_on_suspend(mvm)) 1348 WARN_ONCE(!wait_event_timeout(mvm->d0i3_exit_waitq, 1349 !test_bit(IWL_MVM_STATUS_IN_D0I3, 1350 &mvm->status), 1351 HZ), 1352 "D0i3 exit on resume timed out\n"); 1353 } 1354 1355 static void 1356 iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw, 1357 enum ieee80211_reconfig_type reconfig_type) 1358 { 1359 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1360 1361 switch (reconfig_type) { 1362 case IEEE80211_RECONFIG_TYPE_RESTART: 1363 iwl_mvm_restart_complete(mvm); 1364 break; 1365 case IEEE80211_RECONFIG_TYPE_SUSPEND: 1366 iwl_mvm_resume_complete(mvm); 1367 break; 1368 } 1369 } 1370 1371 void __iwl_mvm_mac_stop(struct iwl_mvm *mvm) 1372 { 1373 lockdep_assert_held(&mvm->mutex); 1374 1375 /* firmware counters are obviously reset now, but we shouldn't 1376 * partially track so also clear the fw_reset_accu counters. 1377 */ 1378 memset(&mvm->accu_radio_stats, 0, sizeof(mvm->accu_radio_stats)); 1379 1380 /* async_handlers_wk is now blocked */ 1381 1382 /* 1383 * The work item could be running or queued if the 1384 * ROC time event stops just as we get here. 1385 */ 1386 flush_work(&mvm->roc_done_wk); 1387 1388 iwl_mvm_stop_device(mvm); 1389 1390 iwl_mvm_async_handlers_purge(mvm); 1391 /* async_handlers_list is empty and will stay empty: HW is stopped */ 1392 1393 /* the fw is stopped, the aux sta is dead: clean up driver state */ 1394 iwl_mvm_del_aux_sta(mvm); 1395 1396 /* 1397 * Clear IN_HW_RESTART and HW_RESTART_REQUESTED flag when stopping the 1398 * hw (as restart_complete() won't be called in this case) and mac80211 1399 * won't execute the restart. 1400 * But make sure to cleanup interfaces that have gone down before/during 1401 * HW restart was requested. 
1402 */ 1403 if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) || 1404 test_and_clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, 1405 &mvm->status)) 1406 ieee80211_iterate_interfaces(mvm->hw, 0, 1407 iwl_mvm_cleanup_iterator, mvm); 1408 1409 /* We shouldn't have any UIDs still set. Loop over all the UIDs to 1410 * make sure there's nothing left there and warn if any is found. 1411 */ 1412 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { 1413 int i; 1414 1415 for (i = 0; i < mvm->max_scans; i++) { 1416 if (WARN_ONCE(mvm->scan_uid_status[i], 1417 "UMAC scan UID %d status was not cleaned\n", 1418 i)) 1419 mvm->scan_uid_status[i] = 0; 1420 } 1421 } 1422 } 1423 1424 static void iwl_mvm_mac_stop(struct ieee80211_hw *hw) 1425 { 1426 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1427 1428 flush_work(&mvm->d0i3_exit_work); 1429 flush_work(&mvm->async_handlers_wk); 1430 flush_work(&mvm->add_stream_wk); 1431 1432 /* 1433 * Lock and clear the firmware running bit here already, so that 1434 * new commands coming in elsewhere, e.g. from debugfs, will not 1435 * be able to proceed. This is important here because one of those 1436 * debugfs files causes the firmware dump to be triggered, and if we 1437 * don't stop debugfs accesses before canceling that it could be 1438 * retriggered after we flush it but before we've cleared the bit. 1439 */ 1440 clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); 1441 1442 iwl_fw_cancel_dump(&mvm->fwrt); 1443 cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork); 1444 cancel_delayed_work_sync(&mvm->scan_timeout_dwork); 1445 iwl_fw_free_dump_desc(&mvm->fwrt); 1446 1447 mutex_lock(&mvm->mutex); 1448 __iwl_mvm_mac_stop(mvm); 1449 mutex_unlock(&mvm->mutex); 1450 1451 /* 1452 * The worker might have been waiting for the mutex, let it run and 1453 * discover that its list is now empty. 
1454 */ 1455 cancel_work_sync(&mvm->async_handlers_wk); 1456 } 1457 1458 static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm) 1459 { 1460 u16 i; 1461 1462 lockdep_assert_held(&mvm->mutex); 1463 1464 for (i = 0; i < NUM_PHY_CTX; i++) 1465 if (!mvm->phy_ctxts[i].ref) 1466 return &mvm->phy_ctxts[i]; 1467 1468 IWL_ERR(mvm, "No available PHY context\n"); 1469 return NULL; 1470 } 1471 1472 static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 1473 s16 tx_power) 1474 { 1475 int len; 1476 union { 1477 struct iwl_dev_tx_power_cmd v5; 1478 struct iwl_dev_tx_power_cmd_v4 v4; 1479 } cmd = { 1480 .v5.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC), 1481 .v5.v3.mac_context_id = 1482 cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id), 1483 .v5.v3.pwr_restriction = cpu_to_le16(8 * tx_power), 1484 }; 1485 1486 if (tx_power == IWL_DEFAULT_MAX_TX_POWER) 1487 cmd.v5.v3.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER); 1488 1489 if (fw_has_api(&mvm->fw->ucode_capa, 1490 IWL_UCODE_TLV_API_REDUCE_TX_POWER)) 1491 len = sizeof(cmd.v5); 1492 else if (fw_has_capa(&mvm->fw->ucode_capa, 1493 IWL_UCODE_TLV_CAPA_TX_POWER_ACK)) 1494 len = sizeof(cmd.v4); 1495 else 1496 len = sizeof(cmd.v4.v3); 1497 1498 return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd); 1499 } 1500 1501 static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw, 1502 struct ieee80211_vif *vif) 1503 { 1504 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1505 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1506 int ret; 1507 1508 mutex_lock(&mvm->mutex); 1509 1510 if (mvmvif->csa_failed) { 1511 mvmvif->csa_failed = false; 1512 ret = -EIO; 1513 goto out_unlock; 1514 } 1515 1516 if (vif->type == NL80211_IFTYPE_STATION) { 1517 struct iwl_mvm_sta *mvmsta; 1518 1519 mvmvif->csa_bcn_pending = false; 1520 mvmsta = iwl_mvm_sta_from_staid_protected(mvm, 1521 mvmvif->ap_sta_id); 1522 1523 if (WARN_ON(!mvmsta)) { 1524 ret = -EIO; 1525 goto out_unlock; 1526 } 1527 1528 iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false); 1529 1530 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 1531 1532 ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0); 1533 if (ret) 1534 goto out_unlock; 1535 1536 iwl_mvm_stop_session_protection(mvm, vif); 1537 } 1538 1539 mvmvif->ps_disabled = false; 1540 1541 ret = iwl_mvm_power_update_ps(mvm); 1542 1543 out_unlock: 1544 mutex_unlock(&mvm->mutex); 1545 1546 return ret; 1547 } 1548 1549 static void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw, 1550 struct ieee80211_vif *vif) 1551 { 1552 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1553 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1554 struct iwl_chan_switch_te_cmd cmd = { 1555 .mac_id = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, 1556 mvmvif->color)), 1557 .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE), 1558 }; 1559 1560 IWL_DEBUG_MAC80211(mvm, "Abort CSA on mac %d\n", mvmvif->id); 1561 1562 mutex_lock(&mvm->mutex); 1563 WARN_ON(iwl_mvm_send_cmd_pdu(mvm, 1564 WIDE_ID(MAC_CONF_GROUP, 1565 CHANNEL_SWITCH_TIME_EVENT_CMD), 1566 0, sizeof(cmd), &cmd)); 1567 mutex_unlock(&mvm->mutex); 1568 1569 WARN_ON(iwl_mvm_post_channel_switch(hw, vif)); 1570 } 1571 1572 static void iwl_mvm_channel_switch_disconnect_wk(struct work_struct *wk) 1573 { 1574 struct iwl_mvm *mvm; 1575 struct iwl_mvm_vif *mvmvif; 1576 struct ieee80211_vif *vif; 1577 1578 mvmvif = container_of(wk, struct iwl_mvm_vif, csa_work.work); 1579 vif = container_of((void *)mvmvif, struct ieee80211_vif, drv_priv); 1580 mvm = mvmvif->mvm; 
1581 1582 iwl_mvm_abort_channel_switch(mvm->hw, vif); 1583 ieee80211_chswitch_done(vif, false); 1584 } 1585 1586 static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, 1587 struct ieee80211_vif *vif) 1588 { 1589 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1590 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1591 int ret; 1592 1593 mvmvif->mvm = mvm; 1594 RCU_INIT_POINTER(mvmvif->probe_resp_data, NULL); 1595 1596 /* 1597 * make sure D0i3 exit is completed, otherwise a target access 1598 * during tx queue configuration could be done when still in 1599 * D0i3 state. 1600 */ 1601 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_ADD_IF); 1602 if (ret) 1603 return ret; 1604 1605 /* 1606 * Not much to do here. The stack will not allow interface 1607 * types or combinations that we didn't advertise, so we 1608 * don't really have to check the types. 1609 */ 1610 1611 mutex_lock(&mvm->mutex); 1612 1613 /* make sure that beacon statistics don't go backwards with FW reset */ 1614 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) 1615 mvmvif->beacon_stats.accu_num_beacons += 1616 mvmvif->beacon_stats.num_beacons; 1617 1618 /* Allocate resources for the MAC context, and add it to the fw */ 1619 ret = iwl_mvm_mac_ctxt_init(mvm, vif); 1620 if (ret) 1621 goto out_unlock; 1622 1623 rcu_assign_pointer(mvm->vif_id_to_mac[mvmvif->id], vif); 1624 1625 /* Counting number of interfaces is needed for legacy PM */ 1626 if (vif->type != NL80211_IFTYPE_P2P_DEVICE) 1627 mvm->vif_count++; 1628 1629 /* 1630 * The AP binding flow can be done only after the beacon 1631 * template is configured (which happens only in the mac80211 1632 * start_ap() flow), and adding the broadcast station can happen 1633 * only after the binding. 1634 * In addition, since modifying the MAC before adding a bcast 1635 * station is not allowed by the FW, delay the adding of MAC context to 1636 * the point where we can also add the bcast station. 1637 * In short: there's not much we can do at this point, other than 1638 * allocating resources :) 1639 */ 1640 if (vif->type == NL80211_IFTYPE_AP || 1641 vif->type == NL80211_IFTYPE_ADHOC) { 1642 ret = iwl_mvm_alloc_bcast_sta(mvm, vif); 1643 if (ret) { 1644 IWL_ERR(mvm, "Failed to allocate bcast sta\n"); 1645 goto out_release; 1646 } 1647 1648 /* 1649 * Only queue for this station is the mcast queue, 1650 * which shouldn't be in TFD mask anyway 1651 */ 1652 ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->mcast_sta, 1653 0, vif->type, 1654 IWL_STA_MULTICAST); 1655 if (ret) 1656 goto out_release; 1657 1658 iwl_mvm_vif_dbgfs_register(mvm, vif); 1659 goto out_unlock; 1660 } 1661 1662 mvmvif->features |= hw->netdev_features; 1663 1664 ret = iwl_mvm_mac_ctxt_add(mvm, vif); 1665 if (ret) 1666 goto out_release; 1667 1668 ret = iwl_mvm_power_update_mac(mvm); 1669 if (ret) 1670 goto out_remove_mac; 1671 1672 /* beacon filtering */ 1673 ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); 1674 if (ret) 1675 goto out_remove_mac; 1676 1677 if (!mvm->bf_allowed_vif && 1678 vif->type == NL80211_IFTYPE_STATION && !vif->p2p) { 1679 mvm->bf_allowed_vif = mvmvif; 1680 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER | 1681 IEEE80211_VIF_SUPPORTS_CQM_RSSI; 1682 } 1683 1684 /* 1685 * P2P_DEVICE interface does not have a channel context assigned to it, 1686 * so a dedicated PHY context is allocated to it and the corresponding 1687 * MAC context is bound to it at this stage. 
1688 */ 1689 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { 1690 1691 mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); 1692 if (!mvmvif->phy_ctxt) { 1693 ret = -ENOSPC; 1694 goto out_free_bf; 1695 } 1696 1697 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); 1698 ret = iwl_mvm_binding_add_vif(mvm, vif); 1699 if (ret) 1700 goto out_unref_phy; 1701 1702 ret = iwl_mvm_add_p2p_bcast_sta(mvm, vif); 1703 if (ret) 1704 goto out_unbind; 1705 1706 /* Save a pointer to p2p device vif, so it can later be used to 1707 * update the p2p device MAC when a GO is started/stopped */ 1708 mvm->p2p_device_vif = vif; 1709 } 1710 1711 iwl_mvm_tcm_add_vif(mvm, vif); 1712 INIT_DELAYED_WORK(&mvmvif->csa_work, 1713 iwl_mvm_channel_switch_disconnect_wk); 1714 1715 if (vif->type == NL80211_IFTYPE_MONITOR) 1716 mvm->monitor_on = true; 1717 1718 iwl_mvm_vif_dbgfs_register(mvm, vif); 1719 goto out_unlock; 1720 1721 out_unbind: 1722 iwl_mvm_binding_remove_vif(mvm, vif); 1723 out_unref_phy: 1724 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); 1725 out_free_bf: 1726 if (mvm->bf_allowed_vif == mvmvif) { 1727 mvm->bf_allowed_vif = NULL; 1728 vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER | 1729 IEEE80211_VIF_SUPPORTS_CQM_RSSI); 1730 } 1731 out_remove_mac: 1732 mvmvif->phy_ctxt = NULL; 1733 iwl_mvm_mac_ctxt_remove(mvm, vif); 1734 out_release: 1735 if (vif->type != NL80211_IFTYPE_P2P_DEVICE) 1736 mvm->vif_count--; 1737 out_unlock: 1738 mutex_unlock(&mvm->mutex); 1739 1740 iwl_mvm_unref(mvm, IWL_MVM_REF_ADD_IF); 1741 1742 return ret; 1743 } 1744 1745 static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm, 1746 struct ieee80211_vif *vif) 1747 { 1748 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { 1749 /* 1750 * Flush the ROC worker which will flush the OFFCHANNEL queue. 1751 * We assume here that all the packets sent to the OFFCHANNEL 1752 * queue are sent in ROC session. 1753 */ 1754 flush_work(&mvm->roc_done_wk); 1755 } 1756 } 1757 1758 static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, 1759 struct ieee80211_vif *vif) 1760 { 1761 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1762 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1763 struct iwl_probe_resp_data *probe_data; 1764 1765 iwl_mvm_prepare_mac_removal(mvm, vif); 1766 1767 if (!(vif->type == NL80211_IFTYPE_AP || 1768 vif->type == NL80211_IFTYPE_ADHOC)) 1769 iwl_mvm_tcm_rm_vif(mvm, vif); 1770 1771 mutex_lock(&mvm->mutex); 1772 1773 probe_data = rcu_dereference_protected(mvmvif->probe_resp_data, 1774 lockdep_is_held(&mvm->mutex)); 1775 RCU_INIT_POINTER(mvmvif->probe_resp_data, NULL); 1776 if (probe_data) 1777 kfree_rcu(probe_data, rcu_head); 1778 1779 if (mvm->bf_allowed_vif == mvmvif) { 1780 mvm->bf_allowed_vif = NULL; 1781 vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER | 1782 IEEE80211_VIF_SUPPORTS_CQM_RSSI); 1783 } 1784 1785 if (vif->bss_conf.ftm_responder) 1786 memset(&mvm->ftm_resp_stats, 0, sizeof(mvm->ftm_resp_stats)); 1787 1788 iwl_mvm_vif_dbgfs_clean(mvm, vif); 1789 1790 /* 1791 * For AP/GO interface, the tear down of the resources allocated to the 1792 * interface is be handled as part of the stop_ap flow. 
1793 */ 1794 if (vif->type == NL80211_IFTYPE_AP || 1795 vif->type == NL80211_IFTYPE_ADHOC) { 1796 #ifdef CONFIG_NL80211_TESTMODE 1797 if (vif == mvm->noa_vif) { 1798 mvm->noa_vif = NULL; 1799 mvm->noa_duration = 0; 1800 } 1801 #endif 1802 iwl_mvm_dealloc_int_sta(mvm, &mvmvif->mcast_sta); 1803 iwl_mvm_dealloc_bcast_sta(mvm, vif); 1804 goto out_release; 1805 } 1806 1807 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { 1808 mvm->p2p_device_vif = NULL; 1809 iwl_mvm_rm_p2p_bcast_sta(mvm, vif); 1810 iwl_mvm_binding_remove_vif(mvm, vif); 1811 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); 1812 mvmvif->phy_ctxt = NULL; 1813 } 1814 1815 if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE) 1816 mvm->vif_count--; 1817 1818 iwl_mvm_power_update_mac(mvm); 1819 iwl_mvm_mac_ctxt_remove(mvm, vif); 1820 1821 RCU_INIT_POINTER(mvm->vif_id_to_mac[mvmvif->id], NULL); 1822 1823 if (vif->type == NL80211_IFTYPE_MONITOR) 1824 mvm->monitor_on = false; 1825 1826 out_release: 1827 mutex_unlock(&mvm->mutex); 1828 } 1829 1830 static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed) 1831 { 1832 return 0; 1833 } 1834 1835 struct iwl_mvm_mc_iter_data { 1836 struct iwl_mvm *mvm; 1837 int port_id; 1838 }; 1839 1840 static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac, 1841 struct ieee80211_vif *vif) 1842 { 1843 struct iwl_mvm_mc_iter_data *data = _data; 1844 struct iwl_mvm *mvm = data->mvm; 1845 struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd; 1846 struct iwl_host_cmd hcmd = { 1847 .id = MCAST_FILTER_CMD, 1848 .flags = CMD_ASYNC, 1849 .dataflags[0] = IWL_HCMD_DFL_NOCOPY, 1850 }; 1851 int ret, len; 1852 1853 /* if we don't have free ports, mcast frames will be dropped */ 1854 if (WARN_ON_ONCE(data->port_id >= MAX_PORT_ID_NUM)) 1855 return; 1856 1857 if (vif->type != NL80211_IFTYPE_STATION || 1858 !vif->bss_conf.assoc) 1859 return; 1860 1861 cmd->port_id = data->port_id++; 1862 memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN); 1863 len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4); 1864 1865 hcmd.len[0] = len; 1866 hcmd.data[0] = cmd; 1867 1868 ret = iwl_mvm_send_cmd(mvm, &hcmd); 1869 if (ret) 1870 IWL_ERR(mvm, "mcast filter cmd error. 
ret=%d\n", ret); 1871 } 1872 1873 static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm) 1874 { 1875 struct iwl_mvm_mc_iter_data iter_data = { 1876 .mvm = mvm, 1877 }; 1878 1879 lockdep_assert_held(&mvm->mutex); 1880 1881 if (WARN_ON_ONCE(!mvm->mcast_filter_cmd)) 1882 return; 1883 1884 ieee80211_iterate_active_interfaces_atomic( 1885 mvm->hw, IEEE80211_IFACE_ITER_NORMAL, 1886 iwl_mvm_mc_iface_iterator, &iter_data); 1887 } 1888 1889 static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw, 1890 struct netdev_hw_addr_list *mc_list) 1891 { 1892 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1893 struct iwl_mcast_filter_cmd *cmd; 1894 struct netdev_hw_addr *addr; 1895 int addr_count; 1896 bool pass_all; 1897 int len; 1898 1899 addr_count = netdev_hw_addr_list_count(mc_list); 1900 pass_all = addr_count > MAX_MCAST_FILTERING_ADDRESSES || 1901 IWL_MVM_FW_MCAST_FILTER_PASS_ALL; 1902 if (pass_all) 1903 addr_count = 0; 1904 1905 len = roundup(sizeof(*cmd) + addr_count * ETH_ALEN, 4); 1906 cmd = kzalloc(len, GFP_ATOMIC); 1907 if (!cmd) 1908 return 0; 1909 1910 if (pass_all) { 1911 cmd->pass_all = 1; 1912 return (u64)(unsigned long)cmd; 1913 } 1914 1915 netdev_hw_addr_list_for_each(addr, mc_list) { 1916 IWL_DEBUG_MAC80211(mvm, "mcast addr (%d): %pM\n", 1917 cmd->count, addr->addr); 1918 memcpy(&cmd->addr_list[cmd->count * ETH_ALEN], 1919 addr->addr, ETH_ALEN); 1920 cmd->count++; 1921 } 1922 1923 return (u64)(unsigned long)cmd; 1924 } 1925 1926 static void iwl_mvm_configure_filter(struct ieee80211_hw *hw, 1927 unsigned int changed_flags, 1928 unsigned int *total_flags, 1929 u64 multicast) 1930 { 1931 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1932 struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast; 1933 1934 mutex_lock(&mvm->mutex); 1935 1936 /* replace previous configuration */ 1937 kfree(mvm->mcast_filter_cmd); 1938 mvm->mcast_filter_cmd = cmd; 1939 1940 if (!cmd) 1941 goto out; 1942 1943 if (changed_flags & FIF_ALLMULTI) 1944 cmd->pass_all = !!(*total_flags & FIF_ALLMULTI); 1945 1946 if (cmd->pass_all) 1947 cmd->count = 0; 1948 1949 iwl_mvm_recalc_multicast(mvm); 1950 out: 1951 mutex_unlock(&mvm->mutex); 1952 *total_flags = 0; 1953 } 1954 1955 static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw, 1956 struct ieee80211_vif *vif, 1957 unsigned int filter_flags, 1958 unsigned int changed_flags) 1959 { 1960 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1961 1962 /* We support only filter for probe requests */ 1963 if (!(changed_flags & FIF_PROBE_REQ)) 1964 return; 1965 1966 /* Supported only for p2p client interfaces */ 1967 if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc || 1968 !vif->p2p) 1969 return; 1970 1971 mutex_lock(&mvm->mutex); 1972 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 1973 mutex_unlock(&mvm->mutex); 1974 } 1975 1976 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING 1977 struct iwl_bcast_iter_data { 1978 struct iwl_mvm *mvm; 1979 struct iwl_bcast_filter_cmd *cmd; 1980 u8 current_filter; 1981 }; 1982 1983 static void 1984 iwl_mvm_set_bcast_filter(struct ieee80211_vif *vif, 1985 const struct iwl_fw_bcast_filter *in_filter, 1986 struct iwl_fw_bcast_filter *out_filter) 1987 { 1988 struct iwl_fw_bcast_filter_attr *attr; 1989 int i; 1990 1991 memcpy(out_filter, in_filter, sizeof(*out_filter)); 1992 1993 for (i = 0; i < ARRAY_SIZE(out_filter->attrs); i++) { 1994 attr = &out_filter->attrs[i]; 1995 1996 if (!attr->mask) 1997 break; 1998 1999 switch (attr->reserved1) { 2000 case cpu_to_le16(BC_FILTER_MAGIC_IP): 2001 if 
(vif->bss_conf.arp_addr_cnt != 1) { 2002 attr->mask = 0; 2003 continue; 2004 } 2005 2006 attr->val = vif->bss_conf.arp_addr_list[0]; 2007 break; 2008 case cpu_to_le16(BC_FILTER_MAGIC_MAC): 2009 attr->val = *(__be32 *)&vif->addr[2]; 2010 break; 2011 default: 2012 break; 2013 } 2014 attr->reserved1 = 0; 2015 out_filter->num_attrs++; 2016 } 2017 } 2018 2019 static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac, 2020 struct ieee80211_vif *vif) 2021 { 2022 struct iwl_bcast_iter_data *data = _data; 2023 struct iwl_mvm *mvm = data->mvm; 2024 struct iwl_bcast_filter_cmd *cmd = data->cmd; 2025 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2026 struct iwl_fw_bcast_mac *bcast_mac; 2027 int i; 2028 2029 if (WARN_ON(mvmvif->id >= ARRAY_SIZE(cmd->macs))) 2030 return; 2031 2032 bcast_mac = &cmd->macs[mvmvif->id]; 2033 2034 /* 2035 * enable filtering only for associated stations, but not for P2P 2036 * Clients 2037 */ 2038 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p || 2039 !vif->bss_conf.assoc) 2040 return; 2041 2042 bcast_mac->default_discard = 1; 2043 2044 /* copy all configured filters */ 2045 for (i = 0; mvm->bcast_filters[i].attrs[0].mask; i++) { 2046 /* 2047 * Make sure we don't exceed our filters limit. 2048 * if there is still a valid filter to be configured, 2049 * be on the safe side and just allow bcast for this mac. 2050 */ 2051 if (WARN_ON_ONCE(data->current_filter >= 2052 ARRAY_SIZE(cmd->filters))) { 2053 bcast_mac->default_discard = 0; 2054 bcast_mac->attached_filters = 0; 2055 break; 2056 } 2057 2058 iwl_mvm_set_bcast_filter(vif, 2059 &mvm->bcast_filters[i], 2060 &cmd->filters[data->current_filter]); 2061 2062 /* skip current filter if it contains no attributes */ 2063 if (!cmd->filters[data->current_filter].num_attrs) 2064 continue; 2065 2066 /* attach the filter to current mac */ 2067 bcast_mac->attached_filters |= 2068 cpu_to_le16(BIT(data->current_filter)); 2069 2070 data->current_filter++; 2071 } 2072 } 2073 2074 bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm, 2075 struct iwl_bcast_filter_cmd *cmd) 2076 { 2077 struct iwl_bcast_iter_data iter_data = { 2078 .mvm = mvm, 2079 .cmd = cmd, 2080 }; 2081 2082 if (IWL_MVM_FW_BCAST_FILTER_PASS_ALL) 2083 return false; 2084 2085 memset(cmd, 0, sizeof(*cmd)); 2086 cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters); 2087 cmd->max_macs = ARRAY_SIZE(cmd->macs); 2088 2089 #ifdef CONFIG_IWLWIFI_DEBUGFS 2090 /* use debugfs filters/macs if override is configured */ 2091 if (mvm->dbgfs_bcast_filtering.override) { 2092 memcpy(cmd->filters, &mvm->dbgfs_bcast_filtering.cmd.filters, 2093 sizeof(cmd->filters)); 2094 memcpy(cmd->macs, &mvm->dbgfs_bcast_filtering.cmd.macs, 2095 sizeof(cmd->macs)); 2096 return true; 2097 } 2098 #endif 2099 2100 /* if no filters are configured, do nothing */ 2101 if (!mvm->bcast_filters) 2102 return false; 2103 2104 /* configure and attach these filters for each associated sta vif */ 2105 ieee80211_iterate_active_interfaces( 2106 mvm->hw, IEEE80211_IFACE_ITER_NORMAL, 2107 iwl_mvm_bcast_filter_iterator, &iter_data); 2108 2109 return true; 2110 } 2111 2112 static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm) 2113 { 2114 struct iwl_bcast_filter_cmd cmd; 2115 2116 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING)) 2117 return 0; 2118 2119 if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) 2120 return 0; 2121 2122 return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0, 2123 sizeof(cmd), &cmd); 2124 } 2125 #else 2126 static inline int 
iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm) 2127 { 2128 return 0; 2129 } 2130 #endif 2131 2132 static int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm, 2133 struct ieee80211_vif *vif) 2134 { 2135 struct iwl_mu_group_mgmt_cmd cmd = {}; 2136 2137 memcpy(cmd.membership_status, vif->bss_conf.mu_group.membership, 2138 WLAN_MEMBERSHIP_LEN); 2139 memcpy(cmd.user_position, vif->bss_conf.mu_group.position, 2140 WLAN_USER_POSITION_LEN); 2141 2142 return iwl_mvm_send_cmd_pdu(mvm, 2143 WIDE_ID(DATA_PATH_GROUP, 2144 UPDATE_MU_GROUPS_CMD), 2145 0, sizeof(cmd), &cmd); 2146 } 2147 2148 static void iwl_mvm_mu_mimo_iface_iterator(void *_data, u8 *mac, 2149 struct ieee80211_vif *vif) 2150 { 2151 if (vif->mu_mimo_owner) { 2152 struct iwl_mu_group_mgmt_notif *notif = _data; 2153 2154 /* 2155 * MU-MIMO Group Id action frame is little endian. We treat 2156 * the data received from firmware as if it came from the 2157 * action frame, so no conversion is needed. 2158 */ 2159 ieee80211_update_mu_groups(vif, 2160 (u8 *)¬if->membership_status, 2161 (u8 *)¬if->user_position); 2162 } 2163 } 2164 2165 void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm, 2166 struct iwl_rx_cmd_buffer *rxb) 2167 { 2168 struct iwl_rx_packet *pkt = rxb_addr(rxb); 2169 struct iwl_mu_group_mgmt_notif *notif = (void *)pkt->data; 2170 2171 ieee80211_iterate_active_interfaces_atomic( 2172 mvm->hw, IEEE80211_IFACE_ITER_NORMAL, 2173 iwl_mvm_mu_mimo_iface_iterator, notif); 2174 } 2175 2176 static u8 iwl_mvm_he_get_ppe_val(u8 *ppe, u8 ppe_pos_bit) 2177 { 2178 u8 byte_num = ppe_pos_bit / 8; 2179 u8 bit_num = ppe_pos_bit % 8; 2180 u8 residue_bits; 2181 u8 res; 2182 2183 if (bit_num <= 5) 2184 return (ppe[byte_num] >> bit_num) & 2185 (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE) - 1); 2186 2187 /* 2188 * If bit_num > 5, we have to combine bits with next byte. 2189 * Calculate how many bits we need to take from current byte (called 2190 * here "residue_bits"), and add them to bits from next byte. 2191 */ 2192 2193 residue_bits = 8 - bit_num; 2194 2195 res = (ppe[byte_num + 1] & 2196 (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE - residue_bits) - 1)) << 2197 residue_bits; 2198 res += (ppe[byte_num] >> bit_num) & (BIT(residue_bits) - 1); 2199 2200 return res; 2201 } 2202 2203 static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, 2204 struct ieee80211_vif *vif, u8 sta_id) 2205 { 2206 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2207 struct iwl_he_sta_context_cmd sta_ctxt_cmd = { 2208 .sta_id = sta_id, 2209 .tid_limit = IWL_MAX_TID_COUNT, 2210 .bss_color = vif->bss_conf.bss_color, 2211 .htc_trig_based_pkt_ext = vif->bss_conf.htc_trig_based_pkt_ext, 2212 .frame_time_rts_th = 2213 cpu_to_le16(vif->bss_conf.frame_time_rts_th), 2214 }; 2215 int size = fw_has_api(&mvm->fw->ucode_capa, 2216 IWL_UCODE_TLV_API_MBSSID_HE) ? 
2217 sizeof(sta_ctxt_cmd) : 2218 sizeof(struct iwl_he_sta_context_cmd_v1); 2219 struct ieee80211_sta *sta; 2220 u32 flags; 2221 int i; 2222 2223 rcu_read_lock(); 2224 2225 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_ctxt_cmd.sta_id]); 2226 if (IS_ERR(sta)) { 2227 rcu_read_unlock(); 2228 WARN(1, "Can't find STA to configure HE\n"); 2229 return; 2230 } 2231 2232 if (!sta->he_cap.has_he) { 2233 rcu_read_unlock(); 2234 return; 2235 } 2236 2237 flags = 0; 2238 2239 /* HTC flags */ 2240 if (sta->he_cap.he_cap_elem.mac_cap_info[0] & 2241 IEEE80211_HE_MAC_CAP0_HTC_HE) 2242 sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_SUPPORT); 2243 if ((sta->he_cap.he_cap_elem.mac_cap_info[1] & 2244 IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION) || 2245 (sta->he_cap.he_cap_elem.mac_cap_info[2] & 2246 IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION)) { 2247 u8 link_adap = 2248 ((sta->he_cap.he_cap_elem.mac_cap_info[2] & 2249 IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION) << 1) + 2250 (sta->he_cap.he_cap_elem.mac_cap_info[1] & 2251 IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION); 2252 2253 if (link_adap == 2) 2254 sta_ctxt_cmd.htc_flags |= 2255 cpu_to_le32(IWL_HE_HTC_LINK_ADAP_UNSOLICITED); 2256 else if (link_adap == 3) 2257 sta_ctxt_cmd.htc_flags |= 2258 cpu_to_le32(IWL_HE_HTC_LINK_ADAP_BOTH); 2259 } 2260 if (sta->he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR) 2261 sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BSR_SUPP); 2262 if (sta->he_cap.he_cap_elem.mac_cap_info[3] & 2263 IEEE80211_HE_MAC_CAP3_OMI_CONTROL) 2264 sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_OMI_SUPP); 2265 if (sta->he_cap.he_cap_elem.mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR) 2266 sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BQR_SUPP); 2267 2268 /* 2269 * Initialize the PPE thresholds to "None" (7), as described in Table 2270 * 9-262ac of 80211.ax/D3.0. 2271 */ 2272 memset(&sta_ctxt_cmd.pkt_ext, 7, sizeof(sta_ctxt_cmd.pkt_ext)); 2273 2274 /* If PPE Thresholds exist, parse them into a FW-familiar format. 
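 * The PPE Thresholds field starts with a 3-bit NSS value and a 4-bit RU
 * index bitmap (7 bits of header, which is why ppe_pos_bit starts at 7
 * below), followed by a pair of 3-bit PPET16/PPET8 values for each
 * spatial stream and each RU set in the bitmap - hence the two
 * IEEE80211_PPE_THRES_INFO_PPET_SIZE steps per bandwidth index in the
 * inner loop.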
*/ 2275 if (sta->he_cap.he_cap_elem.phy_cap_info[6] & 2276 IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) { 2277 u8 nss = (sta->he_cap.ppe_thres[0] & 2278 IEEE80211_PPE_THRES_NSS_MASK) + 1; 2279 u8 ru_index_bitmap = 2280 (sta->he_cap.ppe_thres[0] & 2281 IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK) >> 2282 IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS; 2283 u8 *ppe = &sta->he_cap.ppe_thres[0]; 2284 u8 ppe_pos_bit = 7; /* Starting after PPE header */ 2285 2286 /* 2287 * FW currently supports only nss == MAX_HE_SUPP_NSS 2288 * 2289 * If nss > MAX: we can ignore values we don't support 2290 * If nss < MAX: we can set zeros in other streams 2291 */ 2292 if (nss > MAX_HE_SUPP_NSS) { 2293 IWL_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss, 2294 MAX_HE_SUPP_NSS); 2295 nss = MAX_HE_SUPP_NSS; 2296 } 2297 2298 for (i = 0; i < nss; i++) { 2299 u8 ru_index_tmp = ru_index_bitmap << 1; 2300 u8 bw; 2301 2302 for (bw = 0; bw < MAX_HE_CHANNEL_BW_INDX; bw++) { 2303 ru_index_tmp >>= 1; 2304 if (!(ru_index_tmp & 1)) 2305 continue; 2306 2307 sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][1] = 2308 iwl_mvm_he_get_ppe_val(ppe, 2309 ppe_pos_bit); 2310 ppe_pos_bit += 2311 IEEE80211_PPE_THRES_INFO_PPET_SIZE; 2312 sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][0] = 2313 iwl_mvm_he_get_ppe_val(ppe, 2314 ppe_pos_bit); 2315 ppe_pos_bit += 2316 IEEE80211_PPE_THRES_INFO_PPET_SIZE; 2317 } 2318 } 2319 2320 flags |= STA_CTXT_HE_PACKET_EXT; 2321 } else if ((sta->he_cap.he_cap_elem.phy_cap_info[9] & 2322 IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_MASK) != 2323 IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED) { 2324 int low_th = -1; 2325 int high_th = -1; 2326 2327 /* Take the PPE thresholds from the nominal padding info */ 2328 switch (sta->he_cap.he_cap_elem.phy_cap_info[9] & 2329 IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_MASK) { 2330 case IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_0US: 2331 low_th = IWL_HE_PKT_EXT_NONE; 2332 high_th = IWL_HE_PKT_EXT_NONE; 2333 break; 2334 case IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_8US: 2335 low_th = IWL_HE_PKT_EXT_BPSK; 2336 high_th = IWL_HE_PKT_EXT_NONE; 2337 break; 2338 case IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_16US: 2339 low_th = IWL_HE_PKT_EXT_NONE; 2340 high_th = IWL_HE_PKT_EXT_BPSK; 2341 break; 2342 } 2343 2344 /* Set the PPE thresholds accordingly */ 2345 if (low_th >= 0 && high_th >= 0) { 2346 struct iwl_he_pkt_ext *pkt_ext = 2347 (struct iwl_he_pkt_ext *)&sta_ctxt_cmd.pkt_ext; 2348 2349 for (i = 0; i < MAX_HE_SUPP_NSS; i++) { 2350 u8 bw; 2351 2352 for (bw = 0; bw < MAX_HE_CHANNEL_BW_INDX; 2353 bw++) { 2354 pkt_ext->pkt_ext_qam_th[i][bw][0] = 2355 low_th; 2356 pkt_ext->pkt_ext_qam_th[i][bw][1] = 2357 high_th; 2358 } 2359 } 2360 2361 flags |= STA_CTXT_HE_PACKET_EXT; 2362 } 2363 } 2364 rcu_read_unlock(); 2365 2366 /* Mark MU EDCA as enabled, unless none detected on some AC */ 2367 flags |= STA_CTXT_HE_MU_EDCA_CW; 2368 for (i = 0; i < AC_NUM; i++) { 2369 struct ieee80211_he_mu_edca_param_ac_rec *mu_edca = 2370 &mvmvif->queue_params[i].mu_edca_param_rec; 2371 2372 if (!mvmvif->queue_params[i].mu_edca) { 2373 flags &= ~STA_CTXT_HE_MU_EDCA_CW; 2374 break; 2375 } 2376 2377 sta_ctxt_cmd.trig_based_txf[i].cwmin = 2378 cpu_to_le16(mu_edca->ecw_min_max & 0xf); 2379 sta_ctxt_cmd.trig_based_txf[i].cwmax = 2380 cpu_to_le16((mu_edca->ecw_min_max & 0xf0) >> 4); 2381 sta_ctxt_cmd.trig_based_txf[i].aifsn = 2382 cpu_to_le16(mu_edca->aifsn); 2383 sta_ctxt_cmd.trig_based_txf[i].mu_time = 2384 cpu_to_le16(mu_edca->mu_edca_timer); 2385 } 2386 2387 if (vif->bss_conf.multi_sta_back_32bit) 2388 flags |= 
STA_CTXT_HE_32BIT_BA_BITMAP; 2389 2390 if (vif->bss_conf.ack_enabled) 2391 flags |= STA_CTXT_HE_ACK_ENABLED; 2392 2393 if (vif->bss_conf.uora_exists) { 2394 flags |= STA_CTXT_HE_TRIG_RND_ALLOC; 2395 2396 sta_ctxt_cmd.rand_alloc_ecwmin = 2397 vif->bss_conf.uora_ocw_range & 0x7; 2398 sta_ctxt_cmd.rand_alloc_ecwmax = 2399 (vif->bss_conf.uora_ocw_range >> 3) & 0x7; 2400 } 2401 2402 if (vif->bss_conf.nontransmitted) { 2403 flags |= STA_CTXT_HE_REF_BSSID_VALID; 2404 ether_addr_copy(sta_ctxt_cmd.ref_bssid_addr, 2405 vif->bss_conf.transmitter_bssid); 2406 sta_ctxt_cmd.max_bssid_indicator = 2407 vif->bss_conf.bssid_indicator; 2408 sta_ctxt_cmd.bssid_index = vif->bss_conf.bssid_index; 2409 sta_ctxt_cmd.ema_ap = vif->bss_conf.ema_ap; 2410 sta_ctxt_cmd.profile_periodicity = 2411 vif->bss_conf.profile_periodicity; 2412 } 2413 2414 sta_ctxt_cmd.flags = cpu_to_le32(flags); 2415 2416 if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(STA_HE_CTXT_CMD, 2417 DATA_PATH_GROUP, 0), 2418 0, size, &sta_ctxt_cmd)) 2419 IWL_ERR(mvm, "Failed to config FW to work HE!\n"); 2420 } 2421 2422 static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, 2423 struct ieee80211_vif *vif, 2424 struct ieee80211_bss_conf *bss_conf, 2425 u32 changes) 2426 { 2427 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2428 int ret; 2429 2430 /* 2431 * Re-calculate the tsf id, as the master-slave relations depend on the 2432 * beacon interval, which was not known when the station interface was 2433 * added. 2434 */ 2435 if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) { 2436 if (vif->bss_conf.he_support && 2437 !iwlwifi_mod_params.disable_11ax) 2438 iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id); 2439 2440 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); 2441 } 2442 2443 /* Update MU EDCA params */ 2444 if (changes & BSS_CHANGED_QOS && mvmvif->associated && 2445 bss_conf->assoc && vif->bss_conf.he_support && 2446 !iwlwifi_mod_params.disable_11ax) 2447 iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id); 2448 2449 /* 2450 * If we're not associated yet, take the (new) BSSID before associating 2451 * so the firmware knows. If we're already associated, then use the old 2452 * BSSID here, and we'll send a cleared one later in the CHANGED_ASSOC 2453 * branch for disassociation below. 2454 */ 2455 if (changes & BSS_CHANGED_BSSID && !mvmvif->associated) 2456 memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN); 2457 2458 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, mvmvif->bssid); 2459 if (ret) 2460 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); 2461 2462 /* after sending it once, adopt mac80211 data */ 2463 memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN); 2464 mvmvif->associated = bss_conf->assoc; 2465 2466 if (changes & BSS_CHANGED_ASSOC) { 2467 if (bss_conf->assoc) { 2468 /* clear statistics to get clean beacon counter */ 2469 iwl_mvm_request_statistics(mvm, true); 2470 memset(&mvmvif->beacon_stats, 0, 2471 sizeof(mvmvif->beacon_stats)); 2472 2473 /* add quota for this interface */ 2474 ret = iwl_mvm_update_quotas(mvm, true, NULL); 2475 if (ret) { 2476 IWL_ERR(mvm, "failed to update quotas\n"); 2477 return; 2478 } 2479 2480 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, 2481 &mvm->status)) { 2482 /* 2483 * If we're restarting then the firmware will 2484 * obviously have lost synchronisation with 2485 * the AP. It will attempt to synchronise by 2486 * itself, but we can make it more reliable by 2487 * scheduling a session protection time event. 
2488 * 2489 * The firmware needs to receive a beacon to 2490 * catch up with synchronisation, use 110% of 2491 * the beacon interval. 2492 * 2493 * Set a large maximum delay to allow for more 2494 * than a single interface. 2495 */ 2496 u32 dur = (11 * vif->bss_conf.beacon_int) / 10; 2497 iwl_mvm_protect_session(mvm, vif, dur, dur, 2498 5 * dur, false); 2499 } 2500 2501 iwl_mvm_sf_update(mvm, vif, false); 2502 iwl_mvm_power_vif_assoc(mvm, vif); 2503 if (vif->p2p) { 2504 iwl_mvm_ref(mvm, IWL_MVM_REF_P2P_CLIENT); 2505 iwl_mvm_update_smps(mvm, vif, 2506 IWL_MVM_SMPS_REQ_PROT, 2507 IEEE80211_SMPS_DYNAMIC); 2508 } 2509 } else if (mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) { 2510 /* 2511 * If update fails - SF might be running in associated 2512 * mode while disassociated - which is forbidden. 2513 */ 2514 ret = iwl_mvm_sf_update(mvm, vif, false); 2515 WARN_ONCE(ret && 2516 !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, 2517 &mvm->status), 2518 "Failed to update SF upon disassociation\n"); 2519 2520 /* 2521 * If we get an assert during the connection (after the 2522 * station has been added, but before the vif is set 2523 * to associated), mac80211 will re-add the station and 2524 * then configure the vif. Since the vif is not 2525 * associated, we would remove the station here and 2526 * this would fail the recovery. 2527 */ 2528 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, 2529 &mvm->status)) { 2530 /* 2531 * Remove AP station now that 2532 * the MAC is unassoc 2533 */ 2534 ret = iwl_mvm_rm_sta_id(mvm, vif, 2535 mvmvif->ap_sta_id); 2536 if (ret) 2537 IWL_ERR(mvm, 2538 "failed to remove AP station\n"); 2539 2540 if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id) 2541 mvm->d0i3_ap_sta_id = 2542 IWL_MVM_INVALID_STA; 2543 mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; 2544 } 2545 2546 /* remove quota for this interface */ 2547 ret = iwl_mvm_update_quotas(mvm, false, NULL); 2548 if (ret) 2549 IWL_ERR(mvm, "failed to update quotas\n"); 2550 2551 if (vif->p2p) 2552 iwl_mvm_unref(mvm, IWL_MVM_REF_P2P_CLIENT); 2553 2554 /* this will take the cleared BSSID from bss_conf */ 2555 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 2556 if (ret) 2557 IWL_ERR(mvm, 2558 "failed to update MAC %pM (clear after unassoc)\n", 2559 vif->addr); 2560 } 2561 2562 /* 2563 * The firmware tracks the MU-MIMO group on its own. 2564 * However, on HW restart we should restore this data. 2565 */ 2566 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && 2567 (changes & BSS_CHANGED_MU_GROUPS) && vif->mu_mimo_owner) { 2568 ret = iwl_mvm_update_mu_groups(mvm, vif); 2569 if (ret) 2570 IWL_ERR(mvm, 2571 "failed to update VHT MU_MIMO groups\n"); 2572 } 2573 2574 iwl_mvm_recalc_multicast(mvm); 2575 iwl_mvm_configure_bcast_filter(mvm); 2576 2577 /* reset rssi values */ 2578 mvmvif->bf_data.ave_beacon_signal = 0; 2579 2580 iwl_mvm_bt_coex_vif_change(mvm); 2581 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT, 2582 IEEE80211_SMPS_AUTOMATIC); 2583 if (fw_has_capa(&mvm->fw->ucode_capa, 2584 IWL_UCODE_TLV_CAPA_UMAC_SCAN)) 2585 iwl_mvm_config_scan(mvm); 2586 } 2587 2588 if (changes & BSS_CHANGED_BEACON_INFO) { 2589 /* 2590 * We received a beacon from the associated AP so 2591 * remove the session protection. 
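 * (This removes the time event that iwl_mvm_protect_session() may have
 * scheduled, e.g. in the BSS_CHANGED_ASSOC restart path above or in
 * iwl_mvm_mac_mgd_prepare_tx().)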
2592 */ 2593 iwl_mvm_stop_session_protection(mvm, vif); 2594 2595 iwl_mvm_sf_update(mvm, vif, false); 2596 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); 2597 } 2598 2599 if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS | 2600 /* 2601 * Send power command on every beacon change, 2602 * because we may have not enabled beacon abort yet. 2603 */ 2604 BSS_CHANGED_BEACON_INFO)) { 2605 ret = iwl_mvm_power_update_mac(mvm); 2606 if (ret) 2607 IWL_ERR(mvm, "failed to update power mode\n"); 2608 } 2609 2610 if (changes & BSS_CHANGED_TXPOWER) { 2611 IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n", 2612 bss_conf->txpower); 2613 iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower); 2614 } 2615 2616 if (changes & BSS_CHANGED_CQM) { 2617 IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n"); 2618 /* reset cqm events tracking */ 2619 mvmvif->bf_data.last_cqm_event = 0; 2620 if (mvmvif->bf_data.bf_enabled) { 2621 ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0); 2622 if (ret) 2623 IWL_ERR(mvm, 2624 "failed to update CQM thresholds\n"); 2625 } 2626 } 2627 2628 if (changes & BSS_CHANGED_ARP_FILTER) { 2629 IWL_DEBUG_MAC80211(mvm, "arp filter changed\n"); 2630 iwl_mvm_configure_bcast_filter(mvm); 2631 } 2632 } 2633 2634 static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, 2635 struct ieee80211_vif *vif) 2636 { 2637 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2638 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2639 int ret; 2640 2641 /* 2642 * iwl_mvm_mac_ctxt_add() might read directly from the device 2643 * (the system time), so make sure it is available. 2644 */ 2645 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_START_AP); 2646 if (ret) 2647 return ret; 2648 2649 mutex_lock(&mvm->mutex); 2650 2651 /* Send the beacon template */ 2652 ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif); 2653 if (ret) 2654 goto out_unlock; 2655 2656 /* 2657 * Re-calculate the tsf id, as the master-slave relations depend on the 2658 * beacon interval, which was not known when the AP interface was added. 2659 */ 2660 if (vif->type == NL80211_IFTYPE_AP) 2661 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); 2662 2663 mvmvif->ap_assoc_sta_count = 0; 2664 2665 /* Add the mac context */ 2666 ret = iwl_mvm_mac_ctxt_add(mvm, vif); 2667 if (ret) 2668 goto out_unlock; 2669 2670 /* Perform the binding */ 2671 ret = iwl_mvm_binding_add_vif(mvm, vif); 2672 if (ret) 2673 goto out_remove; 2674 2675 /* 2676 * This is not very nice, but the simplest: 2677 * For older FWs adding the mcast sta before the bcast station may 2678 * cause assert 0x2b00. 2679 * This is fixed in later FW so make the order of removal depend on 2680 * the TLV 2681 */ 2682 if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) { 2683 ret = iwl_mvm_add_mcast_sta(mvm, vif); 2684 if (ret) 2685 goto out_unbind; 2686 /* 2687 * Send the bcast station. At this stage the TBTT and DTIM time 2688 * events are added and applied to the scheduler 2689 */ 2690 ret = iwl_mvm_send_add_bcast_sta(mvm, vif); 2691 if (ret) { 2692 iwl_mvm_rm_mcast_sta(mvm, vif); 2693 goto out_unbind; 2694 } 2695 } else { 2696 /* 2697 * Send the bcast station. 
At this stage the TBTT and DTIM time 2698 * events are added and applied to the scheduler 2699 */ 2700 ret = iwl_mvm_send_add_bcast_sta(mvm, vif); 2701 if (ret) 2702 goto out_unbind; 2703 ret = iwl_mvm_add_mcast_sta(mvm, vif); 2704 if (ret) { 2705 iwl_mvm_send_rm_bcast_sta(mvm, vif); 2706 goto out_unbind; 2707 } 2708 } 2709 2710 /* must be set before quota calculations */ 2711 mvmvif->ap_ibss_active = true; 2712 2713 if (vif->type == NL80211_IFTYPE_AP && !vif->p2p) { 2714 iwl_mvm_vif_set_low_latency(mvmvif, true, 2715 LOW_LATENCY_VIF_TYPE); 2716 iwl_mvm_send_low_latency_cmd(mvm, true, mvmvif->id); 2717 } 2718 2719 /* power updated needs to be done before quotas */ 2720 iwl_mvm_power_update_mac(mvm); 2721 2722 ret = iwl_mvm_update_quotas(mvm, false, NULL); 2723 if (ret) 2724 goto out_quota_failed; 2725 2726 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */ 2727 if (vif->p2p && mvm->p2p_device_vif) 2728 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL); 2729 2730 iwl_mvm_ref(mvm, IWL_MVM_REF_AP_IBSS); 2731 2732 iwl_mvm_bt_coex_vif_change(mvm); 2733 2734 /* we don't support TDLS during DCM */ 2735 if (iwl_mvm_phy_ctx_count(mvm) > 1) 2736 iwl_mvm_teardown_tdls_peers(mvm); 2737 2738 iwl_mvm_ftm_restart_responder(mvm, vif); 2739 2740 goto out_unlock; 2741 2742 out_quota_failed: 2743 iwl_mvm_power_update_mac(mvm); 2744 mvmvif->ap_ibss_active = false; 2745 iwl_mvm_send_rm_bcast_sta(mvm, vif); 2746 iwl_mvm_rm_mcast_sta(mvm, vif); 2747 out_unbind: 2748 iwl_mvm_binding_remove_vif(mvm, vif); 2749 out_remove: 2750 iwl_mvm_mac_ctxt_remove(mvm, vif); 2751 out_unlock: 2752 mutex_unlock(&mvm->mutex); 2753 iwl_mvm_unref(mvm, IWL_MVM_REF_START_AP); 2754 return ret; 2755 } 2756 2757 static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw, 2758 struct ieee80211_vif *vif) 2759 { 2760 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2761 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2762 2763 iwl_mvm_prepare_mac_removal(mvm, vif); 2764 2765 mutex_lock(&mvm->mutex); 2766 2767 /* Handle AP stop while in CSA */ 2768 if (rcu_access_pointer(mvm->csa_vif) == vif) { 2769 iwl_mvm_remove_time_event(mvm, mvmvif, 2770 &mvmvif->time_event_data); 2771 RCU_INIT_POINTER(mvm->csa_vif, NULL); 2772 mvmvif->csa_countdown = false; 2773 } 2774 2775 if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) { 2776 RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL); 2777 mvm->csa_tx_block_bcn_timeout = 0; 2778 } 2779 2780 mvmvif->ap_ibss_active = false; 2781 mvm->ap_last_beacon_gp2 = 0; 2782 2783 if (vif->type == NL80211_IFTYPE_AP && !vif->p2p) { 2784 iwl_mvm_vif_set_low_latency(mvmvif, false, 2785 LOW_LATENCY_VIF_TYPE); 2786 iwl_mvm_send_low_latency_cmd(mvm, false, mvmvif->id); 2787 } 2788 2789 iwl_mvm_bt_coex_vif_change(mvm); 2790 2791 iwl_mvm_unref(mvm, IWL_MVM_REF_AP_IBSS); 2792 2793 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */ 2794 if (vif->p2p && mvm->p2p_device_vif) 2795 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL); 2796 2797 iwl_mvm_update_quotas(mvm, false, NULL); 2798 2799 /* 2800 * This is not very nice, but the simplest: 2801 * For older FWs removing the mcast sta before the bcast station may 2802 * cause assert 0x2b00. 2803 * This is fixed in later FW (which will stop beaconing when removing 2804 * bcast station). 
2805 * So make the order of removal depend on the TLV 2806 */ 2807 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) 2808 iwl_mvm_rm_mcast_sta(mvm, vif); 2809 iwl_mvm_send_rm_bcast_sta(mvm, vif); 2810 if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) 2811 iwl_mvm_rm_mcast_sta(mvm, vif); 2812 iwl_mvm_binding_remove_vif(mvm, vif); 2813 2814 iwl_mvm_power_update_mac(mvm); 2815 2816 iwl_mvm_mac_ctxt_remove(mvm, vif); 2817 2818 mutex_unlock(&mvm->mutex); 2819 } 2820 2821 static void 2822 iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm, 2823 struct ieee80211_vif *vif, 2824 struct ieee80211_bss_conf *bss_conf, 2825 u32 changes) 2826 { 2827 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2828 2829 /* Changes will be applied when the AP/IBSS is started */ 2830 if (!mvmvif->ap_ibss_active) 2831 return; 2832 2833 if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT | 2834 BSS_CHANGED_BANDWIDTH | BSS_CHANGED_QOS) && 2835 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL)) 2836 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); 2837 2838 /* Need to send a new beacon template to the FW */ 2839 if (changes & BSS_CHANGED_BEACON && 2840 iwl_mvm_mac_ctxt_beacon_changed(mvm, vif)) 2841 IWL_WARN(mvm, "Failed updating beacon data\n"); 2842 2843 if (changes & BSS_CHANGED_TXPOWER) { 2844 IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n", 2845 bss_conf->txpower); 2846 iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower); 2847 } 2848 2849 if (changes & BSS_CHANGED_FTM_RESPONDER) { 2850 int ret = iwl_mvm_ftm_start_responder(mvm, vif); 2851 2852 if (ret) 2853 IWL_WARN(mvm, "Failed to enable FTM responder (%d)\n", 2854 ret); 2855 } 2856 2857 } 2858 2859 static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw, 2860 struct ieee80211_vif *vif, 2861 struct ieee80211_bss_conf *bss_conf, 2862 u32 changes) 2863 { 2864 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2865 2866 /* 2867 * iwl_mvm_bss_info_changed_station() might call 2868 * iwl_mvm_protect_session(), which reads directly from 2869 * the device (the system time), so make sure it is available. 
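 * (iwl_mvm_ref_sync() takes a reference that keeps the device out of its
 * low-power (d0i3) state while this callback runs; it is dropped again
 * with iwl_mvm_unref() at the end.)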
2870 */ 2871 if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_BSS_CHANGED)) 2872 return; 2873 2874 mutex_lock(&mvm->mutex); 2875 2876 if (changes & BSS_CHANGED_IDLE && !bss_conf->idle) 2877 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true); 2878 2879 switch (vif->type) { 2880 case NL80211_IFTYPE_STATION: 2881 iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes); 2882 break; 2883 case NL80211_IFTYPE_AP: 2884 case NL80211_IFTYPE_ADHOC: 2885 iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes); 2886 break; 2887 case NL80211_IFTYPE_MONITOR: 2888 if (changes & BSS_CHANGED_MU_GROUPS) 2889 iwl_mvm_update_mu_groups(mvm, vif); 2890 break; 2891 default: 2892 /* shouldn't happen */ 2893 WARN_ON_ONCE(1); 2894 } 2895 2896 mutex_unlock(&mvm->mutex); 2897 iwl_mvm_unref(mvm, IWL_MVM_REF_BSS_CHANGED); 2898 } 2899 2900 static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw, 2901 struct ieee80211_vif *vif, 2902 struct ieee80211_scan_request *hw_req) 2903 { 2904 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2905 int ret; 2906 2907 if (hw_req->req.n_channels == 0 || 2908 hw_req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels) 2909 return -EINVAL; 2910 2911 mutex_lock(&mvm->mutex); 2912 ret = iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies); 2913 mutex_unlock(&mvm->mutex); 2914 2915 return ret; 2916 } 2917 2918 static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw, 2919 struct ieee80211_vif *vif) 2920 { 2921 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2922 2923 mutex_lock(&mvm->mutex); 2924 2925 /* Due to a race condition, it's possible that mac80211 asks 2926 * us to stop a hw_scan when it's already stopped. This can 2927 * happen, for instance, if we stopped the scan ourselves, 2928 * called ieee80211_scan_completed() and the userspace called 2929 * cancel scan scan before ieee80211_scan_work() could run. 2930 * To handle that, simply return if the scan is not running. 2931 */ 2932 if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) 2933 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); 2934 2935 mutex_unlock(&mvm->mutex); 2936 } 2937 2938 static void 2939 iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw, 2940 struct ieee80211_sta *sta, u16 tids, 2941 int num_frames, 2942 enum ieee80211_frame_release_type reason, 2943 bool more_data) 2944 { 2945 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2946 2947 /* Called when we need to transmit (a) frame(s) from mac80211 */ 2948 2949 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames, 2950 tids, more_data, false); 2951 } 2952 2953 static void 2954 iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw, 2955 struct ieee80211_sta *sta, u16 tids, 2956 int num_frames, 2957 enum ieee80211_frame_release_type reason, 2958 bool more_data) 2959 { 2960 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2961 2962 /* Called when we need to transmit (a) frame(s) from agg or dqa queue */ 2963 2964 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames, 2965 tids, more_data, true); 2966 } 2967 2968 static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, 2969 enum sta_notify_cmd cmd, 2970 struct ieee80211_sta *sta) 2971 { 2972 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2973 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 2974 unsigned long txqs = 0, tids = 0; 2975 int tid; 2976 2977 /* 2978 * If we have TVQM then we get too high queue numbers - luckily 2979 * we really shouldn't get here with that because such hardware 2980 * should have firmware supporting buffer station offload. 
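 * (txqs below is a plain unsigned long bitmap, so TVQM queue numbers,
 * which can exceed BITS_PER_LONG, would not fit in it - hence the
 * WARN_ON.)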
2981 */ 2982 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) 2983 return; 2984 2985 spin_lock_bh(&mvmsta->lock); 2986 for (tid = 0; tid < ARRAY_SIZE(mvmsta->tid_data); tid++) { 2987 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; 2988 2989 if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE) 2990 continue; 2991 2992 __set_bit(tid_data->txq_id, &txqs); 2993 2994 if (iwl_mvm_tid_queued(mvm, tid_data) == 0) 2995 continue; 2996 2997 __set_bit(tid, &tids); 2998 } 2999 3000 switch (cmd) { 3001 case STA_NOTIFY_SLEEP: 3002 for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT) 3003 ieee80211_sta_set_buffered(sta, tid, true); 3004 3005 if (txqs) 3006 iwl_trans_freeze_txq_timer(mvm->trans, txqs, true); 3007 /* 3008 * The fw updates the STA to be asleep. Tx packets on the Tx 3009 * queues to this station will not be transmitted. The fw will 3010 * send a Tx response with TX_STATUS_FAIL_DEST_PS. 3011 */ 3012 break; 3013 case STA_NOTIFY_AWAKE: 3014 if (WARN_ON(mvmsta->sta_id == IWL_MVM_INVALID_STA)) 3015 break; 3016 3017 if (txqs) 3018 iwl_trans_freeze_txq_timer(mvm->trans, txqs, false); 3019 iwl_mvm_sta_modify_ps_wake(mvm, sta); 3020 break; 3021 default: 3022 break; 3023 } 3024 spin_unlock_bh(&mvmsta->lock); 3025 } 3026 3027 static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, 3028 struct ieee80211_vif *vif, 3029 enum sta_notify_cmd cmd, 3030 struct ieee80211_sta *sta) 3031 { 3032 __iwl_mvm_mac_sta_notify(hw, cmd, sta); 3033 } 3034 3035 void iwl_mvm_sta_pm_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) 3036 { 3037 struct iwl_rx_packet *pkt = rxb_addr(rxb); 3038 struct iwl_mvm_pm_state_notification *notif = (void *)pkt->data; 3039 struct ieee80211_sta *sta; 3040 struct iwl_mvm_sta *mvmsta; 3041 bool sleeping = (notif->type != IWL_MVM_PM_EVENT_AWAKE); 3042 3043 if (WARN_ON(notif->sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) 3044 return; 3045 3046 rcu_read_lock(); 3047 sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]); 3048 if (WARN_ON(IS_ERR_OR_NULL(sta))) { 3049 rcu_read_unlock(); 3050 return; 3051 } 3052 3053 mvmsta = iwl_mvm_sta_from_mac80211(sta); 3054 3055 if (!mvmsta->vif || 3056 mvmsta->vif->type != NL80211_IFTYPE_AP) { 3057 rcu_read_unlock(); 3058 return; 3059 } 3060 3061 if (mvmsta->sleeping != sleeping) { 3062 mvmsta->sleeping = sleeping; 3063 __iwl_mvm_mac_sta_notify(mvm->hw, 3064 sleeping ? STA_NOTIFY_SLEEP : STA_NOTIFY_AWAKE, 3065 sta); 3066 ieee80211_sta_ps_transition(sta, sleeping); 3067 } 3068 3069 if (sleeping) { 3070 switch (notif->type) { 3071 case IWL_MVM_PM_EVENT_AWAKE: 3072 case IWL_MVM_PM_EVENT_ASLEEP: 3073 break; 3074 case IWL_MVM_PM_EVENT_UAPSD: 3075 ieee80211_sta_uapsd_trigger(sta, IEEE80211_NUM_TIDS); 3076 break; 3077 case IWL_MVM_PM_EVENT_PS_POLL: 3078 ieee80211_sta_pspoll(sta); 3079 break; 3080 default: 3081 break; 3082 } 3083 } 3084 3085 rcu_read_unlock(); 3086 } 3087 3088 static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw, 3089 struct ieee80211_vif *vif, 3090 struct ieee80211_sta *sta) 3091 { 3092 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3093 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 3094 3095 /* 3096 * This is called before mac80211 does RCU synchronisation, 3097 * so here we already invalidate our internal RCU-protected 3098 * station pointer. The rest of the code will thus no longer 3099 * be able to find the station this way, and we don't rely 3100 * on further RCU synchronisation after the sta_state() 3101 * callback deleted the station. 
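 * (ERR_PTR(-ENOENT) is stored instead of NULL so that concurrent readers
 * can tell a station that is in the middle of removal apart from an entry
 * that is simply unused.)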
3102 */ 3103 mutex_lock(&mvm->mutex); 3104 if (sta == rcu_access_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id])) 3105 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], 3106 ERR_PTR(-ENOENT)); 3107 3108 mutex_unlock(&mvm->mutex); 3109 } 3110 3111 static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 3112 const u8 *bssid) 3113 { 3114 int i; 3115 3116 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { 3117 struct iwl_mvm_tcm_mac *mdata; 3118 3119 mdata = &mvm->tcm.data[iwl_mvm_vif_from_mac80211(vif)->id]; 3120 ewma_rate_init(&mdata->uapsd_nonagg_detect.rate); 3121 mdata->opened_rx_ba_sessions = false; 3122 } 3123 3124 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT)) 3125 return; 3126 3127 if (vif->p2p && !iwl_mvm_is_p2p_scm_uapsd_supported(mvm)) { 3128 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; 3129 return; 3130 } 3131 3132 if (!vif->p2p && 3133 (iwlwifi_mod_params.uapsd_disable & IWL_DISABLE_UAPSD_BSS)) { 3134 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; 3135 return; 3136 } 3137 3138 for (i = 0; i < IWL_MVM_UAPSD_NOAGG_LIST_LEN; i++) { 3139 if (ether_addr_equal(mvm->uapsd_noagg_bssids[i].addr, bssid)) { 3140 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; 3141 return; 3142 } 3143 } 3144 3145 vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; 3146 } 3147 3148 static void 3149 iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm, 3150 struct ieee80211_vif *vif, u8 *peer_addr, 3151 enum nl80211_tdls_operation action) 3152 { 3153 struct iwl_fw_dbg_trigger_tlv *trig; 3154 struct iwl_fw_dbg_trigger_tdls *tdls_trig; 3155 3156 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), 3157 FW_DBG_TRIGGER_TDLS); 3158 if (!trig) 3159 return; 3160 3161 tdls_trig = (void *)trig->data; 3162 3163 if (!(tdls_trig->action_bitmap & BIT(action))) 3164 return; 3165 3166 if (tdls_trig->peer_mode && 3167 memcmp(tdls_trig->peer, peer_addr, ETH_ALEN) != 0) 3168 return; 3169 3170 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, 3171 "TDLS event occurred, peer %pM, action %d", 3172 peer_addr, action); 3173 } 3174 3175 static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, 3176 struct ieee80211_vif *vif, 3177 struct ieee80211_sta *sta, 3178 enum ieee80211_sta_state old_state, 3179 enum ieee80211_sta_state new_state) 3180 { 3181 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3182 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3183 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 3184 int ret; 3185 3186 IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n", 3187 sta->addr, old_state, new_state); 3188 3189 /* this would be a mac80211 bug ... but don't crash */ 3190 if (WARN_ON_ONCE(!mvmvif->phy_ctxt)) 3191 return -EINVAL; 3192 3193 /* 3194 * If we are in a STA removal flow and in DQA mode: 3195 * 3196 * This is after the sync_rcu part, so the queues have already been 3197 * flushed. No more TXs on their way in mac80211's path, and no more in 3198 * the queues. 3199 * Also, we won't be getting any new TX frames for this station. 3200 * What we might have are deferred TX frames that need to be taken care 3201 * of. 3202 * 3203 * Drop any still-queued deferred-frame before removing the STA, and 3204 * make sure the worker is no longer handling frames for this STA. 
3205 */ 3206 if (old_state == IEEE80211_STA_NONE && 3207 new_state == IEEE80211_STA_NOTEXIST) { 3208 flush_work(&mvm->add_stream_wk); 3209 3210 /* 3211 * No need to make sure deferred TX indication is off since the 3212 * worker will already remove it if it was on 3213 */ 3214 } 3215 3216 mutex_lock(&mvm->mutex); 3217 /* track whether or not the station is associated */ 3218 mvm_sta->sta_state = new_state; 3219 3220 if (old_state == IEEE80211_STA_NOTEXIST && 3221 new_state == IEEE80211_STA_NONE) { 3222 /* 3223 * Firmware bug - it'll crash if the beacon interval is less 3224 * than 16. We can't avoid connecting at all, so refuse the 3225 * station state change, this will cause mac80211 to abandon 3226 * attempts to connect to this AP, and eventually wpa_s will 3227 * blacklist the AP... 3228 */ 3229 if (vif->type == NL80211_IFTYPE_STATION && 3230 vif->bss_conf.beacon_int < 16) { 3231 IWL_ERR(mvm, 3232 "AP %pM beacon interval is %d, refusing due to firmware bug!\n", 3233 sta->addr, vif->bss_conf.beacon_int); 3234 ret = -EINVAL; 3235 goto out_unlock; 3236 } 3237 3238 if (sta->tdls && 3239 (vif->p2p || 3240 iwl_mvm_tdls_sta_count(mvm, NULL) == 3241 IWL_MVM_TDLS_STA_COUNT || 3242 iwl_mvm_phy_ctx_count(mvm) > 1)) { 3243 IWL_DEBUG_MAC80211(mvm, "refusing TDLS sta\n"); 3244 ret = -EBUSY; 3245 goto out_unlock; 3246 } 3247 3248 ret = iwl_mvm_add_sta(mvm, vif, sta); 3249 if (sta->tdls && ret == 0) { 3250 iwl_mvm_recalc_tdls_state(mvm, vif, true); 3251 iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, 3252 NL80211_TDLS_SETUP); 3253 } 3254 3255 sta->max_rc_amsdu_len = 1; 3256 } else if (old_state == IEEE80211_STA_NONE && 3257 new_state == IEEE80211_STA_AUTH) { 3258 /* 3259 * EBS may be disabled due to previous failures reported by FW. 3260 * Reset EBS status here assuming environment has been changed. 
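 * (last_ebs_successful is consulted when the next scan command is built,
 * so setting it back to true lets the driver request EBS from the
 * firmware again.)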
3261 */ 3262 mvm->last_ebs_successful = true; 3263 iwl_mvm_check_uapsd(mvm, vif, sta->addr); 3264 ret = 0; 3265 } else if (old_state == IEEE80211_STA_AUTH && 3266 new_state == IEEE80211_STA_ASSOC) { 3267 if (vif->type == NL80211_IFTYPE_AP) { 3268 vif->bss_conf.he_support = sta->he_cap.has_he; 3269 mvmvif->ap_assoc_sta_count++; 3270 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 3271 if (vif->bss_conf.he_support && 3272 !iwlwifi_mod_params.disable_11ax) 3273 iwl_mvm_cfg_he_sta(mvm, vif, mvm_sta->sta_id); 3274 } else if (vif->type == NL80211_IFTYPE_STATION) { 3275 vif->bss_conf.he_support = sta->he_cap.has_he; 3276 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 3277 } 3278 3279 iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, 3280 false); 3281 ret = iwl_mvm_update_sta(mvm, vif, sta); 3282 } else if (old_state == IEEE80211_STA_ASSOC && 3283 new_state == IEEE80211_STA_AUTHORIZED) { 3284 ret = 0; 3285 3286 /* we don't support TDLS during DCM */ 3287 if (iwl_mvm_phy_ctx_count(mvm) > 1) 3288 iwl_mvm_teardown_tdls_peers(mvm); 3289 3290 if (sta->tdls) 3291 iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, 3292 NL80211_TDLS_ENABLE_LINK); 3293 3294 /* enable beacon filtering */ 3295 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); 3296 3297 iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, 3298 true); 3299 } else if (old_state == IEEE80211_STA_AUTHORIZED && 3300 new_state == IEEE80211_STA_ASSOC) { 3301 /* disable beacon filtering */ 3302 ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); 3303 WARN_ON(ret && 3304 !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, 3305 &mvm->status)); 3306 ret = 0; 3307 } else if (old_state == IEEE80211_STA_ASSOC && 3308 new_state == IEEE80211_STA_AUTH) { 3309 if (vif->type == NL80211_IFTYPE_AP) { 3310 mvmvif->ap_assoc_sta_count--; 3311 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 3312 } 3313 ret = 0; 3314 } else if (old_state == IEEE80211_STA_AUTH && 3315 new_state == IEEE80211_STA_NONE) { 3316 ret = 0; 3317 } else if (old_state == IEEE80211_STA_NONE && 3318 new_state == IEEE80211_STA_NOTEXIST) { 3319 ret = iwl_mvm_rm_sta(mvm, vif, sta); 3320 if (sta->tdls) { 3321 iwl_mvm_recalc_tdls_state(mvm, vif, false); 3322 iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, 3323 NL80211_TDLS_DISABLE_LINK); 3324 } 3325 3326 if (unlikely(ret && 3327 test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, 3328 &mvm->status))) 3329 ret = 0; 3330 } else { 3331 ret = -EIO; 3332 } 3333 out_unlock: 3334 mutex_unlock(&mvm->mutex); 3335 3336 if (sta->tdls && ret == 0) { 3337 if (old_state == IEEE80211_STA_NOTEXIST && 3338 new_state == IEEE80211_STA_NONE) 3339 ieee80211_reserve_tid(sta, IWL_MVM_TDLS_FW_TID); 3340 else if (old_state == IEEE80211_STA_NONE && 3341 new_state == IEEE80211_STA_NOTEXIST) 3342 ieee80211_unreserve_tid(sta, IWL_MVM_TDLS_FW_TID); 3343 } 3344 3345 return ret; 3346 } 3347 3348 static int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value) 3349 { 3350 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3351 3352 mvm->rts_threshold = value; 3353 3354 return 0; 3355 } 3356 3357 static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw, 3358 struct ieee80211_vif *vif, 3359 struct ieee80211_sta *sta, u32 changed) 3360 { 3361 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3362 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3363 3364 if (changed & (IEEE80211_RC_BW_CHANGED | 3365 IEEE80211_RC_SUPP_RATES_CHANGED | 3366 IEEE80211_RC_NSS_CHANGED)) 3367 iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, 3368 true); 3369 3370 
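/*
 * The smart FIFO (SF) configuration takes the associated station's
 * capabilities into account, so an NSS change warrants recomputing it
 * for station interfaces as well.
 */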
if (vif->type == NL80211_IFTYPE_STATION && 3371 changed & IEEE80211_RC_NSS_CHANGED) 3372 iwl_mvm_sf_update(mvm, vif, false); 3373 } 3374 3375 static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw, 3376 struct ieee80211_vif *vif, u16 ac, 3377 const struct ieee80211_tx_queue_params *params) 3378 { 3379 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3380 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3381 3382 mvmvif->queue_params[ac] = *params; 3383 3384 /* 3385 * No need to update right away, we'll get BSS_CHANGED_QOS 3386 * The exception is P2P_DEVICE interface which needs immediate update. 3387 */ 3388 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { 3389 int ret; 3390 3391 mutex_lock(&mvm->mutex); 3392 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 3393 mutex_unlock(&mvm->mutex); 3394 return ret; 3395 } 3396 return 0; 3397 } 3398 3399 static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw, 3400 struct ieee80211_vif *vif, 3401 u16 req_duration) 3402 { 3403 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3404 u32 duration = IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS; 3405 u32 min_duration = IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS; 3406 3407 /* 3408 * iwl_mvm_protect_session() reads directly from the device 3409 * (the system time), so make sure it is available. 3410 */ 3411 if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PREPARE_TX)) 3412 return; 3413 3414 if (req_duration > duration) 3415 duration = req_duration; 3416 3417 mutex_lock(&mvm->mutex); 3418 /* Try really hard to protect the session and hear a beacon */ 3419 iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500, false); 3420 mutex_unlock(&mvm->mutex); 3421 3422 iwl_mvm_unref(mvm, IWL_MVM_REF_PREPARE_TX); 3423 } 3424 3425 static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw, 3426 struct ieee80211_vif *vif, 3427 struct cfg80211_sched_scan_request *req, 3428 struct ieee80211_scan_ies *ies) 3429 { 3430 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3431 3432 int ret; 3433 3434 mutex_lock(&mvm->mutex); 3435 3436 if (!vif->bss_conf.idle) { 3437 ret = -EBUSY; 3438 goto out; 3439 } 3440 3441 ret = iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED); 3442 3443 out: 3444 mutex_unlock(&mvm->mutex); 3445 return ret; 3446 } 3447 3448 static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw, 3449 struct ieee80211_vif *vif) 3450 { 3451 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3452 int ret; 3453 3454 mutex_lock(&mvm->mutex); 3455 3456 /* Due to a race condition, it's possible that mac80211 asks 3457 * us to stop a sched_scan when it's already stopped. This 3458 * can happen, for instance, if we stopped the scan ourselves, 3459 * called ieee80211_sched_scan_stopped() and the userspace called 3460 * stop sched scan scan before ieee80211_sched_scan_stopped_work() 3461 * could run. To handle this, simply return if the scan is 3462 * not running. 
3463 */ 3464 if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) { 3465 mutex_unlock(&mvm->mutex); 3466 return 0; 3467 } 3468 3469 ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, false); 3470 mutex_unlock(&mvm->mutex); 3471 iwl_mvm_wait_for_async_handlers(mvm); 3472 3473 return ret; 3474 } 3475 3476 static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, 3477 enum set_key_cmd cmd, 3478 struct ieee80211_vif *vif, 3479 struct ieee80211_sta *sta, 3480 struct ieee80211_key_conf *key) 3481 { 3482 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3483 struct iwl_mvm_sta *mvmsta; 3484 struct iwl_mvm_key_pn *ptk_pn; 3485 int keyidx = key->keyidx; 3486 int ret; 3487 u8 key_offset; 3488 3489 if (iwlwifi_mod_params.swcrypto) { 3490 IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n"); 3491 return -EOPNOTSUPP; 3492 } 3493 3494 switch (key->cipher) { 3495 case WLAN_CIPHER_SUITE_TKIP: 3496 if (!mvm->trans->cfg->gen2) { 3497 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; 3498 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; 3499 } else if (vif->type == NL80211_IFTYPE_STATION) { 3500 key->flags |= IEEE80211_KEY_FLAG_PUT_MIC_SPACE; 3501 } else { 3502 IWL_DEBUG_MAC80211(mvm, "Use SW encryption for TKIP\n"); 3503 return -EOPNOTSUPP; 3504 } 3505 break; 3506 case WLAN_CIPHER_SUITE_CCMP: 3507 case WLAN_CIPHER_SUITE_GCMP: 3508 case WLAN_CIPHER_SUITE_GCMP_256: 3509 if (!iwl_mvm_has_new_tx_api(mvm)) 3510 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; 3511 break; 3512 case WLAN_CIPHER_SUITE_AES_CMAC: 3513 case WLAN_CIPHER_SUITE_BIP_GMAC_128: 3514 case WLAN_CIPHER_SUITE_BIP_GMAC_256: 3515 WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE)); 3516 break; 3517 case WLAN_CIPHER_SUITE_WEP40: 3518 case WLAN_CIPHER_SUITE_WEP104: 3519 if (vif->type == NL80211_IFTYPE_STATION) 3520 break; 3521 if (iwl_mvm_has_new_tx_api(mvm)) 3522 return -EOPNOTSUPP; 3523 /* support HW crypto on TX */ 3524 return 0; 3525 default: 3526 /* currently FW supports only one optional cipher scheme */ 3527 if (hw->n_cipher_schemes && 3528 hw->cipher_schemes->cipher == key->cipher) 3529 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; 3530 else 3531 return -EOPNOTSUPP; 3532 } 3533 3534 mutex_lock(&mvm->mutex); 3535 3536 switch (cmd) { 3537 case SET_KEY: 3538 if ((vif->type == NL80211_IFTYPE_ADHOC || 3539 vif->type == NL80211_IFTYPE_AP) && !sta) { 3540 /* 3541 * GTK on AP interface is a TX-only key, return 0; 3542 * on IBSS they're per-station and because we're lazy 3543 * we don't support them for RX, so do the same. 3544 * CMAC/GMAC in AP/IBSS modes must be done in software. 3545 */ 3546 if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC || 3547 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || 3548 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) 3549 ret = -EOPNOTSUPP; 3550 else 3551 ret = 0; 3552 3553 if (key->cipher != WLAN_CIPHER_SUITE_GCMP && 3554 key->cipher != WLAN_CIPHER_SUITE_GCMP_256 && 3555 !iwl_mvm_has_new_tx_api(mvm)) { 3556 key->hw_key_idx = STA_KEY_IDX_INVALID; 3557 break; 3558 } 3559 } 3560 3561 /* During FW restart, in order to restore the state as it was, 3562 * don't try to reprogram keys we previously failed for. 
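 * (A key that previously failed to install is left with hw_key_idx ==
 * STA_KEY_IDX_INVALID - see the SET_KEY error path below - which is what
 * identifies it here.)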
3563 */ 3564 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && 3565 key->hw_key_idx == STA_KEY_IDX_INVALID) { 3566 IWL_DEBUG_MAC80211(mvm, 3567 "skip invalid idx key programming during restart\n"); 3568 ret = 0; 3569 break; 3570 } 3571 3572 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && 3573 sta && iwl_mvm_has_new_rx_api(mvm) && 3574 key->flags & IEEE80211_KEY_FLAG_PAIRWISE && 3575 (key->cipher == WLAN_CIPHER_SUITE_CCMP || 3576 key->cipher == WLAN_CIPHER_SUITE_GCMP || 3577 key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) { 3578 struct ieee80211_key_seq seq; 3579 int tid, q; 3580 3581 mvmsta = iwl_mvm_sta_from_mac80211(sta); 3582 WARN_ON(rcu_access_pointer(mvmsta->ptk_pn[keyidx])); 3583 ptk_pn = kzalloc(struct_size(ptk_pn, q, 3584 mvm->trans->num_rx_queues), 3585 GFP_KERNEL); 3586 if (!ptk_pn) { 3587 ret = -ENOMEM; 3588 break; 3589 } 3590 3591 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { 3592 ieee80211_get_key_rx_seq(key, tid, &seq); 3593 for (q = 0; q < mvm->trans->num_rx_queues; q++) 3594 memcpy(ptk_pn->q[q].pn[tid], 3595 seq.ccmp.pn, 3596 IEEE80211_CCMP_PN_LEN); 3597 } 3598 3599 rcu_assign_pointer(mvmsta->ptk_pn[keyidx], ptk_pn); 3600 } 3601 3602 /* in HW restart reuse the index, otherwise request a new one */ 3603 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) 3604 key_offset = key->hw_key_idx; 3605 else 3606 key_offset = STA_KEY_IDX_INVALID; 3607 3608 IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n"); 3609 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset); 3610 if (ret) { 3611 IWL_WARN(mvm, "set key failed\n"); 3612 key->hw_key_idx = STA_KEY_IDX_INVALID; 3613 /* 3614 * can't add key for RX, but we don't need it 3615 * in the device for TX so still return 0, 3616 * unless we have new TX API where we cannot 3617 * put key material into the TX_CMD 3618 */ 3619 if (iwl_mvm_has_new_tx_api(mvm)) 3620 ret = -EOPNOTSUPP; 3621 else 3622 ret = 0; 3623 } 3624 3625 break; 3626 case DISABLE_KEY: 3627 if (key->hw_key_idx == STA_KEY_IDX_INVALID) { 3628 ret = 0; 3629 break; 3630 } 3631 3632 if (sta && iwl_mvm_has_new_rx_api(mvm) && 3633 key->flags & IEEE80211_KEY_FLAG_PAIRWISE && 3634 (key->cipher == WLAN_CIPHER_SUITE_CCMP || 3635 key->cipher == WLAN_CIPHER_SUITE_GCMP || 3636 key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) { 3637 mvmsta = iwl_mvm_sta_from_mac80211(sta); 3638 ptk_pn = rcu_dereference_protected( 3639 mvmsta->ptk_pn[keyidx], 3640 lockdep_is_held(&mvm->mutex)); 3641 RCU_INIT_POINTER(mvmsta->ptk_pn[keyidx], NULL); 3642 if (ptk_pn) 3643 kfree_rcu(ptk_pn, rcu_head); 3644 } 3645 3646 IWL_DEBUG_MAC80211(mvm, "disable hwcrypto key\n"); 3647 ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key); 3648 break; 3649 default: 3650 ret = -EINVAL; 3651 } 3652 3653 mutex_unlock(&mvm->mutex); 3654 return ret; 3655 } 3656 3657 static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw, 3658 struct ieee80211_vif *vif, 3659 struct ieee80211_key_conf *keyconf, 3660 struct ieee80211_sta *sta, 3661 u32 iv32, u16 *phase1key) 3662 { 3663 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3664 3665 if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID) 3666 return; 3667 3668 iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key); 3669 } 3670 3671 3672 static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait, 3673 struct iwl_rx_packet *pkt, void *data) 3674 { 3675 struct iwl_mvm *mvm = 3676 container_of(notif_wait, struct iwl_mvm, notif_wait); 3677 struct iwl_hs20_roc_res *resp; 3678 int resp_len = iwl_rx_packet_payload_len(pkt); 3679 struct iwl_mvm_time_event_data 
*te_data = data; 3680 3681 if (WARN_ON(pkt->hdr.cmd != HOT_SPOT_CMD)) 3682 return true; 3683 3684 if (WARN_ON_ONCE(resp_len != sizeof(*resp))) { 3685 IWL_ERR(mvm, "Invalid HOT_SPOT_CMD response\n"); 3686 return true; 3687 } 3688 3689 resp = (void *)pkt->data; 3690 3691 IWL_DEBUG_TE(mvm, 3692 "Aux ROC: Received response from ucode: status=%d uid=%d\n", 3693 resp->status, resp->event_unique_id); 3694 3695 te_data->uid = le32_to_cpu(resp->event_unique_id); 3696 IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n", 3697 te_data->uid); 3698 3699 spin_lock_bh(&mvm->time_event_lock); 3700 list_add_tail(&te_data->list, &mvm->aux_roc_te_list); 3701 spin_unlock_bh(&mvm->time_event_lock); 3702 3703 return true; 3704 } 3705 3706 #define AUX_ROC_MIN_DURATION MSEC_TO_TU(100) 3707 #define AUX_ROC_MIN_DELAY MSEC_TO_TU(200) 3708 #define AUX_ROC_MAX_DELAY MSEC_TO_TU(600) 3709 #define AUX_ROC_SAFETY_BUFFER MSEC_TO_TU(20) 3710 #define AUX_ROC_MIN_SAFETY_BUFFER MSEC_TO_TU(10) 3711 static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, 3712 struct ieee80211_channel *channel, 3713 struct ieee80211_vif *vif, 3714 int duration) 3715 { 3716 int res; 3717 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3718 struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data; 3719 static const u16 time_event_response[] = { HOT_SPOT_CMD }; 3720 struct iwl_notification_wait wait_time_event; 3721 u32 dtim_interval = vif->bss_conf.dtim_period * 3722 vif->bss_conf.beacon_int; 3723 u32 req_dur, delay; 3724 struct iwl_hs20_roc_req aux_roc_req = { 3725 .action = cpu_to_le32(FW_CTXT_ACTION_ADD), 3726 .id_and_color = 3727 cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)), 3728 .sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id), 3729 }; 3730 struct iwl_hs20_roc_req_tail *tail = iwl_mvm_chan_info_cmd_tail(mvm, 3731 &aux_roc_req.channel_info); 3732 u16 len = sizeof(aux_roc_req) - iwl_mvm_chan_info_padding(mvm); 3733 3734 /* Set the channel info data */ 3735 iwl_mvm_set_chan_info(mvm, &aux_roc_req.channel_info, channel->hw_value, 3736 (channel->band == NL80211_BAND_2GHZ) ? 3737 PHY_BAND_24 : PHY_BAND_5, 3738 PHY_VHT_CHANNEL_MODE20, 3739 0); 3740 3741 /* Set the time and duration */ 3742 tail->apply_time = cpu_to_le32(iwl_mvm_get_systime(mvm)); 3743 3744 delay = AUX_ROC_MIN_DELAY; 3745 req_dur = MSEC_TO_TU(duration); 3746 3747 /* 3748 * If we are associated we want the delay time to be at least one 3749 * dtim interval so that the FW can wait until after the DTIM and 3750 * then start the time event, this will potentially allow us to 3751 * remain off-channel for the max duration. 3752 * Since we want to use almost a whole dtim interval we would also 3753 * like the delay to be for 2-3 dtim intervals, in case there are 3754 * other time events with higher priority. 
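 * For example, when associated with beacon_int = 100 TU and
 * dtim_period = 2, the dtim_interval computed above is 200 TU: the delay
 * becomes min(3 * 200, AUX_ROC_MAX_DELAY), and a requested duration of
 * 200 TU or more is trimmed down to 200 - AUX_ROC_SAFETY_BUFFER TU.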
3755 */ 3756 if (vif->bss_conf.assoc) { 3757 delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY); 3758 /* We cannot remain off-channel longer than the DTIM interval */ 3759 if (dtim_interval <= req_dur) { 3760 req_dur = dtim_interval - AUX_ROC_SAFETY_BUFFER; 3761 if (req_dur <= AUX_ROC_MIN_DURATION) 3762 req_dur = dtim_interval - 3763 AUX_ROC_MIN_SAFETY_BUFFER; 3764 } 3765 } 3766 3767 tail->duration = cpu_to_le32(req_dur); 3768 tail->apply_time_max_delay = cpu_to_le32(delay); 3769 3770 IWL_DEBUG_TE(mvm, 3771 "ROC: Requesting to remain on channel %u for %ums (requested = %ums, max_delay = %ums, dtim_interval = %ums)\n", 3772 channel->hw_value, req_dur, duration, delay, 3773 dtim_interval); 3774 /* Set the node address */ 3775 memcpy(tail->node_addr, vif->addr, ETH_ALEN); 3776 3777 lockdep_assert_held(&mvm->mutex); 3778 3779 spin_lock_bh(&mvm->time_event_lock); 3780 3781 if (WARN_ON(te_data->id == HOT_SPOT_CMD)) { 3782 spin_unlock_bh(&mvm->time_event_lock); 3783 return -EIO; 3784 } 3785 3786 te_data->vif = vif; 3787 te_data->duration = duration; 3788 te_data->id = HOT_SPOT_CMD; 3789 3790 spin_unlock_bh(&mvm->time_event_lock); 3791 3792 /* 3793 * Use a notification wait, which really just processes the 3794 * command response and doesn't wait for anything, in order 3795 * to be able to process the response and get the UID inside 3796 * the RX path. Using CMD_WANT_SKB doesn't work because it 3797 * stores the buffer and then wakes up this thread, by which 3798 * time another notification (that the time event started) 3799 * might already be processed unsuccessfully. 3800 */ 3801 iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event, 3802 time_event_response, 3803 ARRAY_SIZE(time_event_response), 3804 iwl_mvm_rx_aux_roc, te_data); 3805 3806 res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, len, 3807 &aux_roc_req); 3808 3809 if (res) { 3810 IWL_ERR(mvm, "Couldn't send HOT_SPOT_CMD: %d\n", res); 3811 iwl_remove_notification(&mvm->notif_wait, &wait_time_event); 3812 goto out_clear_te; 3813 } 3814 3815 /* No need to wait for anything, so just pass 1 (0 isn't valid) */ 3816 res = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1); 3817 /* should never fail */ 3818 WARN_ON_ONCE(res); 3819 3820 if (res) { 3821 out_clear_te: 3822 spin_lock_bh(&mvm->time_event_lock); 3823 iwl_mvm_te_clear_data(mvm, te_data); 3824 spin_unlock_bh(&mvm->time_event_lock); 3825 } 3826 3827 return res; 3828 } 3829 3830 static int iwl_mvm_roc(struct ieee80211_hw *hw, 3831 struct ieee80211_vif *vif, 3832 struct ieee80211_channel *channel, 3833 int duration, 3834 enum ieee80211_roc_type type) 3835 { 3836 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3837 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3838 struct cfg80211_chan_def chandef; 3839 struct iwl_mvm_phy_ctxt *phy_ctxt; 3840 int ret, i; 3841 3842 IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value, 3843 duration, type); 3844 3845 /* 3846 * Flush the done work, just in case it's still pending, so that 3847 * the work it does can complete and we can accept new frames. 
3848 */ 3849 flush_work(&mvm->roc_done_wk); 3850 3851 mutex_lock(&mvm->mutex); 3852 3853 switch (vif->type) { 3854 case NL80211_IFTYPE_STATION: 3855 if (fw_has_capa(&mvm->fw->ucode_capa, 3856 IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) { 3857 /* Use aux roc framework (HS20) */ 3858 ret = iwl_mvm_send_aux_roc_cmd(mvm, channel, 3859 vif, duration); 3860 goto out_unlock; 3861 } 3862 IWL_ERR(mvm, "hotspot not supported\n"); 3863 ret = -EINVAL; 3864 goto out_unlock; 3865 case NL80211_IFTYPE_P2P_DEVICE: 3866 /* handle below */ 3867 break; 3868 default: 3869 IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type); 3870 ret = -EINVAL; 3871 goto out_unlock; 3872 } 3873 3874 for (i = 0; i < NUM_PHY_CTX; i++) { 3875 phy_ctxt = &mvm->phy_ctxts[i]; 3876 if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt) 3877 continue; 3878 3879 if (phy_ctxt->ref && channel == phy_ctxt->channel) { 3880 /* 3881 * Unbind the P2P_DEVICE from the current PHY context, 3882 * and if the PHY context is not used remove it. 3883 */ 3884 ret = iwl_mvm_binding_remove_vif(mvm, vif); 3885 if (WARN(ret, "Failed unbinding P2P_DEVICE\n")) 3886 goto out_unlock; 3887 3888 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); 3889 3890 /* Bind the P2P_DEVICE to the current PHY Context */ 3891 mvmvif->phy_ctxt = phy_ctxt; 3892 3893 ret = iwl_mvm_binding_add_vif(mvm, vif); 3894 if (WARN(ret, "Failed binding P2P_DEVICE\n")) 3895 goto out_unlock; 3896 3897 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); 3898 goto schedule_time_event; 3899 } 3900 } 3901 3902 /* Need to update the PHY context only if the ROC channel changed */ 3903 if (channel == mvmvif->phy_ctxt->channel) 3904 goto schedule_time_event; 3905 3906 cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT); 3907 3908 /* 3909 * Change the PHY context configuration as it is currently referenced 3910 * only by the P2P Device MAC 3911 */ 3912 if (mvmvif->phy_ctxt->ref == 1) { 3913 ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt, 3914 &chandef, 1, 1); 3915 if (ret) 3916 goto out_unlock; 3917 } else { 3918 /* 3919 * The PHY context is shared with other MACs. 
Need to remove the 3920 * P2P Device from the binding, allocate an new PHY context and 3921 * create a new binding 3922 */ 3923 phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); 3924 if (!phy_ctxt) { 3925 ret = -ENOSPC; 3926 goto out_unlock; 3927 } 3928 3929 ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef, 3930 1, 1); 3931 if (ret) { 3932 IWL_ERR(mvm, "Failed to change PHY context\n"); 3933 goto out_unlock; 3934 } 3935 3936 /* Unbind the P2P_DEVICE from the current PHY context */ 3937 ret = iwl_mvm_binding_remove_vif(mvm, vif); 3938 if (WARN(ret, "Failed unbinding P2P_DEVICE\n")) 3939 goto out_unlock; 3940 3941 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); 3942 3943 /* Bind the P2P_DEVICE to the new allocated PHY context */ 3944 mvmvif->phy_ctxt = phy_ctxt; 3945 3946 ret = iwl_mvm_binding_add_vif(mvm, vif); 3947 if (WARN(ret, "Failed binding P2P_DEVICE\n")) 3948 goto out_unlock; 3949 3950 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); 3951 } 3952 3953 schedule_time_event: 3954 /* Schedule the time events */ 3955 ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type); 3956 3957 out_unlock: 3958 mutex_unlock(&mvm->mutex); 3959 IWL_DEBUG_MAC80211(mvm, "leave\n"); 3960 return ret; 3961 } 3962 3963 static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw) 3964 { 3965 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3966 3967 IWL_DEBUG_MAC80211(mvm, "enter\n"); 3968 3969 mutex_lock(&mvm->mutex); 3970 iwl_mvm_stop_roc(mvm); 3971 mutex_unlock(&mvm->mutex); 3972 3973 IWL_DEBUG_MAC80211(mvm, "leave\n"); 3974 return 0; 3975 } 3976 3977 struct iwl_mvm_ftm_responder_iter_data { 3978 bool responder; 3979 struct ieee80211_chanctx_conf *ctx; 3980 }; 3981 3982 static void iwl_mvm_ftm_responder_chanctx_iter(void *_data, u8 *mac, 3983 struct ieee80211_vif *vif) 3984 { 3985 struct iwl_mvm_ftm_responder_iter_data *data = _data; 3986 3987 if (rcu_access_pointer(vif->chanctx_conf) == data->ctx && 3988 vif->type == NL80211_IFTYPE_AP && vif->bss_conf.ftmr_params) 3989 data->responder = true; 3990 } 3991 3992 static bool iwl_mvm_is_ftm_responder_chanctx(struct iwl_mvm *mvm, 3993 struct ieee80211_chanctx_conf *ctx) 3994 { 3995 struct iwl_mvm_ftm_responder_iter_data data = { 3996 .responder = false, 3997 .ctx = ctx, 3998 }; 3999 4000 ieee80211_iterate_active_interfaces_atomic(mvm->hw, 4001 IEEE80211_IFACE_ITER_NORMAL, 4002 iwl_mvm_ftm_responder_chanctx_iter, 4003 &data); 4004 return data.responder; 4005 } 4006 4007 static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm, 4008 struct ieee80211_chanctx_conf *ctx) 4009 { 4010 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; 4011 struct iwl_mvm_phy_ctxt *phy_ctxt; 4012 bool responder = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx); 4013 struct cfg80211_chan_def *def = responder ? 
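	/*
	 * For an FTM responder use the full configured chandef rather than
	 * the narrower min_def, presumably so the PHY context matches the
	 * AP's actual bandwidth.
	 */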
&ctx->def : &ctx->min_def; 4014 int ret; 4015 4016 lockdep_assert_held(&mvm->mutex); 4017 4018 IWL_DEBUG_MAC80211(mvm, "Add channel context\n"); 4019 4020 phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); 4021 if (!phy_ctxt) { 4022 ret = -ENOSPC; 4023 goto out; 4024 } 4025 4026 ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, def, 4027 ctx->rx_chains_static, 4028 ctx->rx_chains_dynamic); 4029 if (ret) { 4030 IWL_ERR(mvm, "Failed to add PHY context\n"); 4031 goto out; 4032 } 4033 4034 iwl_mvm_phy_ctxt_ref(mvm, phy_ctxt); 4035 *phy_ctxt_id = phy_ctxt->id; 4036 out: 4037 return ret; 4038 } 4039 4040 static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw, 4041 struct ieee80211_chanctx_conf *ctx) 4042 { 4043 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4044 int ret; 4045 4046 mutex_lock(&mvm->mutex); 4047 ret = __iwl_mvm_add_chanctx(mvm, ctx); 4048 mutex_unlock(&mvm->mutex); 4049 4050 return ret; 4051 } 4052 4053 static void __iwl_mvm_remove_chanctx(struct iwl_mvm *mvm, 4054 struct ieee80211_chanctx_conf *ctx) 4055 { 4056 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; 4057 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; 4058 4059 lockdep_assert_held(&mvm->mutex); 4060 4061 iwl_mvm_phy_ctxt_unref(mvm, phy_ctxt); 4062 } 4063 4064 static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw, 4065 struct ieee80211_chanctx_conf *ctx) 4066 { 4067 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4068 4069 mutex_lock(&mvm->mutex); 4070 __iwl_mvm_remove_chanctx(mvm, ctx); 4071 mutex_unlock(&mvm->mutex); 4072 } 4073 4074 static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw, 4075 struct ieee80211_chanctx_conf *ctx, 4076 u32 changed) 4077 { 4078 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4079 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; 4080 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; 4081 bool responder = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx); 4082 struct cfg80211_chan_def *def = responder ? &ctx->def : &ctx->min_def; 4083 4084 if (WARN_ONCE((phy_ctxt->ref > 1) && 4085 (changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH | 4086 IEEE80211_CHANCTX_CHANGE_RX_CHAINS | 4087 IEEE80211_CHANCTX_CHANGE_RADAR | 4088 IEEE80211_CHANCTX_CHANGE_MIN_WIDTH)), 4089 "Cannot change PHY. Ref=%d, changed=0x%X\n", 4090 phy_ctxt->ref, changed)) 4091 return; 4092 4093 mutex_lock(&mvm->mutex); 4094 4095 /* we are only changing the min_width, may be a noop */ 4096 if (changed == IEEE80211_CHANCTX_CHANGE_MIN_WIDTH) { 4097 if (phy_ctxt->width == def->width) 4098 goto out_unlock; 4099 4100 /* we are just toggling between 20_NOHT and 20 */ 4101 if (phy_ctxt->width <= NL80211_CHAN_WIDTH_20 && 4102 def->width <= NL80211_CHAN_WIDTH_20) 4103 goto out_unlock; 4104 } 4105 4106 iwl_mvm_bt_coex_vif_change(mvm); 4107 iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, def, 4108 ctx->rx_chains_static, 4109 ctx->rx_chains_dynamic); 4110 4111 out_unlock: 4112 mutex_unlock(&mvm->mutex); 4113 } 4114 4115 static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, 4116 struct ieee80211_vif *vif, 4117 struct ieee80211_chanctx_conf *ctx, 4118 bool switching_chanctx) 4119 { 4120 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; 4121 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; 4122 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 4123 int ret; 4124 4125 lockdep_assert_held(&mvm->mutex); 4126 4127 mvmvif->phy_ctxt = phy_ctxt; 4128 4129 switch (vif->type) { 4130 case NL80211_IFTYPE_AP: 4131 /* only needed if we're switching chanctx (i.e. 
during CSA) */ 4132 if (switching_chanctx) { 4133 mvmvif->ap_ibss_active = true; 4134 break; 4135 } 4136 /* fall through */ 4137 case NL80211_IFTYPE_ADHOC: 4138 /* 4139 * The AP binding flow is handled as part of the start_ap flow 4140 * (in bss_info_changed), similarly for IBSS. 4141 */ 4142 ret = 0; 4143 goto out; 4144 case NL80211_IFTYPE_STATION: 4145 mvmvif->csa_bcn_pending = false; 4146 break; 4147 case NL80211_IFTYPE_MONITOR: 4148 /* always disable PS when a monitor interface is active */ 4149 mvmvif->ps_disabled = true; 4150 break; 4151 default: 4152 ret = -EINVAL; 4153 goto out; 4154 } 4155 4156 ret = iwl_mvm_binding_add_vif(mvm, vif); 4157 if (ret) 4158 goto out; 4159 4160 /* 4161 * Power state must be updated before quotas, 4162 * otherwise fw will complain. 4163 */ 4164 iwl_mvm_power_update_mac(mvm); 4165 4166 /* Setting the quota at this stage is only required for monitor 4167 * interfaces. For the other types, the bss_info changed flow 4168 * will handle quota settings. 4169 */ 4170 if (vif->type == NL80211_IFTYPE_MONITOR) { 4171 mvmvif->monitor_active = true; 4172 ret = iwl_mvm_update_quotas(mvm, false, NULL); 4173 if (ret) 4174 goto out_remove_binding; 4175 4176 ret = iwl_mvm_add_snif_sta(mvm, vif); 4177 if (ret) 4178 goto out_remove_binding; 4179 4180 } 4181 4182 /* Handle binding during CSA */ 4183 if (vif->type == NL80211_IFTYPE_AP) { 4184 iwl_mvm_update_quotas(mvm, false, NULL); 4185 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 4186 } 4187 4188 if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) { 4189 mvmvif->csa_bcn_pending = true; 4190 4191 if (!fw_has_capa(&mvm->fw->ucode_capa, 4192 IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) { 4193 u32 duration = 3 * vif->bss_conf.beacon_int; 4194 4195 4196 /* iwl_mvm_protect_session() reads directly from the 4197 * device (the system time), so make sure it is 4198 * available. 4199 */ 4200 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_CSA); 4201 if (ret) 4202 goto out_remove_binding; 4203 4204 /* Protect the session to make sure we hear the first 4205 * beacon on the new channel. 
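			 * The protection below spans three beacon intervals
			 * (e.g. 300 TU assuming a typical 100 TU beacon
			 * interval), so a couple of missed beacons on the new
			 * channel are still tolerated.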
4206 */ 4207 iwl_mvm_protect_session(mvm, vif, duration, duration, 4208 vif->bss_conf.beacon_int / 2, 4209 true); 4210 4211 iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_CSA); 4212 } 4213 4214 iwl_mvm_update_quotas(mvm, false, NULL); 4215 } 4216 4217 goto out; 4218 4219 out_remove_binding: 4220 iwl_mvm_binding_remove_vif(mvm, vif); 4221 iwl_mvm_power_update_mac(mvm); 4222 out: 4223 if (ret) 4224 mvmvif->phy_ctxt = NULL; 4225 return ret; 4226 } 4227 static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw, 4228 struct ieee80211_vif *vif, 4229 struct ieee80211_chanctx_conf *ctx) 4230 { 4231 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4232 int ret; 4233 4234 mutex_lock(&mvm->mutex); 4235 ret = __iwl_mvm_assign_vif_chanctx(mvm, vif, ctx, false); 4236 mutex_unlock(&mvm->mutex); 4237 4238 return ret; 4239 } 4240 4241 static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm, 4242 struct ieee80211_vif *vif, 4243 struct ieee80211_chanctx_conf *ctx, 4244 bool switching_chanctx) 4245 { 4246 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 4247 struct ieee80211_vif *disabled_vif = NULL; 4248 4249 lockdep_assert_held(&mvm->mutex); 4250 4251 iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data); 4252 4253 switch (vif->type) { 4254 case NL80211_IFTYPE_ADHOC: 4255 goto out; 4256 case NL80211_IFTYPE_MONITOR: 4257 mvmvif->monitor_active = false; 4258 mvmvif->ps_disabled = false; 4259 iwl_mvm_rm_snif_sta(mvm, vif); 4260 break; 4261 case NL80211_IFTYPE_AP: 4262 /* This part is triggered only during CSA */ 4263 if (!switching_chanctx || !mvmvif->ap_ibss_active) 4264 goto out; 4265 4266 mvmvif->csa_countdown = false; 4267 4268 /* Set CS bit on all the stations */ 4269 iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true); 4270 4271 /* Save blocked iface, the timeout is set on the next beacon */ 4272 rcu_assign_pointer(mvm->csa_tx_blocked_vif, vif); 4273 4274 mvmvif->ap_ibss_active = false; 4275 break; 4276 case NL80211_IFTYPE_STATION: 4277 if (!switching_chanctx) 4278 break; 4279 4280 disabled_vif = vif; 4281 4282 if (!fw_has_capa(&mvm->fw->ucode_capa, 4283 IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) 4284 iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL); 4285 break; 4286 default: 4287 break; 4288 } 4289 4290 iwl_mvm_update_quotas(mvm, false, disabled_vif); 4291 iwl_mvm_binding_remove_vif(mvm, vif); 4292 4293 out: 4294 mvmvif->phy_ctxt = NULL; 4295 iwl_mvm_power_update_mac(mvm); 4296 } 4297 4298 static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw, 4299 struct ieee80211_vif *vif, 4300 struct ieee80211_chanctx_conf *ctx) 4301 { 4302 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4303 4304 mutex_lock(&mvm->mutex); 4305 __iwl_mvm_unassign_vif_chanctx(mvm, vif, ctx, false); 4306 mutex_unlock(&mvm->mutex); 4307 } 4308 4309 static int 4310 iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm, 4311 struct ieee80211_vif_chanctx_switch *vifs) 4312 { 4313 int ret; 4314 4315 mutex_lock(&mvm->mutex); 4316 __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true); 4317 __iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx); 4318 4319 ret = __iwl_mvm_add_chanctx(mvm, vifs[0].new_ctx); 4320 if (ret) { 4321 IWL_ERR(mvm, "failed to add new_ctx during channel switch\n"); 4322 goto out_reassign; 4323 } 4324 4325 ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx, 4326 true); 4327 if (ret) { 4328 IWL_ERR(mvm, 4329 "failed to assign new_ctx during channel switch\n"); 4330 goto out_remove; 4331 } 4332 4333 /* we don't support TDLS during DCM - can be caused by channel switch 
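	 * (DCM here means running more than one PHY context, i.e. operating
	 * on different channels, which is exactly what the check below
	 * detects)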
*/ 4334 if (iwl_mvm_phy_ctx_count(mvm) > 1) 4335 iwl_mvm_teardown_tdls_peers(mvm); 4336 4337 goto out; 4338 4339 out_remove: 4340 __iwl_mvm_remove_chanctx(mvm, vifs[0].new_ctx); 4341 4342 out_reassign: 4343 if (__iwl_mvm_add_chanctx(mvm, vifs[0].old_ctx)) { 4344 IWL_ERR(mvm, "failed to add old_ctx back after failure.\n"); 4345 goto out_restart; 4346 } 4347 4348 if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, 4349 true)) { 4350 IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n"); 4351 goto out_restart; 4352 } 4353 4354 goto out; 4355 4356 out_restart: 4357 /* things keep failing, better restart the hw */ 4358 iwl_mvm_nic_restart(mvm, false); 4359 4360 out: 4361 mutex_unlock(&mvm->mutex); 4362 4363 return ret; 4364 } 4365 4366 static int 4367 iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm, 4368 struct ieee80211_vif_chanctx_switch *vifs) 4369 { 4370 int ret; 4371 4372 mutex_lock(&mvm->mutex); 4373 __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true); 4374 4375 ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx, 4376 true); 4377 if (ret) { 4378 IWL_ERR(mvm, 4379 "failed to assign new_ctx during channel switch\n"); 4380 goto out_reassign; 4381 } 4382 4383 goto out; 4384 4385 out_reassign: 4386 if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, 4387 true)) { 4388 IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n"); 4389 goto out_restart; 4390 } 4391 4392 goto out; 4393 4394 out_restart: 4395 /* things keep failing, better restart the hw */ 4396 iwl_mvm_nic_restart(mvm, false); 4397 4398 out: 4399 mutex_unlock(&mvm->mutex); 4400 4401 return ret; 4402 } 4403 4404 static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw, 4405 struct ieee80211_vif_chanctx_switch *vifs, 4406 int n_vifs, 4407 enum ieee80211_chanctx_switch_mode mode) 4408 { 4409 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4410 int ret; 4411 4412 /* we only support a single-vif right now */ 4413 if (n_vifs > 1) 4414 return -EOPNOTSUPP; 4415 4416 switch (mode) { 4417 case CHANCTX_SWMODE_SWAP_CONTEXTS: 4418 ret = iwl_mvm_switch_vif_chanctx_swap(mvm, vifs); 4419 break; 4420 case CHANCTX_SWMODE_REASSIGN_VIF: 4421 ret = iwl_mvm_switch_vif_chanctx_reassign(mvm, vifs); 4422 break; 4423 default: 4424 ret = -EOPNOTSUPP; 4425 break; 4426 } 4427 4428 return ret; 4429 } 4430 4431 static int iwl_mvm_tx_last_beacon(struct ieee80211_hw *hw) 4432 { 4433 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4434 4435 return mvm->ibss_manager; 4436 } 4437 4438 static int iwl_mvm_set_tim(struct ieee80211_hw *hw, 4439 struct ieee80211_sta *sta, 4440 bool set) 4441 { 4442 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4443 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 4444 4445 if (!mvm_sta || !mvm_sta->vif) { 4446 IWL_ERR(mvm, "Station is not associated to a vif\n"); 4447 return -EINVAL; 4448 } 4449 4450 return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif); 4451 } 4452 4453 #ifdef CONFIG_NL80211_TESTMODE 4454 static const struct nla_policy iwl_mvm_tm_policy[IWL_MVM_TM_ATTR_MAX + 1] = { 4455 [IWL_MVM_TM_ATTR_CMD] = { .type = NLA_U32 }, 4456 [IWL_MVM_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 }, 4457 [IWL_MVM_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 }, 4458 }; 4459 4460 static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm, 4461 struct ieee80211_vif *vif, 4462 void *data, int len) 4463 { 4464 struct nlattr *tb[IWL_MVM_TM_ATTR_MAX + 1]; 4465 int err; 4466 u32 noa_duration; 4467 4468 err = nla_parse_deprecated(tb, 
IWL_MVM_TM_ATTR_MAX, data, len, 4469 iwl_mvm_tm_policy, NULL); 4470 if (err) 4471 return err; 4472 4473 if (!tb[IWL_MVM_TM_ATTR_CMD]) 4474 return -EINVAL; 4475 4476 switch (nla_get_u32(tb[IWL_MVM_TM_ATTR_CMD])) { 4477 case IWL_MVM_TM_CMD_SET_NOA: 4478 if (!vif || vif->type != NL80211_IFTYPE_AP || !vif->p2p || 4479 !vif->bss_conf.enable_beacon || 4480 !tb[IWL_MVM_TM_ATTR_NOA_DURATION]) 4481 return -EINVAL; 4482 4483 noa_duration = nla_get_u32(tb[IWL_MVM_TM_ATTR_NOA_DURATION]); 4484 if (noa_duration >= vif->bss_conf.beacon_int) 4485 return -EINVAL; 4486 4487 mvm->noa_duration = noa_duration; 4488 mvm->noa_vif = vif; 4489 4490 return iwl_mvm_update_quotas(mvm, true, NULL); 4491 case IWL_MVM_TM_CMD_SET_BEACON_FILTER: 4492 /* must be associated client vif - ignore authorized */ 4493 if (!vif || vif->type != NL80211_IFTYPE_STATION || 4494 !vif->bss_conf.assoc || !vif->bss_conf.dtim_period || 4495 !tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]) 4496 return -EINVAL; 4497 4498 if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])) 4499 return iwl_mvm_enable_beacon_filter(mvm, vif, 0); 4500 return iwl_mvm_disable_beacon_filter(mvm, vif, 0); 4501 } 4502 4503 return -EOPNOTSUPP; 4504 } 4505 4506 static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw, 4507 struct ieee80211_vif *vif, 4508 void *data, int len) 4509 { 4510 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4511 int err; 4512 4513 mutex_lock(&mvm->mutex); 4514 err = __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len); 4515 mutex_unlock(&mvm->mutex); 4516 4517 return err; 4518 } 4519 #endif 4520 4521 static void iwl_mvm_channel_switch(struct ieee80211_hw *hw, 4522 struct ieee80211_vif *vif, 4523 struct ieee80211_channel_switch *chsw) 4524 { 4525 /* By implementing this operation, we prevent mac80211 from 4526 * starting its own channel switch timer, so that we can call 4527 * ieee80211_chswitch_done() ourselves at the right time 4528 * (which is when the absence time event starts). 
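	 * The actual switch handling is done by the pre/post channel switch
	 * callbacks wired up in iwl_mvm_hw_ops below.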
4529 */ 4530 4531 IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw), 4532 "dummy channel switch op\n"); 4533 } 4534 4535 static int iwl_mvm_schedule_client_csa(struct iwl_mvm *mvm, 4536 struct ieee80211_vif *vif, 4537 struct ieee80211_channel_switch *chsw) 4538 { 4539 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 4540 struct iwl_chan_switch_te_cmd cmd = { 4541 .mac_id = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, 4542 mvmvif->color)), 4543 .action = cpu_to_le32(FW_CTXT_ACTION_ADD), 4544 .tsf = cpu_to_le32(chsw->timestamp), 4545 .cs_count = chsw->count, 4546 .cs_mode = chsw->block_tx, 4547 }; 4548 4549 lockdep_assert_held(&mvm->mutex); 4550 4551 if (chsw->delay) 4552 cmd.cs_delayed_bcn_count = 4553 DIV_ROUND_UP(chsw->delay, vif->bss_conf.beacon_int); 4554 4555 return iwl_mvm_send_cmd_pdu(mvm, 4556 WIDE_ID(MAC_CONF_GROUP, 4557 CHANNEL_SWITCH_TIME_EVENT_CMD), 4558 0, sizeof(cmd), &cmd); 4559 } 4560 4561 #define IWL_MAX_CSA_BLOCK_TX 1500 4562 static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, 4563 struct ieee80211_vif *vif, 4564 struct ieee80211_channel_switch *chsw) 4565 { 4566 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4567 struct ieee80211_vif *csa_vif; 4568 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 4569 u32 apply_time; 4570 int ret; 4571 4572 mutex_lock(&mvm->mutex); 4573 4574 mvmvif->csa_failed = false; 4575 4576 IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n", 4577 chsw->chandef.center_freq1); 4578 4579 iwl_fw_dbg_trigger_simple_stop(&mvm->fwrt, 4580 ieee80211_vif_to_wdev(vif), 4581 FW_DBG_TRIGGER_CHANNEL_SWITCH); 4582 4583 switch (vif->type) { 4584 case NL80211_IFTYPE_AP: 4585 csa_vif = 4586 rcu_dereference_protected(mvm->csa_vif, 4587 lockdep_is_held(&mvm->mutex)); 4588 if (WARN_ONCE(csa_vif && csa_vif->csa_active, 4589 "Another CSA is already in progress")) { 4590 ret = -EBUSY; 4591 goto out_unlock; 4592 } 4593 4594 /* we still didn't unblock tx. prevent new CS meanwhile */ 4595 if (rcu_dereference_protected(mvm->csa_tx_blocked_vif, 4596 lockdep_is_held(&mvm->mutex))) { 4597 ret = -EBUSY; 4598 goto out_unlock; 4599 } 4600 4601 rcu_assign_pointer(mvm->csa_vif, vif); 4602 4603 if (WARN_ONCE(mvmvif->csa_countdown, 4604 "Previous CSA countdown didn't complete")) { 4605 ret = -EBUSY; 4606 goto out_unlock; 4607 } 4608 4609 mvmvif->csa_target_freq = chsw->chandef.chan->center_freq; 4610 4611 break; 4612 case NL80211_IFTYPE_STATION: 4613 /* Schedule the time event to a bit before beacon 1, 4614 * to make sure we're in the new channel when the 4615 * GO/AP arrives. In case count <= 1 immediately schedule the 4616 * TE (this might result with some packet loss or connection 4617 * loss). 
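		 * apply_time below is expressed in device time (usec):
		 * (count - 1) beacon intervals minus the client switch-time
		 * margin, converted from TU by the multiplication by 1024.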
4618 */ 4619 if (chsw->count <= 1) 4620 apply_time = 0; 4621 else 4622 apply_time = chsw->device_timestamp + 4623 ((vif->bss_conf.beacon_int * (chsw->count - 1) - 4624 IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024); 4625 4626 if (chsw->block_tx) { 4627 iwl_mvm_csa_client_absent(mvm, vif); 4628 /* 4629 * In case of undetermined / long time with immediate 4630 * quiet monitor status to gracefully disconnect 4631 */ 4632 if (!chsw->count || 4633 chsw->count * vif->bss_conf.beacon_int > 4634 IWL_MAX_CSA_BLOCK_TX) 4635 schedule_delayed_work(&mvmvif->csa_work, 4636 msecs_to_jiffies(IWL_MAX_CSA_BLOCK_TX)); 4637 } 4638 4639 if (mvmvif->bf_data.bf_enabled) { 4640 ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); 4641 if (ret) 4642 goto out_unlock; 4643 } 4644 4645 if (fw_has_capa(&mvm->fw->ucode_capa, 4646 IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) 4647 iwl_mvm_schedule_client_csa(mvm, vif, chsw); 4648 else 4649 iwl_mvm_schedule_csa_period(mvm, vif, 4650 vif->bss_conf.beacon_int, 4651 apply_time); 4652 4653 mvmvif->csa_count = chsw->count; 4654 mvmvif->csa_misbehave = false; 4655 break; 4656 default: 4657 break; 4658 } 4659 4660 mvmvif->ps_disabled = true; 4661 4662 ret = iwl_mvm_power_update_ps(mvm); 4663 if (ret) 4664 goto out_unlock; 4665 4666 /* we won't be on this channel any longer */ 4667 iwl_mvm_teardown_tdls_peers(mvm); 4668 4669 out_unlock: 4670 mutex_unlock(&mvm->mutex); 4671 4672 return ret; 4673 } 4674 4675 static void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw, 4676 struct ieee80211_vif *vif, 4677 struct ieee80211_channel_switch *chsw) 4678 { 4679 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4680 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 4681 struct iwl_chan_switch_te_cmd cmd = { 4682 .mac_id = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, 4683 mvmvif->color)), 4684 .action = cpu_to_le32(FW_CTXT_ACTION_MODIFY), 4685 .tsf = cpu_to_le32(chsw->timestamp), 4686 .cs_count = chsw->count, 4687 .cs_mode = chsw->block_tx, 4688 }; 4689 4690 if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CS_MODIFY)) 4691 return; 4692 4693 if (chsw->count >= mvmvif->csa_count && chsw->block_tx) { 4694 if (mvmvif->csa_misbehave) { 4695 /* Second time, give up on this AP*/ 4696 iwl_mvm_abort_channel_switch(hw, vif); 4697 ieee80211_chswitch_done(vif, false); 4698 mvmvif->csa_misbehave = false; 4699 return; 4700 } 4701 mvmvif->csa_misbehave = true; 4702 } 4703 mvmvif->csa_count = chsw->count; 4704 4705 IWL_DEBUG_MAC80211(mvm, "Modify CSA on mac %d\n", mvmvif->id); 4706 4707 WARN_ON(iwl_mvm_send_cmd_pdu(mvm, 4708 WIDE_ID(MAC_CONF_GROUP, 4709 CHANNEL_SWITCH_TIME_EVENT_CMD), 4710 CMD_ASYNC, sizeof(cmd), &cmd)); 4711 } 4712 4713 static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop) 4714 { 4715 int i; 4716 4717 if (!iwl_mvm_has_new_tx_api(mvm)) { 4718 if (drop) { 4719 mutex_lock(&mvm->mutex); 4720 iwl_mvm_flush_tx_path(mvm, 4721 iwl_mvm_flushable_queues(mvm) & queues, 0); 4722 mutex_unlock(&mvm->mutex); 4723 } else { 4724 iwl_trans_wait_tx_queues_empty(mvm->trans, queues); 4725 } 4726 return; 4727 } 4728 4729 mutex_lock(&mvm->mutex); 4730 for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) { 4731 struct ieee80211_sta *sta; 4732 4733 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], 4734 lockdep_is_held(&mvm->mutex)); 4735 if (IS_ERR_OR_NULL(sta)) 4736 continue; 4737 4738 if (drop) 4739 iwl_mvm_flush_sta_tids(mvm, i, 0xFF, 0); 4740 else 4741 iwl_mvm_wait_sta_queues_empty(mvm, 4742 iwl_mvm_sta_from_mac80211(sta)); 4743 } 4744 mutex_unlock(&mvm->mutex); 
4745 } 4746 4747 static void iwl_mvm_mac_flush(struct ieee80211_hw *hw, 4748 struct ieee80211_vif *vif, u32 queues, bool drop) 4749 { 4750 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4751 struct iwl_mvm_vif *mvmvif; 4752 struct iwl_mvm_sta *mvmsta; 4753 struct ieee80211_sta *sta; 4754 int i; 4755 u32 msk = 0; 4756 4757 if (!vif) { 4758 iwl_mvm_flush_no_vif(mvm, queues, drop); 4759 return; 4760 } 4761 4762 if (vif->type != NL80211_IFTYPE_STATION) 4763 return; 4764 4765 /* Make sure we're done with the deferred traffic before flushing */ 4766 flush_work(&mvm->add_stream_wk); 4767 4768 mutex_lock(&mvm->mutex); 4769 mvmvif = iwl_mvm_vif_from_mac80211(vif); 4770 4771 /* flush the AP-station and all TDLS peers */ 4772 for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) { 4773 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], 4774 lockdep_is_held(&mvm->mutex)); 4775 if (IS_ERR_OR_NULL(sta)) 4776 continue; 4777 4778 mvmsta = iwl_mvm_sta_from_mac80211(sta); 4779 if (mvmsta->vif != vif) 4780 continue; 4781 4782 /* make sure only TDLS peers or the AP are flushed */ 4783 WARN_ON(i != mvmvif->ap_sta_id && !sta->tdls); 4784 4785 if (drop) { 4786 if (iwl_mvm_flush_sta(mvm, mvmsta, false, 0)) 4787 IWL_ERR(mvm, "flush request fail\n"); 4788 } else { 4789 msk |= mvmsta->tfd_queue_msk; 4790 if (iwl_mvm_has_new_tx_api(mvm)) 4791 iwl_mvm_wait_sta_queues_empty(mvm, mvmsta); 4792 } 4793 } 4794 4795 mutex_unlock(&mvm->mutex); 4796 4797 /* this can take a while, and we may need/want other operations 4798 * to succeed while doing this, so do it without the mutex held 4799 */ 4800 if (!drop && !iwl_mvm_has_new_tx_api(mvm)) 4801 iwl_trans_wait_tx_queues_empty(mvm->trans, msk); 4802 } 4803 4804 static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx, 4805 struct survey_info *survey) 4806 { 4807 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4808 int ret; 4809 4810 memset(survey, 0, sizeof(*survey)); 4811 4812 /* only support global statistics right now */ 4813 if (idx != 0) 4814 return -ENOENT; 4815 4816 if (!fw_has_capa(&mvm->fw->ucode_capa, 4817 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS)) 4818 return -ENOENT; 4819 4820 mutex_lock(&mvm->mutex); 4821 4822 if (iwl_mvm_firmware_running(mvm)) { 4823 ret = iwl_mvm_request_statistics(mvm, false); 4824 if (ret) 4825 goto out; 4826 } 4827 4828 survey->filled = SURVEY_INFO_TIME | 4829 SURVEY_INFO_TIME_RX | 4830 SURVEY_INFO_TIME_TX | 4831 SURVEY_INFO_TIME_SCAN; 4832 survey->time = mvm->accu_radio_stats.on_time_rf + 4833 mvm->radio_stats.on_time_rf; 4834 do_div(survey->time, USEC_PER_MSEC); 4835 4836 survey->time_rx = mvm->accu_radio_stats.rx_time + 4837 mvm->radio_stats.rx_time; 4838 do_div(survey->time_rx, USEC_PER_MSEC); 4839 4840 survey->time_tx = mvm->accu_radio_stats.tx_time + 4841 mvm->radio_stats.tx_time; 4842 do_div(survey->time_tx, USEC_PER_MSEC); 4843 4844 survey->time_scan = mvm->accu_radio_stats.on_time_scan + 4845 mvm->radio_stats.on_time_scan; 4846 do_div(survey->time_scan, USEC_PER_MSEC); 4847 4848 ret = 0; 4849 out: 4850 mutex_unlock(&mvm->mutex); 4851 return ret; 4852 } 4853 4854 static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, 4855 struct ieee80211_vif *vif, 4856 struct ieee80211_sta *sta, 4857 struct station_info *sinfo) 4858 { 4859 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4860 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 4861 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 4862 4863 if (mvmsta->avg_energy) { 4864 sinfo->signal_avg = mvmsta->avg_energy; 4865 sinfo->filled |= 
BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); 4866 } 4867 4868 /* if beacon filtering isn't on mac80211 does it anyway */ 4869 if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER)) 4870 return; 4871 4872 if (!vif->bss_conf.assoc) 4873 return; 4874 4875 mutex_lock(&mvm->mutex); 4876 4877 if (mvmvif->ap_sta_id != mvmsta->sta_id) 4878 goto unlock; 4879 4880 if (iwl_mvm_request_statistics(mvm, false)) 4881 goto unlock; 4882 4883 sinfo->rx_beacon = mvmvif->beacon_stats.num_beacons + 4884 mvmvif->beacon_stats.accu_num_beacons; 4885 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX); 4886 if (mvmvif->beacon_stats.avg_signal) { 4887 /* firmware only reports a value after RXing a few beacons */ 4888 sinfo->rx_beacon_signal_avg = mvmvif->beacon_stats.avg_signal; 4889 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG); 4890 } 4891 unlock: 4892 mutex_unlock(&mvm->mutex); 4893 } 4894 4895 static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm, 4896 struct ieee80211_vif *vif, 4897 const struct ieee80211_event *event) 4898 { 4899 #define CHECK_MLME_TRIGGER(_cnt, _fmt...) \ 4900 do { \ 4901 if ((trig_mlme->_cnt) && --(trig_mlme->_cnt)) \ 4902 break; \ 4903 iwl_fw_dbg_collect_trig(&(mvm)->fwrt, trig, _fmt); \ 4904 } while (0) 4905 4906 struct iwl_fw_dbg_trigger_tlv *trig; 4907 struct iwl_fw_dbg_trigger_mlme *trig_mlme; 4908 4909 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), 4910 FW_DBG_TRIGGER_MLME); 4911 if (!trig) 4912 return; 4913 4914 trig_mlme = (void *)trig->data; 4915 4916 if (event->u.mlme.data == ASSOC_EVENT) { 4917 if (event->u.mlme.status == MLME_DENIED) 4918 CHECK_MLME_TRIGGER(stop_assoc_denied, 4919 "DENIED ASSOC: reason %d", 4920 event->u.mlme.reason); 4921 else if (event->u.mlme.status == MLME_TIMEOUT) 4922 CHECK_MLME_TRIGGER(stop_assoc_timeout, 4923 "ASSOC TIMEOUT"); 4924 } else if (event->u.mlme.data == AUTH_EVENT) { 4925 if (event->u.mlme.status == MLME_DENIED) 4926 CHECK_MLME_TRIGGER(stop_auth_denied, 4927 "DENIED AUTH: reason %d", 4928 event->u.mlme.reason); 4929 else if (event->u.mlme.status == MLME_TIMEOUT) 4930 CHECK_MLME_TRIGGER(stop_auth_timeout, 4931 "AUTH TIMEOUT"); 4932 } else if (event->u.mlme.data == DEAUTH_RX_EVENT) { 4933 CHECK_MLME_TRIGGER(stop_rx_deauth, 4934 "DEAUTH RX %d", event->u.mlme.reason); 4935 } else if (event->u.mlme.data == DEAUTH_TX_EVENT) { 4936 CHECK_MLME_TRIGGER(stop_tx_deauth, 4937 "DEAUTH TX %d", event->u.mlme.reason); 4938 } 4939 #undef CHECK_MLME_TRIGGER 4940 } 4941 4942 static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm, 4943 struct ieee80211_vif *vif, 4944 const struct ieee80211_event *event) 4945 { 4946 struct iwl_fw_dbg_trigger_tlv *trig; 4947 struct iwl_fw_dbg_trigger_ba *ba_trig; 4948 4949 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), 4950 FW_DBG_TRIGGER_BA); 4951 if (!trig) 4952 return; 4953 4954 ba_trig = (void *)trig->data; 4955 4956 if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid))) 4957 return; 4958 4959 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, 4960 "BAR received from %pM, tid %d, ssn %d", 4961 event->u.ba.sta->addr, event->u.ba.tid, 4962 event->u.ba.ssn); 4963 } 4964 4965 static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw, 4966 struct ieee80211_vif *vif, 4967 const struct ieee80211_event *event) 4968 { 4969 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4970 4971 switch (event->type) { 4972 case MLME_EVENT: 4973 iwl_mvm_event_mlme_callback(mvm, vif, event); 4974 break; 4975 case BAR_RX_EVENT: 4976 iwl_mvm_event_bar_rx_callback(mvm, vif, event); 4977 
break; 4978 case BA_FRAME_TIMEOUT: 4979 iwl_mvm_event_frame_timeout_callback(mvm, vif, event->u.ba.sta, 4980 event->u.ba.tid); 4981 break; 4982 default: 4983 break; 4984 } 4985 } 4986 4987 void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, 4988 struct iwl_mvm_internal_rxq_notif *notif, 4989 u32 size) 4990 { 4991 u32 qmask = BIT(mvm->trans->num_rx_queues) - 1; 4992 int ret; 4993 4994 lockdep_assert_held(&mvm->mutex); 4995 4996 if (!iwl_mvm_has_new_rx_api(mvm)) 4997 return; 4998 4999 notif->cookie = mvm->queue_sync_cookie; 5000 5001 if (notif->sync) 5002 atomic_set(&mvm->queue_sync_counter, 5003 mvm->trans->num_rx_queues); 5004 5005 ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif, size); 5006 if (ret) { 5007 IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret); 5008 goto out; 5009 } 5010 5011 if (notif->sync) { 5012 ret = wait_event_timeout(mvm->rx_sync_waitq, 5013 atomic_read(&mvm->queue_sync_counter) == 0 || 5014 iwl_mvm_is_radio_killed(mvm), 5015 HZ); 5016 WARN_ON_ONCE(!ret && !iwl_mvm_is_radio_killed(mvm)); 5017 } 5018 5019 out: 5020 atomic_set(&mvm->queue_sync_counter, 0); 5021 mvm->queue_sync_cookie++; 5022 } 5023 5024 static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw) 5025 { 5026 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 5027 struct iwl_mvm_internal_rxq_notif data = { 5028 .type = IWL_MVM_RXQ_EMPTY, 5029 .sync = 1, 5030 }; 5031 5032 mutex_lock(&mvm->mutex); 5033 iwl_mvm_sync_rx_queues_internal(mvm, &data, sizeof(data)); 5034 mutex_unlock(&mvm->mutex); 5035 } 5036 5037 static int 5038 iwl_mvm_mac_get_ftm_responder_stats(struct ieee80211_hw *hw, 5039 struct ieee80211_vif *vif, 5040 struct cfg80211_ftm_responder_stats *stats) 5041 { 5042 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 5043 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 5044 5045 if (vif->p2p || vif->type != NL80211_IFTYPE_AP || 5046 !mvmvif->ap_ibss_active || !vif->bss_conf.ftm_responder) 5047 return -EINVAL; 5048 5049 mutex_lock(&mvm->mutex); 5050 *stats = mvm->ftm_resp_stats; 5051 mutex_unlock(&mvm->mutex); 5052 5053 stats->filled = BIT(NL80211_FTM_STATS_SUCCESS_NUM) | 5054 BIT(NL80211_FTM_STATS_PARTIAL_NUM) | 5055 BIT(NL80211_FTM_STATS_FAILED_NUM) | 5056 BIT(NL80211_FTM_STATS_ASAP_NUM) | 5057 BIT(NL80211_FTM_STATS_NON_ASAP_NUM) | 5058 BIT(NL80211_FTM_STATS_TOTAL_DURATION_MSEC) | 5059 BIT(NL80211_FTM_STATS_UNKNOWN_TRIGGERS_NUM) | 5060 BIT(NL80211_FTM_STATS_RESCHEDULE_REQUESTS_NUM) | 5061 BIT(NL80211_FTM_STATS_OUT_OF_WINDOW_TRIGGERS_NUM); 5062 5063 return 0; 5064 } 5065 5066 static int iwl_mvm_start_pmsr(struct ieee80211_hw *hw, 5067 struct ieee80211_vif *vif, 5068 struct cfg80211_pmsr_request *request) 5069 { 5070 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 5071 int ret; 5072 5073 mutex_lock(&mvm->mutex); 5074 ret = iwl_mvm_ftm_start(mvm, vif, request); 5075 mutex_unlock(&mvm->mutex); 5076 5077 return ret; 5078 } 5079 5080 static void iwl_mvm_abort_pmsr(struct ieee80211_hw *hw, 5081 struct ieee80211_vif *vif, 5082 struct cfg80211_pmsr_request *request) 5083 { 5084 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 5085 5086 mutex_lock(&mvm->mutex); 5087 iwl_mvm_ftm_abort(mvm, request); 5088 mutex_unlock(&mvm->mutex); 5089 } 5090 5091 static bool iwl_mvm_can_hw_csum(struct sk_buff *skb) 5092 { 5093 u8 protocol = ip_hdr(skb)->protocol; 5094 5095 if (!IS_ENABLED(CONFIG_INET)) 5096 return false; 5097 5098 return protocol == IPPROTO_TCP || protocol == IPPROTO_UDP; 5099 } 5100 5101 static bool iwl_mvm_mac_can_aggregate(struct ieee80211_hw *hw, 5102 struct sk_buff *head, 5103 
struct sk_buff *skb) 5104 { 5105 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 5106 5107 /* For now don't aggregate IPv6 in AMSDU */ 5108 if (skb->protocol != htons(ETH_P_IP)) 5109 return false; 5110 5111 if (!iwl_mvm_is_csum_supported(mvm)) 5112 return true; 5113 5114 return iwl_mvm_can_hw_csum(skb) == iwl_mvm_can_hw_csum(head); 5115 } 5116 5117 const struct ieee80211_ops iwl_mvm_hw_ops = { 5118 .tx = iwl_mvm_mac_tx, 5119 .wake_tx_queue = iwl_mvm_mac_wake_tx_queue, 5120 .ampdu_action = iwl_mvm_mac_ampdu_action, 5121 .start = iwl_mvm_mac_start, 5122 .reconfig_complete = iwl_mvm_mac_reconfig_complete, 5123 .stop = iwl_mvm_mac_stop, 5124 .add_interface = iwl_mvm_mac_add_interface, 5125 .remove_interface = iwl_mvm_mac_remove_interface, 5126 .config = iwl_mvm_mac_config, 5127 .prepare_multicast = iwl_mvm_prepare_multicast, 5128 .configure_filter = iwl_mvm_configure_filter, 5129 .config_iface_filter = iwl_mvm_config_iface_filter, 5130 .bss_info_changed = iwl_mvm_bss_info_changed, 5131 .hw_scan = iwl_mvm_mac_hw_scan, 5132 .cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan, 5133 .sta_pre_rcu_remove = iwl_mvm_sta_pre_rcu_remove, 5134 .sta_state = iwl_mvm_mac_sta_state, 5135 .sta_notify = iwl_mvm_mac_sta_notify, 5136 .allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames, 5137 .release_buffered_frames = iwl_mvm_mac_release_buffered_frames, 5138 .set_rts_threshold = iwl_mvm_mac_set_rts_threshold, 5139 .sta_rc_update = iwl_mvm_sta_rc_update, 5140 .conf_tx = iwl_mvm_mac_conf_tx, 5141 .mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx, 5142 .mgd_protect_tdls_discover = iwl_mvm_mac_mgd_protect_tdls_discover, 5143 .flush = iwl_mvm_mac_flush, 5144 .sched_scan_start = iwl_mvm_mac_sched_scan_start, 5145 .sched_scan_stop = iwl_mvm_mac_sched_scan_stop, 5146 .set_key = iwl_mvm_mac_set_key, 5147 .update_tkip_key = iwl_mvm_mac_update_tkip_key, 5148 .remain_on_channel = iwl_mvm_roc, 5149 .cancel_remain_on_channel = iwl_mvm_cancel_roc, 5150 .add_chanctx = iwl_mvm_add_chanctx, 5151 .remove_chanctx = iwl_mvm_remove_chanctx, 5152 .change_chanctx = iwl_mvm_change_chanctx, 5153 .assign_vif_chanctx = iwl_mvm_assign_vif_chanctx, 5154 .unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx, 5155 .switch_vif_chanctx = iwl_mvm_switch_vif_chanctx, 5156 5157 .start_ap = iwl_mvm_start_ap_ibss, 5158 .stop_ap = iwl_mvm_stop_ap_ibss, 5159 .join_ibss = iwl_mvm_start_ap_ibss, 5160 .leave_ibss = iwl_mvm_stop_ap_ibss, 5161 5162 .tx_last_beacon = iwl_mvm_tx_last_beacon, 5163 5164 .set_tim = iwl_mvm_set_tim, 5165 5166 .channel_switch = iwl_mvm_channel_switch, 5167 .pre_channel_switch = iwl_mvm_pre_channel_switch, 5168 .post_channel_switch = iwl_mvm_post_channel_switch, 5169 .abort_channel_switch = iwl_mvm_abort_channel_switch, 5170 .channel_switch_rx_beacon = iwl_mvm_channel_switch_rx_beacon, 5171 5172 .tdls_channel_switch = iwl_mvm_tdls_channel_switch, 5173 .tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch, 5174 .tdls_recv_channel_switch = iwl_mvm_tdls_recv_channel_switch, 5175 5176 .event_callback = iwl_mvm_mac_event_callback, 5177 5178 .sync_rx_queues = iwl_mvm_sync_rx_queues, 5179 5180 CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd) 5181 5182 #ifdef CONFIG_PM_SLEEP 5183 /* look at d3.c */ 5184 .suspend = iwl_mvm_suspend, 5185 .resume = iwl_mvm_resume, 5186 .set_wakeup = iwl_mvm_set_wakeup, 5187 .set_rekey_data = iwl_mvm_set_rekey_data, 5188 #if IS_ENABLED(CONFIG_IPV6) 5189 .ipv6_addr_change = iwl_mvm_ipv6_addr_change, 5190 #endif 5191 .set_default_unicast_key = iwl_mvm_set_default_unicast_key, 5192 #endif 5193 .get_survey = 
iwl_mvm_mac_get_survey, 5194 .sta_statistics = iwl_mvm_mac_sta_statistics, 5195 .get_ftm_responder_stats = iwl_mvm_mac_get_ftm_responder_stats, 5196 .start_pmsr = iwl_mvm_start_pmsr, 5197 .abort_pmsr = iwl_mvm_abort_pmsr, 5198 5199 .can_aggregate_in_amsdu = iwl_mvm_mac_can_aggregate, 5200 #ifdef CONFIG_IWLWIFI_DEBUGFS 5201 .sta_add_debugfs = iwl_mvm_sta_add_debugfs, 5202 #endif 5203 }; 5204
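
/*
 * Usage sketch (assumptions noted inline): iwl_mvm_hw_ops is the table
 * mac80211 dispatches into for this op mode.  A minimal outline of how such
 * an ops table is typically hooked up with the generic mac80211 API - the
 * driver's real allocation/registration path lives in its op-mode setup
 * code:
 *
 *	struct ieee80211_hw *hw;
 *	int ret;
 *
 *	hw = ieee80211_alloc_hw(sizeof(struct iwl_mvm), &iwl_mvm_hw_ops);
 *	if (!hw)
 *		return -ENOMEM;
 *	... fill in wiphy bands, interface combinations, HW flags ...
 *	ret = ieee80211_register_hw(hw);
 *	if (ret)
 *		ieee80211_free_hw(hw);
 */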