1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* 3 * Copyright (C) 2012-2014, 2018-2022 Intel Corporation 4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH 5 * Copyright (C) 2016-2017 Intel Deutschland GmbH 6 */ 7 #include <linux/kernel.h> 8 #include <linux/slab.h> 9 #include <linux/skbuff.h> 10 #include <linux/netdevice.h> 11 #include <linux/etherdevice.h> 12 #include <linux/ip.h> 13 #include <linux/if_arp.h> 14 #include <linux/time.h> 15 #include <net/mac80211.h> 16 #include <net/ieee80211_radiotap.h> 17 #include <net/tcp.h> 18 19 #include "iwl-drv.h" 20 #include "iwl-op-mode.h" 21 #include "iwl-io.h" 22 #include "mvm.h" 23 #include "sta.h" 24 #include "time-event.h" 25 #include "iwl-eeprom-parse.h" 26 #include "iwl-phy-db.h" 27 #include "testmode.h" 28 #include "fw/error-dump.h" 29 #include "iwl-prph.h" 30 #include "iwl-nvm-parse.h" 31 32 static const struct ieee80211_iface_limit iwl_mvm_limits[] = { 33 { 34 .max = 1, 35 .types = BIT(NL80211_IFTYPE_STATION), 36 }, 37 { 38 .max = 1, 39 .types = BIT(NL80211_IFTYPE_AP) | 40 BIT(NL80211_IFTYPE_P2P_CLIENT) | 41 BIT(NL80211_IFTYPE_P2P_GO), 42 }, 43 { 44 .max = 1, 45 .types = BIT(NL80211_IFTYPE_P2P_DEVICE), 46 }, 47 }; 48 49 static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = { 50 { 51 .num_different_channels = 2, 52 .max_interfaces = 3, 53 .limits = iwl_mvm_limits, 54 .n_limits = ARRAY_SIZE(iwl_mvm_limits), 55 }, 56 }; 57 58 static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = { 59 .max_peers = IWL_MVM_TOF_MAX_APS, 60 .report_ap_tsf = 1, 61 .randomize_mac_addr = 1, 62 63 .ftm = { 64 .supported = 1, 65 .asap = 1, 66 .non_asap = 1, 67 .request_lci = 1, 68 .request_civicloc = 1, 69 .trigger_based = 1, 70 .non_trigger_based = 1, 71 .max_bursts_exponent = -1, /* all supported */ 72 .max_ftms_per_burst = 0, /* no limits */ 73 .bandwidths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | 74 BIT(NL80211_CHAN_WIDTH_20) | 75 BIT(NL80211_CHAN_WIDTH_40) | 76 BIT(NL80211_CHAN_WIDTH_80) | 77 BIT(NL80211_CHAN_WIDTH_160), 78 .preambles = BIT(NL80211_PREAMBLE_LEGACY) | 79 BIT(NL80211_PREAMBLE_HT) | 80 BIT(NL80211_PREAMBLE_VHT) | 81 BIT(NL80211_PREAMBLE_HE), 82 }, 83 }; 84 85 static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, 86 enum set_key_cmd cmd, 87 struct ieee80211_vif *vif, 88 struct ieee80211_sta *sta, 89 struct ieee80211_key_conf *key); 90 91 static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm) 92 { 93 int i; 94 95 memset(mvm->phy_ctxts, 0, sizeof(mvm->phy_ctxts)); 96 for (i = 0; i < NUM_PHY_CTX; i++) { 97 mvm->phy_ctxts[i].id = i; 98 mvm->phy_ctxts[i].ref = 0; 99 } 100 } 101 102 struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, 103 const char *alpha2, 104 enum iwl_mcc_source src_id, 105 bool *changed) 106 { 107 struct ieee80211_regdomain *regd = NULL; 108 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); 109 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 110 struct iwl_mcc_update_resp *resp; 111 u8 resp_ver; 112 113 IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2); 114 115 lockdep_assert_held(&mvm->mutex); 116 117 resp = iwl_mvm_update_mcc(mvm, alpha2, src_id); 118 if (IS_ERR_OR_NULL(resp)) { 119 IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n", 120 PTR_ERR_OR_ZERO(resp)); 121 resp = NULL; 122 goto out; 123 } 124 125 if (changed) { 126 u32 status = le32_to_cpu(resp->status); 127 128 *changed = (status == MCC_RESP_NEW_CHAN_PROFILE || 129 status == MCC_RESP_ILLEGAL); 130 } 131 resp_ver = iwl_fw_lookup_notif_ver(mvm->fw, 
IWL_ALWAYS_LONG_GROUP, 132 MCC_UPDATE_CMD, 0); 133 IWL_DEBUG_LAR(mvm, "MCC update response version: %d\n", resp_ver); 134 135 regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg, 136 __le32_to_cpu(resp->n_channels), 137 resp->channels, 138 __le16_to_cpu(resp->mcc), 139 __le16_to_cpu(resp->geo_info), 140 __le16_to_cpu(resp->cap), resp_ver); 141 /* Store the return source id */ 142 src_id = resp->source_id; 143 if (IS_ERR_OR_NULL(regd)) { 144 IWL_DEBUG_LAR(mvm, "Could not get parse update from FW %d\n", 145 PTR_ERR_OR_ZERO(regd)); 146 goto out; 147 } 148 149 IWL_DEBUG_LAR(mvm, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n", 150 regd->alpha2, regd->alpha2[0], regd->alpha2[1], src_id); 151 mvm->lar_regdom_set = true; 152 mvm->mcc_src = src_id; 153 154 iwl_mei_set_country_code(__le16_to_cpu(resp->mcc)); 155 156 out: 157 kfree(resp); 158 return regd; 159 } 160 161 void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm) 162 { 163 bool changed; 164 struct ieee80211_regdomain *regd; 165 166 if (!iwl_mvm_is_lar_supported(mvm)) 167 return; 168 169 regd = iwl_mvm_get_current_regdomain(mvm, &changed); 170 if (!IS_ERR_OR_NULL(regd)) { 171 /* only update the regulatory core if changed */ 172 if (changed) 173 regulatory_set_wiphy_regd(mvm->hw->wiphy, regd); 174 175 kfree(regd); 176 } 177 } 178 179 struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm, 180 bool *changed) 181 { 182 return iwl_mvm_get_regdomain(mvm->hw->wiphy, "ZZ", 183 iwl_mvm_is_wifi_mcc_supported(mvm) ? 184 MCC_SOURCE_GET_CURRENT : 185 MCC_SOURCE_OLD_FW, changed); 186 } 187 188 int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm) 189 { 190 enum iwl_mcc_source used_src; 191 struct ieee80211_regdomain *regd; 192 int ret; 193 bool changed; 194 const struct ieee80211_regdomain *r = 195 wiphy_dereference(mvm->hw->wiphy, mvm->hw->wiphy->regd); 196 197 if (!r) 198 return -ENOENT; 199 200 /* save the last source in case we overwrite it below */ 201 used_src = mvm->mcc_src; 202 if (iwl_mvm_is_wifi_mcc_supported(mvm)) { 203 /* Notify the firmware we support wifi location updates */ 204 regd = iwl_mvm_get_current_regdomain(mvm, NULL); 205 if (!IS_ERR_OR_NULL(regd)) 206 kfree(regd); 207 } 208 209 /* Now set our last stored MCC and source */ 210 regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, r->alpha2, used_src, 211 &changed); 212 if (IS_ERR_OR_NULL(regd)) 213 return -EIO; 214 215 /* update cfg80211 if the regdomain was changed */ 216 if (changed) 217 ret = regulatory_set_wiphy_regd_sync(mvm->hw->wiphy, regd); 218 else 219 ret = 0; 220 221 kfree(regd); 222 return ret; 223 } 224 225 static const u8 he_if_types_ext_capa_sta[] = { 226 [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, 227 [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT, 228 [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, 229 }; 230 231 static const struct wiphy_iftype_ext_capab he_iftypes_ext_capa[] = { 232 { 233 .iftype = NL80211_IFTYPE_STATION, 234 .extended_capabilities = he_if_types_ext_capa_sta, 235 .extended_capabilities_mask = he_if_types_ext_capa_sta, 236 .extended_capabilities_len = sizeof(he_if_types_ext_capa_sta), 237 }, 238 }; 239 240 static int 241 iwl_mvm_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) 242 { 243 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 244 *tx_ant = iwl_mvm_get_valid_tx_ant(mvm); 245 *rx_ant = iwl_mvm_get_valid_rx_ant(mvm); 246 return 0; 247 } 248 249 int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) 250 { 251 struct ieee80211_hw *hw = mvm->hw; 252 int num_mac, ret, i; 253 static const u32 mvm_ciphers[] = { 254 
WLAN_CIPHER_SUITE_WEP40, 255 WLAN_CIPHER_SUITE_WEP104, 256 WLAN_CIPHER_SUITE_TKIP, 257 WLAN_CIPHER_SUITE_CCMP, 258 }; 259 #ifdef CONFIG_PM_SLEEP 260 bool unified = fw_has_capa(&mvm->fw->ucode_capa, 261 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); 262 #endif 263 264 /* Tell mac80211 our characteristics */ 265 ieee80211_hw_set(hw, SIGNAL_DBM); 266 ieee80211_hw_set(hw, SPECTRUM_MGMT); 267 ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS); 268 ieee80211_hw_set(hw, WANT_MONITOR_VIF); 269 ieee80211_hw_set(hw, SUPPORTS_PS); 270 ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS); 271 ieee80211_hw_set(hw, AMPDU_AGGREGATION); 272 ieee80211_hw_set(hw, TIMING_BEACON_ONLY); 273 ieee80211_hw_set(hw, CONNECTION_MONITOR); 274 ieee80211_hw_set(hw, CHANCTX_STA_CSA); 275 ieee80211_hw_set(hw, SUPPORT_FAST_XMIT); 276 ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS); 277 ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU); 278 ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR); 279 ieee80211_hw_set(hw, DEAUTH_NEED_MGD_TX_PREP); 280 ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW); 281 ieee80211_hw_set(hw, BUFF_MMPDU_TXQ); 282 ieee80211_hw_set(hw, STA_MMPDU_TXQ); 283 /* 284 * On older devices, enabling TX A-MSDU occasionally leads to 285 * something getting messed up, the command read from the FIFO 286 * gets out of sync and isn't a TX command, so that we have an 287 * assert EDC. 288 * 289 * It's not clear where the bug is, but since we didn't used to 290 * support A-MSDU until moving the mac80211 iTXQs, just leave it 291 * for older devices. We also don't see this issue on any newer 292 * devices. 293 */ 294 if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000) 295 ieee80211_hw_set(hw, TX_AMSDU); 296 ieee80211_hw_set(hw, TX_FRAG_LIST); 297 298 if (iwl_mvm_has_tlc_offload(mvm)) { 299 ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW); 300 ieee80211_hw_set(hw, HAS_RATE_CONTROL); 301 } 302 303 if (iwl_mvm_has_new_rx_api(mvm)) 304 ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER); 305 306 if (fw_has_capa(&mvm->fw->ucode_capa, 307 IWL_UCODE_TLV_CAPA_STA_PM_NOTIF)) { 308 ieee80211_hw_set(hw, AP_LINK_PS); 309 } else if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) { 310 /* 311 * we absolutely need this for the new TX API since that comes 312 * with many more queues than the current code can deal with 313 * for station powersave 314 */ 315 return -EINVAL; 316 } 317 318 if (mvm->trans->num_rx_queues > 1) 319 ieee80211_hw_set(hw, USES_RSS); 320 321 if (mvm->trans->max_skb_frags) 322 hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG; 323 324 hw->queues = IEEE80211_NUM_ACS; 325 hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE; 326 hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC | 327 IEEE80211_RADIOTAP_MCS_HAVE_STBC; 328 hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC | 329 IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED; 330 331 hw->radiotap_timestamp.units_pos = 332 IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US | 333 IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ; 334 /* this is the case for CCK frames, it's better (only 8) for OFDM */ 335 hw->radiotap_timestamp.accuracy = 22; 336 337 if (!iwl_mvm_has_tlc_offload(mvm)) 338 hw->rate_control_algorithm = RS_NAME; 339 340 hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES; 341 hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP; 342 hw->max_tx_fragments = mvm->trans->max_skb_frags; 343 344 BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 6); 345 memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers)); 346 hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers); 347 hw->wiphy->cipher_suites = mvm->ciphers; 348 349 
if (iwl_mvm_has_new_rx_api(mvm)) { 350 mvm->ciphers[hw->wiphy->n_cipher_suites] = 351 WLAN_CIPHER_SUITE_GCMP; 352 hw->wiphy->n_cipher_suites++; 353 mvm->ciphers[hw->wiphy->n_cipher_suites] = 354 WLAN_CIPHER_SUITE_GCMP_256; 355 hw->wiphy->n_cipher_suites++; 356 } 357 358 if (iwlwifi_mod_params.swcrypto) 359 IWL_ERR(mvm, 360 "iwlmvm doesn't allow to disable HW crypto, check swcrypto module parameter\n"); 361 if (!iwlwifi_mod_params.bt_coex_active) 362 IWL_ERR(mvm, 363 "iwlmvm doesn't allow to disable BT Coex, check bt_coex_active module parameter\n"); 364 365 ieee80211_hw_set(hw, MFP_CAPABLE); 366 mvm->ciphers[hw->wiphy->n_cipher_suites] = WLAN_CIPHER_SUITE_AES_CMAC; 367 hw->wiphy->n_cipher_suites++; 368 if (iwl_mvm_has_new_rx_api(mvm)) { 369 mvm->ciphers[hw->wiphy->n_cipher_suites] = 370 WLAN_CIPHER_SUITE_BIP_GMAC_128; 371 hw->wiphy->n_cipher_suites++; 372 mvm->ciphers[hw->wiphy->n_cipher_suites] = 373 WLAN_CIPHER_SUITE_BIP_GMAC_256; 374 hw->wiphy->n_cipher_suites++; 375 } 376 377 if (fw_has_capa(&mvm->fw->ucode_capa, 378 IWL_UCODE_TLV_CAPA_FTM_CALIBRATED)) { 379 wiphy_ext_feature_set(hw->wiphy, 380 NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER); 381 hw->wiphy->pmsr_capa = &iwl_mvm_pmsr_capa; 382 } 383 384 if (fw_has_capa(&mvm->fw->ucode_capa, 385 IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT)) 386 wiphy_ext_feature_set(hw->wiphy, 387 NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT); 388 389 ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS); 390 hw->wiphy->features |= 391 NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR | 392 NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR | 393 NL80211_FEATURE_ND_RANDOM_MAC_ADDR; 394 395 hw->sta_data_size = sizeof(struct iwl_mvm_sta); 396 hw->vif_data_size = sizeof(struct iwl_mvm_vif); 397 hw->chanctx_data_size = sizeof(u16); 398 hw->txq_data_size = sizeof(struct iwl_mvm_txq); 399 400 hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | 401 BIT(NL80211_IFTYPE_P2P_CLIENT) | 402 BIT(NL80211_IFTYPE_AP) | 403 BIT(NL80211_IFTYPE_P2P_GO) | 404 BIT(NL80211_IFTYPE_P2P_DEVICE) | 405 BIT(NL80211_IFTYPE_ADHOC); 406 407 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; 408 wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS); 409 410 /* The new Tx API does not allow to pass the key or keyid of a MPDU to 411 * the hw, preventing us to control which key(id) to use per MPDU. 412 * Till that's fixed we can't use Extended Key ID for the newer cards. 413 */ 414 if (!iwl_mvm_has_new_tx_api(mvm)) 415 wiphy_ext_feature_set(hw->wiphy, 416 NL80211_EXT_FEATURE_EXT_KEY_ID); 417 hw->wiphy->features |= NL80211_FEATURE_HT_IBSS; 418 419 hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR; 420 if (iwl_mvm_is_lar_supported(mvm)) 421 hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED; 422 else 423 hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG | 424 REGULATORY_DISABLE_BEACON_HINTS; 425 426 hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; 427 hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; 428 hw->wiphy->flags |= WIPHY_FLAG_SPLIT_SCAN_6GHZ; 429 430 hw->wiphy->iface_combinations = iwl_mvm_iface_combinations; 431 hw->wiphy->n_iface_combinations = 432 ARRAY_SIZE(iwl_mvm_iface_combinations); 433 434 hw->wiphy->max_remain_on_channel_duration = 10000; 435 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL; 436 437 /* Extract MAC address */ 438 memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN); 439 hw->wiphy->addresses = mvm->addresses; 440 hw->wiphy->n_addresses = 1; 441 442 /* Extract additional MAC addresses if available */ 443 num_mac = (mvm->nvm_data->n_hw_addrs > 1) ? 
444 min(IWL_MVM_MAX_ADDRESSES, mvm->nvm_data->n_hw_addrs) : 1; 445 446 for (i = 1; i < num_mac; i++) { 447 memcpy(mvm->addresses[i].addr, mvm->addresses[i-1].addr, 448 ETH_ALEN); 449 mvm->addresses[i].addr[5]++; 450 hw->wiphy->n_addresses++; 451 } 452 453 iwl_mvm_reset_phy_ctxts(mvm); 454 455 hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm); 456 457 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX; 458 459 BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK); 460 BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) || 461 IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK)); 462 463 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) 464 mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS; 465 else 466 mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS; 467 468 if (mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels) 469 hw->wiphy->bands[NL80211_BAND_2GHZ] = 470 &mvm->nvm_data->bands[NL80211_BAND_2GHZ]; 471 if (mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels) { 472 hw->wiphy->bands[NL80211_BAND_5GHZ] = 473 &mvm->nvm_data->bands[NL80211_BAND_5GHZ]; 474 475 if (fw_has_capa(&mvm->fw->ucode_capa, 476 IWL_UCODE_TLV_CAPA_BEAMFORMER) && 477 fw_has_api(&mvm->fw->ucode_capa, 478 IWL_UCODE_TLV_API_LQ_SS_PARAMS)) 479 hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap.cap |= 480 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE; 481 } 482 if (fw_has_capa(&mvm->fw->ucode_capa, 483 IWL_UCODE_TLV_CAPA_PSC_CHAN_SUPPORT) && 484 mvm->nvm_data->bands[NL80211_BAND_6GHZ].n_channels) 485 hw->wiphy->bands[NL80211_BAND_6GHZ] = 486 &mvm->nvm_data->bands[NL80211_BAND_6GHZ]; 487 488 hw->wiphy->hw_version = mvm->trans->hw_id; 489 490 if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM) 491 hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; 492 else 493 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; 494 495 hw->wiphy->max_sched_scan_reqs = 1; 496 hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX; 497 hw->wiphy->max_match_sets = iwl_umac_scan_get_max_profiles(mvm->fw); 498 /* we create the 802.11 header and zero length SSID IE. */ 499 hw->wiphy->max_sched_scan_ie_len = 500 SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2; 501 hw->wiphy->max_sched_scan_plans = IWL_MAX_SCHED_SCAN_PLANS; 502 hw->wiphy->max_sched_scan_plan_interval = U16_MAX; 503 504 /* 505 * the firmware uses u8 for num of iterations, but 0xff is saved for 506 * infinite loop, so the maximum number of iterations is actually 254. 
507 */ 508 hw->wiphy->max_sched_scan_plan_iterations = 254; 509 510 hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN | 511 NL80211_FEATURE_LOW_PRIORITY_SCAN | 512 NL80211_FEATURE_P2P_GO_OPPPS | 513 NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE | 514 NL80211_FEATURE_DYNAMIC_SMPS | 515 NL80211_FEATURE_STATIC_SMPS | 516 NL80211_FEATURE_SUPPORTS_WMM_ADMISSION; 517 518 if (fw_has_capa(&mvm->fw->ucode_capa, 519 IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT)) 520 hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION; 521 if (fw_has_capa(&mvm->fw->ucode_capa, 522 IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT)) 523 hw->wiphy->features |= NL80211_FEATURE_QUIET; 524 525 if (fw_has_capa(&mvm->fw->ucode_capa, 526 IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) 527 hw->wiphy->features |= 528 NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES; 529 530 if (fw_has_capa(&mvm->fw->ucode_capa, 531 IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)) 532 hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES; 533 534 if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_KEK_KCK_MATERIAL, 535 IWL_FW_CMD_VER_UNKNOWN) == 3) 536 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK; 537 538 if (fw_has_api(&mvm->fw->ucode_capa, 539 IWL_UCODE_TLV_API_SCAN_TSF_REPORT)) { 540 wiphy_ext_feature_set(hw->wiphy, 541 NL80211_EXT_FEATURE_SCAN_START_TIME); 542 wiphy_ext_feature_set(hw->wiphy, 543 NL80211_EXT_FEATURE_BSS_PARENT_TSF); 544 } 545 546 if (iwl_mvm_is_oce_supported(mvm)) { 547 u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_REQ_UMAC, 0); 548 549 wiphy_ext_feature_set(hw->wiphy, 550 NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP); 551 wiphy_ext_feature_set(hw->wiphy, 552 NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME); 553 wiphy_ext_feature_set(hw->wiphy, 554 NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE); 555 556 /* Old firmware also supports probe deferral and suppression */ 557 if (scan_ver < 15) 558 wiphy_ext_feature_set(hw->wiphy, 559 NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION); 560 } 561 562 if (mvm->nvm_data->sku_cap_11ax_enable && 563 !iwlwifi_mod_params.disable_11ax) { 564 hw->wiphy->iftype_ext_capab = he_iftypes_ext_capa; 565 hw->wiphy->num_iftype_ext_capab = 566 ARRAY_SIZE(he_iftypes_ext_capa); 567 568 ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID); 569 ieee80211_hw_set(hw, SUPPORTS_ONLY_HE_MULTI_BSSID); 570 } 571 572 mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; 573 574 #ifdef CONFIG_PM_SLEEP 575 if ((unified || mvm->fw->img[IWL_UCODE_WOWLAN].num_sec) && 576 mvm->trans->ops->d3_suspend && 577 mvm->trans->ops->d3_resume && 578 device_can_wakeup(mvm->trans->dev)) { 579 mvm->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT | 580 WIPHY_WOWLAN_DISCONNECT | 581 WIPHY_WOWLAN_EAP_IDENTITY_REQ | 582 WIPHY_WOWLAN_RFKILL_RELEASE | 583 WIPHY_WOWLAN_NET_DETECT; 584 mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | 585 WIPHY_WOWLAN_GTK_REKEY_FAILURE | 586 WIPHY_WOWLAN_4WAY_HANDSHAKE; 587 588 mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS; 589 mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN; 590 mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN; 591 mvm->wowlan.max_nd_match_sets = 592 iwl_umac_scan_get_max_profiles(mvm->fw); 593 hw->wiphy->wowlan = &mvm->wowlan; 594 } 595 #endif 596 597 ret = iwl_mvm_leds_init(mvm); 598 if (ret) 599 return ret; 600 601 if (fw_has_capa(&mvm->fw->ucode_capa, 602 IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) { 603 IWL_DEBUG_TDLS(mvm, "TDLS supported\n"); 604 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; 605 ieee80211_hw_set(hw, TDLS_WIDER_BW); 606 } 607 608 if (fw_has_capa(&mvm->fw->ucode_capa, 609 
IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) { 610 IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n"); 611 hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH; 612 } 613 614 hw->netdev_features |= mvm->cfg->features; 615 if (!iwl_mvm_is_csum_supported(mvm)) 616 hw->netdev_features &= ~IWL_CSUM_NETIF_FLAGS_MASK; 617 618 if (mvm->cfg->vht_mu_mimo_supported) 619 wiphy_ext_feature_set(hw->wiphy, 620 NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER); 621 622 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_PROTECTED_TWT)) 623 wiphy_ext_feature_set(hw->wiphy, 624 NL80211_EXT_FEATURE_PROTECTED_TWT); 625 626 iwl_mvm_vendor_cmds_register(mvm); 627 628 hw->wiphy->available_antennas_tx = iwl_mvm_get_valid_tx_ant(mvm); 629 hw->wiphy->available_antennas_rx = iwl_mvm_get_valid_rx_ant(mvm); 630 631 ret = ieee80211_register_hw(mvm->hw); 632 if (ret) { 633 iwl_mvm_leds_exit(mvm); 634 } 635 636 return ret; 637 } 638 639 static void iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, 640 struct ieee80211_sta *sta) 641 { 642 if (likely(sta)) { 643 if (likely(iwl_mvm_tx_skb_sta(mvm, skb, sta) == 0)) 644 return; 645 } else { 646 if (likely(iwl_mvm_tx_skb_non_sta(mvm, skb) == 0)) 647 return; 648 } 649 650 ieee80211_free_txskb(mvm->hw, skb); 651 } 652 653 static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, 654 struct ieee80211_tx_control *control, 655 struct sk_buff *skb) 656 { 657 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 658 struct ieee80211_sta *sta = control->sta; 659 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 660 struct ieee80211_hdr *hdr = (void *)skb->data; 661 bool offchannel = IEEE80211_SKB_CB(skb)->flags & 662 IEEE80211_TX_CTL_TX_OFFCHAN; 663 664 if (iwl_mvm_is_radio_killed(mvm)) { 665 IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n"); 666 goto drop; 667 } 668 669 if (offchannel && 670 !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) && 671 !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) 672 goto drop; 673 674 /* 675 * bufferable MMPDUs or MMPDUs on STA interfaces come via TXQs 676 * so we treat the others as broadcast 677 */ 678 if (ieee80211_is_mgmt(hdr->frame_control)) 679 sta = NULL; 680 681 /* If there is no sta, and it's not offchannel - send through AP */ 682 if (!sta && info->control.vif->type == NL80211_IFTYPE_STATION && 683 !offchannel) { 684 struct iwl_mvm_vif *mvmvif = 685 iwl_mvm_vif_from_mac80211(info->control.vif); 686 u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id); 687 688 if (ap_sta_id < mvm->fw->ucode_capa.num_stations) { 689 /* mac80211 holds rcu read lock */ 690 sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]); 691 if (IS_ERR_OR_NULL(sta)) 692 goto drop; 693 } 694 } 695 696 iwl_mvm_tx_skb(mvm, skb, sta); 697 return; 698 drop: 699 ieee80211_free_txskb(hw, skb); 700 } 701 702 void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq) 703 { 704 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 705 struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq); 706 struct sk_buff *skb = NULL; 707 708 /* 709 * No need for threads to be pending here, they can leave the first 710 * taker all the work. 711 * 712 * mvmtxq->tx_request logic: 713 * 714 * If 0, no one is currently TXing, set to 1 to indicate current thread 715 * will now start TX and other threads should quit. 716 * 717 * If 1, another thread is currently TXing, set to 2 to indicate to 718 * that thread that there was another request. 
Since that request may 719 * have raced with the check whether the queue is empty, the TXing 720 * thread should check the queue's status one more time before leaving. 721 * This check is done in order to not leave any TX hanging in the queue 722 * until the next TX invocation (which may not even happen). 723 * 724 * If 2, another thread is currently TXing, and it will already double 725 * check the queue, so do nothing. 726 */ 727 if (atomic_fetch_add_unless(&mvmtxq->tx_request, 1, 2)) 728 return; 729 730 rcu_read_lock(); 731 do { 732 while (likely(!mvmtxq->stopped && 733 !test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status))) { 734 skb = ieee80211_tx_dequeue(hw, txq); 735 736 if (!skb) { 737 if (txq->sta) 738 IWL_DEBUG_TX(mvm, 739 "TXQ of sta %pM tid %d is now empty\n", 740 txq->sta->addr, 741 txq->tid); 742 break; 743 } 744 745 iwl_mvm_tx_skb(mvm, skb, txq->sta); 746 } 747 } while (atomic_dec_return(&mvmtxq->tx_request)); 748 rcu_read_unlock(); 749 } 750 751 static void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw *hw, 752 struct ieee80211_txq *txq) 753 { 754 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 755 struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq); 756 757 /* 758 * Please note that racing is handled very carefully here: 759 * mvmtxq->txq_id is updated during allocation, and mvmtxq->list is 760 * deleted afterwards. 761 * This means that if: 762 * mvmtxq->txq_id != INVALID_QUEUE && list_empty(&mvmtxq->list): 763 * queue is allocated and we can TX. 764 * mvmtxq->txq_id != INVALID_QUEUE && !list_empty(&mvmtxq->list): 765 * a race, should defer the frame. 766 * mvmtxq->txq_id == INVALID_QUEUE && list_empty(&mvmtxq->list): 767 * need to allocate the queue and defer the frame. 768 * mvmtxq->txq_id == INVALID_QUEUE && !list_empty(&mvmtxq->list): 769 * queue is already scheduled for allocation, no need to allocate, 770 * should defer the frame. 771 */ 772 773 /* If the queue is allocated TX and return. */ 774 if (!txq->sta || mvmtxq->txq_id != IWL_MVM_INVALID_QUEUE) { 775 /* 776 * Check that list is empty to avoid a race where txq_id is 777 * already updated, but the queue allocation work wasn't 778 * finished 779 */ 780 if (unlikely(txq->sta && !list_empty(&mvmtxq->list))) 781 return; 782 783 iwl_mvm_mac_itxq_xmit(hw, txq); 784 return; 785 } 786 787 /* The list is being deleted only after the queue is fully allocated. */ 788 if (!list_empty(&mvmtxq->list)) 789 return; 790 791 list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs); 792 schedule_work(&mvm->add_stream_wk); 793 } 794 795 #define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) 
\ 796 do { \ 797 if (!(le16_to_cpu(_tid_bm) & BIT(_tid))) \ 798 break; \ 799 iwl_fw_dbg_collect_trig(&(_mvm)->fwrt, _trig, _fmt); \ 800 } while (0) 801 802 static void 803 iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 804 struct ieee80211_sta *sta, u16 tid, u16 rx_ba_ssn, 805 enum ieee80211_ampdu_mlme_action action) 806 { 807 struct iwl_fw_dbg_trigger_tlv *trig; 808 struct iwl_fw_dbg_trigger_ba *ba_trig; 809 810 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), 811 FW_DBG_TRIGGER_BA); 812 if (!trig) 813 return; 814 815 ba_trig = (void *)trig->data; 816 817 switch (action) { 818 case IEEE80211_AMPDU_TX_OPERATIONAL: { 819 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 820 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; 821 822 CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_start, tid, 823 "TX AGG START: MAC %pM tid %d ssn %d\n", 824 sta->addr, tid, tid_data->ssn); 825 break; 826 } 827 case IEEE80211_AMPDU_TX_STOP_CONT: 828 CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_stop, tid, 829 "TX AGG STOP: MAC %pM tid %d\n", 830 sta->addr, tid); 831 break; 832 case IEEE80211_AMPDU_RX_START: 833 CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_start, tid, 834 "RX AGG START: MAC %pM tid %d ssn %d\n", 835 sta->addr, tid, rx_ba_ssn); 836 break; 837 case IEEE80211_AMPDU_RX_STOP: 838 CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_stop, tid, 839 "RX AGG STOP: MAC %pM tid %d\n", 840 sta->addr, tid); 841 break; 842 default: 843 break; 844 } 845 } 846 847 static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, 848 struct ieee80211_vif *vif, 849 struct ieee80211_ampdu_params *params) 850 { 851 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 852 int ret; 853 struct ieee80211_sta *sta = params->sta; 854 enum ieee80211_ampdu_mlme_action action = params->action; 855 u16 tid = params->tid; 856 u16 *ssn = &params->ssn; 857 u16 buf_size = params->buf_size; 858 bool amsdu = params->amsdu; 859 u16 timeout = params->timeout; 860 861 IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n", 862 sta->addr, tid, action); 863 864 if (!(mvm->nvm_data->sku_cap_11n_enable)) 865 return -EACCES; 866 867 mutex_lock(&mvm->mutex); 868 869 switch (action) { 870 case IEEE80211_AMPDU_RX_START: 871 if (iwl_mvm_vif_from_mac80211(vif)->ap_sta_id == 872 iwl_mvm_sta_from_mac80211(sta)->sta_id) { 873 struct iwl_mvm_vif *mvmvif; 874 u16 macid = iwl_mvm_vif_from_mac80211(vif)->id; 875 struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[macid]; 876 877 mdata->opened_rx_ba_sessions = true; 878 mvmvif = iwl_mvm_vif_from_mac80211(vif); 879 cancel_delayed_work(&mvmvif->uapsd_nonagg_detected_wk); 880 } 881 if (!iwl_enable_rx_ampdu()) { 882 ret = -EINVAL; 883 break; 884 } 885 ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true, buf_size, 886 timeout); 887 break; 888 case IEEE80211_AMPDU_RX_STOP: 889 ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, buf_size, 890 timeout); 891 break; 892 case IEEE80211_AMPDU_TX_START: 893 if (!iwl_enable_tx_ampdu()) { 894 ret = -EINVAL; 895 break; 896 } 897 ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn); 898 break; 899 case IEEE80211_AMPDU_TX_STOP_CONT: 900 ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid); 901 break; 902 case IEEE80211_AMPDU_TX_STOP_FLUSH: 903 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: 904 ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid); 905 break; 906 case IEEE80211_AMPDU_TX_OPERATIONAL: 907 ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid, 908 buf_size, amsdu); 909 break; 910 default: 911 WARN_ON_ONCE(1); 912 ret = -EINVAL; 913
break; 914 } 915 916 if (!ret) { 917 u16 rx_ba_ssn = 0; 918 919 if (action == IEEE80211_AMPDU_RX_START) 920 rx_ba_ssn = *ssn; 921 922 iwl_mvm_ampdu_check_trigger(mvm, vif, sta, tid, 923 rx_ba_ssn, action); 924 } 925 mutex_unlock(&mvm->mutex); 926 927 return ret; 928 } 929 930 static void iwl_mvm_cleanup_iterator(void *data, u8 *mac, 931 struct ieee80211_vif *vif) 932 { 933 struct iwl_mvm *mvm = data; 934 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 935 936 mvmvif->uploaded = false; 937 mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; 938 939 spin_lock_bh(&mvm->time_event_lock); 940 iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data); 941 spin_unlock_bh(&mvm->time_event_lock); 942 943 mvmvif->phy_ctxt = NULL; 944 memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data)); 945 memset(&mvmvif->probe_resp_data, 0, sizeof(mvmvif->probe_resp_data)); 946 } 947 948 static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) 949 { 950 iwl_mvm_stop_device(mvm); 951 952 mvm->cur_aid = 0; 953 954 mvm->scan_status = 0; 955 mvm->ps_disabled = false; 956 mvm->rfkill_safe_init_done = false; 957 958 /* just in case one was running */ 959 iwl_mvm_cleanup_roc_te(mvm); 960 ieee80211_remain_on_channel_expired(mvm->hw); 961 962 iwl_mvm_ftm_restart(mvm); 963 964 /* 965 * cleanup all interfaces, even inactive ones, as some might have 966 * gone down during the HW restart 967 */ 968 ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm); 969 970 mvm->p2p_device_vif = NULL; 971 972 iwl_mvm_reset_phy_ctxts(mvm); 973 memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table)); 974 memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif)); 975 memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd)); 976 977 ieee80211_wake_queues(mvm->hw); 978 979 mvm->rx_ba_sessions = 0; 980 mvm->fwrt.dump.conf = FW_DBG_INVALID; 981 mvm->monitor_on = false; 982 983 /* keep statistics ticking */ 984 iwl_mvm_accu_radio_stats(mvm); 985 } 986 987 int __iwl_mvm_mac_start(struct iwl_mvm *mvm) 988 { 989 int ret; 990 991 lockdep_assert_held(&mvm->mutex); 992 993 ret = iwl_mvm_mei_get_ownership(mvm); 994 if (ret) 995 return ret; 996 997 if (mvm->mei_nvm_data) { 998 /* We got the NIC, we can now free the MEI NVM data */ 999 kfree(mvm->mei_nvm_data); 1000 mvm->mei_nvm_data = NULL; 1001 1002 /* 1003 * We can't free the nvm_data we allocated based on the SAP 1004 * data because we registered to cfg80211 with the channels 1005 * allocated on mvm->nvm_data. Keep a pointer in temp_nvm_data 1006 * just in order to be able free it later. 1007 * NULLify nvm_data so that we will read the NVM from the 1008 * firmware this time. 1009 */ 1010 mvm->temp_nvm_data = mvm->nvm_data; 1011 mvm->nvm_data = NULL; 1012 } 1013 1014 if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) { 1015 /* 1016 * Now convert the HW_RESTART_REQUESTED flag to IN_HW_RESTART 1017 * so later code will - from now on - see that we're doing it. 
1018 */ 1019 set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); 1020 clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status); 1021 /* Clean up some internal and mac80211 state on restart */ 1022 iwl_mvm_restart_cleanup(mvm); 1023 } 1024 ret = iwl_mvm_up(mvm); 1025 1026 iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_POST_INIT, 1027 NULL); 1028 iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_PERIODIC, 1029 NULL); 1030 1031 mvm->last_reset_or_resume_time_jiffies = jiffies; 1032 1033 if (ret && test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { 1034 /* Something went wrong - we need to finish some cleanup 1035 * that normally iwl_mvm_mac_restart_complete() below 1036 * would do. 1037 */ 1038 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); 1039 } 1040 1041 return ret; 1042 } 1043 1044 static int iwl_mvm_mac_start(struct ieee80211_hw *hw) 1045 { 1046 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1047 int ret; 1048 int retry, max_retry = 0; 1049 1050 mutex_lock(&mvm->mutex); 1051 1052 /* we are starting the mac not in error flow, and restart is enabled */ 1053 if (!test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) && 1054 iwlwifi_mod_params.fw_restart) { 1055 max_retry = IWL_MAX_INIT_RETRY; 1056 /* 1057 * This will prevent mac80211 recovery flows to trigger during 1058 * init failures 1059 */ 1060 set_bit(IWL_MVM_STATUS_STARTING, &mvm->status); 1061 } 1062 1063 for (retry = 0; retry <= max_retry; retry++) { 1064 ret = __iwl_mvm_mac_start(mvm); 1065 if (!ret) 1066 break; 1067 1068 IWL_ERR(mvm, "mac start retry %d\n", retry); 1069 } 1070 clear_bit(IWL_MVM_STATUS_STARTING, &mvm->status); 1071 1072 mutex_unlock(&mvm->mutex); 1073 1074 iwl_mvm_mei_set_sw_rfkill_state(mvm); 1075 1076 return ret; 1077 } 1078 1079 static void iwl_mvm_restart_complete(struct iwl_mvm *mvm) 1080 { 1081 int ret; 1082 1083 mutex_lock(&mvm->mutex); 1084 1085 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); 1086 1087 ret = iwl_mvm_update_quotas(mvm, true, NULL); 1088 if (ret) 1089 IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n", 1090 ret); 1091 1092 iwl_mvm_send_recovery_cmd(mvm, ERROR_RECOVERY_END_OF_RECOVERY); 1093 1094 /* 1095 * If we have TDLS peers, remove them. We don't know the last seqno/PN 1096 * of packets the FW sent out, so we must reconnect. 1097 */ 1098 iwl_mvm_teardown_tdls_peers(mvm); 1099 1100 mutex_unlock(&mvm->mutex); 1101 } 1102 1103 static void 1104 iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw, 1105 enum ieee80211_reconfig_type reconfig_type) 1106 { 1107 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1108 1109 switch (reconfig_type) { 1110 case IEEE80211_RECONFIG_TYPE_RESTART: 1111 iwl_mvm_restart_complete(mvm); 1112 break; 1113 case IEEE80211_RECONFIG_TYPE_SUSPEND: 1114 break; 1115 } 1116 } 1117 1118 void __iwl_mvm_mac_stop(struct iwl_mvm *mvm) 1119 { 1120 lockdep_assert_held(&mvm->mutex); 1121 1122 iwl_mvm_ftm_initiator_smooth_stop(mvm); 1123 1124 /* firmware counters are obviously reset now, but we shouldn't 1125 * partially track so also clear the fw_reset_accu counters. 
1126 */ 1127 memset(&mvm->accu_radio_stats, 0, sizeof(mvm->accu_radio_stats)); 1128 1129 /* async_handlers_wk is now blocked */ 1130 1131 if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) 1132 iwl_mvm_rm_aux_sta(mvm); 1133 1134 iwl_mvm_stop_device(mvm); 1135 1136 iwl_mvm_async_handlers_purge(mvm); 1137 /* async_handlers_list is empty and will stay empty: HW is stopped */ 1138 1139 /* 1140 * Clear IN_HW_RESTART and HW_RESTART_REQUESTED flag when stopping the 1141 * hw (as restart_complete() won't be called in this case) and mac80211 1142 * won't execute the restart. 1143 * But make sure to cleanup interfaces that have gone down before/during 1144 * HW restart was requested. 1145 */ 1146 if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) || 1147 test_and_clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, 1148 &mvm->status)) 1149 ieee80211_iterate_interfaces(mvm->hw, 0, 1150 iwl_mvm_cleanup_iterator, mvm); 1151 1152 /* We shouldn't have any UIDs still set. Loop over all the UIDs to 1153 * make sure there's nothing left there and warn if any is found. 1154 */ 1155 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { 1156 int i; 1157 1158 for (i = 0; i < mvm->max_scans; i++) { 1159 if (WARN_ONCE(mvm->scan_uid_status[i], 1160 "UMAC scan UID %d status was not cleaned\n", 1161 i)) 1162 mvm->scan_uid_status[i] = 0; 1163 } 1164 } 1165 } 1166 1167 static void iwl_mvm_mac_stop(struct ieee80211_hw *hw) 1168 { 1169 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1170 1171 flush_work(&mvm->async_handlers_wk); 1172 flush_work(&mvm->add_stream_wk); 1173 1174 /* 1175 * Lock and clear the firmware running bit here already, so that 1176 * new commands coming in elsewhere, e.g. from debugfs, will not 1177 * be able to proceed. This is important here because one of those 1178 * debugfs files causes the firmware dump to be triggered, and if we 1179 * don't stop debugfs accesses before canceling that it could be 1180 * retriggered after we flush it but before we've cleared the bit. 1181 */ 1182 clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); 1183 1184 cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork); 1185 cancel_delayed_work_sync(&mvm->scan_timeout_dwork); 1186 1187 /* 1188 * The work item could be running or queued if the 1189 * ROC time event stops just as we get here. 1190 */ 1191 flush_work(&mvm->roc_done_wk); 1192 1193 iwl_mvm_mei_set_sw_rfkill_state(mvm); 1194 1195 mutex_lock(&mvm->mutex); 1196 __iwl_mvm_mac_stop(mvm); 1197 mutex_unlock(&mvm->mutex); 1198 1199 /* 1200 * The worker might have been waiting for the mutex, let it run and 1201 * discover that its list is now empty. 
1202 */ 1203 cancel_work_sync(&mvm->async_handlers_wk); 1204 } 1205 1206 static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm) 1207 { 1208 u16 i; 1209 1210 lockdep_assert_held(&mvm->mutex); 1211 1212 for (i = 0; i < NUM_PHY_CTX; i++) 1213 if (!mvm->phy_ctxts[i].ref) 1214 return &mvm->phy_ctxts[i]; 1215 1216 IWL_ERR(mvm, "No available PHY context\n"); 1217 return NULL; 1218 } 1219 1220 static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 1221 s16 tx_power) 1222 { 1223 u32 cmd_id = REDUCE_TX_POWER_CMD; 1224 int len; 1225 struct iwl_dev_tx_power_cmd cmd = { 1226 .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC), 1227 .common.mac_context_id = 1228 cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id), 1229 .common.pwr_restriction = cpu_to_le16(8 * tx_power), 1230 }; 1231 u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1232 IWL_FW_CMD_VER_UNKNOWN); 1233 1234 if (tx_power == IWL_DEFAULT_MAX_TX_POWER) 1235 cmd.common.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER); 1236 1237 if (cmd_ver == 7) 1238 len = sizeof(cmd.v7); 1239 else if (cmd_ver == 6) 1240 len = sizeof(cmd.v6); 1241 else if (fw_has_api(&mvm->fw->ucode_capa, 1242 IWL_UCODE_TLV_API_REDUCE_TX_POWER)) 1243 len = sizeof(cmd.v5); 1244 else if (fw_has_capa(&mvm->fw->ucode_capa, 1245 IWL_UCODE_TLV_CAPA_TX_POWER_ACK)) 1246 len = sizeof(cmd.v4); 1247 else 1248 len = sizeof(cmd.v3); 1249 1250 /* all structs have the same common part, add it */ 1251 len += sizeof(cmd.common); 1252 1253 return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd); 1254 } 1255 1256 static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw, 1257 struct ieee80211_vif *vif) 1258 { 1259 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1260 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1261 int ret; 1262 1263 mutex_lock(&mvm->mutex); 1264 1265 if (vif->type == NL80211_IFTYPE_STATION) { 1266 struct iwl_mvm_sta *mvmsta; 1267 1268 mvmvif->csa_bcn_pending = false; 1269 mvmsta = iwl_mvm_sta_from_staid_protected(mvm, 1270 mvmvif->ap_sta_id); 1271 1272 if (WARN_ON(!mvmsta)) { 1273 ret = -EIO; 1274 goto out_unlock; 1275 } 1276 1277 iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false); 1278 1279 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 1280 1281 if (!fw_has_capa(&mvm->fw->ucode_capa, 1282 IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) { 1283 ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0); 1284 if (ret) 1285 goto out_unlock; 1286 1287 iwl_mvm_stop_session_protection(mvm, vif); 1288 } 1289 } 1290 1291 mvmvif->ps_disabled = false; 1292 1293 ret = iwl_mvm_power_update_ps(mvm); 1294 1295 out_unlock: 1296 if (mvmvif->csa_failed) 1297 ret = -EIO; 1298 mutex_unlock(&mvm->mutex); 1299 1300 return ret; 1301 } 1302 1303 static void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw, 1304 struct ieee80211_vif *vif) 1305 { 1306 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1307 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1308 struct iwl_chan_switch_te_cmd cmd = { 1309 .mac_id = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, 1310 mvmvif->color)), 1311 .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE), 1312 }; 1313 1314 /* 1315 * In the new flow since FW is in charge of the timing, 1316 * if driver has canceled the channel switch he will receive the 1317 * CHANNEL_SWITCH_START_NOTIF notification from FW and then cancel it 1318 */ 1319 if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP, 1320 CHANNEL_SWITCH_ERROR_NOTIF, 0)) 1321 return; 1322 1323 IWL_DEBUG_MAC80211(mvm, "Abort CSA on mac %d\n", mvmvif->id); 
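/*
 * Descriptive note on the abort flow below: without the
 * IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD capability the driver removes the
 * CSA time event it scheduled itself (iwl_mvm_remove_csa_period); otherwise
 * it asks the firmware to remove its channel switch time event by sending
 * CHANNEL_SWITCH_TIME_EVENT_CMD with FW_CTXT_ACTION_REMOVE. Setting
 * csa_failed makes the iwl_mvm_post_channel_switch() call below report -EIO.
 */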
1324 1325 mutex_lock(&mvm->mutex); 1326 if (!fw_has_capa(&mvm->fw->ucode_capa, 1327 IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) 1328 iwl_mvm_remove_csa_period(mvm, vif); 1329 else 1330 WARN_ON(iwl_mvm_send_cmd_pdu(mvm, 1331 WIDE_ID(MAC_CONF_GROUP, 1332 CHANNEL_SWITCH_TIME_EVENT_CMD), 1333 0, sizeof(cmd), &cmd)); 1334 mvmvif->csa_failed = true; 1335 mutex_unlock(&mvm->mutex); 1336 1337 iwl_mvm_post_channel_switch(hw, vif); 1338 } 1339 1340 static void iwl_mvm_channel_switch_disconnect_wk(struct work_struct *wk) 1341 { 1342 struct iwl_mvm_vif *mvmvif; 1343 struct ieee80211_vif *vif; 1344 1345 mvmvif = container_of(wk, struct iwl_mvm_vif, csa_work.work); 1346 vif = container_of((void *)mvmvif, struct ieee80211_vif, drv_priv); 1347 1348 /* Trigger disconnect (should clear the CSA state) */ 1349 ieee80211_chswitch_done(vif, false); 1350 } 1351 1352 static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, 1353 struct ieee80211_vif *vif) 1354 { 1355 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1356 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1357 int ret; 1358 1359 mvmvif->mvm = mvm; 1360 RCU_INIT_POINTER(mvmvif->probe_resp_data, NULL); 1361 1362 /* 1363 * Not much to do here. The stack will not allow interface 1364 * types or combinations that we didn't advertise, so we 1365 * don't really have to check the types. 1366 */ 1367 1368 mutex_lock(&mvm->mutex); 1369 1370 /* make sure that beacon statistics don't go backwards with FW reset */ 1371 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) 1372 mvmvif->beacon_stats.accu_num_beacons += 1373 mvmvif->beacon_stats.num_beacons; 1374 1375 /* Allocate resources for the MAC context, and add it to the fw */ 1376 ret = iwl_mvm_mac_ctxt_init(mvm, vif); 1377 if (ret) 1378 goto out_unlock; 1379 1380 rcu_assign_pointer(mvm->vif_id_to_mac[mvmvif->id], vif); 1381 1382 /* 1383 * The AP binding flow can be done only after the beacon 1384 * template is configured (which happens only in the mac80211 1385 * start_ap() flow), and adding the broadcast station can happen 1386 * only after the binding. 1387 * In addition, since modifying the MAC before adding a bcast 1388 * station is not allowed by the FW, delay the adding of MAC context to 1389 * the point where we can also add the bcast station. 
1390 * In short: there's not much we can do at this point, other than 1391 * allocating resources :) 1392 */ 1393 if (vif->type == NL80211_IFTYPE_AP || 1394 vif->type == NL80211_IFTYPE_ADHOC) { 1395 ret = iwl_mvm_alloc_bcast_sta(mvm, vif); 1396 if (ret) { 1397 IWL_ERR(mvm, "Failed to allocate bcast sta\n"); 1398 goto out_unlock; 1399 } 1400 1401 /* 1402 * Only queue for this station is the mcast queue, 1403 * which shouldn't be in TFD mask anyway 1404 */ 1405 ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->mcast_sta, 1406 0, vif->type, 1407 IWL_STA_MULTICAST); 1408 if (ret) 1409 goto out_unlock; 1410 1411 iwl_mvm_vif_dbgfs_register(mvm, vif); 1412 goto out_unlock; 1413 } 1414 1415 mvmvif->features |= hw->netdev_features; 1416 1417 ret = iwl_mvm_mac_ctxt_add(mvm, vif); 1418 if (ret) 1419 goto out_unlock; 1420 1421 ret = iwl_mvm_power_update_mac(mvm); 1422 if (ret) 1423 goto out_remove_mac; 1424 1425 /* beacon filtering */ 1426 ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); 1427 if (ret) 1428 goto out_remove_mac; 1429 1430 if (!mvm->bf_allowed_vif && 1431 vif->type == NL80211_IFTYPE_STATION && !vif->p2p) { 1432 mvm->bf_allowed_vif = mvmvif; 1433 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER | 1434 IEEE80211_VIF_SUPPORTS_CQM_RSSI; 1435 } 1436 1437 /* 1438 * P2P_DEVICE interface does not have a channel context assigned to it, 1439 * so a dedicated PHY context is allocated to it and the corresponding 1440 * MAC context is bound to it at this stage. 1441 */ 1442 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { 1443 1444 mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); 1445 if (!mvmvif->phy_ctxt) { 1446 ret = -ENOSPC; 1447 goto out_free_bf; 1448 } 1449 1450 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); 1451 ret = iwl_mvm_binding_add_vif(mvm, vif); 1452 if (ret) 1453 goto out_unref_phy; 1454 1455 ret = iwl_mvm_add_p2p_bcast_sta(mvm, vif); 1456 if (ret) 1457 goto out_unbind; 1458 1459 /* Save a pointer to p2p device vif, so it can later be used to 1460 * update the p2p device MAC when a GO is started/stopped */ 1461 mvm->p2p_device_vif = vif; 1462 } 1463 1464 iwl_mvm_tcm_add_vif(mvm, vif); 1465 INIT_DELAYED_WORK(&mvmvif->csa_work, 1466 iwl_mvm_channel_switch_disconnect_wk); 1467 1468 if (vif->type == NL80211_IFTYPE_MONITOR) 1469 mvm->monitor_on = true; 1470 1471 iwl_mvm_vif_dbgfs_register(mvm, vif); 1472 1473 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && 1474 vif->type == NL80211_IFTYPE_STATION && !vif->p2p && 1475 !mvm->csme_vif && mvm->mei_registered) { 1476 iwl_mei_set_nic_info(vif->addr, mvm->nvm_data->hw_addr); 1477 iwl_mei_set_netdev(ieee80211_vif_to_wdev(vif)->netdev); 1478 mvm->csme_vif = vif; 1479 } 1480 1481 goto out_unlock; 1482 1483 out_unbind: 1484 iwl_mvm_binding_remove_vif(mvm, vif); 1485 out_unref_phy: 1486 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); 1487 out_free_bf: 1488 if (mvm->bf_allowed_vif == mvmvif) { 1489 mvm->bf_allowed_vif = NULL; 1490 vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER | 1491 IEEE80211_VIF_SUPPORTS_CQM_RSSI); 1492 } 1493 out_remove_mac: 1494 mvmvif->phy_ctxt = NULL; 1495 iwl_mvm_mac_ctxt_remove(mvm, vif); 1496 out_unlock: 1497 mutex_unlock(&mvm->mutex); 1498 1499 return ret; 1500 } 1501 1502 static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm, 1503 struct ieee80211_vif *vif) 1504 { 1505 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { 1506 /* 1507 * Flush the ROC worker which will flush the OFFCHANNEL queue. 1508 * We assume here that all the packets sent to the OFFCHANNEL 1509 * queue are sent in ROC session. 
1510 */ 1511 flush_work(&mvm->roc_done_wk); 1512 } 1513 } 1514 1515 static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, 1516 struct ieee80211_vif *vif) 1517 { 1518 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1519 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1520 struct iwl_probe_resp_data *probe_data; 1521 1522 iwl_mvm_prepare_mac_removal(mvm, vif); 1523 1524 if (!(vif->type == NL80211_IFTYPE_AP || 1525 vif->type == NL80211_IFTYPE_ADHOC)) 1526 iwl_mvm_tcm_rm_vif(mvm, vif); 1527 1528 mutex_lock(&mvm->mutex); 1529 1530 if (vif == mvm->csme_vif) { 1531 iwl_mei_set_netdev(NULL); 1532 mvm->csme_vif = NULL; 1533 } 1534 1535 probe_data = rcu_dereference_protected(mvmvif->probe_resp_data, 1536 lockdep_is_held(&mvm->mutex)); 1537 RCU_INIT_POINTER(mvmvif->probe_resp_data, NULL); 1538 if (probe_data) 1539 kfree_rcu(probe_data, rcu_head); 1540 1541 if (mvm->bf_allowed_vif == mvmvif) { 1542 mvm->bf_allowed_vif = NULL; 1543 vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER | 1544 IEEE80211_VIF_SUPPORTS_CQM_RSSI); 1545 } 1546 1547 if (vif->bss_conf.ftm_responder) 1548 memset(&mvm->ftm_resp_stats, 0, sizeof(mvm->ftm_resp_stats)); 1549 1550 iwl_mvm_vif_dbgfs_clean(mvm, vif); 1551 1552 /* 1553 * For AP/GO interface, the tear down of the resources allocated to the 1554 * interface is be handled as part of the stop_ap flow. 1555 */ 1556 if (vif->type == NL80211_IFTYPE_AP || 1557 vif->type == NL80211_IFTYPE_ADHOC) { 1558 #ifdef CONFIG_NL80211_TESTMODE 1559 if (vif == mvm->noa_vif) { 1560 mvm->noa_vif = NULL; 1561 mvm->noa_duration = 0; 1562 } 1563 #endif 1564 iwl_mvm_dealloc_int_sta(mvm, &mvmvif->mcast_sta); 1565 iwl_mvm_dealloc_bcast_sta(mvm, vif); 1566 goto out_release; 1567 } 1568 1569 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { 1570 mvm->p2p_device_vif = NULL; 1571 iwl_mvm_rm_p2p_bcast_sta(mvm, vif); 1572 iwl_mvm_binding_remove_vif(mvm, vif); 1573 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); 1574 mvmvif->phy_ctxt = NULL; 1575 } 1576 1577 iwl_mvm_power_update_mac(mvm); 1578 iwl_mvm_mac_ctxt_remove(mvm, vif); 1579 1580 RCU_INIT_POINTER(mvm->vif_id_to_mac[mvmvif->id], NULL); 1581 1582 if (vif->type == NL80211_IFTYPE_MONITOR) 1583 mvm->monitor_on = false; 1584 1585 out_release: 1586 mutex_unlock(&mvm->mutex); 1587 } 1588 1589 static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed) 1590 { 1591 return 0; 1592 } 1593 1594 struct iwl_mvm_mc_iter_data { 1595 struct iwl_mvm *mvm; 1596 int port_id; 1597 }; 1598 1599 static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac, 1600 struct ieee80211_vif *vif) 1601 { 1602 struct iwl_mvm_mc_iter_data *data = _data; 1603 struct iwl_mvm *mvm = data->mvm; 1604 struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd; 1605 struct iwl_host_cmd hcmd = { 1606 .id = MCAST_FILTER_CMD, 1607 .flags = CMD_ASYNC, 1608 .dataflags[0] = IWL_HCMD_DFL_NOCOPY, 1609 }; 1610 int ret, len; 1611 1612 /* if we don't have free ports, mcast frames will be dropped */ 1613 if (WARN_ON_ONCE(data->port_id >= MAX_PORT_ID_NUM)) 1614 return; 1615 1616 if (vif->type != NL80211_IFTYPE_STATION || 1617 !vif->cfg.assoc) 1618 return; 1619 1620 cmd->port_id = data->port_id++; 1621 memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN); 1622 len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4); 1623 1624 hcmd.len[0] = len; 1625 hcmd.data[0] = cmd; 1626 1627 ret = iwl_mvm_send_cmd(mvm, &hcmd); 1628 if (ret) 1629 IWL_ERR(mvm, "mcast filter cmd error. 
ret=%d\n", ret); 1630 } 1631 1632 static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm) 1633 { 1634 struct iwl_mvm_mc_iter_data iter_data = { 1635 .mvm = mvm, 1636 }; 1637 int ret; 1638 1639 lockdep_assert_held(&mvm->mutex); 1640 1641 if (WARN_ON_ONCE(!mvm->mcast_filter_cmd)) 1642 return; 1643 1644 ieee80211_iterate_active_interfaces_atomic( 1645 mvm->hw, IEEE80211_IFACE_ITER_NORMAL, 1646 iwl_mvm_mc_iface_iterator, &iter_data); 1647 1648 /* 1649 * Send a (synchronous) ech command so that we wait for the 1650 * multiple asynchronous MCAST_FILTER_CMD commands sent by 1651 * the interface iterator. Otherwise, we might get here over 1652 * and over again (by userspace just sending a lot of these) 1653 * and the CPU can send them faster than the firmware can 1654 * process them. 1655 * Note that the CPU is still faster - but with this we'll 1656 * actually send fewer commands overall because the CPU will 1657 * not schedule the work in mac80211 as frequently if it's 1658 * still running when rescheduled (possibly multiple times). 1659 */ 1660 ret = iwl_mvm_send_cmd_pdu(mvm, ECHO_CMD, 0, 0, NULL); 1661 if (ret) 1662 IWL_ERR(mvm, "Failed to synchronize multicast groups update\n"); 1663 } 1664 1665 static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw, 1666 struct netdev_hw_addr_list *mc_list) 1667 { 1668 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1669 struct iwl_mcast_filter_cmd *cmd; 1670 struct netdev_hw_addr *addr; 1671 int addr_count; 1672 bool pass_all; 1673 int len; 1674 1675 addr_count = netdev_hw_addr_list_count(mc_list); 1676 pass_all = addr_count > MAX_MCAST_FILTERING_ADDRESSES || 1677 IWL_MVM_FW_MCAST_FILTER_PASS_ALL; 1678 if (pass_all) 1679 addr_count = 0; 1680 1681 len = roundup(sizeof(*cmd) + addr_count * ETH_ALEN, 4); 1682 cmd = kzalloc(len, GFP_ATOMIC); 1683 if (!cmd) 1684 return 0; 1685 1686 if (pass_all) { 1687 cmd->pass_all = 1; 1688 return (u64)(unsigned long)cmd; 1689 } 1690 1691 netdev_hw_addr_list_for_each(addr, mc_list) { 1692 IWL_DEBUG_MAC80211(mvm, "mcast addr (%d): %pM\n", 1693 cmd->count, addr->addr); 1694 memcpy(&cmd->addr_list[cmd->count * ETH_ALEN], 1695 addr->addr, ETH_ALEN); 1696 cmd->count++; 1697 } 1698 1699 return (u64)(unsigned long)cmd; 1700 } 1701 1702 static void iwl_mvm_configure_filter(struct ieee80211_hw *hw, 1703 unsigned int changed_flags, 1704 unsigned int *total_flags, 1705 u64 multicast) 1706 { 1707 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1708 struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast; 1709 1710 mutex_lock(&mvm->mutex); 1711 1712 /* replace previous configuration */ 1713 kfree(mvm->mcast_filter_cmd); 1714 mvm->mcast_filter_cmd = cmd; 1715 1716 if (!cmd) 1717 goto out; 1718 1719 if (changed_flags & FIF_ALLMULTI) 1720 cmd->pass_all = !!(*total_flags & FIF_ALLMULTI); 1721 1722 if (cmd->pass_all) 1723 cmd->count = 0; 1724 1725 iwl_mvm_recalc_multicast(mvm); 1726 out: 1727 mutex_unlock(&mvm->mutex); 1728 *total_flags = 0; 1729 } 1730 1731 static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw, 1732 struct ieee80211_vif *vif, 1733 unsigned int filter_flags, 1734 unsigned int changed_flags) 1735 { 1736 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1737 1738 /* We support only filter for probe requests */ 1739 if (!(changed_flags & FIF_PROBE_REQ)) 1740 return; 1741 1742 /* Supported only for p2p client interfaces */ 1743 if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc || 1744 !vif->p2p) 1745 return; 1746 1747 mutex_lock(&mvm->mutex); 1748 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 
1749 mutex_unlock(&mvm->mutex); 1750 } 1751 1752 static int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm, 1753 struct ieee80211_vif *vif) 1754 { 1755 struct iwl_mu_group_mgmt_cmd cmd = {}; 1756 1757 memcpy(cmd.membership_status, vif->bss_conf.mu_group.membership, 1758 WLAN_MEMBERSHIP_LEN); 1759 memcpy(cmd.user_position, vif->bss_conf.mu_group.position, 1760 WLAN_USER_POSITION_LEN); 1761 1762 return iwl_mvm_send_cmd_pdu(mvm, 1763 WIDE_ID(DATA_PATH_GROUP, 1764 UPDATE_MU_GROUPS_CMD), 1765 0, sizeof(cmd), &cmd); 1766 } 1767 1768 static void iwl_mvm_mu_mimo_iface_iterator(void *_data, u8 *mac, 1769 struct ieee80211_vif *vif) 1770 { 1771 if (vif->bss_conf.mu_mimo_owner) { 1772 struct iwl_mu_group_mgmt_notif *notif = _data; 1773 1774 /* 1775 * MU-MIMO Group Id action frame is little endian. We treat 1776 * the data received from firmware as if it came from the 1777 * action frame, so no conversion is needed. 1778 */ 1779 ieee80211_update_mu_groups(vif, 0, 1780 (u8 *)&notif->membership_status, 1781 (u8 *)&notif->user_position); 1782 } 1783 } 1784 1785 void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm, 1786 struct iwl_rx_cmd_buffer *rxb) 1787 { 1788 struct iwl_rx_packet *pkt = rxb_addr(rxb); 1789 struct iwl_mu_group_mgmt_notif *notif = (void *)pkt->data; 1790 1791 ieee80211_iterate_active_interfaces_atomic( 1792 mvm->hw, IEEE80211_IFACE_ITER_NORMAL, 1793 iwl_mvm_mu_mimo_iface_iterator, notif); 1794 } 1795 1796 static u8 iwl_mvm_he_get_ppe_val(u8 *ppe, u8 ppe_pos_bit) 1797 { 1798 u8 byte_num = ppe_pos_bit / 8; 1799 u8 bit_num = ppe_pos_bit % 8; 1800 u8 residue_bits; 1801 u8 res; 1802 1803 if (bit_num <= 5) 1804 return (ppe[byte_num] >> bit_num) & 1805 (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE) - 1); 1806 1807 /* 1808 * If bit_num > 5, we have to combine bits with next byte. 1809 * Calculate how many bits we need to take from current byte (called 1810 * here "residue_bits"), and add them to bits from next byte.
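 * For example, with the 3-bit PPET fields and bit_num == 6, residue_bits
 * is 2: bits 6-7 of the current byte form the low two bits of the result,
 * and bit 0 of the next byte supplies the remaining high bit.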
1811 */ 1812 1813 residue_bits = 8 - bit_num; 1814 1815 res = (ppe[byte_num + 1] & 1816 (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE - residue_bits) - 1)) << 1817 residue_bits; 1818 res += (ppe[byte_num] >> bit_num) & (BIT(residue_bits) - 1); 1819 1820 return res; 1821 } 1822 1823 static void iwl_mvm_parse_ppe(struct iwl_mvm *mvm, 1824 struct iwl_he_pkt_ext_v2 *pkt_ext, u8 nss, 1825 u8 ru_index_bitmap, u8 *ppe, u8 ppe_pos_bit) 1826 { 1827 int i; 1828 1829 /* 1830 * FW currently supports only nss == MAX_HE_SUPP_NSS 1831 * 1832 * If nss > MAX: we can ignore values we don't support 1833 * If nss < MAX: we can set zeros in other streams 1834 */ 1835 if (nss > MAX_HE_SUPP_NSS) { 1836 IWL_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss, 1837 MAX_HE_SUPP_NSS); 1838 nss = MAX_HE_SUPP_NSS; 1839 } 1840 1841 for (i = 0; i < nss; i++) { 1842 u8 ru_index_tmp = ru_index_bitmap << 1; 1843 u8 low_th = IWL_HE_PKT_EXT_NONE, high_th = IWL_HE_PKT_EXT_NONE; 1844 u8 bw; 1845 1846 for (bw = 0; 1847 bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]); 1848 bw++) { 1849 ru_index_tmp >>= 1; 1850 1851 if (!(ru_index_tmp & 1)) 1852 continue; 1853 1854 high_th = iwl_mvm_he_get_ppe_val(ppe, ppe_pos_bit); 1855 ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE; 1856 low_th = iwl_mvm_he_get_ppe_val(ppe, ppe_pos_bit); 1857 ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE; 1858 1859 pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th; 1860 pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th; 1861 } 1862 } 1863 } 1864 1865 static void iwl_mvm_set_pkt_ext_from_he_ppe(struct iwl_mvm *mvm, 1866 struct ieee80211_sta *sta, 1867 struct iwl_he_pkt_ext_v2 *pkt_ext) 1868 { 1869 u8 nss = (sta->deflink.he_cap.ppe_thres[0] & IEEE80211_PPE_THRES_NSS_MASK) + 1; 1870 u8 *ppe = &sta->deflink.he_cap.ppe_thres[0]; 1871 u8 ru_index_bitmap = 1872 u8_get_bits(*ppe, 1873 IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK); 1874 /* Starting after PPE header */ 1875 u8 ppe_pos_bit = IEEE80211_HE_PPE_THRES_INFO_HEADER_SIZE; 1876 1877 iwl_mvm_parse_ppe(mvm, pkt_ext, nss, ru_index_bitmap, ppe, ppe_pos_bit); 1878 } 1879 1880 static void iwl_mvm_set_pkt_ext_from_nominal_padding(struct iwl_he_pkt_ext_v2 *pkt_ext, 1881 u8 nominal_padding, 1882 u32 *flags) 1883 { 1884 int low_th = -1; 1885 int high_th = -1; 1886 int i; 1887 1888 switch (nominal_padding) { 1889 case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_0US: 1890 low_th = IWL_HE_PKT_EXT_NONE; 1891 high_th = IWL_HE_PKT_EXT_NONE; 1892 break; 1893 case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_8US: 1894 low_th = IWL_HE_PKT_EXT_BPSK; 1895 high_th = IWL_HE_PKT_EXT_NONE; 1896 break; 1897 case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US: 1898 low_th = IWL_HE_PKT_EXT_NONE; 1899 high_th = IWL_HE_PKT_EXT_BPSK; 1900 break; 1901 } 1902 1903 /* Set the PPE thresholds accordingly */ 1904 if (low_th >= 0 && high_th >= 0) { 1905 for (i = 0; i < MAX_HE_SUPP_NSS; i++) { 1906 u8 bw; 1907 1908 for (bw = 0; 1909 bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]); 1910 bw++) { 1911 pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th; 1912 pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th; 1913 } 1914 } 1915 1916 *flags |= STA_CTXT_HE_PACKET_EXT; 1917 } 1918 } 1919 1920 static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, 1921 struct ieee80211_vif *vif, u8 sta_id) 1922 { 1923 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1924 struct iwl_he_sta_context_cmd_v3 sta_ctxt_cmd = { 1925 .sta_id = sta_id, 1926 .tid_limit = IWL_MAX_TID_COUNT, 1927 .bss_color = vif->bss_conf.he_bss_color.color, 1928 .htc_trig_based_pkt_ext = vif->bss_conf.htc_trig_based_pkt_ext, 1929 
.frame_time_rts_th = 1930 cpu_to_le16(vif->bss_conf.frame_time_rts_th), 1931 }; 1932 struct iwl_he_sta_context_cmd_v2 sta_ctxt_cmd_v2 = {}; 1933 u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, STA_HE_CTXT_CMD); 1934 u8 ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 2); 1935 int size; 1936 struct ieee80211_sta *sta; 1937 u32 flags; 1938 int i; 1939 const struct ieee80211_sta_he_cap *own_he_cap = NULL; 1940 struct ieee80211_chanctx_conf *chanctx_conf; 1941 const struct ieee80211_supported_band *sband; 1942 void *cmd; 1943 1944 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_MBSSID_HE)) 1945 ver = 1; 1946 1947 switch (ver) { 1948 case 1: 1949 /* same layout as v2 except some data at the end */ 1950 cmd = &sta_ctxt_cmd_v2; 1951 size = sizeof(struct iwl_he_sta_context_cmd_v1); 1952 break; 1953 case 2: 1954 cmd = &sta_ctxt_cmd_v2; 1955 size = sizeof(struct iwl_he_sta_context_cmd_v2); 1956 break; 1957 case 3: 1958 cmd = &sta_ctxt_cmd; 1959 size = sizeof(struct iwl_he_sta_context_cmd_v3); 1960 break; 1961 default: 1962 IWL_ERR(mvm, "bad STA_HE_CTXT_CMD version %d\n", ver); 1963 return; 1964 } 1965 1966 rcu_read_lock(); 1967 1968 chanctx_conf = rcu_dereference(vif->bss_conf.chanctx_conf); 1969 if (WARN_ON(!chanctx_conf)) { 1970 rcu_read_unlock(); 1971 return; 1972 } 1973 1974 sband = mvm->hw->wiphy->bands[chanctx_conf->def.chan->band]; 1975 own_he_cap = ieee80211_get_he_iftype_cap(sband, 1976 ieee80211_vif_type_p2p(vif)); 1977 1978 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_ctxt_cmd.sta_id]); 1979 if (IS_ERR_OR_NULL(sta)) { 1980 rcu_read_unlock(); 1981 WARN(1, "Can't find STA to configure HE\n"); 1982 return; 1983 } 1984 1985 if (!sta->deflink.he_cap.has_he) { 1986 rcu_read_unlock(); 1987 return; 1988 } 1989 1990 flags = 0; 1991 1992 /* Block 26-tone RU OFDMA transmissions */ 1993 if (mvmvif->he_ru_2mhz_block) 1994 flags |= STA_CTXT_HE_RU_2MHZ_BLOCK; 1995 1996 /* HTC flags */ 1997 if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[0] & 1998 IEEE80211_HE_MAC_CAP0_HTC_HE) 1999 sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_SUPPORT); 2000 if ((sta->deflink.he_cap.he_cap_elem.mac_cap_info[1] & 2001 IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION) || 2002 (sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] & 2003 IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION)) { 2004 u8 link_adap = 2005 ((sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] & 2006 IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION) << 1) + 2007 (sta->deflink.he_cap.he_cap_elem.mac_cap_info[1] & 2008 IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION); 2009 2010 if (link_adap == 2) 2011 sta_ctxt_cmd.htc_flags |= 2012 cpu_to_le32(IWL_HE_HTC_LINK_ADAP_UNSOLICITED); 2013 else if (link_adap == 3) 2014 sta_ctxt_cmd.htc_flags |= 2015 cpu_to_le32(IWL_HE_HTC_LINK_ADAP_BOTH); 2016 } 2017 if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR) 2018 sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BSR_SUPP); 2019 if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[3] & 2020 IEEE80211_HE_MAC_CAP3_OMI_CONTROL) 2021 sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_OMI_SUPP); 2022 if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR) 2023 sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BQR_SUPP); 2024 2025 /* 2026 * Initialize the PPE thresholds to "None" (7), as described in Table 2027 * 9-262ac of 80211.ax/D3.0. 2028 */ 2029 memset(&sta_ctxt_cmd.pkt_ext, IWL_HE_PKT_EXT_NONE, 2030 sizeof(sta_ctxt_cmd.pkt_ext)); 2031 2032 /* If PPE Thresholds exist, parse them into a FW-familiar format. 
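 * (i.e. fill pkt_ext.pkt_ext_qam_th[nss][bw][low/high]; see
 * iwl_mvm_parse_ppe() above)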
*/ 2033 if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[6] & 2034 IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) { 2035 iwl_mvm_set_pkt_ext_from_he_ppe(mvm, sta, 2036 &sta_ctxt_cmd.pkt_ext); 2037 flags |= STA_CTXT_HE_PACKET_EXT; 2038 /* PPE Thresholds doesn't exist - set the API PPE values 2039 * according to Common Nominal Packet Padding fiels. */ 2040 } else { 2041 u8 nominal_padding = 2042 u8_get_bits(sta->deflink.he_cap.he_cap_elem.phy_cap_info[9], 2043 IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK); 2044 if (nominal_padding != IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED) 2045 iwl_mvm_set_pkt_ext_from_nominal_padding(&sta_ctxt_cmd.pkt_ext, 2046 nominal_padding, 2047 &flags); 2048 } 2049 2050 if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] & 2051 IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP) 2052 flags |= STA_CTXT_HE_32BIT_BA_BITMAP; 2053 2054 if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] & 2055 IEEE80211_HE_MAC_CAP2_ACK_EN) 2056 flags |= STA_CTXT_HE_ACK_ENABLED; 2057 2058 rcu_read_unlock(); 2059 2060 /* Mark MU EDCA as enabled, unless none detected on some AC */ 2061 flags |= STA_CTXT_HE_MU_EDCA_CW; 2062 for (i = 0; i < IEEE80211_NUM_ACS; i++) { 2063 struct ieee80211_he_mu_edca_param_ac_rec *mu_edca = 2064 &mvmvif->queue_params[i].mu_edca_param_rec; 2065 u8 ac = iwl_mvm_mac80211_ac_to_ucode_ac(i); 2066 2067 if (!mvmvif->queue_params[i].mu_edca) { 2068 flags &= ~STA_CTXT_HE_MU_EDCA_CW; 2069 break; 2070 } 2071 2072 sta_ctxt_cmd.trig_based_txf[ac].cwmin = 2073 cpu_to_le16(mu_edca->ecw_min_max & 0xf); 2074 sta_ctxt_cmd.trig_based_txf[ac].cwmax = 2075 cpu_to_le16((mu_edca->ecw_min_max & 0xf0) >> 4); 2076 sta_ctxt_cmd.trig_based_txf[ac].aifsn = 2077 cpu_to_le16(mu_edca->aifsn); 2078 sta_ctxt_cmd.trig_based_txf[ac].mu_time = 2079 cpu_to_le16(mu_edca->mu_edca_timer); 2080 } 2081 2082 2083 if (vif->bss_conf.uora_exists) { 2084 flags |= STA_CTXT_HE_TRIG_RND_ALLOC; 2085 2086 sta_ctxt_cmd.rand_alloc_ecwmin = 2087 vif->bss_conf.uora_ocw_range & 0x7; 2088 sta_ctxt_cmd.rand_alloc_ecwmax = 2089 (vif->bss_conf.uora_ocw_range >> 3) & 0x7; 2090 } 2091 2092 if (own_he_cap && !(own_he_cap->he_cap_elem.mac_cap_info[2] & 2093 IEEE80211_HE_MAC_CAP2_ACK_EN)) 2094 flags |= STA_CTXT_HE_NIC_NOT_ACK_ENABLED; 2095 2096 if (vif->bss_conf.nontransmitted) { 2097 flags |= STA_CTXT_HE_REF_BSSID_VALID; 2098 ether_addr_copy(sta_ctxt_cmd.ref_bssid_addr, 2099 vif->bss_conf.transmitter_bssid); 2100 sta_ctxt_cmd.max_bssid_indicator = 2101 vif->bss_conf.bssid_indicator; 2102 sta_ctxt_cmd.bssid_index = vif->bss_conf.bssid_index; 2103 sta_ctxt_cmd.ema_ap = vif->bss_conf.ema_ap; 2104 sta_ctxt_cmd.profile_periodicity = 2105 vif->bss_conf.profile_periodicity; 2106 } 2107 2108 sta_ctxt_cmd.flags = cpu_to_le32(flags); 2109 2110 if (ver < 3) { 2111 /* fields before pkt_ext */ 2112 BUILD_BUG_ON(offsetof(typeof(sta_ctxt_cmd), pkt_ext) != 2113 offsetof(typeof(sta_ctxt_cmd_v2), pkt_ext)); 2114 memcpy(&sta_ctxt_cmd_v2, &sta_ctxt_cmd, 2115 offsetof(typeof(sta_ctxt_cmd), pkt_ext)); 2116 2117 /* pkt_ext */ 2118 for (i = 0; 2119 i < ARRAY_SIZE(sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th); 2120 i++) { 2121 u8 bw; 2122 2123 for (bw = 0; 2124 bw < ARRAY_SIZE(sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th[i]); 2125 bw++) { 2126 BUILD_BUG_ON(sizeof(sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw]) != 2127 sizeof(sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th[i][bw])); 2128 2129 memcpy(&sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th[i][bw], 2130 &sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw], 2131 sizeof(sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw])); 2132 } 2133 } 2134 2135 /* 
fields after pkt_ext */ 2136 BUILD_BUG_ON(sizeof(sta_ctxt_cmd) - 2137 offsetofend(typeof(sta_ctxt_cmd), pkt_ext) != 2138 sizeof(sta_ctxt_cmd_v2) - 2139 offsetofend(typeof(sta_ctxt_cmd_v2), pkt_ext)); 2140 memcpy((u8 *)&sta_ctxt_cmd_v2 + 2141 offsetofend(typeof(sta_ctxt_cmd_v2), pkt_ext), 2142 (u8 *)&sta_ctxt_cmd + 2143 offsetofend(typeof(sta_ctxt_cmd), pkt_ext), 2144 sizeof(sta_ctxt_cmd) - 2145 offsetofend(typeof(sta_ctxt_cmd), pkt_ext)); 2146 sta_ctxt_cmd_v2.reserved3 = 0; 2147 } 2148 2149 if (iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, size, cmd)) 2150 IWL_ERR(mvm, "Failed to config FW to work HE!\n"); 2151 } 2152 2153 static void iwl_mvm_protect_assoc(struct iwl_mvm *mvm, 2154 struct ieee80211_vif *vif, 2155 u32 duration_override) 2156 { 2157 u32 duration = IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS; 2158 u32 min_duration = IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS; 2159 2160 if (duration_override > duration) 2161 duration = duration_override; 2162 2163 /* Try really hard to protect the session and hear a beacon 2164 * The new session protection command allows us to protect the 2165 * session for a much longer time since the firmware will internally 2166 * create two events: a 300TU one with a very high priority that 2167 * won't be fragmented which should be enough for 99% of the cases, 2168 * and another one (which we configure here to be 900TU long) which 2169 * will have a slightly lower priority, but more importantly, can be 2170 * fragmented so that it'll allow other activities to run. 2171 */ 2172 if (fw_has_capa(&mvm->fw->ucode_capa, 2173 IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) 2174 iwl_mvm_schedule_session_protection(mvm, vif, 900, 2175 min_duration, false); 2176 else 2177 iwl_mvm_protect_session(mvm, vif, duration, 2178 min_duration, 500, false); 2179 } 2180 2181 static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, 2182 struct ieee80211_vif *vif, 2183 struct ieee80211_bss_conf *bss_conf, 2184 u64 changes) 2185 { 2186 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2187 int ret; 2188 2189 /* 2190 * Re-calculate the tsf id, as the leader-follower relations depend 2191 * on the beacon interval, which was not known when the station 2192 * interface was added. 2193 */ 2194 if (changes & BSS_CHANGED_ASSOC && vif->cfg.assoc) { 2195 if (vif->bss_conf.he_support && 2196 !iwlwifi_mod_params.disable_11ax) 2197 iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id); 2198 2199 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); 2200 } 2201 2202 /* Update MU EDCA params */ 2203 if (changes & BSS_CHANGED_QOS && mvmvif->associated && 2204 vif->cfg.assoc && vif->bss_conf.he_support && 2205 !iwlwifi_mod_params.disable_11ax) 2206 iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id); 2207 2208 /* 2209 * If we're not associated yet, take the (new) BSSID before associating 2210 * so the firmware knows. If we're already associated, then use the old 2211 * BSSID here, and we'll send a cleared one later in the CHANGED_ASSOC 2212 * branch for disassociation below. 
2213 */ 2214 if (changes & BSS_CHANGED_BSSID && !mvmvif->associated) 2215 memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN); 2216 2217 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, mvmvif->bssid); 2218 if (ret) 2219 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); 2220 2221 /* after sending it once, adopt mac80211 data */ 2222 memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN); 2223 mvmvif->associated = vif->cfg.assoc; 2224 2225 if (changes & BSS_CHANGED_ASSOC) { 2226 if (vif->cfg.assoc) { 2227 /* clear statistics to get clean beacon counter */ 2228 iwl_mvm_request_statistics(mvm, true); 2229 memset(&mvmvif->beacon_stats, 0, 2230 sizeof(mvmvif->beacon_stats)); 2231 2232 /* add quota for this interface */ 2233 ret = iwl_mvm_update_quotas(mvm, true, NULL); 2234 if (ret) { 2235 IWL_ERR(mvm, "failed to update quotas\n"); 2236 return; 2237 } 2238 2239 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, 2240 &mvm->status) && 2241 !fw_has_capa(&mvm->fw->ucode_capa, 2242 IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) { 2243 /* 2244 * If we're restarting then the firmware will 2245 * obviously have lost synchronisation with 2246 * the AP. It will attempt to synchronise by 2247 * itself, but we can make it more reliable by 2248 * scheduling a session protection time event. 2249 * 2250 * The firmware needs to receive a beacon to 2251 * catch up with synchronisation, use 110% of 2252 * the beacon interval. 2253 * 2254 * Set a large maximum delay to allow for more 2255 * than a single interface. 2256 * 2257 * For new firmware versions, rely on the 2258 * firmware. This is relevant for DCM scenarios 2259 * only anyway. 2260 */ 2261 u32 dur = (11 * vif->bss_conf.beacon_int) / 10; 2262 iwl_mvm_protect_session(mvm, vif, dur, dur, 2263 5 * dur, false); 2264 } else if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, 2265 &mvm->status) && 2266 !vif->bss_conf.dtim_period) { 2267 /* 2268 * If we're not restarting and still haven't 2269 * heard a beacon (dtim period unknown) then 2270 * make sure we still have enough minimum time 2271 * remaining in the time event, since the auth 2272 * might actually have taken quite a while 2273 * (especially for SAE) and so the remaining 2274 * time could be small without us having heard 2275 * a beacon yet. 2276 */ 2277 iwl_mvm_protect_assoc(mvm, vif, 0); 2278 } 2279 2280 iwl_mvm_sf_update(mvm, vif, false); 2281 iwl_mvm_power_vif_assoc(mvm, vif); 2282 if (vif->p2p) { 2283 iwl_mvm_update_smps(mvm, vif, 2284 IWL_MVM_SMPS_REQ_PROT, 2285 IEEE80211_SMPS_DYNAMIC); 2286 } 2287 } else if (mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) { 2288 iwl_mvm_mei_host_disassociated(mvm); 2289 /* 2290 * If update fails - SF might be running in associated 2291 * mode while disassociated - which is forbidden. 2292 */ 2293 ret = iwl_mvm_sf_update(mvm, vif, false); 2294 WARN_ONCE(ret && 2295 !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, 2296 &mvm->status), 2297 "Failed to update SF upon disassociation\n"); 2298 2299 /* 2300 * If we get an assert during the connection (after the 2301 * station has been added, but before the vif is set 2302 * to associated), mac80211 will re-add the station and 2303 * then configure the vif. Since the vif is not 2304 * associated, we would remove the station here and 2305 * this would fail the recovery. 
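 * Hence, skip the station removal here while a HW restart is in progress.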
2306 */ 2307 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, 2308 &mvm->status)) { 2309 /* 2310 * Remove AP station now that 2311 * the MAC is unassoc 2312 */ 2313 ret = iwl_mvm_rm_sta_id(mvm, vif, 2314 mvmvif->ap_sta_id); 2315 if (ret) 2316 IWL_ERR(mvm, 2317 "failed to remove AP station\n"); 2318 2319 mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; 2320 } 2321 2322 /* remove quota for this interface */ 2323 ret = iwl_mvm_update_quotas(mvm, false, NULL); 2324 if (ret) 2325 IWL_ERR(mvm, "failed to update quotas\n"); 2326 2327 /* this will take the cleared BSSID from bss_conf */ 2328 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 2329 if (ret) 2330 IWL_ERR(mvm, 2331 "failed to update MAC %pM (clear after unassoc)\n", 2332 vif->addr); 2333 } 2334 2335 /* 2336 * The firmware tracks the MU-MIMO group on its own. 2337 * However, on HW restart we should restore this data. 2338 */ 2339 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && 2340 (changes & BSS_CHANGED_MU_GROUPS) && vif->bss_conf.mu_mimo_owner) { 2341 ret = iwl_mvm_update_mu_groups(mvm, vif); 2342 if (ret) 2343 IWL_ERR(mvm, 2344 "failed to update VHT MU_MIMO groups\n"); 2345 } 2346 2347 iwl_mvm_recalc_multicast(mvm); 2348 2349 /* reset rssi values */ 2350 mvmvif->bf_data.ave_beacon_signal = 0; 2351 2352 iwl_mvm_bt_coex_vif_change(mvm); 2353 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT, 2354 IEEE80211_SMPS_AUTOMATIC); 2355 if (fw_has_capa(&mvm->fw->ucode_capa, 2356 IWL_UCODE_TLV_CAPA_UMAC_SCAN)) 2357 iwl_mvm_config_scan(mvm); 2358 } 2359 2360 if (changes & BSS_CHANGED_BEACON_INFO) { 2361 /* 2362 * We received a beacon from the associated AP so 2363 * remove the session protection. 2364 */ 2365 iwl_mvm_stop_session_protection(mvm, vif); 2366 2367 iwl_mvm_sf_update(mvm, vif, false); 2368 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); 2369 } 2370 2371 if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS | 2372 /* 2373 * Send power command on every beacon change, 2374 * because we may have not enabled beacon abort yet. 2375 */ 2376 BSS_CHANGED_BEACON_INFO)) { 2377 ret = iwl_mvm_power_update_mac(mvm); 2378 if (ret) 2379 IWL_ERR(mvm, "failed to update power mode\n"); 2380 } 2381 2382 if (changes & BSS_CHANGED_CQM) { 2383 IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n"); 2384 /* reset cqm events tracking */ 2385 mvmvif->bf_data.last_cqm_event = 0; 2386 if (mvmvif->bf_data.bf_enabled) { 2387 ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0); 2388 if (ret) 2389 IWL_ERR(mvm, 2390 "failed to update CQM thresholds\n"); 2391 } 2392 } 2393 2394 if (changes & BSS_CHANGED_BANDWIDTH) 2395 iwl_mvm_apply_fw_smps_request(vif); 2396 } 2397 2398 static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, 2399 struct ieee80211_vif *vif, 2400 struct ieee80211_bss_conf *link_conf) 2401 { 2402 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2403 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2404 int ret, i; 2405 2406 mutex_lock(&mvm->mutex); 2407 2408 /* Send the beacon template */ 2409 ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif); 2410 if (ret) 2411 goto out_unlock; 2412 2413 /* 2414 * Re-calculate the tsf id, as the leader-follower relations depend on 2415 * the beacon interval, which was not known when the AP interface 2416 * was added. 
2417 */ 2418 if (vif->type == NL80211_IFTYPE_AP) 2419 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); 2420 2421 mvmvif->ap_assoc_sta_count = 0; 2422 2423 /* Add the mac context */ 2424 ret = iwl_mvm_mac_ctxt_add(mvm, vif); 2425 if (ret) 2426 goto out_unlock; 2427 2428 /* Perform the binding */ 2429 ret = iwl_mvm_binding_add_vif(mvm, vif); 2430 if (ret) 2431 goto out_remove; 2432 2433 /* 2434 * This is not very nice, but the simplest: 2435 * For older FWs adding the mcast sta before the bcast station may 2436 * cause assert 0x2b00. 2437 * This is fixed in later FW so make the order of removal depend on 2438 * the TLV 2439 */ 2440 if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) { 2441 ret = iwl_mvm_add_mcast_sta(mvm, vif); 2442 if (ret) 2443 goto out_unbind; 2444 /* 2445 * Send the bcast station. At this stage the TBTT and DTIM time 2446 * events are added and applied to the scheduler 2447 */ 2448 ret = iwl_mvm_send_add_bcast_sta(mvm, vif); 2449 if (ret) { 2450 iwl_mvm_rm_mcast_sta(mvm, vif); 2451 goto out_unbind; 2452 } 2453 } else { 2454 /* 2455 * Send the bcast station. At this stage the TBTT and DTIM time 2456 * events are added and applied to the scheduler 2457 */ 2458 ret = iwl_mvm_send_add_bcast_sta(mvm, vif); 2459 if (ret) 2460 goto out_unbind; 2461 ret = iwl_mvm_add_mcast_sta(mvm, vif); 2462 if (ret) { 2463 iwl_mvm_send_rm_bcast_sta(mvm, vif); 2464 goto out_unbind; 2465 } 2466 } 2467 2468 /* must be set before quota calculations */ 2469 mvmvif->ap_ibss_active = true; 2470 2471 /* send all the early keys to the device now */ 2472 for (i = 0; i < ARRAY_SIZE(mvmvif->ap_early_keys); i++) { 2473 struct ieee80211_key_conf *key = mvmvif->ap_early_keys[i]; 2474 2475 if (!key) 2476 continue; 2477 2478 mvmvif->ap_early_keys[i] = NULL; 2479 2480 ret = __iwl_mvm_mac_set_key(hw, SET_KEY, vif, NULL, key); 2481 if (ret) 2482 goto out_quota_failed; 2483 } 2484 2485 if (vif->type == NL80211_IFTYPE_AP && !vif->p2p) { 2486 iwl_mvm_vif_set_low_latency(mvmvif, true, 2487 LOW_LATENCY_VIF_TYPE); 2488 iwl_mvm_send_low_latency_cmd(mvm, true, mvmvif->id); 2489 } 2490 2491 /* power updated needs to be done before quotas */ 2492 iwl_mvm_power_update_mac(mvm); 2493 2494 ret = iwl_mvm_update_quotas(mvm, false, NULL); 2495 if (ret) 2496 goto out_quota_failed; 2497 2498 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */ 2499 if (vif->p2p && mvm->p2p_device_vif) 2500 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL); 2501 2502 iwl_mvm_bt_coex_vif_change(mvm); 2503 2504 /* we don't support TDLS during DCM */ 2505 if (iwl_mvm_phy_ctx_count(mvm) > 1) 2506 iwl_mvm_teardown_tdls_peers(mvm); 2507 2508 iwl_mvm_ftm_restart_responder(mvm, vif); 2509 2510 goto out_unlock; 2511 2512 out_quota_failed: 2513 iwl_mvm_power_update_mac(mvm); 2514 mvmvif->ap_ibss_active = false; 2515 iwl_mvm_send_rm_bcast_sta(mvm, vif); 2516 iwl_mvm_rm_mcast_sta(mvm, vif); 2517 out_unbind: 2518 iwl_mvm_binding_remove_vif(mvm, vif); 2519 out_remove: 2520 iwl_mvm_mac_ctxt_remove(mvm, vif); 2521 out_unlock: 2522 mutex_unlock(&mvm->mutex); 2523 return ret; 2524 } 2525 2526 static int iwl_mvm_start_ap(struct ieee80211_hw *hw, 2527 struct ieee80211_vif *vif, 2528 struct ieee80211_bss_conf *link_conf) 2529 { 2530 return iwl_mvm_start_ap_ibss(hw, vif, link_conf); 2531 } 2532 2533 static int iwl_mvm_start_ibss(struct ieee80211_hw *hw, 2534 struct ieee80211_vif *vif) 2535 { 2536 return iwl_mvm_start_ap_ibss(hw, vif, &vif->bss_conf); 2537 } 2538 2539 static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw, 2540 
struct ieee80211_vif *vif, 2541 struct ieee80211_bss_conf *link_conf) 2542 { 2543 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2544 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2545 2546 iwl_mvm_prepare_mac_removal(mvm, vif); 2547 2548 mutex_lock(&mvm->mutex); 2549 2550 /* Handle AP stop while in CSA */ 2551 if (rcu_access_pointer(mvm->csa_vif) == vif) { 2552 iwl_mvm_remove_time_event(mvm, mvmvif, 2553 &mvmvif->time_event_data); 2554 RCU_INIT_POINTER(mvm->csa_vif, NULL); 2555 mvmvif->csa_countdown = false; 2556 } 2557 2558 if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) { 2559 RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL); 2560 mvm->csa_tx_block_bcn_timeout = 0; 2561 } 2562 2563 mvmvif->ap_ibss_active = false; 2564 mvm->ap_last_beacon_gp2 = 0; 2565 2566 if (vif->type == NL80211_IFTYPE_AP && !vif->p2p) { 2567 iwl_mvm_vif_set_low_latency(mvmvif, false, 2568 LOW_LATENCY_VIF_TYPE); 2569 iwl_mvm_send_low_latency_cmd(mvm, false, mvmvif->id); 2570 } 2571 2572 iwl_mvm_bt_coex_vif_change(mvm); 2573 2574 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */ 2575 if (vif->p2p && mvm->p2p_device_vif) 2576 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL); 2577 2578 iwl_mvm_update_quotas(mvm, false, NULL); 2579 2580 iwl_mvm_ftm_responder_clear(mvm, vif); 2581 2582 /* 2583 * This is not very nice, but the simplest: 2584 * For older FWs removing the mcast sta before the bcast station may 2585 * cause assert 0x2b00. 2586 * This is fixed in later FW (which will stop beaconing when removing 2587 * bcast station). 2588 * So make the order of removal depend on the TLV 2589 */ 2590 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) 2591 iwl_mvm_rm_mcast_sta(mvm, vif); 2592 iwl_mvm_send_rm_bcast_sta(mvm, vif); 2593 if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) 2594 iwl_mvm_rm_mcast_sta(mvm, vif); 2595 iwl_mvm_binding_remove_vif(mvm, vif); 2596 2597 iwl_mvm_power_update_mac(mvm); 2598 2599 iwl_mvm_mac_ctxt_remove(mvm, vif); 2600 2601 mutex_unlock(&mvm->mutex); 2602 } 2603 2604 static void iwl_mvm_stop_ap(struct ieee80211_hw *hw, 2605 struct ieee80211_vif *vif, 2606 struct ieee80211_bss_conf *link_conf) 2607 { 2608 iwl_mvm_stop_ap_ibss(hw, vif, link_conf); 2609 } 2610 2611 static void iwl_mvm_stop_ibss(struct ieee80211_hw *hw, 2612 struct ieee80211_vif *vif) 2613 { 2614 iwl_mvm_stop_ap_ibss(hw, vif, &vif->bss_conf); 2615 } 2616 2617 static void 2618 iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm, 2619 struct ieee80211_vif *vif, 2620 struct ieee80211_bss_conf *bss_conf, 2621 u64 changes) 2622 { 2623 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2624 2625 /* Changes will be applied when the AP/IBSS is started */ 2626 if (!mvmvif->ap_ibss_active) 2627 return; 2628 2629 if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT | 2630 BSS_CHANGED_BANDWIDTH | BSS_CHANGED_QOS) && 2631 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL)) 2632 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); 2633 2634 /* Need to send a new beacon template to the FW */ 2635 if (changes & BSS_CHANGED_BEACON && 2636 iwl_mvm_mac_ctxt_beacon_changed(mvm, vif)) 2637 IWL_WARN(mvm, "Failed updating beacon data\n"); 2638 2639 if (changes & BSS_CHANGED_FTM_RESPONDER) { 2640 int ret = iwl_mvm_ftm_start_responder(mvm, vif); 2641 2642 if (ret) 2643 IWL_WARN(mvm, "Failed to enable FTM responder (%d)\n", 2644 ret); 2645 } 2646 2647 } 2648 2649 static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw, 2650 struct ieee80211_vif *vif, 
2651 struct ieee80211_bss_conf *bss_conf, 2652 u64 changes) 2653 { 2654 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2655 2656 mutex_lock(&mvm->mutex); 2657 2658 if (changes & BSS_CHANGED_IDLE && !vif->cfg.idle) 2659 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true); 2660 2661 switch (vif->type) { 2662 case NL80211_IFTYPE_STATION: 2663 iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes); 2664 break; 2665 case NL80211_IFTYPE_AP: 2666 case NL80211_IFTYPE_ADHOC: 2667 iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes); 2668 break; 2669 case NL80211_IFTYPE_MONITOR: 2670 if (changes & BSS_CHANGED_MU_GROUPS) 2671 iwl_mvm_update_mu_groups(mvm, vif); 2672 break; 2673 default: 2674 /* shouldn't happen */ 2675 WARN_ON_ONCE(1); 2676 } 2677 2678 if (changes & BSS_CHANGED_TXPOWER) { 2679 IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d dBm\n", 2680 bss_conf->txpower); 2681 iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower); 2682 } 2683 2684 mutex_unlock(&mvm->mutex); 2685 } 2686 2687 static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw, 2688 struct ieee80211_vif *vif, 2689 struct ieee80211_scan_request *hw_req) 2690 { 2691 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2692 int ret; 2693 2694 if (hw_req->req.n_channels == 0 || 2695 hw_req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels) 2696 return -EINVAL; 2697 2698 mutex_lock(&mvm->mutex); 2699 ret = iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies); 2700 mutex_unlock(&mvm->mutex); 2701 2702 return ret; 2703 } 2704 2705 static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw, 2706 struct ieee80211_vif *vif) 2707 { 2708 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2709 2710 mutex_lock(&mvm->mutex); 2711 2712 /* Due to a race condition, it's possible that mac80211 asks 2713 * us to stop a hw_scan when it's already stopped. This can 2714 * happen, for instance, if we stopped the scan ourselves, 2715 * called ieee80211_scan_completed() and the userspace called 2716 * cancel scan scan before ieee80211_scan_work() could run. 2717 * To handle that, simply return if the scan is not running. 
2718 */ 2719 if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) 2720 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); 2721 2722 mutex_unlock(&mvm->mutex); 2723 } 2724 2725 static void 2726 iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw, 2727 struct ieee80211_sta *sta, u16 tids, 2728 int num_frames, 2729 enum ieee80211_frame_release_type reason, 2730 bool more_data) 2731 { 2732 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2733 2734 /* Called when we need to transmit (a) frame(s) from mac80211 */ 2735 2736 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames, 2737 tids, more_data, false); 2738 } 2739 2740 static void 2741 iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw, 2742 struct ieee80211_sta *sta, u16 tids, 2743 int num_frames, 2744 enum ieee80211_frame_release_type reason, 2745 bool more_data) 2746 { 2747 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2748 2749 /* Called when we need to transmit (a) frame(s) from agg or dqa queue */ 2750 2751 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames, 2752 tids, more_data, true); 2753 } 2754 2755 static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, 2756 enum sta_notify_cmd cmd, 2757 struct ieee80211_sta *sta) 2758 { 2759 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2760 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 2761 unsigned long txqs = 0, tids = 0; 2762 int tid; 2763 2764 /* 2765 * If we have TVQM then we get too high queue numbers - luckily 2766 * we really shouldn't get here with that because such hardware 2767 * should have firmware supporting buffer station offload. 2768 */ 2769 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) 2770 return; 2771 2772 spin_lock_bh(&mvmsta->lock); 2773 for (tid = 0; tid < ARRAY_SIZE(mvmsta->tid_data); tid++) { 2774 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; 2775 2776 if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE) 2777 continue; 2778 2779 __set_bit(tid_data->txq_id, &txqs); 2780 2781 if (iwl_mvm_tid_queued(mvm, tid_data) == 0) 2782 continue; 2783 2784 __set_bit(tid, &tids); 2785 } 2786 2787 switch (cmd) { 2788 case STA_NOTIFY_SLEEP: 2789 for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT) 2790 ieee80211_sta_set_buffered(sta, tid, true); 2791 2792 if (txqs) 2793 iwl_trans_freeze_txq_timer(mvm->trans, txqs, true); 2794 /* 2795 * The fw updates the STA to be asleep. Tx packets on the Tx 2796 * queues to this station will not be transmitted. The fw will 2797 * send a Tx response with TX_STATUS_FAIL_DEST_PS. 
2798 */ 2799 break; 2800 case STA_NOTIFY_AWAKE: 2801 if (WARN_ON(mvmsta->sta_id == IWL_MVM_INVALID_STA)) 2802 break; 2803 2804 if (txqs) 2805 iwl_trans_freeze_txq_timer(mvm->trans, txqs, false); 2806 iwl_mvm_sta_modify_ps_wake(mvm, sta); 2807 break; 2808 default: 2809 break; 2810 } 2811 spin_unlock_bh(&mvmsta->lock); 2812 } 2813 2814 static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, 2815 struct ieee80211_vif *vif, 2816 enum sta_notify_cmd cmd, 2817 struct ieee80211_sta *sta) 2818 { 2819 __iwl_mvm_mac_sta_notify(hw, cmd, sta); 2820 } 2821 2822 void iwl_mvm_sta_pm_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) 2823 { 2824 struct iwl_rx_packet *pkt = rxb_addr(rxb); 2825 struct iwl_mvm_pm_state_notification *notif = (void *)pkt->data; 2826 struct ieee80211_sta *sta; 2827 struct iwl_mvm_sta *mvmsta; 2828 bool sleeping = (notif->type != IWL_MVM_PM_EVENT_AWAKE); 2829 2830 if (WARN_ON(notif->sta_id >= mvm->fw->ucode_capa.num_stations)) 2831 return; 2832 2833 rcu_read_lock(); 2834 sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]); 2835 if (WARN_ON(IS_ERR_OR_NULL(sta))) { 2836 rcu_read_unlock(); 2837 return; 2838 } 2839 2840 mvmsta = iwl_mvm_sta_from_mac80211(sta); 2841 2842 if (!mvmsta->vif || 2843 mvmsta->vif->type != NL80211_IFTYPE_AP) { 2844 rcu_read_unlock(); 2845 return; 2846 } 2847 2848 if (mvmsta->sleeping != sleeping) { 2849 mvmsta->sleeping = sleeping; 2850 __iwl_mvm_mac_sta_notify(mvm->hw, 2851 sleeping ? STA_NOTIFY_SLEEP : STA_NOTIFY_AWAKE, 2852 sta); 2853 ieee80211_sta_ps_transition(sta, sleeping); 2854 } 2855 2856 if (sleeping) { 2857 switch (notif->type) { 2858 case IWL_MVM_PM_EVENT_AWAKE: 2859 case IWL_MVM_PM_EVENT_ASLEEP: 2860 break; 2861 case IWL_MVM_PM_EVENT_UAPSD: 2862 ieee80211_sta_uapsd_trigger(sta, IEEE80211_NUM_TIDS); 2863 break; 2864 case IWL_MVM_PM_EVENT_PS_POLL: 2865 ieee80211_sta_pspoll(sta); 2866 break; 2867 default: 2868 break; 2869 } 2870 } 2871 2872 rcu_read_unlock(); 2873 } 2874 2875 static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw, 2876 struct ieee80211_vif *vif, 2877 struct ieee80211_sta *sta) 2878 { 2879 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2880 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 2881 2882 /* 2883 * This is called before mac80211 does RCU synchronisation, 2884 * so here we already invalidate our internal RCU-protected 2885 * station pointer. The rest of the code will thus no longer 2886 * be able to find the station this way, and we don't rely 2887 * on further RCU synchronisation after the sta_state() 2888 * callback deleted the station. 
2889 */ 2890 mutex_lock(&mvm->mutex); 2891 if (sta == rcu_access_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id])) 2892 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], 2893 ERR_PTR(-ENOENT)); 2894 2895 mutex_unlock(&mvm->mutex); 2896 } 2897 2898 static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 2899 const u8 *bssid) 2900 { 2901 int i; 2902 2903 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { 2904 struct iwl_mvm_tcm_mac *mdata; 2905 2906 mdata = &mvm->tcm.data[iwl_mvm_vif_from_mac80211(vif)->id]; 2907 ewma_rate_init(&mdata->uapsd_nonagg_detect.rate); 2908 mdata->opened_rx_ba_sessions = false; 2909 } 2910 2911 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT)) 2912 return; 2913 2914 if (vif->p2p && !iwl_mvm_is_p2p_scm_uapsd_supported(mvm)) { 2915 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; 2916 return; 2917 } 2918 2919 if (!vif->p2p && 2920 (iwlwifi_mod_params.uapsd_disable & IWL_DISABLE_UAPSD_BSS)) { 2921 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; 2922 return; 2923 } 2924 2925 for (i = 0; i < IWL_MVM_UAPSD_NOAGG_LIST_LEN; i++) { 2926 if (ether_addr_equal(mvm->uapsd_noagg_bssids[i].addr, bssid)) { 2927 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; 2928 return; 2929 } 2930 } 2931 2932 vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; 2933 } 2934 2935 static void 2936 iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm, 2937 struct ieee80211_vif *vif, u8 *peer_addr, 2938 enum nl80211_tdls_operation action) 2939 { 2940 struct iwl_fw_dbg_trigger_tlv *trig; 2941 struct iwl_fw_dbg_trigger_tdls *tdls_trig; 2942 2943 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), 2944 FW_DBG_TRIGGER_TDLS); 2945 if (!trig) 2946 return; 2947 2948 tdls_trig = (void *)trig->data; 2949 2950 if (!(tdls_trig->action_bitmap & BIT(action))) 2951 return; 2952 2953 if (tdls_trig->peer_mode && 2954 memcmp(tdls_trig->peer, peer_addr, ETH_ALEN) != 0) 2955 return; 2956 2957 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, 2958 "TDLS event occurred, peer %pM, action %d", 2959 peer_addr, action); 2960 } 2961 2962 struct iwl_mvm_he_obss_narrow_bw_ru_data { 2963 bool tolerated; 2964 }; 2965 2966 static void iwl_mvm_check_he_obss_narrow_bw_ru_iter(struct wiphy *wiphy, 2967 struct cfg80211_bss *bss, 2968 void *_data) 2969 { 2970 struct iwl_mvm_he_obss_narrow_bw_ru_data *data = _data; 2971 const struct cfg80211_bss_ies *ies; 2972 const struct element *elem; 2973 2974 rcu_read_lock(); 2975 ies = rcu_dereference(bss->ies); 2976 elem = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY, ies->data, 2977 ies->len); 2978 2979 if (!elem || elem->datalen < 10 || 2980 !(elem->data[10] & 2981 WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT)) { 2982 data->tolerated = false; 2983 } 2984 rcu_read_unlock(); 2985 } 2986 2987 static void iwl_mvm_check_he_obss_narrow_bw_ru(struct ieee80211_hw *hw, 2988 struct ieee80211_vif *vif) 2989 { 2990 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2991 struct iwl_mvm_he_obss_narrow_bw_ru_data iter_data = { 2992 .tolerated = true, 2993 }; 2994 2995 if (!(vif->bss_conf.chandef.chan->flags & IEEE80211_CHAN_RADAR)) { 2996 mvmvif->he_ru_2mhz_block = false; 2997 return; 2998 } 2999 3000 cfg80211_bss_iter(hw->wiphy, &vif->bss_conf.chandef, 3001 iwl_mvm_check_he_obss_narrow_bw_ru_iter, 3002 &iter_data); 3003 3004 /* 3005 * If there is at least one AP on radar channel that cannot 3006 * tolerate 26-tone RU UL OFDMA transmissions using HE TB PPDU. 
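 * In that case, block such transmissions by setting he_ru_2mhz_block for
 * this vif.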
3007 */ 3008 mvmvif->he_ru_2mhz_block = !iter_data.tolerated; 3009 } 3010 3011 static void iwl_mvm_reset_cca_40mhz_workaround(struct iwl_mvm *mvm, 3012 struct ieee80211_vif *vif) 3013 { 3014 struct ieee80211_supported_band *sband; 3015 const struct ieee80211_sta_he_cap *he_cap; 3016 3017 if (vif->type != NL80211_IFTYPE_STATION) 3018 return; 3019 3020 if (!mvm->cca_40mhz_workaround) 3021 return; 3022 3023 /* decrement and check that we reached zero */ 3024 mvm->cca_40mhz_workaround--; 3025 if (mvm->cca_40mhz_workaround) 3026 return; 3027 3028 sband = mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]; 3029 3030 sband->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; 3031 3032 he_cap = ieee80211_get_he_iftype_cap(sband, 3033 ieee80211_vif_type_p2p(vif)); 3034 3035 if (he_cap) { 3036 /* we know that ours is writable */ 3037 struct ieee80211_sta_he_cap *he = (void *)(uintptr_t)he_cap; 3038 3039 he->he_cap_elem.phy_cap_info[0] |= 3040 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G; 3041 } 3042 } 3043 3044 static void iwl_mvm_mei_host_associated(struct iwl_mvm *mvm, 3045 struct ieee80211_vif *vif, 3046 struct iwl_mvm_sta *mvm_sta) 3047 { 3048 #if IS_ENABLED(CONFIG_IWLMEI) 3049 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3050 struct iwl_mei_conn_info conn_info = { 3051 .ssid_len = vif->cfg.ssid_len, 3052 .channel = vif->bss_conf.chandef.chan->hw_value, 3053 }; 3054 3055 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) 3056 return; 3057 3058 if (!mvm->mei_registered) 3059 return; 3060 3061 switch (mvm_sta->pairwise_cipher) { 3062 case WLAN_CIPHER_SUITE_CCMP: 3063 conn_info.pairwise_cipher = IWL_MEI_CIPHER_CCMP; 3064 break; 3065 case WLAN_CIPHER_SUITE_GCMP: 3066 conn_info.pairwise_cipher = IWL_MEI_CIPHER_GCMP; 3067 break; 3068 case WLAN_CIPHER_SUITE_GCMP_256: 3069 conn_info.pairwise_cipher = IWL_MEI_CIPHER_GCMP_256; 3070 break; 3071 case 0: 3072 /* open profile */ 3073 break; 3074 default: 3075 /* cipher not supported, don't send anything to iwlmei */ 3076 return; 3077 } 3078 3079 switch (mvmvif->rekey_data.akm) { 3080 case WLAN_AKM_SUITE_SAE & 0xff: 3081 conn_info.auth_mode = IWL_MEI_AKM_AUTH_SAE; 3082 break; 3083 case WLAN_AKM_SUITE_PSK & 0xff: 3084 conn_info.auth_mode = IWL_MEI_AKM_AUTH_RSNA_PSK; 3085 break; 3086 case WLAN_AKM_SUITE_8021X & 0xff: 3087 conn_info.auth_mode = IWL_MEI_AKM_AUTH_RSNA; 3088 break; 3089 case 0: 3090 /* open profile */ 3091 conn_info.auth_mode = IWL_MEI_AKM_AUTH_OPEN; 3092 break; 3093 default: 3094 /* auth method / AKM not supported */ 3095 /* TODO: All the FT vesions of these? */ 3096 return; 3097 } 3098 3099 memcpy(conn_info.ssid, vif->cfg.ssid, vif->cfg.ssid_len); 3100 memcpy(conn_info.bssid, vif->bss_conf.bssid, ETH_ALEN); 3101 3102 /* TODO: add support for collocated AP data */ 3103 iwl_mei_host_associated(&conn_info, NULL); 3104 #endif 3105 } 3106 3107 static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, 3108 struct ieee80211_vif *vif, 3109 struct ieee80211_sta *sta, 3110 enum ieee80211_sta_state old_state, 3111 enum ieee80211_sta_state new_state) 3112 { 3113 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3114 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3115 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 3116 int ret; 3117 3118 IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n", 3119 sta->addr, old_state, new_state); 3120 3121 /* this would be a mac80211 bug ... but don't crash */ 3122 if (WARN_ON_ONCE(!mvmvif->phy_ctxt)) 3123 return test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) ? 
0 : -EINVAL; 3124 3125 /* 3126 * If we are in a STA removal flow and in DQA mode: 3127 * 3128 * This is after the sync_rcu part, so the queues have already been 3129 * flushed. No more TXs on their way in mac80211's path, and no more in 3130 * the queues. 3131 * Also, we won't be getting any new TX frames for this station. 3132 * What we might have are deferred TX frames that need to be taken care 3133 * of. 3134 * 3135 * Drop any still-queued deferred-frame before removing the STA, and 3136 * make sure the worker is no longer handling frames for this STA. 3137 */ 3138 if (old_state == IEEE80211_STA_NONE && 3139 new_state == IEEE80211_STA_NOTEXIST) { 3140 flush_work(&mvm->add_stream_wk); 3141 3142 /* 3143 * No need to make sure deferred TX indication is off since the 3144 * worker will already remove it if it was on 3145 */ 3146 3147 /* 3148 * Additionally, reset the 40 MHz capability if we disconnected 3149 * from the AP now. 3150 */ 3151 iwl_mvm_reset_cca_40mhz_workaround(mvm, vif); 3152 } 3153 3154 mutex_lock(&mvm->mutex); 3155 /* track whether or not the station is associated */ 3156 mvm_sta->sta_state = new_state; 3157 3158 if (old_state == IEEE80211_STA_NOTEXIST && 3159 new_state == IEEE80211_STA_NONE) { 3160 /* 3161 * Firmware bug - it'll crash if the beacon interval is less 3162 * than 16. We can't avoid connecting at all, so refuse the 3163 * station state change, this will cause mac80211 to abandon 3164 * attempts to connect to this AP, and eventually wpa_s will 3165 * blocklist the AP... 3166 */ 3167 if (vif->type == NL80211_IFTYPE_STATION && 3168 vif->bss_conf.beacon_int < 16) { 3169 IWL_ERR(mvm, 3170 "AP %pM beacon interval is %d, refusing due to firmware bug!\n", 3171 sta->addr, vif->bss_conf.beacon_int); 3172 ret = -EINVAL; 3173 goto out_unlock; 3174 } 3175 3176 if (vif->type == NL80211_IFTYPE_STATION) 3177 vif->bss_conf.he_support = sta->deflink.he_cap.has_he; 3178 3179 if (sta->tdls && 3180 (vif->p2p || 3181 iwl_mvm_tdls_sta_count(mvm, NULL) == 3182 IWL_MVM_TDLS_STA_COUNT || 3183 iwl_mvm_phy_ctx_count(mvm) > 1)) { 3184 IWL_DEBUG_MAC80211(mvm, "refusing TDLS sta\n"); 3185 ret = -EBUSY; 3186 goto out_unlock; 3187 } 3188 3189 ret = iwl_mvm_add_sta(mvm, vif, sta); 3190 if (sta->tdls && ret == 0) { 3191 iwl_mvm_recalc_tdls_state(mvm, vif, true); 3192 iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, 3193 NL80211_TDLS_SETUP); 3194 } 3195 3196 sta->max_rc_amsdu_len = 1; 3197 } else if (old_state == IEEE80211_STA_NONE && 3198 new_state == IEEE80211_STA_AUTH) { 3199 /* 3200 * EBS may be disabled due to previous failures reported by FW. 3201 * Reset EBS status here assuming environment has been changed. 
3202 */ 3203 mvm->last_ebs_successful = true; 3204 iwl_mvm_check_uapsd(mvm, vif, sta->addr); 3205 ret = 0; 3206 } else if (old_state == IEEE80211_STA_AUTH && 3207 new_state == IEEE80211_STA_ASSOC) { 3208 if (vif->type == NL80211_IFTYPE_AP) { 3209 vif->bss_conf.he_support = sta->deflink.he_cap.has_he; 3210 mvmvif->ap_assoc_sta_count++; 3211 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 3212 if (vif->bss_conf.he_support && 3213 !iwlwifi_mod_params.disable_11ax) 3214 iwl_mvm_cfg_he_sta(mvm, vif, mvm_sta->sta_id); 3215 } else if (vif->type == NL80211_IFTYPE_STATION) { 3216 vif->bss_conf.he_support = sta->deflink.he_cap.has_he; 3217 3218 mvmvif->he_ru_2mhz_block = false; 3219 if (sta->deflink.he_cap.has_he) 3220 iwl_mvm_check_he_obss_narrow_bw_ru(hw, vif); 3221 3222 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 3223 } 3224 3225 iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, 3226 false); 3227 ret = iwl_mvm_update_sta(mvm, vif, sta); 3228 } else if (old_state == IEEE80211_STA_ASSOC && 3229 new_state == IEEE80211_STA_AUTHORIZED) { 3230 ret = 0; 3231 3232 /* we don't support TDLS during DCM */ 3233 if (iwl_mvm_phy_ctx_count(mvm) > 1) 3234 iwl_mvm_teardown_tdls_peers(mvm); 3235 3236 if (sta->tdls) { 3237 iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, 3238 NL80211_TDLS_ENABLE_LINK); 3239 } else { 3240 /* enable beacon filtering */ 3241 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); 3242 3243 mvmvif->authorized = 1; 3244 3245 /* 3246 * Now that the station is authorized, i.e., keys were already 3247 * installed, need to indicate to the FW that 3248 * multicast data frames can be forwarded to the driver 3249 */ 3250 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 3251 iwl_mvm_mei_host_associated(mvm, vif, mvm_sta); 3252 } 3253 3254 iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, 3255 true); 3256 } else if (old_state == IEEE80211_STA_AUTHORIZED && 3257 new_state == IEEE80211_STA_ASSOC) { 3258 /* once we move into assoc state, need to update rate scale to 3259 * disable using wide bandwidth 3260 */ 3261 iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, 3262 false); 3263 if (!sta->tdls) { 3264 /* Multicast data frames are no longer allowed */ 3265 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 3266 3267 /* 3268 * Set this after the above iwl_mvm_mac_ctxt_changed() 3269 * to avoid sending high prio again for a little time. 
3270 */ 3271 mvmvif->authorized = 0; 3272 3273 /* disable beacon filtering */ 3274 ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); 3275 WARN_ON(ret && 3276 !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, 3277 &mvm->status)); 3278 } 3279 ret = 0; 3280 } else if (old_state == IEEE80211_STA_ASSOC && 3281 new_state == IEEE80211_STA_AUTH) { 3282 if (vif->type == NL80211_IFTYPE_AP) { 3283 mvmvif->ap_assoc_sta_count--; 3284 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 3285 } else if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) 3286 iwl_mvm_stop_session_protection(mvm, vif); 3287 ret = 0; 3288 } else if (old_state == IEEE80211_STA_AUTH && 3289 new_state == IEEE80211_STA_NONE) { 3290 ret = 0; 3291 } else if (old_state == IEEE80211_STA_NONE && 3292 new_state == IEEE80211_STA_NOTEXIST) { 3293 if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) 3294 iwl_mvm_stop_session_protection(mvm, vif); 3295 ret = iwl_mvm_rm_sta(mvm, vif, sta); 3296 if (sta->tdls) { 3297 iwl_mvm_recalc_tdls_state(mvm, vif, false); 3298 iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, 3299 NL80211_TDLS_DISABLE_LINK); 3300 } 3301 3302 if (unlikely(ret && 3303 test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, 3304 &mvm->status))) 3305 ret = 0; 3306 } else { 3307 ret = -EIO; 3308 } 3309 out_unlock: 3310 mutex_unlock(&mvm->mutex); 3311 3312 if (sta->tdls && ret == 0) { 3313 if (old_state == IEEE80211_STA_NOTEXIST && 3314 new_state == IEEE80211_STA_NONE) 3315 ieee80211_reserve_tid(sta, IWL_MVM_TDLS_FW_TID); 3316 else if (old_state == IEEE80211_STA_NONE && 3317 new_state == IEEE80211_STA_NOTEXIST) 3318 ieee80211_unreserve_tid(sta, IWL_MVM_TDLS_FW_TID); 3319 } 3320 3321 return ret; 3322 } 3323 3324 static int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value) 3325 { 3326 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3327 3328 mvm->rts_threshold = value; 3329 3330 return 0; 3331 } 3332 3333 static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw, 3334 struct ieee80211_vif *vif, 3335 struct ieee80211_sta *sta, u32 changed) 3336 { 3337 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3338 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3339 3340 if (changed & (IEEE80211_RC_BW_CHANGED | 3341 IEEE80211_RC_SUPP_RATES_CHANGED | 3342 IEEE80211_RC_NSS_CHANGED)) 3343 iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, 3344 true); 3345 3346 if (vif->type == NL80211_IFTYPE_STATION && 3347 changed & IEEE80211_RC_NSS_CHANGED) 3348 iwl_mvm_sf_update(mvm, vif, false); 3349 } 3350 3351 static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw, 3352 struct ieee80211_vif *vif, 3353 unsigned int link_id, u16 ac, 3354 const struct ieee80211_tx_queue_params *params) 3355 { 3356 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3357 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3358 3359 mvmvif->queue_params[ac] = *params; 3360 3361 /* 3362 * No need to update right away, we'll get BSS_CHANGED_QOS 3363 * The exception is P2P_DEVICE interface which needs immediate update. 
3364 */ 3365 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { 3366 int ret; 3367 3368 mutex_lock(&mvm->mutex); 3369 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 3370 mutex_unlock(&mvm->mutex); 3371 return ret; 3372 } 3373 return 0; 3374 } 3375 3376 static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw, 3377 struct ieee80211_vif *vif, 3378 struct ieee80211_prep_tx_info *info) 3379 { 3380 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3381 3382 mutex_lock(&mvm->mutex); 3383 iwl_mvm_protect_assoc(mvm, vif, info->duration); 3384 mutex_unlock(&mvm->mutex); 3385 } 3386 3387 static void iwl_mvm_mac_mgd_complete_tx(struct ieee80211_hw *hw, 3388 struct ieee80211_vif *vif, 3389 struct ieee80211_prep_tx_info *info) 3390 { 3391 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3392 3393 /* for successful cases (auth/assoc), don't cancel session protection */ 3394 if (info->success) 3395 return; 3396 3397 mutex_lock(&mvm->mutex); 3398 iwl_mvm_stop_session_protection(mvm, vif); 3399 mutex_unlock(&mvm->mutex); 3400 } 3401 3402 static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw, 3403 struct ieee80211_vif *vif, 3404 struct cfg80211_sched_scan_request *req, 3405 struct ieee80211_scan_ies *ies) 3406 { 3407 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3408 3409 int ret; 3410 3411 mutex_lock(&mvm->mutex); 3412 3413 if (!vif->cfg.idle) { 3414 ret = -EBUSY; 3415 goto out; 3416 } 3417 3418 ret = iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED); 3419 3420 out: 3421 mutex_unlock(&mvm->mutex); 3422 return ret; 3423 } 3424 3425 static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw, 3426 struct ieee80211_vif *vif) 3427 { 3428 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3429 int ret; 3430 3431 mutex_lock(&mvm->mutex); 3432 3433 /* Due to a race condition, it's possible that mac80211 asks 3434 * us to stop a sched_scan when it's already stopped. This 3435 * can happen, for instance, if we stopped the scan ourselves, 3436 * called ieee80211_sched_scan_stopped() and the userspace called 3437 * stop sched scan scan before ieee80211_sched_scan_stopped_work() 3438 * could run. To handle this, simply return if the scan is 3439 * not running. 
3440 */ 3441 if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) { 3442 mutex_unlock(&mvm->mutex); 3443 return 0; 3444 } 3445 3446 ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, false); 3447 mutex_unlock(&mvm->mutex); 3448 iwl_mvm_wait_for_async_handlers(mvm); 3449 3450 return ret; 3451 } 3452 3453 static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, 3454 enum set_key_cmd cmd, 3455 struct ieee80211_vif *vif, 3456 struct ieee80211_sta *sta, 3457 struct ieee80211_key_conf *key) 3458 { 3459 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3460 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3461 struct iwl_mvm_sta *mvmsta = NULL; 3462 struct iwl_mvm_key_pn *ptk_pn; 3463 int keyidx = key->keyidx; 3464 int ret, i; 3465 u8 key_offset; 3466 3467 if (sta) 3468 mvmsta = iwl_mvm_sta_from_mac80211(sta); 3469 3470 switch (key->cipher) { 3471 case WLAN_CIPHER_SUITE_TKIP: 3472 if (!mvm->trans->trans_cfg->gen2) { 3473 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; 3474 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; 3475 } else if (vif->type == NL80211_IFTYPE_STATION) { 3476 key->flags |= IEEE80211_KEY_FLAG_PUT_MIC_SPACE; 3477 } else { 3478 IWL_DEBUG_MAC80211(mvm, "Use SW encryption for TKIP\n"); 3479 return -EOPNOTSUPP; 3480 } 3481 break; 3482 case WLAN_CIPHER_SUITE_CCMP: 3483 case WLAN_CIPHER_SUITE_GCMP: 3484 case WLAN_CIPHER_SUITE_GCMP_256: 3485 if (!iwl_mvm_has_new_tx_api(mvm)) 3486 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; 3487 break; 3488 case WLAN_CIPHER_SUITE_AES_CMAC: 3489 case WLAN_CIPHER_SUITE_BIP_GMAC_128: 3490 case WLAN_CIPHER_SUITE_BIP_GMAC_256: 3491 WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE)); 3492 break; 3493 case WLAN_CIPHER_SUITE_WEP40: 3494 case WLAN_CIPHER_SUITE_WEP104: 3495 if (vif->type == NL80211_IFTYPE_STATION) 3496 break; 3497 if (iwl_mvm_has_new_tx_api(mvm)) 3498 return -EOPNOTSUPP; 3499 /* support HW crypto on TX */ 3500 return 0; 3501 default: 3502 return -EOPNOTSUPP; 3503 } 3504 3505 switch (cmd) { 3506 case SET_KEY: 3507 if (keyidx == 6 || keyidx == 7) 3508 rcu_assign_pointer(mvmvif->bcn_prot.keys[keyidx - 6], 3509 key); 3510 3511 if ((vif->type == NL80211_IFTYPE_ADHOC || 3512 vif->type == NL80211_IFTYPE_AP) && !sta) { 3513 /* 3514 * GTK on AP interface is a TX-only key, return 0; 3515 * on IBSS they're per-station and because we're lazy 3516 * we don't support them for RX, so do the same. 3517 * CMAC/GMAC in AP/IBSS modes must be done in software. 3518 */ 3519 if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC || 3520 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || 3521 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) { 3522 ret = -EOPNOTSUPP; 3523 break; 3524 } 3525 3526 if (key->cipher != WLAN_CIPHER_SUITE_GCMP && 3527 key->cipher != WLAN_CIPHER_SUITE_GCMP_256 && 3528 !iwl_mvm_has_new_tx_api(mvm)) { 3529 key->hw_key_idx = STA_KEY_IDX_INVALID; 3530 ret = 0; 3531 break; 3532 } 3533 3534 if (!mvmvif->ap_ibss_active) { 3535 for (i = 0; 3536 i < ARRAY_SIZE(mvmvif->ap_early_keys); 3537 i++) { 3538 if (!mvmvif->ap_early_keys[i]) { 3539 mvmvif->ap_early_keys[i] = key; 3540 break; 3541 } 3542 } 3543 3544 if (i >= ARRAY_SIZE(mvmvif->ap_early_keys)) 3545 ret = -ENOSPC; 3546 else 3547 ret = 0; 3548 3549 break; 3550 } 3551 } 3552 3553 /* During FW restart, in order to restore the state as it was, 3554 * don't try to reprogram keys we previously failed for. 
3555 */ 3556 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && 3557 key->hw_key_idx == STA_KEY_IDX_INVALID) { 3558 IWL_DEBUG_MAC80211(mvm, 3559 "skip invalid idx key programming during restart\n"); 3560 ret = 0; 3561 break; 3562 } 3563 3564 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && 3565 mvmsta && iwl_mvm_has_new_rx_api(mvm) && 3566 key->flags & IEEE80211_KEY_FLAG_PAIRWISE && 3567 (key->cipher == WLAN_CIPHER_SUITE_CCMP || 3568 key->cipher == WLAN_CIPHER_SUITE_GCMP || 3569 key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) { 3570 struct ieee80211_key_seq seq; 3571 int tid, q; 3572 3573 WARN_ON(rcu_access_pointer(mvmsta->ptk_pn[keyidx])); 3574 ptk_pn = kzalloc(struct_size(ptk_pn, q, 3575 mvm->trans->num_rx_queues), 3576 GFP_KERNEL); 3577 if (!ptk_pn) { 3578 ret = -ENOMEM; 3579 break; 3580 } 3581 3582 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { 3583 ieee80211_get_key_rx_seq(key, tid, &seq); 3584 for (q = 0; q < mvm->trans->num_rx_queues; q++) 3585 memcpy(ptk_pn->q[q].pn[tid], 3586 seq.ccmp.pn, 3587 IEEE80211_CCMP_PN_LEN); 3588 } 3589 3590 rcu_assign_pointer(mvmsta->ptk_pn[keyidx], ptk_pn); 3591 } 3592 3593 /* in HW restart reuse the index, otherwise request a new one */ 3594 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) 3595 key_offset = key->hw_key_idx; 3596 else 3597 key_offset = STA_KEY_IDX_INVALID; 3598 3599 if (mvmsta && key->flags & IEEE80211_KEY_FLAG_PAIRWISE) 3600 mvmsta->pairwise_cipher = key->cipher; 3601 3602 IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n"); 3603 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset); 3604 if (ret) { 3605 IWL_WARN(mvm, "set key failed\n"); 3606 key->hw_key_idx = STA_KEY_IDX_INVALID; 3607 /* 3608 * can't add key for RX, but we don't need it 3609 * in the device for TX so still return 0, 3610 * unless we have new TX API where we cannot 3611 * put key material into the TX_CMD 3612 */ 3613 if (iwl_mvm_has_new_tx_api(mvm)) 3614 ret = -EOPNOTSUPP; 3615 else 3616 ret = 0; 3617 } 3618 3619 break; 3620 case DISABLE_KEY: 3621 if (keyidx == 6 || keyidx == 7) 3622 RCU_INIT_POINTER(mvmvif->bcn_prot.keys[keyidx - 6], 3623 NULL); 3624 3625 ret = -ENOENT; 3626 for (i = 0; i < ARRAY_SIZE(mvmvif->ap_early_keys); i++) { 3627 if (mvmvif->ap_early_keys[i] == key) { 3628 mvmvif->ap_early_keys[i] = NULL; 3629 ret = 0; 3630 } 3631 } 3632 3633 /* found in pending list - don't do anything else */ 3634 if (ret == 0) 3635 break; 3636 3637 if (key->hw_key_idx == STA_KEY_IDX_INVALID) { 3638 ret = 0; 3639 break; 3640 } 3641 3642 if (mvmsta && iwl_mvm_has_new_rx_api(mvm) && 3643 key->flags & IEEE80211_KEY_FLAG_PAIRWISE && 3644 (key->cipher == WLAN_CIPHER_SUITE_CCMP || 3645 key->cipher == WLAN_CIPHER_SUITE_GCMP || 3646 key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) { 3647 ptk_pn = rcu_dereference_protected( 3648 mvmsta->ptk_pn[keyidx], 3649 lockdep_is_held(&mvm->mutex)); 3650 RCU_INIT_POINTER(mvmsta->ptk_pn[keyidx], NULL); 3651 if (ptk_pn) 3652 kfree_rcu(ptk_pn, rcu_head); 3653 } 3654 3655 IWL_DEBUG_MAC80211(mvm, "disable hwcrypto key\n"); 3656 ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key); 3657 break; 3658 default: 3659 ret = -EINVAL; 3660 } 3661 3662 return ret; 3663 } 3664 3665 static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, 3666 enum set_key_cmd cmd, 3667 struct ieee80211_vif *vif, 3668 struct ieee80211_sta *sta, 3669 struct ieee80211_key_conf *key) 3670 { 3671 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3672 int ret; 3673 3674 mutex_lock(&mvm->mutex); 3675 ret = __iwl_mvm_mac_set_key(hw, cmd, vif, sta, key); 3676 
mutex_unlock(&mvm->mutex); 3677 3678 return ret; 3679 } 3680 3681 static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw, 3682 struct ieee80211_vif *vif, 3683 struct ieee80211_key_conf *keyconf, 3684 struct ieee80211_sta *sta, 3685 u32 iv32, u16 *phase1key) 3686 { 3687 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3688 3689 if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID) 3690 return; 3691 3692 iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key); 3693 } 3694 3695 3696 static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait, 3697 struct iwl_rx_packet *pkt, void *data) 3698 { 3699 struct iwl_mvm *mvm = 3700 container_of(notif_wait, struct iwl_mvm, notif_wait); 3701 struct iwl_hs20_roc_res *resp; 3702 int resp_len = iwl_rx_packet_payload_len(pkt); 3703 struct iwl_mvm_time_event_data *te_data = data; 3704 3705 if (WARN_ON(pkt->hdr.cmd != HOT_SPOT_CMD)) 3706 return true; 3707 3708 if (WARN_ON_ONCE(resp_len != sizeof(*resp))) { 3709 IWL_ERR(mvm, "Invalid HOT_SPOT_CMD response\n"); 3710 return true; 3711 } 3712 3713 resp = (void *)pkt->data; 3714 3715 IWL_DEBUG_TE(mvm, 3716 "Aux ROC: Received response from ucode: status=%d uid=%d\n", 3717 resp->status, resp->event_unique_id); 3718 3719 te_data->uid = le32_to_cpu(resp->event_unique_id); 3720 IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n", 3721 te_data->uid); 3722 3723 spin_lock_bh(&mvm->time_event_lock); 3724 list_add_tail(&te_data->list, &mvm->aux_roc_te_list); 3725 spin_unlock_bh(&mvm->time_event_lock); 3726 3727 return true; 3728 } 3729 3730 #define AUX_ROC_MIN_DURATION MSEC_TO_TU(100) 3731 #define AUX_ROC_MIN_DELAY MSEC_TO_TU(200) 3732 #define AUX_ROC_MAX_DELAY MSEC_TO_TU(600) 3733 #define AUX_ROC_SAFETY_BUFFER MSEC_TO_TU(20) 3734 #define AUX_ROC_MIN_SAFETY_BUFFER MSEC_TO_TU(10) 3735 static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, 3736 struct ieee80211_channel *channel, 3737 struct ieee80211_vif *vif, 3738 int duration) 3739 { 3740 int res; 3741 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3742 struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data; 3743 static const u16 time_event_response[] = { HOT_SPOT_CMD }; 3744 struct iwl_notification_wait wait_time_event; 3745 u32 dtim_interval = vif->bss_conf.dtim_period * 3746 vif->bss_conf.beacon_int; 3747 u32 req_dur, delay; 3748 struct iwl_hs20_roc_req aux_roc_req = { 3749 .action = cpu_to_le32(FW_CTXT_ACTION_ADD), 3750 .id_and_color = 3751 cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)), 3752 .sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id), 3753 }; 3754 struct iwl_hs20_roc_req_tail *tail = iwl_mvm_chan_info_cmd_tail(mvm, 3755 &aux_roc_req.channel_info); 3756 u16 len = sizeof(aux_roc_req) - iwl_mvm_chan_info_padding(mvm); 3757 3758 /* Set the channel info data */ 3759 iwl_mvm_set_chan_info(mvm, &aux_roc_req.channel_info, channel->hw_value, 3760 iwl_mvm_phy_band_from_nl80211(channel->band), 3761 PHY_VHT_CHANNEL_MODE20, 3762 0); 3763 3764 /* Set the time and duration */ 3765 tail->apply_time = cpu_to_le32(iwl_mvm_get_systime(mvm)); 3766 3767 delay = AUX_ROC_MIN_DELAY; 3768 req_dur = MSEC_TO_TU(duration); 3769 3770 /* 3771 * If we are associated we want the delay time to be at least one 3772 * dtim interval so that the FW can wait until after the DTIM and 3773 * then start the time event, this will potentially allow us to 3774 * remain off-channel for the max duration. 
3775 * Since we want to use almost a whole dtim interval we would also 3776 * like the delay to be for 2-3 dtim intervals, in case there are 3777 * other time events with higher priority. 3778 */ 3779 if (vif->cfg.assoc) { 3780 delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY); 3781 /* We cannot remain off-channel longer than the DTIM interval */ 3782 if (dtim_interval <= req_dur) { 3783 req_dur = dtim_interval - AUX_ROC_SAFETY_BUFFER; 3784 if (req_dur <= AUX_ROC_MIN_DURATION) 3785 req_dur = dtim_interval - 3786 AUX_ROC_MIN_SAFETY_BUFFER; 3787 } 3788 } 3789 3790 tail->duration = cpu_to_le32(req_dur); 3791 tail->apply_time_max_delay = cpu_to_le32(delay); 3792 3793 IWL_DEBUG_TE(mvm, 3794 "ROC: Requesting to remain on channel %u for %ums\n", 3795 channel->hw_value, req_dur); 3796 IWL_DEBUG_TE(mvm, 3797 "\t(requested = %ums, max_delay = %ums, dtim_interval = %ums)\n", 3798 duration, delay, dtim_interval); 3799 3800 /* Set the node address */ 3801 memcpy(tail->node_addr, vif->addr, ETH_ALEN); 3802 3803 lockdep_assert_held(&mvm->mutex); 3804 3805 spin_lock_bh(&mvm->time_event_lock); 3806 3807 if (WARN_ON(te_data->id == HOT_SPOT_CMD)) { 3808 spin_unlock_bh(&mvm->time_event_lock); 3809 return -EIO; 3810 } 3811 3812 te_data->vif = vif; 3813 te_data->duration = duration; 3814 te_data->id = HOT_SPOT_CMD; 3815 3816 spin_unlock_bh(&mvm->time_event_lock); 3817 3818 /* 3819 * Use a notification wait, which really just processes the 3820 * command response and doesn't wait for anything, in order 3821 * to be able to process the response and get the UID inside 3822 * the RX path. Using CMD_WANT_SKB doesn't work because it 3823 * stores the buffer and then wakes up this thread, by which 3824 * time another notification (that the time event started) 3825 * might already be processed unsuccessfully. 3826 */ 3827 iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event, 3828 time_event_response, 3829 ARRAY_SIZE(time_event_response), 3830 iwl_mvm_rx_aux_roc, te_data); 3831 3832 res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, len, 3833 &aux_roc_req); 3834 3835 if (res) { 3836 IWL_ERR(mvm, "Couldn't send HOT_SPOT_CMD: %d\n", res); 3837 iwl_remove_notification(&mvm->notif_wait, &wait_time_event); 3838 goto out_clear_te; 3839 } 3840 3841 /* No need to wait for anything, so just pass 1 (0 isn't valid) */ 3842 res = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1); 3843 /* should never fail */ 3844 WARN_ON_ONCE(res); 3845 3846 if (res) { 3847 out_clear_te: 3848 spin_lock_bh(&mvm->time_event_lock); 3849 iwl_mvm_te_clear_data(mvm, te_data); 3850 spin_unlock_bh(&mvm->time_event_lock); 3851 } 3852 3853 return res; 3854 } 3855 3856 static int iwl_mvm_roc(struct ieee80211_hw *hw, 3857 struct ieee80211_vif *vif, 3858 struct ieee80211_channel *channel, 3859 int duration, 3860 enum ieee80211_roc_type type) 3861 { 3862 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3863 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3864 struct cfg80211_chan_def chandef; 3865 struct iwl_mvm_phy_ctxt *phy_ctxt; 3866 bool band_change_removal; 3867 int ret, i; 3868 3869 IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value, 3870 duration, type); 3871 3872 /* 3873 * Flush the done work, just in case it's still pending, so that 3874 * the work it does can complete and we can accept new frames. 
3875 */ 3876 flush_work(&mvm->roc_done_wk); 3877 3878 mutex_lock(&mvm->mutex); 3879 3880 switch (vif->type) { 3881 case NL80211_IFTYPE_STATION: 3882 if (fw_has_capa(&mvm->fw->ucode_capa, 3883 IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) { 3884 /* Use aux roc framework (HS20) */ 3885 if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12) { 3886 u32 lmac_id; 3887 3888 lmac_id = iwl_mvm_get_lmac_id(mvm->fw, 3889 channel->band); 3890 ret = iwl_mvm_add_aux_sta(mvm, lmac_id); 3891 if (WARN(ret, 3892 "Failed to allocate aux station")) 3893 goto out_unlock; 3894 } 3895 ret = iwl_mvm_send_aux_roc_cmd(mvm, channel, 3896 vif, duration); 3897 goto out_unlock; 3898 } 3899 IWL_ERR(mvm, "hotspot not supported\n"); 3900 ret = -EINVAL; 3901 goto out_unlock; 3902 case NL80211_IFTYPE_P2P_DEVICE: 3903 /* handle below */ 3904 break; 3905 default: 3906 IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type); 3907 ret = -EINVAL; 3908 goto out_unlock; 3909 } 3910 3911 for (i = 0; i < NUM_PHY_CTX; i++) { 3912 phy_ctxt = &mvm->phy_ctxts[i]; 3913 if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt) 3914 continue; 3915 3916 if (phy_ctxt->ref && channel == phy_ctxt->channel) { 3917 /* 3918 * Unbind the P2P_DEVICE from the current PHY context, 3919 * and if the PHY context is not used remove it. 3920 */ 3921 ret = iwl_mvm_binding_remove_vif(mvm, vif); 3922 if (WARN(ret, "Failed unbinding P2P_DEVICE\n")) 3923 goto out_unlock; 3924 3925 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); 3926 3927 /* Bind the P2P_DEVICE to the current PHY Context */ 3928 mvmvif->phy_ctxt = phy_ctxt; 3929 3930 ret = iwl_mvm_binding_add_vif(mvm, vif); 3931 if (WARN(ret, "Failed binding P2P_DEVICE\n")) 3932 goto out_unlock; 3933 3934 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); 3935 goto schedule_time_event; 3936 } 3937 } 3938 3939 /* Need to update the PHY context only if the ROC channel changed */ 3940 if (channel == mvmvif->phy_ctxt->channel) 3941 goto schedule_time_event; 3942 3943 cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT); 3944 3945 /* 3946 * Check if the remain-on-channel is on a different band and that 3947 * requires context removal, see iwl_mvm_phy_ctxt_changed(). If 3948 * so, we'll need to release and then re-configure here, since we 3949 * must not remove a PHY context that's part of a binding. 3950 */ 3951 band_change_removal = 3952 fw_has_capa(&mvm->fw->ucode_capa, 3953 IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) && 3954 mvmvif->phy_ctxt->channel->band != chandef.chan->band; 3955 3956 if (mvmvif->phy_ctxt->ref == 1 && !band_change_removal) { 3957 /* 3958 * Change the PHY context configuration as it is currently 3959 * referenced only by the P2P Device MAC (and we can modify it) 3960 */ 3961 ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt, 3962 &chandef, 1, 1); 3963 if (ret) 3964 goto out_unlock; 3965 } else { 3966 /* 3967 * The PHY context is shared with other MACs (or we're trying to 3968 * switch bands), so remove the P2P Device from the binding, 3969 * allocate an new PHY context and create a new binding. 
3970 */ 3971 phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); 3972 if (!phy_ctxt) { 3973 ret = -ENOSPC; 3974 goto out_unlock; 3975 } 3976 3977 ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef, 3978 1, 1); 3979 if (ret) { 3980 IWL_ERR(mvm, "Failed to change PHY context\n"); 3981 goto out_unlock; 3982 } 3983 3984 /* Unbind the P2P_DEVICE from the current PHY context */ 3985 ret = iwl_mvm_binding_remove_vif(mvm, vif); 3986 if (WARN(ret, "Failed unbinding P2P_DEVICE\n")) 3987 goto out_unlock; 3988 3989 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); 3990 3991 /* Bind the P2P_DEVICE to the new allocated PHY context */ 3992 mvmvif->phy_ctxt = phy_ctxt; 3993 3994 ret = iwl_mvm_binding_add_vif(mvm, vif); 3995 if (WARN(ret, "Failed binding P2P_DEVICE\n")) 3996 goto out_unlock; 3997 3998 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); 3999 } 4000 4001 schedule_time_event: 4002 /* Schedule the time events */ 4003 ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type); 4004 4005 out_unlock: 4006 mutex_unlock(&mvm->mutex); 4007 IWL_DEBUG_MAC80211(mvm, "leave\n"); 4008 return ret; 4009 } 4010 4011 static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw, 4012 struct ieee80211_vif *vif) 4013 { 4014 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4015 4016 IWL_DEBUG_MAC80211(mvm, "enter\n"); 4017 4018 mutex_lock(&mvm->mutex); 4019 iwl_mvm_stop_roc(mvm, vif); 4020 mutex_unlock(&mvm->mutex); 4021 4022 IWL_DEBUG_MAC80211(mvm, "leave\n"); 4023 return 0; 4024 } 4025 4026 struct iwl_mvm_ftm_responder_iter_data { 4027 bool responder; 4028 struct ieee80211_chanctx_conf *ctx; 4029 }; 4030 4031 static void iwl_mvm_ftm_responder_chanctx_iter(void *_data, u8 *mac, 4032 struct ieee80211_vif *vif) 4033 { 4034 struct iwl_mvm_ftm_responder_iter_data *data = _data; 4035 4036 if (rcu_access_pointer(vif->bss_conf.chanctx_conf) == data->ctx && 4037 vif->type == NL80211_IFTYPE_AP && vif->bss_conf.ftmr_params) 4038 data->responder = true; 4039 } 4040 4041 static bool iwl_mvm_is_ftm_responder_chanctx(struct iwl_mvm *mvm, 4042 struct ieee80211_chanctx_conf *ctx) 4043 { 4044 struct iwl_mvm_ftm_responder_iter_data data = { 4045 .responder = false, 4046 .ctx = ctx, 4047 }; 4048 4049 ieee80211_iterate_active_interfaces_atomic(mvm->hw, 4050 IEEE80211_IFACE_ITER_NORMAL, 4051 iwl_mvm_ftm_responder_chanctx_iter, 4052 &data); 4053 return data.responder; 4054 } 4055 4056 static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm, 4057 struct ieee80211_chanctx_conf *ctx) 4058 { 4059 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; 4060 struct iwl_mvm_phy_ctxt *phy_ctxt; 4061 bool responder = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx); 4062 struct cfg80211_chan_def *def = responder ? 
&ctx->def : &ctx->min_def; 4063 int ret; 4064 4065 lockdep_assert_held(&mvm->mutex); 4066 4067 IWL_DEBUG_MAC80211(mvm, "Add channel context\n"); 4068 4069 phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); 4070 if (!phy_ctxt) { 4071 ret = -ENOSPC; 4072 goto out; 4073 } 4074 4075 ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, def, 4076 ctx->rx_chains_static, 4077 ctx->rx_chains_dynamic); 4078 if (ret) { 4079 IWL_ERR(mvm, "Failed to add PHY context\n"); 4080 goto out; 4081 } 4082 4083 iwl_mvm_phy_ctxt_ref(mvm, phy_ctxt); 4084 *phy_ctxt_id = phy_ctxt->id; 4085 out: 4086 return ret; 4087 } 4088 4089 static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw, 4090 struct ieee80211_chanctx_conf *ctx) 4091 { 4092 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4093 int ret; 4094 4095 mutex_lock(&mvm->mutex); 4096 ret = __iwl_mvm_add_chanctx(mvm, ctx); 4097 mutex_unlock(&mvm->mutex); 4098 4099 return ret; 4100 } 4101 4102 static void __iwl_mvm_remove_chanctx(struct iwl_mvm *mvm, 4103 struct ieee80211_chanctx_conf *ctx) 4104 { 4105 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; 4106 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; 4107 4108 lockdep_assert_held(&mvm->mutex); 4109 4110 iwl_mvm_phy_ctxt_unref(mvm, phy_ctxt); 4111 } 4112 4113 static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw, 4114 struct ieee80211_chanctx_conf *ctx) 4115 { 4116 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4117 4118 mutex_lock(&mvm->mutex); 4119 __iwl_mvm_remove_chanctx(mvm, ctx); 4120 mutex_unlock(&mvm->mutex); 4121 } 4122 4123 static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw, 4124 struct ieee80211_chanctx_conf *ctx, 4125 u32 changed) 4126 { 4127 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4128 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; 4129 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; 4130 bool responder = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx); 4131 struct cfg80211_chan_def *def = responder ? &ctx->def : &ctx->min_def; 4132 4133 if (WARN_ONCE((phy_ctxt->ref > 1) && 4134 (changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH | 4135 IEEE80211_CHANCTX_CHANGE_RX_CHAINS | 4136 IEEE80211_CHANCTX_CHANGE_RADAR | 4137 IEEE80211_CHANCTX_CHANGE_MIN_WIDTH)), 4138 "Cannot change PHY. Ref=%d, changed=0x%X\n", 4139 phy_ctxt->ref, changed)) 4140 return; 4141 4142 mutex_lock(&mvm->mutex); 4143 4144 /* we are only changing the min_width, may be a noop */ 4145 if (changed == IEEE80211_CHANCTX_CHANGE_MIN_WIDTH) { 4146 if (phy_ctxt->width == def->width) 4147 goto out_unlock; 4148 4149 /* we are just toggling between 20_NOHT and 20 */ 4150 if (phy_ctxt->width <= NL80211_CHAN_WIDTH_20 && 4151 def->width <= NL80211_CHAN_WIDTH_20) 4152 goto out_unlock; 4153 } 4154 4155 iwl_mvm_bt_coex_vif_change(mvm); 4156 iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, def, 4157 ctx->rx_chains_static, 4158 ctx->rx_chains_dynamic); 4159 4160 out_unlock: 4161 mutex_unlock(&mvm->mutex); 4162 } 4163 4164 static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, 4165 struct ieee80211_vif *vif, 4166 struct ieee80211_chanctx_conf *ctx, 4167 bool switching_chanctx) 4168 { 4169 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; 4170 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; 4171 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 4172 int ret; 4173 4174 lockdep_assert_held(&mvm->mutex); 4175 4176 mvmvif->phy_ctxt = phy_ctxt; 4177 4178 switch (vif->type) { 4179 case NL80211_IFTYPE_AP: 4180 /* only needed if we're switching chanctx (i.e. 
during CSA) */ 4181 if (switching_chanctx) { 4182 mvmvif->ap_ibss_active = true; 4183 break; 4184 } 4185 fallthrough; 4186 case NL80211_IFTYPE_ADHOC: 4187 /* 4188 * The AP binding flow is handled as part of the start_ap flow 4189 * (in bss_info_changed), similarly for IBSS. 4190 */ 4191 ret = 0; 4192 goto out; 4193 case NL80211_IFTYPE_STATION: 4194 mvmvif->csa_bcn_pending = false; 4195 break; 4196 case NL80211_IFTYPE_MONITOR: 4197 /* always disable PS when a monitor interface is active */ 4198 mvmvif->ps_disabled = true; 4199 break; 4200 default: 4201 ret = -EINVAL; 4202 goto out; 4203 } 4204 4205 ret = iwl_mvm_binding_add_vif(mvm, vif); 4206 if (ret) 4207 goto out; 4208 4209 /* 4210 * Power state must be updated before quotas, 4211 * otherwise fw will complain. 4212 */ 4213 iwl_mvm_power_update_mac(mvm); 4214 4215 /* Setting the quota at this stage is only required for monitor 4216 * interfaces. For the other types, the bss_info changed flow 4217 * will handle quota settings. 4218 */ 4219 if (vif->type == NL80211_IFTYPE_MONITOR) { 4220 mvmvif->monitor_active = true; 4221 ret = iwl_mvm_update_quotas(mvm, false, NULL); 4222 if (ret) 4223 goto out_remove_binding; 4224 4225 ret = iwl_mvm_add_snif_sta(mvm, vif); 4226 if (ret) 4227 goto out_remove_binding; 4228 4229 } 4230 4231 /* Handle binding during CSA */ 4232 if (vif->type == NL80211_IFTYPE_AP) { 4233 iwl_mvm_update_quotas(mvm, false, NULL); 4234 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 4235 } 4236 4237 if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) { 4238 mvmvif->csa_bcn_pending = true; 4239 4240 if (!fw_has_capa(&mvm->fw->ucode_capa, 4241 IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) { 4242 u32 duration = 3 * vif->bss_conf.beacon_int; 4243 4244 /* Protect the session to make sure we hear the first 4245 * beacon on the new channel. 
4246 */ 4247 iwl_mvm_protect_session(mvm, vif, duration, duration, 4248 vif->bss_conf.beacon_int / 2, 4249 true); 4250 } 4251 4252 iwl_mvm_update_quotas(mvm, false, NULL); 4253 } 4254 4255 goto out; 4256 4257 out_remove_binding: 4258 iwl_mvm_binding_remove_vif(mvm, vif); 4259 iwl_mvm_power_update_mac(mvm); 4260 out: 4261 if (ret) 4262 mvmvif->phy_ctxt = NULL; 4263 return ret; 4264 } 4265 static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw, 4266 struct ieee80211_vif *vif, 4267 struct ieee80211_bss_conf *link_conf, 4268 struct ieee80211_chanctx_conf *ctx) 4269 { 4270 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4271 int ret; 4272 4273 mutex_lock(&mvm->mutex); 4274 ret = __iwl_mvm_assign_vif_chanctx(mvm, vif, ctx, false); 4275 mutex_unlock(&mvm->mutex); 4276 4277 return ret; 4278 } 4279 4280 static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm, 4281 struct ieee80211_vif *vif, 4282 struct ieee80211_chanctx_conf *ctx, 4283 bool switching_chanctx) 4284 { 4285 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 4286 struct ieee80211_vif *disabled_vif = NULL; 4287 4288 lockdep_assert_held(&mvm->mutex); 4289 iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data); 4290 4291 switch (vif->type) { 4292 case NL80211_IFTYPE_ADHOC: 4293 goto out; 4294 case NL80211_IFTYPE_MONITOR: 4295 mvmvif->monitor_active = false; 4296 mvmvif->ps_disabled = false; 4297 iwl_mvm_rm_snif_sta(mvm, vif); 4298 break; 4299 case NL80211_IFTYPE_AP: 4300 /* This part is triggered only during CSA */ 4301 if (!switching_chanctx || !mvmvif->ap_ibss_active) 4302 goto out; 4303 4304 mvmvif->csa_countdown = false; 4305 4306 /* Set CS bit on all the stations */ 4307 iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true); 4308 4309 /* Save blocked iface, the timeout is set on the next beacon */ 4310 rcu_assign_pointer(mvm->csa_tx_blocked_vif, vif); 4311 4312 mvmvif->ap_ibss_active = false; 4313 break; 4314 case NL80211_IFTYPE_STATION: 4315 if (!switching_chanctx) 4316 break; 4317 4318 disabled_vif = vif; 4319 4320 if (!fw_has_capa(&mvm->fw->ucode_capa, 4321 IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) 4322 iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL); 4323 break; 4324 default: 4325 break; 4326 } 4327 4328 iwl_mvm_update_quotas(mvm, false, disabled_vif); 4329 iwl_mvm_binding_remove_vif(mvm, vif); 4330 4331 out: 4332 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD) && 4333 switching_chanctx) 4334 return; 4335 mvmvif->phy_ctxt = NULL; 4336 iwl_mvm_power_update_mac(mvm); 4337 } 4338 4339 static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw, 4340 struct ieee80211_vif *vif, 4341 struct ieee80211_bss_conf *link_conf, 4342 struct ieee80211_chanctx_conf *ctx) 4343 { 4344 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4345 4346 mutex_lock(&mvm->mutex); 4347 __iwl_mvm_unassign_vif_chanctx(mvm, vif, ctx, false); 4348 mutex_unlock(&mvm->mutex); 4349 } 4350 4351 static int 4352 iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm, 4353 struct ieee80211_vif_chanctx_switch *vifs) 4354 { 4355 int ret; 4356 4357 mutex_lock(&mvm->mutex); 4358 __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true); 4359 __iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx); 4360 4361 ret = __iwl_mvm_add_chanctx(mvm, vifs[0].new_ctx); 4362 if (ret) { 4363 IWL_ERR(mvm, "failed to add new_ctx during channel switch\n"); 4364 goto out_reassign; 4365 } 4366 4367 ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx, 4368 true); 4369 if (ret) { 4370 IWL_ERR(mvm, 4371 "failed to 
assign new_ctx during channel switch\n"); 4372 goto out_remove; 4373 } 4374 4375 /* we don't support TDLS during DCM - can be caused by channel switch */ 4376 if (iwl_mvm_phy_ctx_count(mvm) > 1) 4377 iwl_mvm_teardown_tdls_peers(mvm); 4378 4379 goto out; 4380 4381 out_remove: 4382 __iwl_mvm_remove_chanctx(mvm, vifs[0].new_ctx); 4383 4384 out_reassign: 4385 if (__iwl_mvm_add_chanctx(mvm, vifs[0].old_ctx)) { 4386 IWL_ERR(mvm, "failed to add old_ctx back after failure.\n"); 4387 goto out_restart; 4388 } 4389 4390 if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, 4391 true)) { 4392 IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n"); 4393 goto out_restart; 4394 } 4395 4396 goto out; 4397 4398 out_restart: 4399 /* things keep failing, better restart the hw */ 4400 iwl_mvm_nic_restart(mvm, false); 4401 4402 out: 4403 mutex_unlock(&mvm->mutex); 4404 4405 return ret; 4406 } 4407 4408 static int 4409 iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm, 4410 struct ieee80211_vif_chanctx_switch *vifs) 4411 { 4412 int ret; 4413 4414 mutex_lock(&mvm->mutex); 4415 __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true); 4416 4417 ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx, 4418 true); 4419 if (ret) { 4420 IWL_ERR(mvm, 4421 "failed to assign new_ctx during channel switch\n"); 4422 goto out_reassign; 4423 } 4424 4425 goto out; 4426 4427 out_reassign: 4428 if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, 4429 true)) { 4430 IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n"); 4431 goto out_restart; 4432 } 4433 4434 goto out; 4435 4436 out_restart: 4437 /* things keep failing, better restart the hw */ 4438 iwl_mvm_nic_restart(mvm, false); 4439 4440 out: 4441 mutex_unlock(&mvm->mutex); 4442 4443 return ret; 4444 } 4445 4446 static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw, 4447 struct ieee80211_vif_chanctx_switch *vifs, 4448 int n_vifs, 4449 enum ieee80211_chanctx_switch_mode mode) 4450 { 4451 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4452 int ret; 4453 4454 /* we only support a single-vif right now */ 4455 if (n_vifs > 1) 4456 return -EOPNOTSUPP; 4457 4458 switch (mode) { 4459 case CHANCTX_SWMODE_SWAP_CONTEXTS: 4460 ret = iwl_mvm_switch_vif_chanctx_swap(mvm, vifs); 4461 break; 4462 case CHANCTX_SWMODE_REASSIGN_VIF: 4463 ret = iwl_mvm_switch_vif_chanctx_reassign(mvm, vifs); 4464 break; 4465 default: 4466 ret = -EOPNOTSUPP; 4467 break; 4468 } 4469 4470 return ret; 4471 } 4472 4473 static int iwl_mvm_tx_last_beacon(struct ieee80211_hw *hw) 4474 { 4475 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4476 4477 return mvm->ibss_manager; 4478 } 4479 4480 static int iwl_mvm_set_tim(struct ieee80211_hw *hw, 4481 struct ieee80211_sta *sta, 4482 bool set) 4483 { 4484 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4485 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 4486 4487 if (!mvm_sta || !mvm_sta->vif) { 4488 IWL_ERR(mvm, "Station is not associated to a vif\n"); 4489 return -EINVAL; 4490 } 4491 4492 return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif); 4493 } 4494 4495 #ifdef CONFIG_NL80211_TESTMODE 4496 static const struct nla_policy iwl_mvm_tm_policy[IWL_MVM_TM_ATTR_MAX + 1] = { 4497 [IWL_MVM_TM_ATTR_CMD] = { .type = NLA_U32 }, 4498 [IWL_MVM_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 }, 4499 [IWL_MVM_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 }, 4500 }; 4501 4502 static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm, 4503 struct ieee80211_vif *vif, 4504 void *data, int 
len) 4505 { 4506 struct nlattr *tb[IWL_MVM_TM_ATTR_MAX + 1]; 4507 int err; 4508 u32 noa_duration; 4509 4510 err = nla_parse_deprecated(tb, IWL_MVM_TM_ATTR_MAX, data, len, 4511 iwl_mvm_tm_policy, NULL); 4512 if (err) 4513 return err; 4514 4515 if (!tb[IWL_MVM_TM_ATTR_CMD]) 4516 return -EINVAL; 4517 4518 switch (nla_get_u32(tb[IWL_MVM_TM_ATTR_CMD])) { 4519 case IWL_MVM_TM_CMD_SET_NOA: 4520 if (!vif || vif->type != NL80211_IFTYPE_AP || !vif->p2p || 4521 !vif->bss_conf.enable_beacon || 4522 !tb[IWL_MVM_TM_ATTR_NOA_DURATION]) 4523 return -EINVAL; 4524 4525 noa_duration = nla_get_u32(tb[IWL_MVM_TM_ATTR_NOA_DURATION]); 4526 if (noa_duration >= vif->bss_conf.beacon_int) 4527 return -EINVAL; 4528 4529 mvm->noa_duration = noa_duration; 4530 mvm->noa_vif = vif; 4531 4532 return iwl_mvm_update_quotas(mvm, true, NULL); 4533 case IWL_MVM_TM_CMD_SET_BEACON_FILTER: 4534 /* must be associated client vif - ignore authorized */ 4535 if (!vif || vif->type != NL80211_IFTYPE_STATION || 4536 !vif->cfg.assoc || !vif->bss_conf.dtim_period || 4537 !tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]) 4538 return -EINVAL; 4539 4540 if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])) 4541 return iwl_mvm_enable_beacon_filter(mvm, vif, 0); 4542 return iwl_mvm_disable_beacon_filter(mvm, vif, 0); 4543 } 4544 4545 return -EOPNOTSUPP; 4546 } 4547 4548 static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw, 4549 struct ieee80211_vif *vif, 4550 void *data, int len) 4551 { 4552 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4553 int err; 4554 4555 mutex_lock(&mvm->mutex); 4556 err = __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len); 4557 mutex_unlock(&mvm->mutex); 4558 4559 return err; 4560 } 4561 #endif 4562 4563 static void iwl_mvm_channel_switch(struct ieee80211_hw *hw, 4564 struct ieee80211_vif *vif, 4565 struct ieee80211_channel_switch *chsw) 4566 { 4567 /* By implementing this operation, we prevent mac80211 from 4568 * starting its own channel switch timer, so that we can call 4569 * ieee80211_chswitch_done() ourselves at the right time 4570 * (which is when the absence time event starts). 4571 */ 4572 4573 IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw), 4574 "dummy channel switch op\n"); 4575 } 4576 4577 static int iwl_mvm_schedule_client_csa(struct iwl_mvm *mvm, 4578 struct ieee80211_vif *vif, 4579 struct ieee80211_channel_switch *chsw) 4580 { 4581 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 4582 struct iwl_chan_switch_te_cmd cmd = { 4583 .mac_id = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, 4584 mvmvif->color)), 4585 .action = cpu_to_le32(FW_CTXT_ACTION_ADD), 4586 .tsf = cpu_to_le32(chsw->timestamp), 4587 .cs_count = chsw->count, 4588 .cs_mode = chsw->block_tx, 4589 }; 4590 4591 lockdep_assert_held(&mvm->mutex); 4592 4593 if (chsw->delay) 4594 cmd.cs_delayed_bcn_count = 4595 DIV_ROUND_UP(chsw->delay, vif->bss_conf.beacon_int); 4596 4597 return iwl_mvm_send_cmd_pdu(mvm, 4598 WIDE_ID(MAC_CONF_GROUP, 4599 CHANNEL_SWITCH_TIME_EVENT_CMD), 4600 0, sizeof(cmd), &cmd); 4601 } 4602 4603 static int iwl_mvm_old_pre_chan_sw_sta(struct iwl_mvm *mvm, 4604 struct ieee80211_vif *vif, 4605 struct ieee80211_channel_switch *chsw) 4606 { 4607 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 4608 u32 apply_time; 4609 4610 /* Schedule the time event to a bit before beacon 1, 4611 * to make sure we're in the new channel when the 4612 * GO/AP arrives. In case count <= 1 immediately schedule the 4613 * TE (this might result with some packet loss or connection 4614 * loss). 
4615 */ 4616 if (chsw->count <= 1) 4617 apply_time = 0; 4618 else 4619 apply_time = chsw->device_timestamp + 4620 ((vif->bss_conf.beacon_int * (chsw->count - 1) - 4621 IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024); 4622 4623 if (chsw->block_tx) 4624 iwl_mvm_csa_client_absent(mvm, vif); 4625 4626 if (mvmvif->bf_data.bf_enabled) { 4627 int ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); 4628 4629 if (ret) 4630 return ret; 4631 } 4632 4633 iwl_mvm_schedule_csa_period(mvm, vif, vif->bss_conf.beacon_int, 4634 apply_time); 4635 4636 return 0; 4637 } 4638 4639 #define IWL_MAX_CSA_BLOCK_TX 1500 4640 static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, 4641 struct ieee80211_vif *vif, 4642 struct ieee80211_channel_switch *chsw) 4643 { 4644 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4645 struct ieee80211_vif *csa_vif; 4646 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 4647 int ret; 4648 4649 mutex_lock(&mvm->mutex); 4650 4651 mvmvif->csa_failed = false; 4652 4653 IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n", 4654 chsw->chandef.center_freq1); 4655 4656 iwl_fw_dbg_trigger_simple_stop(&mvm->fwrt, 4657 ieee80211_vif_to_wdev(vif), 4658 FW_DBG_TRIGGER_CHANNEL_SWITCH); 4659 4660 switch (vif->type) { 4661 case NL80211_IFTYPE_AP: 4662 csa_vif = 4663 rcu_dereference_protected(mvm->csa_vif, 4664 lockdep_is_held(&mvm->mutex)); 4665 if (WARN_ONCE(csa_vif && csa_vif->bss_conf.csa_active, 4666 "Another CSA is already in progress")) { 4667 ret = -EBUSY; 4668 goto out_unlock; 4669 } 4670 4671 /* we still didn't unblock tx. prevent new CS meanwhile */ 4672 if (rcu_dereference_protected(mvm->csa_tx_blocked_vif, 4673 lockdep_is_held(&mvm->mutex))) { 4674 ret = -EBUSY; 4675 goto out_unlock; 4676 } 4677 4678 rcu_assign_pointer(mvm->csa_vif, vif); 4679 4680 if (WARN_ONCE(mvmvif->csa_countdown, 4681 "Previous CSA countdown didn't complete")) { 4682 ret = -EBUSY; 4683 goto out_unlock; 4684 } 4685 4686 mvmvif->csa_target_freq = chsw->chandef.chan->center_freq; 4687 4688 break; 4689 case NL80211_IFTYPE_STATION: 4690 /* 4691 * In the new flow FW is in charge of timing the switch so there 4692 * is no need for all of this 4693 */ 4694 if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP, 4695 CHANNEL_SWITCH_ERROR_NOTIF, 4696 0)) 4697 break; 4698 4699 /* 4700 * We haven't configured the firmware to be associated yet since 4701 * we don't know the dtim period. In this case, the firmware can't 4702 * track the beacons. 
4703 */ 4704 if (!vif->cfg.assoc || !vif->bss_conf.dtim_period) { 4705 ret = -EBUSY; 4706 goto out_unlock; 4707 } 4708 4709 if (chsw->delay > IWL_MAX_CSA_BLOCK_TX) 4710 schedule_delayed_work(&mvmvif->csa_work, 0); 4711 4712 if (chsw->block_tx) { 4713 /* 4714 * In case of undetermined / long time with immediate 4715 * quiet monitor status to gracefully disconnect 4716 */ 4717 if (!chsw->count || 4718 chsw->count * vif->bss_conf.beacon_int > 4719 IWL_MAX_CSA_BLOCK_TX) 4720 schedule_delayed_work(&mvmvif->csa_work, 4721 msecs_to_jiffies(IWL_MAX_CSA_BLOCK_TX)); 4722 } 4723 4724 if (!fw_has_capa(&mvm->fw->ucode_capa, 4725 IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) { 4726 ret = iwl_mvm_old_pre_chan_sw_sta(mvm, vif, chsw); 4727 if (ret) 4728 goto out_unlock; 4729 } else { 4730 iwl_mvm_schedule_client_csa(mvm, vif, chsw); 4731 } 4732 4733 mvmvif->csa_count = chsw->count; 4734 mvmvif->csa_misbehave = false; 4735 break; 4736 default: 4737 break; 4738 } 4739 4740 mvmvif->ps_disabled = true; 4741 4742 ret = iwl_mvm_power_update_ps(mvm); 4743 if (ret) 4744 goto out_unlock; 4745 4746 /* we won't be on this channel any longer */ 4747 iwl_mvm_teardown_tdls_peers(mvm); 4748 4749 out_unlock: 4750 mutex_unlock(&mvm->mutex); 4751 4752 return ret; 4753 } 4754 4755 static void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw, 4756 struct ieee80211_vif *vif, 4757 struct ieee80211_channel_switch *chsw) 4758 { 4759 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4760 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 4761 struct iwl_chan_switch_te_cmd cmd = { 4762 .mac_id = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, 4763 mvmvif->color)), 4764 .action = cpu_to_le32(FW_CTXT_ACTION_MODIFY), 4765 .tsf = cpu_to_le32(chsw->timestamp), 4766 .cs_count = chsw->count, 4767 .cs_mode = chsw->block_tx, 4768 }; 4769 4770 /* 4771 * In the new flow FW is in charge of timing the switch so there is no 4772 * need for all of this 4773 */ 4774 if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP, 4775 CHANNEL_SWITCH_ERROR_NOTIF, 0)) 4776 return; 4777 4778 if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CS_MODIFY)) 4779 return; 4780 4781 IWL_DEBUG_MAC80211(mvm, "Modify CSA on mac %d count = %d (old %d) mode = %d\n", 4782 mvmvif->id, chsw->count, mvmvif->csa_count, chsw->block_tx); 4783 4784 if (chsw->count >= mvmvif->csa_count && chsw->block_tx) { 4785 if (mvmvif->csa_misbehave) { 4786 /* Second time, give up on this AP*/ 4787 iwl_mvm_abort_channel_switch(hw, vif); 4788 ieee80211_chswitch_done(vif, false); 4789 mvmvif->csa_misbehave = false; 4790 return; 4791 } 4792 mvmvif->csa_misbehave = true; 4793 } 4794 mvmvif->csa_count = chsw->count; 4795 4796 mutex_lock(&mvm->mutex); 4797 if (mvmvif->csa_failed) 4798 goto out_unlock; 4799 4800 WARN_ON(iwl_mvm_send_cmd_pdu(mvm, 4801 WIDE_ID(MAC_CONF_GROUP, 4802 CHANNEL_SWITCH_TIME_EVENT_CMD), 4803 0, sizeof(cmd), &cmd)); 4804 out_unlock: 4805 mutex_unlock(&mvm->mutex); 4806 } 4807 4808 static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop) 4809 { 4810 int i; 4811 4812 if (!iwl_mvm_has_new_tx_api(mvm)) { 4813 if (drop) { 4814 mutex_lock(&mvm->mutex); 4815 iwl_mvm_flush_tx_path(mvm, 4816 iwl_mvm_flushable_queues(mvm) & queues); 4817 mutex_unlock(&mvm->mutex); 4818 } else { 4819 iwl_trans_wait_tx_queues_empty(mvm->trans, queues); 4820 } 4821 return; 4822 } 4823 4824 mutex_lock(&mvm->mutex); 4825 for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) { 4826 struct ieee80211_sta *sta; 4827 4828 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], 4829 
lockdep_is_held(&mvm->mutex)); 4830 if (IS_ERR_OR_NULL(sta)) 4831 continue; 4832 4833 if (drop) 4834 iwl_mvm_flush_sta_tids(mvm, i, 0xFFFF); 4835 else 4836 iwl_mvm_wait_sta_queues_empty(mvm, 4837 iwl_mvm_sta_from_mac80211(sta)); 4838 } 4839 mutex_unlock(&mvm->mutex); 4840 } 4841 4842 static void iwl_mvm_mac_flush(struct ieee80211_hw *hw, 4843 struct ieee80211_vif *vif, u32 queues, bool drop) 4844 { 4845 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4846 struct iwl_mvm_vif *mvmvif; 4847 struct iwl_mvm_sta *mvmsta; 4848 struct ieee80211_sta *sta; 4849 int i; 4850 u32 msk = 0; 4851 4852 if (!vif) { 4853 iwl_mvm_flush_no_vif(mvm, queues, drop); 4854 return; 4855 } 4856 4857 if (vif->type != NL80211_IFTYPE_STATION) 4858 return; 4859 4860 /* Make sure we're done with the deferred traffic before flushing */ 4861 flush_work(&mvm->add_stream_wk); 4862 4863 mutex_lock(&mvm->mutex); 4864 mvmvif = iwl_mvm_vif_from_mac80211(vif); 4865 4866 /* flush the AP-station and all TDLS peers */ 4867 for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) { 4868 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], 4869 lockdep_is_held(&mvm->mutex)); 4870 if (IS_ERR_OR_NULL(sta)) 4871 continue; 4872 4873 mvmsta = iwl_mvm_sta_from_mac80211(sta); 4874 if (mvmsta->vif != vif) 4875 continue; 4876 4877 /* make sure only TDLS peers or the AP are flushed */ 4878 WARN_ON(i != mvmvif->ap_sta_id && !sta->tdls); 4879 4880 if (drop) { 4881 if (iwl_mvm_flush_sta(mvm, mvmsta, false)) 4882 IWL_ERR(mvm, "flush request fail\n"); 4883 } else { 4884 msk |= mvmsta->tfd_queue_msk; 4885 if (iwl_mvm_has_new_tx_api(mvm)) 4886 iwl_mvm_wait_sta_queues_empty(mvm, mvmsta); 4887 } 4888 } 4889 4890 mutex_unlock(&mvm->mutex); 4891 4892 /* this can take a while, and we may need/want other operations 4893 * to succeed while doing this, so do it without the mutex held 4894 */ 4895 if (!drop && !iwl_mvm_has_new_tx_api(mvm)) 4896 iwl_trans_wait_tx_queues_empty(mvm->trans, msk); 4897 } 4898 4899 static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx, 4900 struct survey_info *survey) 4901 { 4902 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4903 int ret; 4904 4905 memset(survey, 0, sizeof(*survey)); 4906 4907 /* only support global statistics right now */ 4908 if (idx != 0) 4909 return -ENOENT; 4910 4911 if (!fw_has_capa(&mvm->fw->ucode_capa, 4912 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS)) 4913 return -ENOENT; 4914 4915 mutex_lock(&mvm->mutex); 4916 4917 if (iwl_mvm_firmware_running(mvm)) { 4918 ret = iwl_mvm_request_statistics(mvm, false); 4919 if (ret) 4920 goto out; 4921 } 4922 4923 survey->filled = SURVEY_INFO_TIME | 4924 SURVEY_INFO_TIME_RX | 4925 SURVEY_INFO_TIME_TX | 4926 SURVEY_INFO_TIME_SCAN; 4927 survey->time = mvm->accu_radio_stats.on_time_rf + 4928 mvm->radio_stats.on_time_rf; 4929 do_div(survey->time, USEC_PER_MSEC); 4930 4931 survey->time_rx = mvm->accu_radio_stats.rx_time + 4932 mvm->radio_stats.rx_time; 4933 do_div(survey->time_rx, USEC_PER_MSEC); 4934 4935 survey->time_tx = mvm->accu_radio_stats.tx_time + 4936 mvm->radio_stats.tx_time; 4937 do_div(survey->time_tx, USEC_PER_MSEC); 4938 4939 survey->time_scan = mvm->accu_radio_stats.on_time_scan + 4940 mvm->radio_stats.on_time_scan; 4941 do_div(survey->time_scan, USEC_PER_MSEC); 4942 4943 ret = 0; 4944 out: 4945 mutex_unlock(&mvm->mutex); 4946 return ret; 4947 } 4948 4949 static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo) 4950 { 4951 u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK; 4952 4953 switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) { 
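	/* map the firmware channel-width bits onto cfg80211's rate_info bandwidth */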
4954 case RATE_MCS_CHAN_WIDTH_20: 4955 rinfo->bw = RATE_INFO_BW_20; 4956 break; 4957 case RATE_MCS_CHAN_WIDTH_40: 4958 rinfo->bw = RATE_INFO_BW_40; 4959 break; 4960 case RATE_MCS_CHAN_WIDTH_80: 4961 rinfo->bw = RATE_INFO_BW_80; 4962 break; 4963 case RATE_MCS_CHAN_WIDTH_160: 4964 rinfo->bw = RATE_INFO_BW_160; 4965 break; 4966 } 4967 4968 if (format == RATE_MCS_CCK_MSK || 4969 format == RATE_MCS_LEGACY_OFDM_MSK) { 4970 int rate = u32_get_bits(rate_n_flags, RATE_LEGACY_RATE_MSK); 4971 4972 /* add the offset needed to get to the legacy ofdm indices */ 4973 if (format == RATE_MCS_LEGACY_OFDM_MSK) 4974 rate += IWL_FIRST_OFDM_RATE; 4975 4976 switch (rate) { 4977 case IWL_RATE_1M_INDEX: 4978 rinfo->legacy = 10; 4979 break; 4980 case IWL_RATE_2M_INDEX: 4981 rinfo->legacy = 20; 4982 break; 4983 case IWL_RATE_5M_INDEX: 4984 rinfo->legacy = 55; 4985 break; 4986 case IWL_RATE_11M_INDEX: 4987 rinfo->legacy = 110; 4988 break; 4989 case IWL_RATE_6M_INDEX: 4990 rinfo->legacy = 60; 4991 break; 4992 case IWL_RATE_9M_INDEX: 4993 rinfo->legacy = 90; 4994 break; 4995 case IWL_RATE_12M_INDEX: 4996 rinfo->legacy = 120; 4997 break; 4998 case IWL_RATE_18M_INDEX: 4999 rinfo->legacy = 180; 5000 break; 5001 case IWL_RATE_24M_INDEX: 5002 rinfo->legacy = 240; 5003 break; 5004 case IWL_RATE_36M_INDEX: 5005 rinfo->legacy = 360; 5006 break; 5007 case IWL_RATE_48M_INDEX: 5008 rinfo->legacy = 480; 5009 break; 5010 case IWL_RATE_54M_INDEX: 5011 rinfo->legacy = 540; 5012 } 5013 return; 5014 } 5015 5016 rinfo->nss = u32_get_bits(rate_n_flags, 5017 RATE_MCS_NSS_MSK) + 1; 5018 rinfo->mcs = format == RATE_MCS_HT_MSK ? 5019 RATE_HT_MCS_INDEX(rate_n_flags) : 5020 u32_get_bits(rate_n_flags, RATE_MCS_CODE_MSK); 5021 5022 if (format == RATE_MCS_HE_MSK) { 5023 u32 gi_ltf = u32_get_bits(rate_n_flags, 5024 RATE_MCS_HE_GI_LTF_MSK); 5025 5026 rinfo->flags |= RATE_INFO_FLAGS_HE_MCS; 5027 5028 if (rate_n_flags & RATE_MCS_HE_106T_MSK) { 5029 rinfo->bw = RATE_INFO_BW_HE_RU; 5030 rinfo->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_106; 5031 } 5032 5033 switch (rate_n_flags & RATE_MCS_HE_TYPE_MSK) { 5034 case RATE_MCS_HE_TYPE_SU: 5035 case RATE_MCS_HE_TYPE_EXT_SU: 5036 if (gi_ltf == 0 || gi_ltf == 1) 5037 rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8; 5038 else if (gi_ltf == 2) 5039 rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6; 5040 else if (gi_ltf == 3) 5041 rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2; 5042 else 5043 rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8; 5044 break; 5045 case RATE_MCS_HE_TYPE_MU: 5046 if (gi_ltf == 0 || gi_ltf == 1) 5047 rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8; 5048 else if (gi_ltf == 2) 5049 rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6; 5050 else 5051 rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2; 5052 break; 5053 case RATE_MCS_HE_TYPE_TRIG: 5054 if (gi_ltf == 0 || gi_ltf == 1) 5055 rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6; 5056 else 5057 rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2; 5058 break; 5059 } 5060 5061 if (rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK) 5062 rinfo->he_dcm = 1; 5063 return; 5064 } 5065 5066 if (rate_n_flags & RATE_MCS_SGI_MSK) 5067 rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; 5068 5069 if (format == RATE_MCS_HT_MSK) { 5070 rinfo->flags |= RATE_INFO_FLAGS_MCS; 5071 5072 } else if (format == RATE_MCS_VHT_MSK) { 5073 rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS; 5074 } 5075 5076 } 5077 5078 static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, 5079 struct ieee80211_vif *vif, 5080 struct ieee80211_sta *sta, 5081 struct station_info *sinfo) 5082 { 5083 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 5084 
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 5085 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 5086 5087 if (mvmsta->avg_energy) { 5088 sinfo->signal_avg = -(s8)mvmsta->avg_energy; 5089 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); 5090 } 5091 5092 if (iwl_mvm_has_tlc_offload(mvm)) { 5093 struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw; 5094 5095 iwl_mvm_set_sta_rate(lq_sta->last_rate_n_flags, &sinfo->txrate); 5096 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); 5097 } 5098 5099 /* if beacon filtering isn't on mac80211 does it anyway */ 5100 if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER)) 5101 return; 5102 5103 if (!vif->cfg.assoc) 5104 return; 5105 5106 mutex_lock(&mvm->mutex); 5107 5108 if (mvmvif->ap_sta_id != mvmsta->sta_id) 5109 goto unlock; 5110 5111 if (iwl_mvm_request_statistics(mvm, false)) 5112 goto unlock; 5113 5114 sinfo->rx_beacon = mvmvif->beacon_stats.num_beacons + 5115 mvmvif->beacon_stats.accu_num_beacons; 5116 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX); 5117 if (mvmvif->beacon_stats.avg_signal) { 5118 /* firmware only reports a value after RXing a few beacons */ 5119 sinfo->rx_beacon_signal_avg = mvmvif->beacon_stats.avg_signal; 5120 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG); 5121 } 5122 unlock: 5123 mutex_unlock(&mvm->mutex); 5124 } 5125 5126 static void iwl_mvm_event_mlme_callback_ini(struct iwl_mvm *mvm, 5127 struct ieee80211_vif *vif, 5128 const struct ieee80211_mlme_event *mlme) 5129 { 5130 if ((mlme->data == ASSOC_EVENT || mlme->data == AUTH_EVENT) && 5131 (mlme->status == MLME_DENIED || mlme->status == MLME_TIMEOUT)) { 5132 iwl_dbg_tlv_time_point(&mvm->fwrt, 5133 IWL_FW_INI_TIME_POINT_ASSOC_FAILED, 5134 NULL); 5135 return; 5136 } 5137 5138 if (mlme->data == DEAUTH_RX_EVENT || mlme->data == DEAUTH_TX_EVENT) { 5139 iwl_dbg_tlv_time_point(&mvm->fwrt, 5140 IWL_FW_INI_TIME_POINT_DEASSOC, 5141 NULL); 5142 return; 5143 } 5144 } 5145 5146 static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm, 5147 struct ieee80211_vif *vif, 5148 const struct ieee80211_event *event) 5149 { 5150 #define CHECK_MLME_TRIGGER(_cnt, _fmt...) 
\ 5151 do { \ 5152 if ((trig_mlme->_cnt) && --(trig_mlme->_cnt)) \ 5153 break; \ 5154 iwl_fw_dbg_collect_trig(&(mvm)->fwrt, trig, _fmt); \ 5155 } while (0) 5156 5157 struct iwl_fw_dbg_trigger_tlv *trig; 5158 struct iwl_fw_dbg_trigger_mlme *trig_mlme; 5159 5160 if (iwl_trans_dbg_ini_valid(mvm->trans)) { 5161 iwl_mvm_event_mlme_callback_ini(mvm, vif, &event->u.mlme); 5162 return; 5163 } 5164 5165 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), 5166 FW_DBG_TRIGGER_MLME); 5167 if (!trig) 5168 return; 5169 5170 trig_mlme = (void *)trig->data; 5171 5172 if (event->u.mlme.data == ASSOC_EVENT) { 5173 if (event->u.mlme.status == MLME_DENIED) 5174 CHECK_MLME_TRIGGER(stop_assoc_denied, 5175 "DENIED ASSOC: reason %d", 5176 event->u.mlme.reason); 5177 else if (event->u.mlme.status == MLME_TIMEOUT) 5178 CHECK_MLME_TRIGGER(stop_assoc_timeout, 5179 "ASSOC TIMEOUT"); 5180 } else if (event->u.mlme.data == AUTH_EVENT) { 5181 if (event->u.mlme.status == MLME_DENIED) 5182 CHECK_MLME_TRIGGER(stop_auth_denied, 5183 "DENIED AUTH: reason %d", 5184 event->u.mlme.reason); 5185 else if (event->u.mlme.status == MLME_TIMEOUT) 5186 CHECK_MLME_TRIGGER(stop_auth_timeout, 5187 "AUTH TIMEOUT"); 5188 } else if (event->u.mlme.data == DEAUTH_RX_EVENT) { 5189 CHECK_MLME_TRIGGER(stop_rx_deauth, 5190 "DEAUTH RX %d", event->u.mlme.reason); 5191 } else if (event->u.mlme.data == DEAUTH_TX_EVENT) { 5192 CHECK_MLME_TRIGGER(stop_tx_deauth, 5193 "DEAUTH TX %d", event->u.mlme.reason); 5194 } 5195 #undef CHECK_MLME_TRIGGER 5196 } 5197 5198 static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm, 5199 struct ieee80211_vif *vif, 5200 const struct ieee80211_event *event) 5201 { 5202 struct iwl_fw_dbg_trigger_tlv *trig; 5203 struct iwl_fw_dbg_trigger_ba *ba_trig; 5204 5205 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), 5206 FW_DBG_TRIGGER_BA); 5207 if (!trig) 5208 return; 5209 5210 ba_trig = (void *)trig->data; 5211 5212 if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid))) 5213 return; 5214 5215 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, 5216 "BAR received from %pM, tid %d, ssn %d", 5217 event->u.ba.sta->addr, event->u.ba.tid, 5218 event->u.ba.ssn); 5219 } 5220 5221 static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw, 5222 struct ieee80211_vif *vif, 5223 const struct ieee80211_event *event) 5224 { 5225 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 5226 5227 switch (event->type) { 5228 case MLME_EVENT: 5229 iwl_mvm_event_mlme_callback(mvm, vif, event); 5230 break; 5231 case BAR_RX_EVENT: 5232 iwl_mvm_event_bar_rx_callback(mvm, vif, event); 5233 break; 5234 case BA_FRAME_TIMEOUT: 5235 iwl_mvm_event_frame_timeout_callback(mvm, vif, event->u.ba.sta, 5236 event->u.ba.tid); 5237 break; 5238 default: 5239 break; 5240 } 5241 } 5242 5243 void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, 5244 enum iwl_mvm_rxq_notif_type type, 5245 bool sync, 5246 const void *data, u32 size) 5247 { 5248 struct { 5249 struct iwl_rxq_sync_cmd cmd; 5250 struct iwl_mvm_internal_rxq_notif notif; 5251 } __packed cmd = { 5252 .cmd.rxq_mask = cpu_to_le32(BIT(mvm->trans->num_rx_queues) - 1), 5253 .cmd.count = 5254 cpu_to_le32(sizeof(struct iwl_mvm_internal_rxq_notif) + 5255 size), 5256 .notif.type = type, 5257 .notif.sync = sync, 5258 }; 5259 struct iwl_host_cmd hcmd = { 5260 .id = WIDE_ID(DATA_PATH_GROUP, TRIGGER_RX_QUEUES_NOTIF_CMD), 5261 .data[0] = &cmd, 5262 .len[0] = sizeof(cmd), 5263 .data[1] = data, 5264 .len[1] = size, 5265 .flags = sync ? 
0 : CMD_ASYNC, 5266 }; 5267 int ret; 5268 5269 /* size must be a multiple of DWORD */ 5270 if (WARN_ON(cmd.cmd.count & cpu_to_le32(3))) 5271 return; 5272 5273 if (!iwl_mvm_has_new_rx_api(mvm)) 5274 return; 5275 5276 if (sync) { 5277 cmd.notif.cookie = mvm->queue_sync_cookie; 5278 mvm->queue_sync_state = (1 << mvm->trans->num_rx_queues) - 1; 5279 } 5280 5281 ret = iwl_mvm_send_cmd(mvm, &hcmd); 5282 if (ret) { 5283 IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret); 5284 goto out; 5285 } 5286 5287 if (sync) { 5288 lockdep_assert_held(&mvm->mutex); 5289 ret = wait_event_timeout(mvm->rx_sync_waitq, 5290 READ_ONCE(mvm->queue_sync_state) == 0 || 5291 iwl_mvm_is_radio_killed(mvm), 5292 HZ); 5293 WARN_ONCE(!ret && !iwl_mvm_is_radio_killed(mvm), 5294 "queue sync: failed to sync, state is 0x%lx\n", 5295 mvm->queue_sync_state); 5296 } 5297 5298 out: 5299 if (sync) { 5300 mvm->queue_sync_state = 0; 5301 mvm->queue_sync_cookie++; 5302 } 5303 } 5304 5305 static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw) 5306 { 5307 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 5308 5309 mutex_lock(&mvm->mutex); 5310 iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_EMPTY, true, NULL, 0); 5311 mutex_unlock(&mvm->mutex); 5312 } 5313 5314 static int 5315 iwl_mvm_mac_get_ftm_responder_stats(struct ieee80211_hw *hw, 5316 struct ieee80211_vif *vif, 5317 struct cfg80211_ftm_responder_stats *stats) 5318 { 5319 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 5320 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 5321 5322 if (vif->p2p || vif->type != NL80211_IFTYPE_AP || 5323 !mvmvif->ap_ibss_active || !vif->bss_conf.ftm_responder) 5324 return -EINVAL; 5325 5326 mutex_lock(&mvm->mutex); 5327 *stats = mvm->ftm_resp_stats; 5328 mutex_unlock(&mvm->mutex); 5329 5330 stats->filled = BIT(NL80211_FTM_STATS_SUCCESS_NUM) | 5331 BIT(NL80211_FTM_STATS_PARTIAL_NUM) | 5332 BIT(NL80211_FTM_STATS_FAILED_NUM) | 5333 BIT(NL80211_FTM_STATS_ASAP_NUM) | 5334 BIT(NL80211_FTM_STATS_NON_ASAP_NUM) | 5335 BIT(NL80211_FTM_STATS_TOTAL_DURATION_MSEC) | 5336 BIT(NL80211_FTM_STATS_UNKNOWN_TRIGGERS_NUM) | 5337 BIT(NL80211_FTM_STATS_RESCHEDULE_REQUESTS_NUM) | 5338 BIT(NL80211_FTM_STATS_OUT_OF_WINDOW_TRIGGERS_NUM); 5339 5340 return 0; 5341 } 5342 5343 static int iwl_mvm_start_pmsr(struct ieee80211_hw *hw, 5344 struct ieee80211_vif *vif, 5345 struct cfg80211_pmsr_request *request) 5346 { 5347 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 5348 int ret; 5349 5350 mutex_lock(&mvm->mutex); 5351 ret = iwl_mvm_ftm_start(mvm, vif, request); 5352 mutex_unlock(&mvm->mutex); 5353 5354 return ret; 5355 } 5356 5357 static void iwl_mvm_abort_pmsr(struct ieee80211_hw *hw, 5358 struct ieee80211_vif *vif, 5359 struct cfg80211_pmsr_request *request) 5360 { 5361 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 5362 5363 mutex_lock(&mvm->mutex); 5364 iwl_mvm_ftm_abort(mvm, request); 5365 mutex_unlock(&mvm->mutex); 5366 } 5367 5368 static bool iwl_mvm_can_hw_csum(struct sk_buff *skb) 5369 { 5370 u8 protocol = ip_hdr(skb)->protocol; 5371 5372 if (!IS_ENABLED(CONFIG_INET)) 5373 return false; 5374 5375 return protocol == IPPROTO_TCP || protocol == IPPROTO_UDP; 5376 } 5377 5378 static bool iwl_mvm_mac_can_aggregate(struct ieee80211_hw *hw, 5379 struct sk_buff *head, 5380 struct sk_buff *skb) 5381 { 5382 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 5383 5384 if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) 5385 return iwl_mvm_tx_csum_bz(mvm, head, true) == 5386 iwl_mvm_tx_csum_bz(mvm, skb, true); 5387 5388 /* For now don't aggregate 
IPv6 in AMSDU */ 5389 if (skb->protocol != htons(ETH_P_IP)) 5390 return false; 5391 5392 if (!iwl_mvm_is_csum_supported(mvm)) 5393 return true; 5394 5395 return iwl_mvm_can_hw_csum(skb) == iwl_mvm_can_hw_csum(head); 5396 } 5397 5398 const struct ieee80211_ops iwl_mvm_hw_ops = { 5399 .tx = iwl_mvm_mac_tx, 5400 .wake_tx_queue = iwl_mvm_mac_wake_tx_queue, 5401 .ampdu_action = iwl_mvm_mac_ampdu_action, 5402 .get_antenna = iwl_mvm_op_get_antenna, 5403 .start = iwl_mvm_mac_start, 5404 .reconfig_complete = iwl_mvm_mac_reconfig_complete, 5405 .stop = iwl_mvm_mac_stop, 5406 .add_interface = iwl_mvm_mac_add_interface, 5407 .remove_interface = iwl_mvm_mac_remove_interface, 5408 .config = iwl_mvm_mac_config, 5409 .prepare_multicast = iwl_mvm_prepare_multicast, 5410 .configure_filter = iwl_mvm_configure_filter, 5411 .config_iface_filter = iwl_mvm_config_iface_filter, 5412 .bss_info_changed = iwl_mvm_bss_info_changed, 5413 .hw_scan = iwl_mvm_mac_hw_scan, 5414 .cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan, 5415 .sta_pre_rcu_remove = iwl_mvm_sta_pre_rcu_remove, 5416 .sta_state = iwl_mvm_mac_sta_state, 5417 .sta_notify = iwl_mvm_mac_sta_notify, 5418 .allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames, 5419 .release_buffered_frames = iwl_mvm_mac_release_buffered_frames, 5420 .set_rts_threshold = iwl_mvm_mac_set_rts_threshold, 5421 .sta_rc_update = iwl_mvm_sta_rc_update, 5422 .conf_tx = iwl_mvm_mac_conf_tx, 5423 .mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx, 5424 .mgd_complete_tx = iwl_mvm_mac_mgd_complete_tx, 5425 .mgd_protect_tdls_discover = iwl_mvm_mac_mgd_protect_tdls_discover, 5426 .flush = iwl_mvm_mac_flush, 5427 .sched_scan_start = iwl_mvm_mac_sched_scan_start, 5428 .sched_scan_stop = iwl_mvm_mac_sched_scan_stop, 5429 .set_key = iwl_mvm_mac_set_key, 5430 .update_tkip_key = iwl_mvm_mac_update_tkip_key, 5431 .remain_on_channel = iwl_mvm_roc, 5432 .cancel_remain_on_channel = iwl_mvm_cancel_roc, 5433 .add_chanctx = iwl_mvm_add_chanctx, 5434 .remove_chanctx = iwl_mvm_remove_chanctx, 5435 .change_chanctx = iwl_mvm_change_chanctx, 5436 .assign_vif_chanctx = iwl_mvm_assign_vif_chanctx, 5437 .unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx, 5438 .switch_vif_chanctx = iwl_mvm_switch_vif_chanctx, 5439 5440 .start_ap = iwl_mvm_start_ap, 5441 .stop_ap = iwl_mvm_stop_ap, 5442 .join_ibss = iwl_mvm_start_ibss, 5443 .leave_ibss = iwl_mvm_stop_ibss, 5444 5445 .tx_last_beacon = iwl_mvm_tx_last_beacon, 5446 5447 .set_tim = iwl_mvm_set_tim, 5448 5449 .channel_switch = iwl_mvm_channel_switch, 5450 .pre_channel_switch = iwl_mvm_pre_channel_switch, 5451 .post_channel_switch = iwl_mvm_post_channel_switch, 5452 .abort_channel_switch = iwl_mvm_abort_channel_switch, 5453 .channel_switch_rx_beacon = iwl_mvm_channel_switch_rx_beacon, 5454 5455 .tdls_channel_switch = iwl_mvm_tdls_channel_switch, 5456 .tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch, 5457 .tdls_recv_channel_switch = iwl_mvm_tdls_recv_channel_switch, 5458 5459 .event_callback = iwl_mvm_mac_event_callback, 5460 5461 .sync_rx_queues = iwl_mvm_sync_rx_queues, 5462 5463 CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd) 5464 5465 #ifdef CONFIG_PM_SLEEP 5466 /* look at d3.c */ 5467 .suspend = iwl_mvm_suspend, 5468 .resume = iwl_mvm_resume, 5469 .set_wakeup = iwl_mvm_set_wakeup, 5470 .set_rekey_data = iwl_mvm_set_rekey_data, 5471 #if IS_ENABLED(CONFIG_IPV6) 5472 .ipv6_addr_change = iwl_mvm_ipv6_addr_change, 5473 #endif 5474 .set_default_unicast_key = iwl_mvm_set_default_unicast_key, 5475 #endif 5476 .get_survey = iwl_mvm_mac_get_survey, 5477 
.sta_statistics = iwl_mvm_mac_sta_statistics, 5478 .get_ftm_responder_stats = iwl_mvm_mac_get_ftm_responder_stats, 5479 .start_pmsr = iwl_mvm_start_pmsr, 5480 .abort_pmsr = iwl_mvm_abort_pmsr, 5481 5482 .can_aggregate_in_amsdu = iwl_mvm_mac_can_aggregate, 5483 #ifdef CONFIG_IWLWIFI_DEBUGFS 5484 .sta_add_debugfs = iwl_mvm_sta_add_debugfs, 5485 #endif 5486 }; 5487
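/*
 * mac80211 callback table for the MVM op mode: most handlers above are thin
 * wrappers that take mvm->mutex and defer to an iwl_mvm_* / __iwl_mvm_*
 * helper.  The suspend/resume entries are only built with CONFIG_PM_SLEEP,
 * and the testmode hook is pulled in via CFG80211_TESTMODE_CMD().
 */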