1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* 3 * Copyright (C) 2012-2014, 2018-2022 Intel Corporation 4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH 5 * Copyright (C) 2016-2017 Intel Deutschland GmbH 6 */ 7 #include <linux/kernel.h> 8 #include <linux/slab.h> 9 #include <linux/skbuff.h> 10 #include <linux/netdevice.h> 11 #include <linux/etherdevice.h> 12 #include <linux/ip.h> 13 #include <linux/if_arp.h> 14 #include <linux/time.h> 15 #include <net/mac80211.h> 16 #include <net/ieee80211_radiotap.h> 17 #include <net/tcp.h> 18 19 #include "iwl-drv.h" 20 #include "iwl-op-mode.h" 21 #include "iwl-io.h" 22 #include "mvm.h" 23 #include "sta.h" 24 #include "time-event.h" 25 #include "iwl-eeprom-parse.h" 26 #include "iwl-phy-db.h" 27 #include "testmode.h" 28 #include "fw/error-dump.h" 29 #include "iwl-prph.h" 30 #include "iwl-nvm-parse.h" 31 32 static const struct ieee80211_iface_limit iwl_mvm_limits[] = { 33 { 34 .max = 1, 35 .types = BIT(NL80211_IFTYPE_STATION), 36 }, 37 { 38 .max = 1, 39 .types = BIT(NL80211_IFTYPE_AP) | 40 BIT(NL80211_IFTYPE_P2P_CLIENT) | 41 BIT(NL80211_IFTYPE_P2P_GO), 42 }, 43 { 44 .max = 1, 45 .types = BIT(NL80211_IFTYPE_P2P_DEVICE), 46 }, 47 }; 48 49 static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = { 50 { 51 .num_different_channels = 2, 52 .max_interfaces = 3, 53 .limits = iwl_mvm_limits, 54 .n_limits = ARRAY_SIZE(iwl_mvm_limits), 55 }, 56 }; 57 58 static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = { 59 .max_peers = IWL_MVM_TOF_MAX_APS, 60 .report_ap_tsf = 1, 61 .randomize_mac_addr = 1, 62 63 .ftm = { 64 .supported = 1, 65 .asap = 1, 66 .non_asap = 1, 67 .request_lci = 1, 68 .request_civicloc = 1, 69 .trigger_based = 1, 70 .non_trigger_based = 1, 71 .max_bursts_exponent = -1, /* all supported */ 72 .max_ftms_per_burst = 0, /* no limits */ 73 .bandwidths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | 74 BIT(NL80211_CHAN_WIDTH_20) | 75 BIT(NL80211_CHAN_WIDTH_40) | 76 BIT(NL80211_CHAN_WIDTH_80) | 77 BIT(NL80211_CHAN_WIDTH_160), 78 .preambles = BIT(NL80211_PREAMBLE_LEGACY) | 79 BIT(NL80211_PREAMBLE_HT) | 80 BIT(NL80211_PREAMBLE_VHT) | 81 BIT(NL80211_PREAMBLE_HE), 82 }, 83 }; 84 85 static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, 86 enum set_key_cmd cmd, 87 struct ieee80211_vif *vif, 88 struct ieee80211_sta *sta, 89 struct ieee80211_key_conf *key); 90 91 static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm) 92 { 93 int i; 94 95 memset(mvm->phy_ctxts, 0, sizeof(mvm->phy_ctxts)); 96 for (i = 0; i < NUM_PHY_CTX; i++) { 97 mvm->phy_ctxts[i].id = i; 98 mvm->phy_ctxts[i].ref = 0; 99 } 100 } 101 102 struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, 103 const char *alpha2, 104 enum iwl_mcc_source src_id, 105 bool *changed) 106 { 107 struct ieee80211_regdomain *regd = NULL; 108 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); 109 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 110 struct iwl_mcc_update_resp *resp; 111 u8 resp_ver; 112 113 IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2); 114 115 lockdep_assert_held(&mvm->mutex); 116 117 resp = iwl_mvm_update_mcc(mvm, alpha2, src_id); 118 if (IS_ERR_OR_NULL(resp)) { 119 IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n", 120 PTR_ERR_OR_ZERO(resp)); 121 resp = NULL; 122 goto out; 123 } 124 125 if (changed) { 126 u32 status = le32_to_cpu(resp->status); 127 128 *changed = (status == MCC_RESP_NEW_CHAN_PROFILE || 129 status == MCC_RESP_ILLEGAL); 130 } 131 resp_ver = iwl_fw_lookup_notif_ver(mvm->fw, 
IWL_ALWAYS_LONG_GROUP, 132 MCC_UPDATE_CMD, 0); 133 IWL_DEBUG_LAR(mvm, "MCC update response version: %d\n", resp_ver); 134 135 regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg, 136 __le32_to_cpu(resp->n_channels), 137 resp->channels, 138 __le16_to_cpu(resp->mcc), 139 __le16_to_cpu(resp->geo_info), 140 __le16_to_cpu(resp->cap), resp_ver); 141 /* Store the return source id */ 142 src_id = resp->source_id; 143 if (IS_ERR_OR_NULL(regd)) { 144 IWL_DEBUG_LAR(mvm, "Could not get parse update from FW %d\n", 145 PTR_ERR_OR_ZERO(regd)); 146 goto out; 147 } 148 149 IWL_DEBUG_LAR(mvm, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n", 150 regd->alpha2, regd->alpha2[0], regd->alpha2[1], src_id); 151 mvm->lar_regdom_set = true; 152 mvm->mcc_src = src_id; 153 154 iwl_mei_set_country_code(__le16_to_cpu(resp->mcc)); 155 156 out: 157 kfree(resp); 158 return regd; 159 } 160 161 void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm) 162 { 163 bool changed; 164 struct ieee80211_regdomain *regd; 165 166 if (!iwl_mvm_is_lar_supported(mvm)) 167 return; 168 169 regd = iwl_mvm_get_current_regdomain(mvm, &changed); 170 if (!IS_ERR_OR_NULL(regd)) { 171 /* only update the regulatory core if changed */ 172 if (changed) 173 regulatory_set_wiphy_regd(mvm->hw->wiphy, regd); 174 175 kfree(regd); 176 } 177 } 178 179 struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm, 180 bool *changed) 181 { 182 return iwl_mvm_get_regdomain(mvm->hw->wiphy, "ZZ", 183 iwl_mvm_is_wifi_mcc_supported(mvm) ? 184 MCC_SOURCE_GET_CURRENT : 185 MCC_SOURCE_OLD_FW, changed); 186 } 187 188 int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm) 189 { 190 enum iwl_mcc_source used_src; 191 struct ieee80211_regdomain *regd; 192 int ret; 193 bool changed; 194 const struct ieee80211_regdomain *r = 195 wiphy_dereference(mvm->hw->wiphy, mvm->hw->wiphy->regd); 196 197 if (!r) 198 return -ENOENT; 199 200 /* save the last source in case we overwrite it below */ 201 used_src = mvm->mcc_src; 202 if (iwl_mvm_is_wifi_mcc_supported(mvm)) { 203 /* Notify the firmware we support wifi location updates */ 204 regd = iwl_mvm_get_current_regdomain(mvm, NULL); 205 if (!IS_ERR_OR_NULL(regd)) 206 kfree(regd); 207 } 208 209 /* Now set our last stored MCC and source */ 210 regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, r->alpha2, used_src, 211 &changed); 212 if (IS_ERR_OR_NULL(regd)) 213 return -EIO; 214 215 /* update cfg80211 if the regdomain was changed */ 216 if (changed) 217 ret = regulatory_set_wiphy_regd_sync(mvm->hw->wiphy, regd); 218 else 219 ret = 0; 220 221 kfree(regd); 222 return ret; 223 } 224 225 static const u8 he_if_types_ext_capa_sta[] = { 226 [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, 227 [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT, 228 [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, 229 }; 230 231 static const struct wiphy_iftype_ext_capab he_iftypes_ext_capa[] = { 232 { 233 .iftype = NL80211_IFTYPE_STATION, 234 .extended_capabilities = he_if_types_ext_capa_sta, 235 .extended_capabilities_mask = he_if_types_ext_capa_sta, 236 .extended_capabilities_len = sizeof(he_if_types_ext_capa_sta), 237 }, 238 }; 239 240 static int 241 iwl_mvm_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) 242 { 243 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 244 *tx_ant = iwl_mvm_get_valid_tx_ant(mvm); 245 *rx_ant = iwl_mvm_get_valid_rx_ant(mvm); 246 return 0; 247 } 248 249 int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) 250 { 251 struct ieee80211_hw *hw = mvm->hw; 252 int num_mac, ret, i; 253 static const u32 mvm_ciphers[] = { 254 
		WLAN_CIPHER_SUITE_WEP40,
		WLAN_CIPHER_SUITE_WEP104,
		WLAN_CIPHER_SUITE_TKIP,
		WLAN_CIPHER_SUITE_CCMP,
	};
#ifdef CONFIG_PM_SLEEP
	bool unified = fw_has_capa(&mvm->fw->ucode_capa,
				   IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
#endif

	/* Tell mac80211 our characteristics */
	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, SPECTRUM_MGMT);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
	ieee80211_hw_set(hw, WANT_MONITOR_VIF);
	ieee80211_hw_set(hw, SUPPORTS_PS);
	ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
	ieee80211_hw_set(hw, TIMING_BEACON_ONLY);
	ieee80211_hw_set(hw, CONNECTION_MONITOR);
	ieee80211_hw_set(hw, CHANCTX_STA_CSA);
	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
	ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
	ieee80211_hw_set(hw, DEAUTH_NEED_MGD_TX_PREP);
	ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
	ieee80211_hw_set(hw, BUFF_MMPDU_TXQ);
	ieee80211_hw_set(hw, STA_MMPDU_TXQ);
	/*
	 * On older devices, enabling TX A-MSDU occasionally leads to
	 * something getting messed up: the command read from the FIFO
	 * gets out of sync and isn't a TX command, so we end up with
	 * an EDC assert.
	 *
	 * It's not clear where the bug is, but since we didn't support
	 * A-MSDU until moving to the mac80211 iTXQs anyway, just leave
	 * it disabled for older devices. We also don't see this issue
	 * on any newer devices.
	 */
	if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000)
		ieee80211_hw_set(hw, TX_AMSDU);
	ieee80211_hw_set(hw, TX_FRAG_LIST);

	if (iwl_mvm_has_tlc_offload(mvm)) {
		ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
		ieee80211_hw_set(hw, HAS_RATE_CONTROL);
	}

	if (iwl_mvm_has_new_rx_api(mvm))
		ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_STA_PM_NOTIF)) {
		ieee80211_hw_set(hw, AP_LINK_PS);
	} else if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) {
		/*
		 * We absolutely need this for the new TX API since that comes
		 * with many more queues than the current code can deal with
		 * for station powersave.
		 */
		return -EINVAL;
	}

	if (mvm->trans->num_rx_queues > 1)
		ieee80211_hw_set(hw, USES_RSS);

	if (mvm->trans->max_skb_frags)
		hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG;

	hw->queues = IEEE80211_NUM_ACS;
	hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
	hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
				    IEEE80211_RADIOTAP_MCS_HAVE_STBC;
	hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
				    IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED;

	hw->radiotap_timestamp.units_pos =
		IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US |
		IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ;
	/* this is the accuracy for CCK frames; it's better (only 8) for OFDM */
	hw->radiotap_timestamp.accuracy = 22;

	if (!iwl_mvm_has_tlc_offload(mvm))
		hw->rate_control_algorithm = RS_NAME;

	hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
	hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
	hw->max_tx_fragments = mvm->trans->max_skb_frags;

	BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 6);
	memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers));
	hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers);
	hw->wiphy->cipher_suites = mvm->ciphers;

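	/*
	 * Note: mvm->ciphers starts out as a copy of mvm_ciphers[] above and
	 * is extended at runtime below: GCMP/GCMP-256 and BIP-GMAC-128/256
	 * only when the new RX API is available, and AES-CMAC unconditionally
	 * (together with MFP_CAPABLE). The BUILD_BUG_ON() above reserves the
	 * headroom for these extra entries, so each append below can simply
	 * bump hw->wiphy->n_cipher_suites.
	 */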
	if (iwl_mvm_has_new_rx_api(mvm)) {
		mvm->ciphers[hw->wiphy->n_cipher_suites] =
			WLAN_CIPHER_SUITE_GCMP;
		hw->wiphy->n_cipher_suites++;
		mvm->ciphers[hw->wiphy->n_cipher_suites] =
			WLAN_CIPHER_SUITE_GCMP_256;
		hw->wiphy->n_cipher_suites++;
	}

	if (iwlwifi_mod_params.swcrypto)
		IWL_ERR(mvm,
			"iwlmvm doesn't allow to disable HW crypto, check swcrypto module parameter\n");
	if (!iwlwifi_mod_params.bt_coex_active)
		IWL_ERR(mvm,
			"iwlmvm doesn't allow to disable BT Coex, check bt_coex_active module parameter\n");

	ieee80211_hw_set(hw, MFP_CAPABLE);
	mvm->ciphers[hw->wiphy->n_cipher_suites] = WLAN_CIPHER_SUITE_AES_CMAC;
	hw->wiphy->n_cipher_suites++;
	if (iwl_mvm_has_new_rx_api(mvm)) {
		mvm->ciphers[hw->wiphy->n_cipher_suites] =
			WLAN_CIPHER_SUITE_BIP_GMAC_128;
		hw->wiphy->n_cipher_suites++;
		mvm->ciphers[hw->wiphy->n_cipher_suites] =
			WLAN_CIPHER_SUITE_BIP_GMAC_256;
		hw->wiphy->n_cipher_suites++;
	}

	wiphy_ext_feature_set(hw->wiphy,
			      NL80211_EXT_FEATURE_BEACON_RATE_LEGACY);

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_FTM_CALIBRATED)) {
		wiphy_ext_feature_set(hw->wiphy,
				      NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER);
		hw->wiphy->pmsr_capa = &iwl_mvm_pmsr_capa;
	}

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT))
		wiphy_ext_feature_set(hw->wiphy,
				      NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT);

	ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
	hw->wiphy->features |=
		NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
		NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
		NL80211_FEATURE_ND_RANDOM_MAC_ADDR;

	hw->sta_data_size = sizeof(struct iwl_mvm_sta);
	hw->vif_data_size = sizeof(struct iwl_mvm_vif);
	hw->chanctx_data_size = sizeof(u16);
	hw->txq_data_size = sizeof(struct iwl_mvm_txq);

	hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_P2P_DEVICE) |
		BIT(NL80211_IFTYPE_ADHOC);

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);

	/* The new Tx API does not allow us to pass the key or key ID of an
	 * MPDU to the hw, which prevents us from controlling which key (ID)
	 * to use per MPDU. Until that's fixed we can't use Extended Key ID
	 * for the newer cards.
416 */ 417 if (!iwl_mvm_has_new_tx_api(mvm)) 418 wiphy_ext_feature_set(hw->wiphy, 419 NL80211_EXT_FEATURE_EXT_KEY_ID); 420 hw->wiphy->features |= NL80211_FEATURE_HT_IBSS; 421 422 hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR; 423 if (iwl_mvm_is_lar_supported(mvm)) 424 hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED; 425 else 426 hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG | 427 REGULATORY_DISABLE_BEACON_HINTS; 428 429 hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; 430 hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; 431 hw->wiphy->flags |= WIPHY_FLAG_SPLIT_SCAN_6GHZ; 432 433 hw->wiphy->iface_combinations = iwl_mvm_iface_combinations; 434 hw->wiphy->n_iface_combinations = 435 ARRAY_SIZE(iwl_mvm_iface_combinations); 436 437 hw->wiphy->max_remain_on_channel_duration = 10000; 438 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL; 439 440 /* Extract MAC address */ 441 memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN); 442 hw->wiphy->addresses = mvm->addresses; 443 hw->wiphy->n_addresses = 1; 444 445 /* Extract additional MAC addresses if available */ 446 num_mac = (mvm->nvm_data->n_hw_addrs > 1) ? 447 min(IWL_MVM_MAX_ADDRESSES, mvm->nvm_data->n_hw_addrs) : 1; 448 449 for (i = 1; i < num_mac; i++) { 450 memcpy(mvm->addresses[i].addr, mvm->addresses[i-1].addr, 451 ETH_ALEN); 452 mvm->addresses[i].addr[5]++; 453 hw->wiphy->n_addresses++; 454 } 455 456 iwl_mvm_reset_phy_ctxts(mvm); 457 458 hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm); 459 460 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX; 461 462 BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK); 463 BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) || 464 IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK)); 465 466 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) 467 mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS; 468 else 469 mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS; 470 471 if (mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels) 472 hw->wiphy->bands[NL80211_BAND_2GHZ] = 473 &mvm->nvm_data->bands[NL80211_BAND_2GHZ]; 474 if (mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels) { 475 hw->wiphy->bands[NL80211_BAND_5GHZ] = 476 &mvm->nvm_data->bands[NL80211_BAND_5GHZ]; 477 478 if (fw_has_capa(&mvm->fw->ucode_capa, 479 IWL_UCODE_TLV_CAPA_BEAMFORMER) && 480 fw_has_api(&mvm->fw->ucode_capa, 481 IWL_UCODE_TLV_API_LQ_SS_PARAMS)) 482 hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap.cap |= 483 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE; 484 } 485 if (fw_has_capa(&mvm->fw->ucode_capa, 486 IWL_UCODE_TLV_CAPA_PSC_CHAN_SUPPORT) && 487 mvm->nvm_data->bands[NL80211_BAND_6GHZ].n_channels) 488 hw->wiphy->bands[NL80211_BAND_6GHZ] = 489 &mvm->nvm_data->bands[NL80211_BAND_6GHZ]; 490 491 hw->wiphy->hw_version = mvm->trans->hw_id; 492 493 if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM) 494 hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; 495 else 496 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; 497 498 hw->wiphy->max_sched_scan_reqs = 1; 499 hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX; 500 hw->wiphy->max_match_sets = iwl_umac_scan_get_max_profiles(mvm->fw); 501 /* we create the 802.11 header and zero length SSID IE. 
*/ 502 hw->wiphy->max_sched_scan_ie_len = 503 SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2; 504 hw->wiphy->max_sched_scan_plans = IWL_MAX_SCHED_SCAN_PLANS; 505 hw->wiphy->max_sched_scan_plan_interval = U16_MAX; 506 507 /* 508 * the firmware uses u8 for num of iterations, but 0xff is saved for 509 * infinite loop, so the maximum number of iterations is actually 254. 510 */ 511 hw->wiphy->max_sched_scan_plan_iterations = 254; 512 513 hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN | 514 NL80211_FEATURE_LOW_PRIORITY_SCAN | 515 NL80211_FEATURE_P2P_GO_OPPPS | 516 NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE | 517 NL80211_FEATURE_DYNAMIC_SMPS | 518 NL80211_FEATURE_STATIC_SMPS | 519 NL80211_FEATURE_SUPPORTS_WMM_ADMISSION; 520 521 if (fw_has_capa(&mvm->fw->ucode_capa, 522 IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT)) 523 hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION; 524 if (fw_has_capa(&mvm->fw->ucode_capa, 525 IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT)) 526 hw->wiphy->features |= NL80211_FEATURE_QUIET; 527 528 if (fw_has_capa(&mvm->fw->ucode_capa, 529 IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) 530 hw->wiphy->features |= 531 NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES; 532 533 if (fw_has_capa(&mvm->fw->ucode_capa, 534 IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)) 535 hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES; 536 537 if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_KEK_KCK_MATERIAL, 538 IWL_FW_CMD_VER_UNKNOWN) == 3) 539 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK; 540 541 if (fw_has_api(&mvm->fw->ucode_capa, 542 IWL_UCODE_TLV_API_SCAN_TSF_REPORT)) { 543 wiphy_ext_feature_set(hw->wiphy, 544 NL80211_EXT_FEATURE_SCAN_START_TIME); 545 wiphy_ext_feature_set(hw->wiphy, 546 NL80211_EXT_FEATURE_BSS_PARENT_TSF); 547 } 548 549 if (iwl_mvm_is_oce_supported(mvm)) { 550 u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_REQ_UMAC, 0); 551 552 wiphy_ext_feature_set(hw->wiphy, 553 NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP); 554 wiphy_ext_feature_set(hw->wiphy, 555 NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME); 556 wiphy_ext_feature_set(hw->wiphy, 557 NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE); 558 559 /* Old firmware also supports probe deferral and suppression */ 560 if (scan_ver < 15) 561 wiphy_ext_feature_set(hw->wiphy, 562 NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION); 563 } 564 565 if (mvm->nvm_data->sku_cap_11ax_enable && 566 !iwlwifi_mod_params.disable_11ax) { 567 hw->wiphy->iftype_ext_capab = he_iftypes_ext_capa; 568 hw->wiphy->num_iftype_ext_capab = 569 ARRAY_SIZE(he_iftypes_ext_capa); 570 571 ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID); 572 ieee80211_hw_set(hw, SUPPORTS_ONLY_HE_MULTI_BSSID); 573 } 574 575 mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; 576 577 #ifdef CONFIG_PM_SLEEP 578 if ((unified || mvm->fw->img[IWL_UCODE_WOWLAN].num_sec) && 579 mvm->trans->ops->d3_suspend && 580 mvm->trans->ops->d3_resume && 581 device_can_wakeup(mvm->trans->dev)) { 582 mvm->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT | 583 WIPHY_WOWLAN_DISCONNECT | 584 WIPHY_WOWLAN_EAP_IDENTITY_REQ | 585 WIPHY_WOWLAN_RFKILL_RELEASE | 586 WIPHY_WOWLAN_NET_DETECT; 587 mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | 588 WIPHY_WOWLAN_GTK_REKEY_FAILURE | 589 WIPHY_WOWLAN_4WAY_HANDSHAKE; 590 591 mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS; 592 mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN; 593 mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN; 594 mvm->wowlan.max_nd_match_sets = 595 iwl_umac_scan_get_max_profiles(mvm->fw); 596 hw->wiphy->wowlan = &mvm->wowlan; 597 } 598 #endif 
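	/*
	 * Note that WoWLAN is only advertised (hw->wiphy->wowlan set above)
	 * when CONFIG_PM_SLEEP is enabled, the firmware carries a D3-capable
	 * image (either the consolidated D3/D0 image or a separate WoWLAN
	 * image), the transport implements d3_suspend/d3_resume and the
	 * device is wakeup-capable; otherwise hw->wiphy->wowlan stays NULL.
	 */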
599 600 ret = iwl_mvm_leds_init(mvm); 601 if (ret) 602 return ret; 603 604 if (fw_has_capa(&mvm->fw->ucode_capa, 605 IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) { 606 IWL_DEBUG_TDLS(mvm, "TDLS supported\n"); 607 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; 608 ieee80211_hw_set(hw, TDLS_WIDER_BW); 609 } 610 611 if (fw_has_capa(&mvm->fw->ucode_capa, 612 IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) { 613 IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n"); 614 hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH; 615 } 616 617 hw->netdev_features |= mvm->cfg->features; 618 if (!iwl_mvm_is_csum_supported(mvm)) 619 hw->netdev_features &= ~IWL_CSUM_NETIF_FLAGS_MASK; 620 621 if (mvm->cfg->vht_mu_mimo_supported) 622 wiphy_ext_feature_set(hw->wiphy, 623 NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER); 624 625 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_PROTECTED_TWT)) 626 wiphy_ext_feature_set(hw->wiphy, 627 NL80211_EXT_FEATURE_PROTECTED_TWT); 628 629 iwl_mvm_vendor_cmds_register(mvm); 630 631 hw->wiphy->available_antennas_tx = iwl_mvm_get_valid_tx_ant(mvm); 632 hw->wiphy->available_antennas_rx = iwl_mvm_get_valid_rx_ant(mvm); 633 634 ret = ieee80211_register_hw(mvm->hw); 635 if (ret) { 636 iwl_mvm_leds_exit(mvm); 637 } 638 639 return ret; 640 } 641 642 static void iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, 643 struct ieee80211_sta *sta) 644 { 645 if (likely(sta)) { 646 if (likely(iwl_mvm_tx_skb_sta(mvm, skb, sta) == 0)) 647 return; 648 } else { 649 if (likely(iwl_mvm_tx_skb_non_sta(mvm, skb) == 0)) 650 return; 651 } 652 653 ieee80211_free_txskb(mvm->hw, skb); 654 } 655 656 static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, 657 struct ieee80211_tx_control *control, 658 struct sk_buff *skb) 659 { 660 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 661 struct ieee80211_sta *sta = control->sta; 662 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 663 struct ieee80211_hdr *hdr = (void *)skb->data; 664 bool offchannel = IEEE80211_SKB_CB(skb)->flags & 665 IEEE80211_TX_CTL_TX_OFFCHAN; 666 667 if (iwl_mvm_is_radio_killed(mvm)) { 668 IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n"); 669 goto drop; 670 } 671 672 if (offchannel && 673 !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) && 674 !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) 675 goto drop; 676 677 /* 678 * bufferable MMPDUs or MMPDUs on STA interfaces come via TXQs 679 * so we treat the others as broadcast 680 */ 681 if (ieee80211_is_mgmt(hdr->frame_control)) 682 sta = NULL; 683 684 /* If there is no sta, and it's not offchannel - send through AP */ 685 if (!sta && info->control.vif->type == NL80211_IFTYPE_STATION && 686 !offchannel) { 687 struct iwl_mvm_vif *mvmvif = 688 iwl_mvm_vif_from_mac80211(info->control.vif); 689 u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id); 690 691 if (ap_sta_id < mvm->fw->ucode_capa.num_stations) { 692 /* mac80211 holds rcu read lock */ 693 sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]); 694 if (IS_ERR_OR_NULL(sta)) 695 goto drop; 696 } 697 } 698 699 iwl_mvm_tx_skb(mvm, skb, sta); 700 return; 701 drop: 702 ieee80211_free_txskb(hw, skb); 703 } 704 705 void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq) 706 { 707 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 708 struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq); 709 struct sk_buff *skb = NULL; 710 711 /* 712 * No need for threads to be pending here, they can leave the first 713 * taker all the work. 
714 * 715 * mvmtxq->tx_request logic: 716 * 717 * If 0, no one is currently TXing, set to 1 to indicate current thread 718 * will now start TX and other threads should quit. 719 * 720 * If 1, another thread is currently TXing, set to 2 to indicate to 721 * that thread that there was another request. Since that request may 722 * have raced with the check whether the queue is empty, the TXing 723 * thread should check the queue's status one more time before leaving. 724 * This check is done in order to not leave any TX hanging in the queue 725 * until the next TX invocation (which may not even happen). 726 * 727 * If 2, another thread is currently TXing, and it will already double 728 * check the queue, so do nothing. 729 */ 730 if (atomic_fetch_add_unless(&mvmtxq->tx_request, 1, 2)) 731 return; 732 733 rcu_read_lock(); 734 do { 735 while (likely(!mvmtxq->stopped && 736 !test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status))) { 737 skb = ieee80211_tx_dequeue(hw, txq); 738 739 if (!skb) { 740 if (txq->sta) 741 IWL_DEBUG_TX(mvm, 742 "TXQ of sta %pM tid %d is now empty\n", 743 txq->sta->addr, 744 txq->tid); 745 break; 746 } 747 748 iwl_mvm_tx_skb(mvm, skb, txq->sta); 749 } 750 } while (atomic_dec_return(&mvmtxq->tx_request)); 751 rcu_read_unlock(); 752 } 753 754 static void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw *hw, 755 struct ieee80211_txq *txq) 756 { 757 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 758 struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq); 759 760 /* 761 * Please note that racing is handled very carefully here: 762 * mvmtxq->txq_id is updated during allocation, and mvmtxq->list is 763 * deleted afterwards. 764 * This means that if: 765 * mvmtxq->txq_id != INVALID_QUEUE && list_empty(&mvmtxq->list): 766 * queue is allocated and we can TX. 767 * mvmtxq->txq_id != INVALID_QUEUE && !list_empty(&mvmtxq->list): 768 * a race, should defer the frame. 769 * mvmtxq->txq_id == INVALID_QUEUE && list_empty(&mvmtxq->list): 770 * need to allocate the queue and defer the frame. 771 * mvmtxq->txq_id == INVALID_QUEUE && !list_empty(&mvmtxq->list): 772 * queue is already scheduled for allocation, no need to allocate, 773 * should defer the frame. 774 */ 775 776 /* If the queue is allocated TX and return. */ 777 if (!txq->sta || mvmtxq->txq_id != IWL_MVM_INVALID_QUEUE) { 778 /* 779 * Check that list is empty to avoid a race where txq_id is 780 * already updated, but the queue allocation work wasn't 781 * finished 782 */ 783 if (unlikely(txq->sta && !list_empty(&mvmtxq->list))) 784 return; 785 786 iwl_mvm_mac_itxq_xmit(hw, txq); 787 return; 788 } 789 790 /* The list is being deleted only after the queue is fully allocated. */ 791 if (!list_empty(&mvmtxq->list)) 792 return; 793 794 list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs); 795 schedule_work(&mvm->add_stream_wk); 796 } 797 798 #define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) 
\ 799 do { \ 800 if (!(le16_to_cpu(_tid_bm) & BIT(_tid))) \ 801 break; \ 802 iwl_fw_dbg_collect_trig(&(_mvm)->fwrt, _trig, _fmt); \ 803 } while (0) 804 805 static void 806 iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 807 struct ieee80211_sta *sta, u16 tid, u16 rx_ba_ssn, 808 enum ieee80211_ampdu_mlme_action action) 809 { 810 struct iwl_fw_dbg_trigger_tlv *trig; 811 struct iwl_fw_dbg_trigger_ba *ba_trig; 812 813 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), 814 FW_DBG_TRIGGER_BA); 815 if (!trig) 816 return; 817 818 ba_trig = (void *)trig->data; 819 820 switch (action) { 821 case IEEE80211_AMPDU_TX_OPERATIONAL: { 822 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 823 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; 824 825 CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_start, tid, 826 "TX AGG START: MAC %pM tid %d ssn %d\n", 827 sta->addr, tid, tid_data->ssn); 828 break; 829 } 830 case IEEE80211_AMPDU_TX_STOP_CONT: 831 CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_stop, tid, 832 "TX AGG STOP: MAC %pM tid %d\n", 833 sta->addr, tid); 834 break; 835 case IEEE80211_AMPDU_RX_START: 836 CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_start, tid, 837 "RX AGG START: MAC %pM tid %d ssn %d\n", 838 sta->addr, tid, rx_ba_ssn); 839 break; 840 case IEEE80211_AMPDU_RX_STOP: 841 CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_stop, tid, 842 "RX AGG STOP: MAC %pM tid %d\n", 843 sta->addr, tid); 844 break; 845 default: 846 break; 847 } 848 } 849 850 static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, 851 struct ieee80211_vif *vif, 852 struct ieee80211_ampdu_params *params) 853 { 854 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 855 int ret; 856 struct ieee80211_sta *sta = params->sta; 857 enum ieee80211_ampdu_mlme_action action = params->action; 858 u16 tid = params->tid; 859 u16 *ssn = ¶ms->ssn; 860 u16 buf_size = params->buf_size; 861 bool amsdu = params->amsdu; 862 u16 timeout = params->timeout; 863 864 IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n", 865 sta->addr, tid, action); 866 867 if (!(mvm->nvm_data->sku_cap_11n_enable)) 868 return -EACCES; 869 870 mutex_lock(&mvm->mutex); 871 872 switch (action) { 873 case IEEE80211_AMPDU_RX_START: 874 if (iwl_mvm_vif_from_mac80211(vif)->ap_sta_id == 875 iwl_mvm_sta_from_mac80211(sta)->sta_id) { 876 struct iwl_mvm_vif *mvmvif; 877 u16 macid = iwl_mvm_vif_from_mac80211(vif)->id; 878 struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[macid]; 879 880 mdata->opened_rx_ba_sessions = true; 881 mvmvif = iwl_mvm_vif_from_mac80211(vif); 882 cancel_delayed_work(&mvmvif->uapsd_nonagg_detected_wk); 883 } 884 if (!iwl_enable_rx_ampdu()) { 885 ret = -EINVAL; 886 break; 887 } 888 ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true, buf_size, 889 timeout); 890 break; 891 case IEEE80211_AMPDU_RX_STOP: 892 ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, buf_size, 893 timeout); 894 break; 895 case IEEE80211_AMPDU_TX_START: 896 if (!iwl_enable_tx_ampdu()) { 897 ret = -EINVAL; 898 break; 899 } 900 ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn); 901 break; 902 case IEEE80211_AMPDU_TX_STOP_CONT: 903 ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid); 904 break; 905 case IEEE80211_AMPDU_TX_STOP_FLUSH: 906 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: 907 ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid); 908 break; 909 case IEEE80211_AMPDU_TX_OPERATIONAL: 910 ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid, 911 buf_size, amsdu); 912 break; 913 default: 914 WARN_ON_ONCE(1); 915 ret = -EINVAL; 916 
break; 917 } 918 919 if (!ret) { 920 u16 rx_ba_ssn = 0; 921 922 if (action == IEEE80211_AMPDU_RX_START) 923 rx_ba_ssn = *ssn; 924 925 iwl_mvm_ampdu_check_trigger(mvm, vif, sta, tid, 926 rx_ba_ssn, action); 927 } 928 mutex_unlock(&mvm->mutex); 929 930 return ret; 931 } 932 933 static void iwl_mvm_cleanup_iterator(void *data, u8 *mac, 934 struct ieee80211_vif *vif) 935 { 936 struct iwl_mvm *mvm = data; 937 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 938 939 mvmvif->uploaded = false; 940 mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; 941 942 spin_lock_bh(&mvm->time_event_lock); 943 iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data); 944 spin_unlock_bh(&mvm->time_event_lock); 945 946 mvmvif->phy_ctxt = NULL; 947 memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data)); 948 memset(&mvmvif->probe_resp_data, 0, sizeof(mvmvif->probe_resp_data)); 949 } 950 951 static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) 952 { 953 iwl_mvm_stop_device(mvm); 954 955 mvm->cur_aid = 0; 956 957 mvm->scan_status = 0; 958 mvm->ps_disabled = false; 959 mvm->rfkill_safe_init_done = false; 960 961 /* just in case one was running */ 962 iwl_mvm_cleanup_roc_te(mvm); 963 ieee80211_remain_on_channel_expired(mvm->hw); 964 965 iwl_mvm_ftm_restart(mvm); 966 967 /* 968 * cleanup all interfaces, even inactive ones, as some might have 969 * gone down during the HW restart 970 */ 971 ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm); 972 973 mvm->p2p_device_vif = NULL; 974 975 iwl_mvm_reset_phy_ctxts(mvm); 976 memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table)); 977 memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif)); 978 memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd)); 979 980 ieee80211_wake_queues(mvm->hw); 981 982 mvm->rx_ba_sessions = 0; 983 mvm->fwrt.dump.conf = FW_DBG_INVALID; 984 mvm->monitor_on = false; 985 986 /* keep statistics ticking */ 987 iwl_mvm_accu_radio_stats(mvm); 988 } 989 990 int __iwl_mvm_mac_start(struct iwl_mvm *mvm) 991 { 992 int ret; 993 994 lockdep_assert_held(&mvm->mutex); 995 996 ret = iwl_mvm_mei_get_ownership(mvm); 997 if (ret) 998 return ret; 999 1000 if (mvm->mei_nvm_data) { 1001 /* We got the NIC, we can now free the MEI NVM data */ 1002 kfree(mvm->mei_nvm_data); 1003 mvm->mei_nvm_data = NULL; 1004 1005 /* 1006 * We can't free the nvm_data we allocated based on the SAP 1007 * data because we registered to cfg80211 with the channels 1008 * allocated on mvm->nvm_data. Keep a pointer in temp_nvm_data 1009 * just in order to be able free it later. 1010 * NULLify nvm_data so that we will read the NVM from the 1011 * firmware this time. 1012 */ 1013 mvm->temp_nvm_data = mvm->nvm_data; 1014 mvm->nvm_data = NULL; 1015 } 1016 1017 if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) { 1018 /* 1019 * Now convert the HW_RESTART_REQUESTED flag to IN_HW_RESTART 1020 * so later code will - from now on - see that we're doing it. 
1021 */ 1022 set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); 1023 clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status); 1024 /* Clean up some internal and mac80211 state on restart */ 1025 iwl_mvm_restart_cleanup(mvm); 1026 } 1027 ret = iwl_mvm_up(mvm); 1028 1029 iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_POST_INIT, 1030 NULL); 1031 iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_PERIODIC, 1032 NULL); 1033 1034 mvm->last_reset_or_resume_time_jiffies = jiffies; 1035 1036 if (ret && test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { 1037 /* Something went wrong - we need to finish some cleanup 1038 * that normally iwl_mvm_mac_restart_complete() below 1039 * would do. 1040 */ 1041 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); 1042 } 1043 1044 return ret; 1045 } 1046 1047 static int iwl_mvm_mac_start(struct ieee80211_hw *hw) 1048 { 1049 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1050 int ret; 1051 int retry, max_retry = 0; 1052 1053 mutex_lock(&mvm->mutex); 1054 1055 /* we are starting the mac not in error flow, and restart is enabled */ 1056 if (!test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) && 1057 iwlwifi_mod_params.fw_restart) { 1058 max_retry = IWL_MAX_INIT_RETRY; 1059 /* 1060 * This will prevent mac80211 recovery flows to trigger during 1061 * init failures 1062 */ 1063 set_bit(IWL_MVM_STATUS_STARTING, &mvm->status); 1064 } 1065 1066 for (retry = 0; retry <= max_retry; retry++) { 1067 ret = __iwl_mvm_mac_start(mvm); 1068 if (!ret) 1069 break; 1070 1071 /* 1072 * In PLDR sync PCI re-enumeration is needed. no point to retry 1073 * mac start before that. 1074 */ 1075 if (mvm->pldr_sync) { 1076 iwl_mei_alive_notif(false); 1077 iwl_trans_pcie_remove(mvm->trans, true); 1078 break; 1079 } 1080 1081 IWL_ERR(mvm, "mac start retry %d\n", retry); 1082 } 1083 clear_bit(IWL_MVM_STATUS_STARTING, &mvm->status); 1084 1085 mutex_unlock(&mvm->mutex); 1086 1087 iwl_mvm_mei_set_sw_rfkill_state(mvm); 1088 1089 return ret; 1090 } 1091 1092 static void iwl_mvm_restart_complete(struct iwl_mvm *mvm) 1093 { 1094 int ret; 1095 1096 mutex_lock(&mvm->mutex); 1097 1098 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); 1099 1100 ret = iwl_mvm_update_quotas(mvm, true, NULL); 1101 if (ret) 1102 IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n", 1103 ret); 1104 1105 iwl_mvm_send_recovery_cmd(mvm, ERROR_RECOVERY_END_OF_RECOVERY); 1106 1107 /* 1108 * If we have TDLS peers, remove them. We don't know the last seqno/PN 1109 * of packets the FW sent out, so we must reconnect. 1110 */ 1111 iwl_mvm_teardown_tdls_peers(mvm); 1112 1113 mutex_unlock(&mvm->mutex); 1114 } 1115 1116 static void 1117 iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw, 1118 enum ieee80211_reconfig_type reconfig_type) 1119 { 1120 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1121 1122 switch (reconfig_type) { 1123 case IEEE80211_RECONFIG_TYPE_RESTART: 1124 iwl_mvm_restart_complete(mvm); 1125 break; 1126 case IEEE80211_RECONFIG_TYPE_SUSPEND: 1127 break; 1128 } 1129 } 1130 1131 void __iwl_mvm_mac_stop(struct iwl_mvm *mvm) 1132 { 1133 lockdep_assert_held(&mvm->mutex); 1134 1135 iwl_mvm_ftm_initiator_smooth_stop(mvm); 1136 1137 /* firmware counters are obviously reset now, but we shouldn't 1138 * partially track so also clear the fw_reset_accu counters. 
	 */
	memset(&mvm->accu_radio_stats, 0, sizeof(mvm->accu_radio_stats));

	/* async_handlers_wk is now blocked */

	if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12)
		iwl_mvm_rm_aux_sta(mvm);

	iwl_mvm_stop_device(mvm);

	iwl_mvm_async_handlers_purge(mvm);
	/* async_handlers_list is empty and will stay empty: HW is stopped */

	/*
	 * Clear IN_HW_RESTART and HW_RESTART_REQUESTED flag when stopping the
	 * hw (as restart_complete() won't be called in this case) and mac80211
	 * won't execute the restart.
	 * But make sure to cleanup interfaces that have gone down before/during
	 * HW restart was requested.
	 */
	if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
	    test_and_clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
			       &mvm->status))
		ieee80211_iterate_interfaces(mvm->hw, 0,
					     iwl_mvm_cleanup_iterator, mvm);

	/* We shouldn't have any UIDs still set. Loop over all the UIDs to
	 * make sure there's nothing left there and warn if any is found.
	 */
	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
		int i;

		for (i = 0; i < mvm->max_scans; i++) {
			if (WARN_ONCE(mvm->scan_uid_status[i],
				      "UMAC scan UID %d status was not cleaned\n",
				      i))
				mvm->scan_uid_status[i] = 0;
		}
	}
}

static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	flush_work(&mvm->async_handlers_wk);
	flush_work(&mvm->add_stream_wk);

	/*
	 * Lock and clear the firmware running bit here already, so that
	 * new commands coming in elsewhere, e.g. from debugfs, will not
	 * be able to proceed. This is important here because one of those
	 * debugfs files causes the firmware dump to be triggered, and if we
	 * don't stop debugfs accesses before canceling that it could be
	 * retriggered after we flush it but before we've cleared the bit.
	 */
	clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);

	cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork);
	cancel_delayed_work_sync(&mvm->scan_timeout_dwork);

	/*
	 * The work item could be running or queued if the
	 * ROC time event stops just as we get here.
	 */
	flush_work(&mvm->roc_done_wk);

	iwl_mvm_mei_set_sw_rfkill_state(mvm);

	mutex_lock(&mvm->mutex);
	__iwl_mvm_mac_stop(mvm);
	mutex_unlock(&mvm->mutex);

	/*
	 * The worker might have been waiting for the mutex, let it run and
	 * discover that its list is now empty.
1215 */ 1216 cancel_work_sync(&mvm->async_handlers_wk); 1217 } 1218 1219 static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm) 1220 { 1221 u16 i; 1222 1223 lockdep_assert_held(&mvm->mutex); 1224 1225 for (i = 0; i < NUM_PHY_CTX; i++) 1226 if (!mvm->phy_ctxts[i].ref) 1227 return &mvm->phy_ctxts[i]; 1228 1229 IWL_ERR(mvm, "No available PHY context\n"); 1230 return NULL; 1231 } 1232 1233 static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 1234 s16 tx_power) 1235 { 1236 u32 cmd_id = REDUCE_TX_POWER_CMD; 1237 int len; 1238 struct iwl_dev_tx_power_cmd cmd = { 1239 .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC), 1240 .common.mac_context_id = 1241 cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id), 1242 .common.pwr_restriction = cpu_to_le16(8 * tx_power), 1243 }; 1244 u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1245 IWL_FW_CMD_VER_UNKNOWN); 1246 1247 if (tx_power == IWL_DEFAULT_MAX_TX_POWER) 1248 cmd.common.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER); 1249 1250 if (cmd_ver == 7) 1251 len = sizeof(cmd.v7); 1252 else if (cmd_ver == 6) 1253 len = sizeof(cmd.v6); 1254 else if (fw_has_api(&mvm->fw->ucode_capa, 1255 IWL_UCODE_TLV_API_REDUCE_TX_POWER)) 1256 len = sizeof(cmd.v5); 1257 else if (fw_has_capa(&mvm->fw->ucode_capa, 1258 IWL_UCODE_TLV_CAPA_TX_POWER_ACK)) 1259 len = sizeof(cmd.v4); 1260 else 1261 len = sizeof(cmd.v3); 1262 1263 /* all structs have the same common part, add it */ 1264 len += sizeof(cmd.common); 1265 1266 return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd); 1267 } 1268 1269 static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw, 1270 struct ieee80211_vif *vif) 1271 { 1272 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1273 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1274 int ret; 1275 1276 mutex_lock(&mvm->mutex); 1277 1278 if (vif->type == NL80211_IFTYPE_STATION) { 1279 struct iwl_mvm_sta *mvmsta; 1280 1281 mvmvif->csa_bcn_pending = false; 1282 mvmsta = iwl_mvm_sta_from_staid_protected(mvm, 1283 mvmvif->ap_sta_id); 1284 1285 if (WARN_ON(!mvmsta)) { 1286 ret = -EIO; 1287 goto out_unlock; 1288 } 1289 1290 iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false); 1291 1292 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 1293 1294 if (!fw_has_capa(&mvm->fw->ucode_capa, 1295 IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) { 1296 ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0); 1297 if (ret) 1298 goto out_unlock; 1299 1300 iwl_mvm_stop_session_protection(mvm, vif); 1301 } 1302 } 1303 1304 mvmvif->ps_disabled = false; 1305 1306 ret = iwl_mvm_power_update_ps(mvm); 1307 1308 out_unlock: 1309 if (mvmvif->csa_failed) 1310 ret = -EIO; 1311 mutex_unlock(&mvm->mutex); 1312 1313 return ret; 1314 } 1315 1316 static void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw, 1317 struct ieee80211_vif *vif) 1318 { 1319 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1320 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1321 struct iwl_chan_switch_te_cmd cmd = { 1322 .mac_id = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, 1323 mvmvif->color)), 1324 .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE), 1325 }; 1326 1327 /* 1328 * In the new flow since FW is in charge of the timing, 1329 * if driver has canceled the channel switch he will receive the 1330 * CHANNEL_SWITCH_START_NOTIF notification from FW and then cancel it 1331 */ 1332 if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP, 1333 CHANNEL_SWITCH_ERROR_NOTIF, 0)) 1334 return; 1335 1336 IWL_DEBUG_MAC80211(mvm, "Abort CSA on mac %d\n", mvmvif->id); 
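	/*
	 * Without the CHANNEL_SWITCH_CMD capability the driver removes the
	 * CSA time event it scheduled itself; with it, the FW owns the timing
	 * and is told to drop its time event via CHANNEL_SWITCH_TIME_EVENT_CMD
	 * with FW_CTXT_ACTION_REMOVE. Either way csa_failed is set so that the
	 * iwl_mvm_post_channel_switch() call below ends up returning -EIO.
	 */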
1337 1338 mutex_lock(&mvm->mutex); 1339 if (!fw_has_capa(&mvm->fw->ucode_capa, 1340 IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) 1341 iwl_mvm_remove_csa_period(mvm, vif); 1342 else 1343 WARN_ON(iwl_mvm_send_cmd_pdu(mvm, 1344 WIDE_ID(MAC_CONF_GROUP, 1345 CHANNEL_SWITCH_TIME_EVENT_CMD), 1346 0, sizeof(cmd), &cmd)); 1347 mvmvif->csa_failed = true; 1348 mutex_unlock(&mvm->mutex); 1349 1350 iwl_mvm_post_channel_switch(hw, vif); 1351 } 1352 1353 static void iwl_mvm_channel_switch_disconnect_wk(struct work_struct *wk) 1354 { 1355 struct iwl_mvm_vif *mvmvif; 1356 struct ieee80211_vif *vif; 1357 1358 mvmvif = container_of(wk, struct iwl_mvm_vif, csa_work.work); 1359 vif = container_of((void *)mvmvif, struct ieee80211_vif, drv_priv); 1360 1361 /* Trigger disconnect (should clear the CSA state) */ 1362 ieee80211_chswitch_done(vif, false); 1363 } 1364 1365 static u8 1366 iwl_mvm_chandef_get_primary_80(struct cfg80211_chan_def *chandef) 1367 { 1368 int data_start; 1369 int control_start; 1370 int bw; 1371 1372 if (chandef->width == NL80211_CHAN_WIDTH_320) 1373 bw = 320; 1374 else if (chandef->width == NL80211_CHAN_WIDTH_160) 1375 bw = 160; 1376 else 1377 return 0; 1378 1379 /* data is bw wide so the start is half the width */ 1380 data_start = chandef->center_freq1 - bw / 2; 1381 /* control is 20Mhz width */ 1382 control_start = chandef->chan->center_freq - 10; 1383 1384 return (control_start - data_start) / 80; 1385 } 1386 1387 static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, 1388 struct ieee80211_vif *vif) 1389 { 1390 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1391 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1392 int ret; 1393 1394 mvmvif->mvm = mvm; 1395 RCU_INIT_POINTER(mvmvif->probe_resp_data, NULL); 1396 1397 /* 1398 * Not much to do here. The stack will not allow interface 1399 * types or combinations that we didn't advertise, so we 1400 * don't really have to check the types. 1401 */ 1402 1403 mutex_lock(&mvm->mutex); 1404 1405 /* make sure that beacon statistics don't go backwards with FW reset */ 1406 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) 1407 mvmvif->beacon_stats.accu_num_beacons += 1408 mvmvif->beacon_stats.num_beacons; 1409 1410 /* Allocate resources for the MAC context, and add it to the fw */ 1411 ret = iwl_mvm_mac_ctxt_init(mvm, vif); 1412 if (ret) 1413 goto out_unlock; 1414 1415 rcu_assign_pointer(mvm->vif_id_to_mac[mvmvif->id], vif); 1416 1417 /* 1418 * The AP binding flow can be done only after the beacon 1419 * template is configured (which happens only in the mac80211 1420 * start_ap() flow), and adding the broadcast station can happen 1421 * only after the binding. 1422 * In addition, since modifying the MAC before adding a bcast 1423 * station is not allowed by the FW, delay the adding of MAC context to 1424 * the point where we can also add the bcast station. 
1425 * In short: there's not much we can do at this point, other than 1426 * allocating resources :) 1427 */ 1428 if (vif->type == NL80211_IFTYPE_AP || 1429 vif->type == NL80211_IFTYPE_ADHOC) { 1430 ret = iwl_mvm_alloc_bcast_sta(mvm, vif); 1431 if (ret) { 1432 IWL_ERR(mvm, "Failed to allocate bcast sta\n"); 1433 goto out_unlock; 1434 } 1435 1436 /* 1437 * Only queue for this station is the mcast queue, 1438 * which shouldn't be in TFD mask anyway 1439 */ 1440 ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->mcast_sta, 1441 0, vif->type, 1442 IWL_STA_MULTICAST); 1443 if (ret) 1444 goto out_unlock; 1445 1446 iwl_mvm_vif_dbgfs_register(mvm, vif); 1447 goto out_unlock; 1448 } 1449 1450 mvmvif->features |= hw->netdev_features; 1451 1452 ret = iwl_mvm_mac_ctxt_add(mvm, vif); 1453 if (ret) 1454 goto out_unlock; 1455 1456 ret = iwl_mvm_power_update_mac(mvm); 1457 if (ret) 1458 goto out_remove_mac; 1459 1460 /* beacon filtering */ 1461 ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); 1462 if (ret) 1463 goto out_remove_mac; 1464 1465 if (!mvm->bf_allowed_vif && 1466 vif->type == NL80211_IFTYPE_STATION && !vif->p2p) { 1467 mvm->bf_allowed_vif = mvmvif; 1468 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER | 1469 IEEE80211_VIF_SUPPORTS_CQM_RSSI; 1470 } 1471 1472 /* 1473 * P2P_DEVICE interface does not have a channel context assigned to it, 1474 * so a dedicated PHY context is allocated to it and the corresponding 1475 * MAC context is bound to it at this stage. 1476 */ 1477 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { 1478 1479 mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); 1480 if (!mvmvif->phy_ctxt) { 1481 ret = -ENOSPC; 1482 goto out_free_bf; 1483 } 1484 1485 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); 1486 ret = iwl_mvm_binding_add_vif(mvm, vif); 1487 if (ret) 1488 goto out_unref_phy; 1489 1490 ret = iwl_mvm_add_p2p_bcast_sta(mvm, vif); 1491 if (ret) 1492 goto out_unbind; 1493 1494 /* Save a pointer to p2p device vif, so it can later be used to 1495 * update the p2p device MAC when a GO is started/stopped */ 1496 mvm->p2p_device_vif = vif; 1497 } 1498 1499 iwl_mvm_tcm_add_vif(mvm, vif); 1500 INIT_DELAYED_WORK(&mvmvif->csa_work, 1501 iwl_mvm_channel_switch_disconnect_wk); 1502 1503 if (vif->type == NL80211_IFTYPE_MONITOR) { 1504 mvm->monitor_on = true; 1505 mvm->monitor_p80 = 1506 iwl_mvm_chandef_get_primary_80(&vif->bss_conf.chandef); 1507 } 1508 1509 iwl_mvm_vif_dbgfs_register(mvm, vif); 1510 1511 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && 1512 vif->type == NL80211_IFTYPE_STATION && !vif->p2p && 1513 !mvm->csme_vif && mvm->mei_registered) { 1514 iwl_mei_set_nic_info(vif->addr, mvm->nvm_data->hw_addr); 1515 iwl_mei_set_netdev(ieee80211_vif_to_wdev(vif)->netdev); 1516 mvm->csme_vif = vif; 1517 } 1518 1519 goto out_unlock; 1520 1521 out_unbind: 1522 iwl_mvm_binding_remove_vif(mvm, vif); 1523 out_unref_phy: 1524 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); 1525 out_free_bf: 1526 if (mvm->bf_allowed_vif == mvmvif) { 1527 mvm->bf_allowed_vif = NULL; 1528 vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER | 1529 IEEE80211_VIF_SUPPORTS_CQM_RSSI); 1530 } 1531 out_remove_mac: 1532 mvmvif->phy_ctxt = NULL; 1533 iwl_mvm_mac_ctxt_remove(mvm, vif); 1534 out_unlock: 1535 mutex_unlock(&mvm->mutex); 1536 1537 return ret; 1538 } 1539 1540 static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm, 1541 struct ieee80211_vif *vif) 1542 { 1543 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { 1544 /* 1545 * Flush the ROC worker which will flush the OFFCHANNEL queue. 
1546 * We assume here that all the packets sent to the OFFCHANNEL 1547 * queue are sent in ROC session. 1548 */ 1549 flush_work(&mvm->roc_done_wk); 1550 } 1551 } 1552 1553 static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, 1554 struct ieee80211_vif *vif) 1555 { 1556 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1557 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1558 struct iwl_probe_resp_data *probe_data; 1559 1560 iwl_mvm_prepare_mac_removal(mvm, vif); 1561 1562 if (!(vif->type == NL80211_IFTYPE_AP || 1563 vif->type == NL80211_IFTYPE_ADHOC)) 1564 iwl_mvm_tcm_rm_vif(mvm, vif); 1565 1566 mutex_lock(&mvm->mutex); 1567 1568 if (vif == mvm->csme_vif) { 1569 iwl_mei_set_netdev(NULL); 1570 mvm->csme_vif = NULL; 1571 } 1572 1573 probe_data = rcu_dereference_protected(mvmvif->probe_resp_data, 1574 lockdep_is_held(&mvm->mutex)); 1575 RCU_INIT_POINTER(mvmvif->probe_resp_data, NULL); 1576 if (probe_data) 1577 kfree_rcu(probe_data, rcu_head); 1578 1579 if (mvm->bf_allowed_vif == mvmvif) { 1580 mvm->bf_allowed_vif = NULL; 1581 vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER | 1582 IEEE80211_VIF_SUPPORTS_CQM_RSSI); 1583 } 1584 1585 if (vif->bss_conf.ftm_responder) 1586 memset(&mvm->ftm_resp_stats, 0, sizeof(mvm->ftm_resp_stats)); 1587 1588 iwl_mvm_vif_dbgfs_clean(mvm, vif); 1589 1590 /* 1591 * For AP/GO interface, the tear down of the resources allocated to the 1592 * interface is be handled as part of the stop_ap flow. 1593 */ 1594 if (vif->type == NL80211_IFTYPE_AP || 1595 vif->type == NL80211_IFTYPE_ADHOC) { 1596 #ifdef CONFIG_NL80211_TESTMODE 1597 if (vif == mvm->noa_vif) { 1598 mvm->noa_vif = NULL; 1599 mvm->noa_duration = 0; 1600 } 1601 #endif 1602 iwl_mvm_dealloc_int_sta(mvm, &mvmvif->mcast_sta); 1603 iwl_mvm_dealloc_bcast_sta(mvm, vif); 1604 goto out_release; 1605 } 1606 1607 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { 1608 mvm->p2p_device_vif = NULL; 1609 iwl_mvm_rm_p2p_bcast_sta(mvm, vif); 1610 iwl_mvm_binding_remove_vif(mvm, vif); 1611 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); 1612 mvmvif->phy_ctxt = NULL; 1613 } 1614 1615 iwl_mvm_power_update_mac(mvm); 1616 iwl_mvm_mac_ctxt_remove(mvm, vif); 1617 1618 RCU_INIT_POINTER(mvm->vif_id_to_mac[mvmvif->id], NULL); 1619 1620 if (vif->type == NL80211_IFTYPE_MONITOR) 1621 mvm->monitor_on = false; 1622 1623 out_release: 1624 mutex_unlock(&mvm->mutex); 1625 } 1626 1627 static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed) 1628 { 1629 return 0; 1630 } 1631 1632 struct iwl_mvm_mc_iter_data { 1633 struct iwl_mvm *mvm; 1634 int port_id; 1635 }; 1636 1637 static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac, 1638 struct ieee80211_vif *vif) 1639 { 1640 struct iwl_mvm_mc_iter_data *data = _data; 1641 struct iwl_mvm *mvm = data->mvm; 1642 struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd; 1643 struct iwl_host_cmd hcmd = { 1644 .id = MCAST_FILTER_CMD, 1645 .flags = CMD_ASYNC, 1646 .dataflags[0] = IWL_HCMD_DFL_NOCOPY, 1647 }; 1648 int ret, len; 1649 1650 /* if we don't have free ports, mcast frames will be dropped */ 1651 if (WARN_ON_ONCE(data->port_id >= MAX_PORT_ID_NUM)) 1652 return; 1653 1654 if (vif->type != NL80211_IFTYPE_STATION || 1655 !vif->cfg.assoc) 1656 return; 1657 1658 cmd->port_id = data->port_id++; 1659 memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN); 1660 len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4); 1661 1662 hcmd.len[0] = len; 1663 hcmd.data[0] = cmd; 1664 1665 ret = iwl_mvm_send_cmd(mvm, &hcmd); 1666 if (ret) 1667 IWL_ERR(mvm, "mcast filter cmd error. 
ret=%d\n", ret);
}

static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
{
	struct iwl_mvm_mc_iter_data iter_data = {
		.mvm = mvm,
	};
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
		return;

	ieee80211_iterate_active_interfaces_atomic(
		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
		iwl_mvm_mc_iface_iterator, &iter_data);

	/*
	 * Send a (synchronous) echo command so that we wait for the
	 * multiple asynchronous MCAST_FILTER_CMD commands sent by
	 * the interface iterator. Otherwise, we might get here over
	 * and over again (by userspace just sending a lot of these)
	 * and the CPU can send them faster than the firmware can
	 * process them.
	 * Note that the CPU is still faster - but with this we'll
	 * actually send fewer commands overall because the CPU will
	 * not schedule the work in mac80211 as frequently if it's
	 * still running when rescheduled (possibly multiple times).
	 */
	ret = iwl_mvm_send_cmd_pdu(mvm, ECHO_CMD, 0, 0, NULL);
	if (ret)
		IWL_ERR(mvm, "Failed to synchronize multicast groups update\n");
}

static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw,
				     struct netdev_hw_addr_list *mc_list)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mcast_filter_cmd *cmd;
	struct netdev_hw_addr *addr;
	int addr_count;
	bool pass_all;
	int len;

	addr_count = netdev_hw_addr_list_count(mc_list);
	pass_all = addr_count > MAX_MCAST_FILTERING_ADDRESSES ||
		   IWL_MVM_FW_MCAST_FILTER_PASS_ALL;
	if (pass_all)
		addr_count = 0;

	len = roundup(sizeof(*cmd) + addr_count * ETH_ALEN, 4);
	cmd = kzalloc(len, GFP_ATOMIC);
	if (!cmd)
		return 0;

	if (pass_all) {
		cmd->pass_all = 1;
		return (u64)(unsigned long)cmd;
	}

	netdev_hw_addr_list_for_each(addr, mc_list) {
		IWL_DEBUG_MAC80211(mvm, "mcast addr (%d): %pM\n",
				   cmd->count, addr->addr);
		memcpy(&cmd->addr_list[cmd->count * ETH_ALEN],
		       addr->addr, ETH_ALEN);
		cmd->count++;
	}

	return (u64)(unsigned long)cmd;
}

static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
				     unsigned int changed_flags,
				     unsigned int *total_flags,
				     u64 multicast)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast;

	mutex_lock(&mvm->mutex);

	/* replace previous configuration */
	kfree(mvm->mcast_filter_cmd);
	mvm->mcast_filter_cmd = cmd;

	if (!cmd)
		goto out;

	if (changed_flags & FIF_ALLMULTI)
		cmd->pass_all = !!(*total_flags & FIF_ALLMULTI);

	if (cmd->pass_all)
		cmd->count = 0;

	iwl_mvm_recalc_multicast(mvm);
out:
	mutex_unlock(&mvm->mutex);
	*total_flags = 0;
}

static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					unsigned int filter_flags,
					unsigned int changed_flags)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	/* We only support filtering probe requests */
	if (!(changed_flags & FIF_PROBE_REQ))
		return;

	/* Supported only for p2p client interfaces */
	if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc ||
	    !vif->p2p)
		return;

	mutex_lock(&mvm->mutex);
	iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
1787 mutex_unlock(&mvm->mutex); 1788 } 1789 1790 static int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm, 1791 struct ieee80211_vif *vif) 1792 { 1793 struct iwl_mu_group_mgmt_cmd cmd = {}; 1794 1795 memcpy(cmd.membership_status, vif->bss_conf.mu_group.membership, 1796 WLAN_MEMBERSHIP_LEN); 1797 memcpy(cmd.user_position, vif->bss_conf.mu_group.position, 1798 WLAN_USER_POSITION_LEN); 1799 1800 return iwl_mvm_send_cmd_pdu(mvm, 1801 WIDE_ID(DATA_PATH_GROUP, 1802 UPDATE_MU_GROUPS_CMD), 1803 0, sizeof(cmd), &cmd); 1804 } 1805 1806 static void iwl_mvm_mu_mimo_iface_iterator(void *_data, u8 *mac, 1807 struct ieee80211_vif *vif) 1808 { 1809 if (vif->bss_conf.mu_mimo_owner) { 1810 struct iwl_mu_group_mgmt_notif *notif = _data; 1811 1812 /* 1813 * MU-MIMO Group Id action frame is little endian. We treat 1814 * the data received from firmware as if it came from the 1815 * action frame, so no conversion is needed. 1816 */ 1817 ieee80211_update_mu_groups(vif, 0, 1818 (u8 *)¬if->membership_status, 1819 (u8 *)¬if->user_position); 1820 } 1821 } 1822 1823 void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm, 1824 struct iwl_rx_cmd_buffer *rxb) 1825 { 1826 struct iwl_rx_packet *pkt = rxb_addr(rxb); 1827 struct iwl_mu_group_mgmt_notif *notif = (void *)pkt->data; 1828 1829 ieee80211_iterate_active_interfaces_atomic( 1830 mvm->hw, IEEE80211_IFACE_ITER_NORMAL, 1831 iwl_mvm_mu_mimo_iface_iterator, notif); 1832 } 1833 1834 static u8 iwl_mvm_he_get_ppe_val(u8 *ppe, u8 ppe_pos_bit) 1835 { 1836 u8 byte_num = ppe_pos_bit / 8; 1837 u8 bit_num = ppe_pos_bit % 8; 1838 u8 residue_bits; 1839 u8 res; 1840 1841 if (bit_num <= 5) 1842 return (ppe[byte_num] >> bit_num) & 1843 (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE) - 1); 1844 1845 /* 1846 * If bit_num > 5, we have to combine bits with next byte. 1847 * Calculate how many bits we need to take from current byte (called 1848 * here "residue_bits"), and add them to bits from next byte. 1849 */ 1850 1851 residue_bits = 8 - bit_num; 1852 1853 res = (ppe[byte_num + 1] & 1854 (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE - residue_bits) - 1)) << 1855 residue_bits; 1856 res += (ppe[byte_num] >> bit_num) & (BIT(residue_bits) - 1); 1857 1858 return res; 1859 } 1860 1861 static void iwl_mvm_parse_ppe(struct iwl_mvm *mvm, 1862 struct iwl_he_pkt_ext_v2 *pkt_ext, u8 nss, 1863 u8 ru_index_bitmap, u8 *ppe, u8 ppe_pos_bit, 1864 bool inheritance) 1865 { 1866 int i; 1867 1868 /* 1869 * FW currently supports only nss == MAX_HE_SUPP_NSS 1870 * 1871 * If nss > MAX: we can ignore values we don't support 1872 * If nss < MAX: we can set zeros in other streams 1873 */ 1874 if (nss > MAX_HE_SUPP_NSS) { 1875 IWL_DEBUG_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss, 1876 MAX_HE_SUPP_NSS); 1877 nss = MAX_HE_SUPP_NSS; 1878 } 1879 1880 for (i = 0; i < nss; i++) { 1881 u8 ru_index_tmp = ru_index_bitmap << 1; 1882 u8 low_th = IWL_HE_PKT_EXT_NONE, high_th = IWL_HE_PKT_EXT_NONE; 1883 u8 bw; 1884 1885 for (bw = 0; 1886 bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]); 1887 bw++) { 1888 ru_index_tmp >>= 1; 1889 1890 /* 1891 * According to the 11be spec, if for a specific BW the PPE Thresholds 1892 * isn't present - it should inherit the thresholds from the last 1893 * BW for which we had PPE Thresholds. 
In 11ax though, we don't have 1894 * this inheritance - continue in this case 1895 */ 1896 if (!(ru_index_tmp & 1)) { 1897 if (inheritance) 1898 goto set_thresholds; 1899 else 1900 continue; 1901 } 1902 1903 high_th = iwl_mvm_he_get_ppe_val(ppe, ppe_pos_bit); 1904 ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE; 1905 low_th = iwl_mvm_he_get_ppe_val(ppe, ppe_pos_bit); 1906 ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE; 1907 1908 set_thresholds: 1909 pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th; 1910 pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th; 1911 } 1912 } 1913 } 1914 1915 static void iwl_mvm_set_pkt_ext_from_he_ppe(struct iwl_mvm *mvm, 1916 struct ieee80211_sta *sta, 1917 struct iwl_he_pkt_ext_v2 *pkt_ext, 1918 bool inheritance) 1919 { 1920 u8 nss = (sta->deflink.he_cap.ppe_thres[0] & IEEE80211_PPE_THRES_NSS_MASK) + 1; 1921 u8 *ppe = &sta->deflink.he_cap.ppe_thres[0]; 1922 u8 ru_index_bitmap = 1923 u8_get_bits(*ppe, 1924 IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK); 1925 /* Starting after PPE header */ 1926 u8 ppe_pos_bit = IEEE80211_HE_PPE_THRES_INFO_HEADER_SIZE; 1927 1928 iwl_mvm_parse_ppe(mvm, pkt_ext, nss, ru_index_bitmap, ppe, ppe_pos_bit, 1929 inheritance); 1930 } 1931 1932 static void iwl_mvm_set_pkt_ext_from_nominal_padding(struct iwl_he_pkt_ext_v2 *pkt_ext, 1933 u8 nominal_padding, 1934 u32 *flags) 1935 { 1936 int low_th = -1; 1937 int high_th = -1; 1938 int i; 1939 1940 /* all the macros are the same for EHT and HE */ 1941 switch (nominal_padding) { 1942 case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_0US: 1943 low_th = IWL_HE_PKT_EXT_NONE; 1944 high_th = IWL_HE_PKT_EXT_NONE; 1945 break; 1946 case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US: 1947 low_th = IWL_HE_PKT_EXT_BPSK; 1948 high_th = IWL_HE_PKT_EXT_NONE; 1949 break; 1950 case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US: 1951 case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_20US: 1952 low_th = IWL_HE_PKT_EXT_NONE; 1953 high_th = IWL_HE_PKT_EXT_BPSK; 1954 break; 1955 } 1956 1957 /* Set the PPE thresholds accordingly */ 1958 if (low_th >= 0 && high_th >= 0) { 1959 for (i = 0; i < MAX_HE_SUPP_NSS; i++) { 1960 u8 bw; 1961 1962 for (bw = 0; 1963 bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]); 1964 bw++) { 1965 pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th; 1966 pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th; 1967 } 1968 } 1969 1970 *flags |= STA_CTXT_HE_PACKET_EXT; 1971 } 1972 } 1973 1974 static void iwl_mvm_get_optimal_ppe_info(struct iwl_he_pkt_ext_v2 *pkt_ext, 1975 u8 nominal_padding) 1976 { 1977 int i; 1978 1979 for (i = 0; i < MAX_HE_SUPP_NSS; i++) { 1980 u8 bw; 1981 1982 for (bw = 0; bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]); 1983 bw++) { 1984 u8 *qam_th = &pkt_ext->pkt_ext_qam_th[i][bw][0]; 1985 1986 if (nominal_padding > 1987 IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US && 1988 qam_th[1] == IWL_HE_PKT_EXT_NONE) 1989 qam_th[1] = IWL_HE_PKT_EXT_4096QAM; 1990 else if (nominal_padding == 1991 IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US && 1992 qam_th[0] == IWL_HE_PKT_EXT_NONE && 1993 qam_th[1] == IWL_HE_PKT_EXT_NONE) 1994 qam_th[0] = IWL_HE_PKT_EXT_4096QAM; 1995 } 1996 } 1997 } 1998 1999 static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, 2000 struct ieee80211_vif *vif, u8 sta_id) 2001 { 2002 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2003 struct iwl_he_sta_context_cmd_v3 sta_ctxt_cmd = { 2004 .sta_id = sta_id, 2005 .tid_limit = IWL_MAX_TID_COUNT, 2006 .bss_color = vif->bss_conf.he_bss_color.color, 2007 .htc_trig_based_pkt_ext = vif->bss_conf.htc_trig_based_pkt_ext, 2008 
.frame_time_rts_th = 2009 cpu_to_le16(vif->bss_conf.frame_time_rts_th), 2010 }; 2011 struct iwl_he_sta_context_cmd_v2 sta_ctxt_cmd_v2 = {}; 2012 u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, STA_HE_CTXT_CMD); 2013 u8 ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 2); 2014 int size; 2015 struct ieee80211_sta *sta; 2016 u32 flags; 2017 int i; 2018 const struct ieee80211_sta_he_cap *own_he_cap = NULL; 2019 struct ieee80211_chanctx_conf *chanctx_conf; 2020 const struct ieee80211_supported_band *sband; 2021 void *cmd; 2022 u8 nominal_padding; 2023 2024 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_MBSSID_HE)) 2025 ver = 1; 2026 2027 switch (ver) { 2028 case 1: 2029 /* same layout as v2 except some data at the end */ 2030 cmd = &sta_ctxt_cmd_v2; 2031 size = sizeof(struct iwl_he_sta_context_cmd_v1); 2032 break; 2033 case 2: 2034 cmd = &sta_ctxt_cmd_v2; 2035 size = sizeof(struct iwl_he_sta_context_cmd_v2); 2036 break; 2037 case 3: 2038 cmd = &sta_ctxt_cmd; 2039 size = sizeof(struct iwl_he_sta_context_cmd_v3); 2040 break; 2041 default: 2042 IWL_ERR(mvm, "bad STA_HE_CTXT_CMD version %d\n", ver); 2043 return; 2044 } 2045 2046 rcu_read_lock(); 2047 2048 chanctx_conf = rcu_dereference(vif->bss_conf.chanctx_conf); 2049 if (WARN_ON(!chanctx_conf)) { 2050 rcu_read_unlock(); 2051 return; 2052 } 2053 2054 sband = mvm->hw->wiphy->bands[chanctx_conf->def.chan->band]; 2055 own_he_cap = ieee80211_get_he_iftype_cap(sband, 2056 ieee80211_vif_type_p2p(vif)); 2057 2058 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_ctxt_cmd.sta_id]); 2059 if (IS_ERR_OR_NULL(sta)) { 2060 rcu_read_unlock(); 2061 WARN(1, "Can't find STA to configure HE\n"); 2062 return; 2063 } 2064 2065 if (!sta->deflink.he_cap.has_he) { 2066 rcu_read_unlock(); 2067 return; 2068 } 2069 2070 flags = 0; 2071 2072 /* Block 26-tone RU OFDMA transmissions */ 2073 if (mvmvif->he_ru_2mhz_block) 2074 flags |= STA_CTXT_HE_RU_2MHZ_BLOCK; 2075 2076 /* HTC flags */ 2077 if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[0] & 2078 IEEE80211_HE_MAC_CAP0_HTC_HE) 2079 sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_SUPPORT); 2080 if ((sta->deflink.he_cap.he_cap_elem.mac_cap_info[1] & 2081 IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION) || 2082 (sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] & 2083 IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION)) { 2084 u8 link_adap = 2085 ((sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] & 2086 IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION) << 1) + 2087 (sta->deflink.he_cap.he_cap_elem.mac_cap_info[1] & 2088 IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION); 2089 2090 if (link_adap == 2) 2091 sta_ctxt_cmd.htc_flags |= 2092 cpu_to_le32(IWL_HE_HTC_LINK_ADAP_UNSOLICITED); 2093 else if (link_adap == 3) 2094 sta_ctxt_cmd.htc_flags |= 2095 cpu_to_le32(IWL_HE_HTC_LINK_ADAP_BOTH); 2096 } 2097 if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR) 2098 sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BSR_SUPP); 2099 if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[3] & 2100 IEEE80211_HE_MAC_CAP3_OMI_CONTROL) 2101 sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_OMI_SUPP); 2102 if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR) 2103 sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BQR_SUPP); 2104 2105 /* 2106 * Initialize the PPE thresholds to "None" (7), as described in Table 2107 * 9-262ac of 80211.ax/D3.0. 
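 *
 * pkt_ext_qam_th is indexed as [nss][bandwidth][threshold], where entry 0
 * holds PPET8 and entry 1 holds PPET16 (see the debug print further down).
 * An illustrative pair for the first NSS and the second bandwidth entry:
 *
 *	pkt_ext.pkt_ext_qam_th[0][1][0] = IWL_HE_PKT_EXT_BPSK;   PPET8
 *	pkt_ext.pkt_ext_qam_th[0][1][1] = IWL_HE_PKT_EXT_NONE;   PPET16
 *
 * After the memset below every entry starts out as "None", and only the
 * thresholds actually advertised by the peer (or derived from the nominal
 * packet padding) overwrite that default.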
2108 */ 2109 memset(&sta_ctxt_cmd.pkt_ext, IWL_HE_PKT_EXT_NONE, 2110 sizeof(sta_ctxt_cmd.pkt_ext)); 2111 2112 if (sta->deflink.eht_cap.has_eht) { 2113 nominal_padding = 2114 u8_get_bits(sta->deflink.eht_cap.eht_cap_elem.phy_cap_info[5], 2115 IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK); 2116 2117 /* If PPE Thresholds exists, parse them into a FW-familiar format. */ 2118 if (sta->deflink.eht_cap.eht_cap_elem.phy_cap_info[5] & 2119 IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT) { 2120 u8 nss = (sta->deflink.eht_cap.eht_ppe_thres[0] & 2121 IEEE80211_EHT_PPE_THRES_NSS_MASK) + 1; 2122 u8 *ppe = &sta->deflink.eht_cap.eht_ppe_thres[0]; 2123 u8 ru_index_bitmap = 2124 u16_get_bits(*ppe, 2125 IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK); 2126 /* Starting after PPE header */ 2127 u8 ppe_pos_bit = IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE; 2128 2129 iwl_mvm_parse_ppe(mvm, 2130 &sta_ctxt_cmd.pkt_ext, 2131 nss, ru_index_bitmap, ppe, 2132 ppe_pos_bit, true); 2133 flags |= STA_CTXT_HE_PACKET_EXT; 2134 /* EHT PPE Thresholds doesn't exist - set the API according to HE PPE Tresholds*/ 2135 } else if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[6] & 2136 IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) { 2137 struct iwl_he_pkt_ext_v2 *pkt_ext = 2138 &sta_ctxt_cmd.pkt_ext; 2139 2140 /* 2141 * Even though HE Capabilities IE doesn't contain PPE 2142 * Thresholds for BW 320Mhz, thresholds for this BW will 2143 * be filled in with the same values as 160Mhz, due to 2144 * the inheritance, as required. 2145 */ 2146 iwl_mvm_set_pkt_ext_from_he_ppe(mvm, sta, pkt_ext, 2147 true); 2148 2149 /* 2150 * According to the requirements, for MCSs 12-13 the maximum value between 2151 * HE PPE Threshold and Common Nominal Packet Padding needs to be taken 2152 */ 2153 iwl_mvm_get_optimal_ppe_info(pkt_ext, nominal_padding); 2154 2155 flags |= STA_CTXT_HE_PACKET_EXT; 2156 2157 /* 2158 * if PPE Thresholds doesn't present in both EHT IE and HE IE - 2159 * take the Thresholds from Common Nominal Packet Padding field 2160 */ 2161 } else { 2162 iwl_mvm_set_pkt_ext_from_nominal_padding(&sta_ctxt_cmd.pkt_ext, 2163 nominal_padding, 2164 &flags); 2165 } 2166 } else if (sta->deflink.he_cap.has_he) { 2167 /* If PPE Thresholds exist, parse them into a FW-familiar format. */ 2168 if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[6] & 2169 IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) { 2170 iwl_mvm_set_pkt_ext_from_he_ppe(mvm, sta, 2171 &sta_ctxt_cmd.pkt_ext, 2172 false); 2173 flags |= STA_CTXT_HE_PACKET_EXT; 2174 /* 2175 * PPE Thresholds doesn't exist - set the API PPE values 2176 * according to Common Nominal Packet Padding field. 
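 *
 * iwl_mvm_set_pkt_ext_from_nominal_padding() maps the padding value to a
 * single (PPET8, PPET16) pair used for every NSS/bandwidth, roughly:
 *
 *	0 us		-> (NONE, NONE)	no packet extension needed
 *	8 us		-> (BPSK, NONE)
 *	16 us / 20 us	-> (NONE, BPSK)
 *
 * Reserved padding values are simply skipped, as the check below shows.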
2177 */ 2178 } else { 2179 nominal_padding = 2180 u8_get_bits(sta->deflink.he_cap.he_cap_elem.phy_cap_info[9], 2181 IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK); 2182 if (nominal_padding != IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED) 2183 iwl_mvm_set_pkt_ext_from_nominal_padding(&sta_ctxt_cmd.pkt_ext, 2184 nominal_padding, 2185 &flags); 2186 } 2187 } 2188 2189 for (i = 0; i < MAX_HE_SUPP_NSS; i++) { 2190 int bw; 2191 2192 for (bw = 0; 2193 bw < ARRAY_SIZE(sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i]); 2194 bw++) { 2195 u8 *qam_th = 2196 &sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][0]; 2197 2198 IWL_DEBUG_HT(mvm, 2199 "PPE table: nss[%d] bw[%d] PPET8 = %d, PPET16 = %d\n", 2200 i, bw, qam_th[0], qam_th[1]); 2201 } 2202 } 2203 2204 if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] & 2205 IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP) 2206 flags |= STA_CTXT_HE_32BIT_BA_BITMAP; 2207 2208 if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] & 2209 IEEE80211_HE_MAC_CAP2_ACK_EN) 2210 flags |= STA_CTXT_HE_ACK_ENABLED; 2211 2212 rcu_read_unlock(); 2213 2214 /* Mark MU EDCA as enabled, unless none detected on some AC */ 2215 flags |= STA_CTXT_HE_MU_EDCA_CW; 2216 for (i = 0; i < IEEE80211_NUM_ACS; i++) { 2217 struct ieee80211_he_mu_edca_param_ac_rec *mu_edca = 2218 &mvmvif->queue_params[i].mu_edca_param_rec; 2219 u8 ac = iwl_mvm_mac80211_ac_to_ucode_ac(i); 2220 2221 if (!mvmvif->queue_params[i].mu_edca) { 2222 flags &= ~STA_CTXT_HE_MU_EDCA_CW; 2223 break; 2224 } 2225 2226 sta_ctxt_cmd.trig_based_txf[ac].cwmin = 2227 cpu_to_le16(mu_edca->ecw_min_max & 0xf); 2228 sta_ctxt_cmd.trig_based_txf[ac].cwmax = 2229 cpu_to_le16((mu_edca->ecw_min_max & 0xf0) >> 4); 2230 sta_ctxt_cmd.trig_based_txf[ac].aifsn = 2231 cpu_to_le16(mu_edca->aifsn); 2232 sta_ctxt_cmd.trig_based_txf[ac].mu_time = 2233 cpu_to_le16(mu_edca->mu_edca_timer); 2234 } 2235 2236 2237 if (vif->bss_conf.uora_exists) { 2238 flags |= STA_CTXT_HE_TRIG_RND_ALLOC; 2239 2240 sta_ctxt_cmd.rand_alloc_ecwmin = 2241 vif->bss_conf.uora_ocw_range & 0x7; 2242 sta_ctxt_cmd.rand_alloc_ecwmax = 2243 (vif->bss_conf.uora_ocw_range >> 3) & 0x7; 2244 } 2245 2246 if (own_he_cap && !(own_he_cap->he_cap_elem.mac_cap_info[2] & 2247 IEEE80211_HE_MAC_CAP2_ACK_EN)) 2248 flags |= STA_CTXT_HE_NIC_NOT_ACK_ENABLED; 2249 2250 if (vif->bss_conf.nontransmitted) { 2251 flags |= STA_CTXT_HE_REF_BSSID_VALID; 2252 ether_addr_copy(sta_ctxt_cmd.ref_bssid_addr, 2253 vif->bss_conf.transmitter_bssid); 2254 sta_ctxt_cmd.max_bssid_indicator = 2255 vif->bss_conf.bssid_indicator; 2256 sta_ctxt_cmd.bssid_index = vif->bss_conf.bssid_index; 2257 sta_ctxt_cmd.ema_ap = vif->bss_conf.ema_ap; 2258 sta_ctxt_cmd.profile_periodicity = 2259 vif->bss_conf.profile_periodicity; 2260 } 2261 2262 sta_ctxt_cmd.flags = cpu_to_le32(flags); 2263 2264 if (ver < 3) { 2265 /* fields before pkt_ext */ 2266 BUILD_BUG_ON(offsetof(typeof(sta_ctxt_cmd), pkt_ext) != 2267 offsetof(typeof(sta_ctxt_cmd_v2), pkt_ext)); 2268 memcpy(&sta_ctxt_cmd_v2, &sta_ctxt_cmd, 2269 offsetof(typeof(sta_ctxt_cmd), pkt_ext)); 2270 2271 /* pkt_ext */ 2272 for (i = 0; 2273 i < ARRAY_SIZE(sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th); 2274 i++) { 2275 u8 bw; 2276 2277 for (bw = 0; 2278 bw < ARRAY_SIZE(sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th[i]); 2279 bw++) { 2280 BUILD_BUG_ON(sizeof(sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw]) != 2281 sizeof(sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th[i][bw])); 2282 2283 memcpy(&sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th[i][bw], 2284 &sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw], 2285 
sizeof(sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw])); 2286 } 2287 } 2288 2289 /* fields after pkt_ext */ 2290 BUILD_BUG_ON(sizeof(sta_ctxt_cmd) - 2291 offsetofend(typeof(sta_ctxt_cmd), pkt_ext) != 2292 sizeof(sta_ctxt_cmd_v2) - 2293 offsetofend(typeof(sta_ctxt_cmd_v2), pkt_ext)); 2294 memcpy((u8 *)&sta_ctxt_cmd_v2 + 2295 offsetofend(typeof(sta_ctxt_cmd_v2), pkt_ext), 2296 (u8 *)&sta_ctxt_cmd + 2297 offsetofend(typeof(sta_ctxt_cmd), pkt_ext), 2298 sizeof(sta_ctxt_cmd) - 2299 offsetofend(typeof(sta_ctxt_cmd), pkt_ext)); 2300 sta_ctxt_cmd_v2.reserved3 = 0; 2301 } 2302 2303 if (iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, size, cmd)) 2304 IWL_ERR(mvm, "Failed to config FW to work HE!\n"); 2305 } 2306 2307 static void iwl_mvm_protect_assoc(struct iwl_mvm *mvm, 2308 struct ieee80211_vif *vif, 2309 u32 duration_override) 2310 { 2311 u32 duration = IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS; 2312 u32 min_duration = IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS; 2313 2314 if (duration_override > duration) 2315 duration = duration_override; 2316 2317 /* Try really hard to protect the session and hear a beacon 2318 * The new session protection command allows us to protect the 2319 * session for a much longer time since the firmware will internally 2320 * create two events: a 300TU one with a very high priority that 2321 * won't be fragmented which should be enough for 99% of the cases, 2322 * and another one (which we configure here to be 900TU long) which 2323 * will have a slightly lower priority, but more importantly, can be 2324 * fragmented so that it'll allow other activities to run. 2325 */ 2326 if (fw_has_capa(&mvm->fw->ucode_capa, 2327 IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) 2328 iwl_mvm_schedule_session_protection(mvm, vif, 900, 2329 min_duration, false); 2330 else 2331 iwl_mvm_protect_session(mvm, vif, duration, 2332 min_duration, 500, false); 2333 } 2334 2335 static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, 2336 struct ieee80211_vif *vif, 2337 struct ieee80211_bss_conf *bss_conf, 2338 u64 changes) 2339 { 2340 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2341 int ret; 2342 2343 /* 2344 * Re-calculate the tsf id, as the leader-follower relations depend 2345 * on the beacon interval, which was not known when the station 2346 * interface was added. 2347 */ 2348 if (changes & BSS_CHANGED_ASSOC && vif->cfg.assoc) { 2349 if ((vif->bss_conf.he_support && 2350 !iwlwifi_mod_params.disable_11ax) || 2351 (vif->bss_conf.eht_support && 2352 !iwlwifi_mod_params.disable_11be)) 2353 iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id); 2354 2355 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); 2356 } 2357 2358 /* Update MU EDCA params */ 2359 if (changes & BSS_CHANGED_QOS && mvmvif->associated && 2360 vif->cfg.assoc && 2361 ((vif->bss_conf.he_support && 2362 !iwlwifi_mod_params.disable_11ax) || 2363 (vif->bss_conf.eht_support && 2364 !iwlwifi_mod_params.disable_11be))) 2365 iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id); 2366 2367 /* 2368 * If we're not associated yet, take the (new) BSSID before associating 2369 * so the firmware knows. If we're already associated, then use the old 2370 * BSSID here, and we'll send a cleared one later in the CHANGED_ASSOC 2371 * branch for disassociation below. 
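 *
 * Condensed, the sequence below is (sketch only):
 *
 *	if ((changes & BSS_CHANGED_BSSID) && !mvmvif->associated)
 *		memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN);
 *	iwl_mvm_mac_ctxt_changed(mvm, vif, false, mvmvif->bssid);
 *	memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN);
 *
 * i.e. a not-yet-associated vif advertises the new BSSID to the firmware
 * right away, while an associated one keeps the old BSSID for this update
 * and adopts mac80211's value only afterwards.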
2372 */ 2373 if (changes & BSS_CHANGED_BSSID && !mvmvif->associated) 2374 memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN); 2375 2376 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, mvmvif->bssid); 2377 if (ret) 2378 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); 2379 2380 /* after sending it once, adopt mac80211 data */ 2381 memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN); 2382 mvmvif->associated = vif->cfg.assoc; 2383 2384 if (changes & BSS_CHANGED_ASSOC) { 2385 if (vif->cfg.assoc) { 2386 /* clear statistics to get clean beacon counter */ 2387 iwl_mvm_request_statistics(mvm, true); 2388 memset(&mvmvif->beacon_stats, 0, 2389 sizeof(mvmvif->beacon_stats)); 2390 2391 /* add quota for this interface */ 2392 ret = iwl_mvm_update_quotas(mvm, true, NULL); 2393 if (ret) { 2394 IWL_ERR(mvm, "failed to update quotas\n"); 2395 return; 2396 } 2397 2398 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, 2399 &mvm->status) && 2400 !fw_has_capa(&mvm->fw->ucode_capa, 2401 IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) { 2402 /* 2403 * If we're restarting then the firmware will 2404 * obviously have lost synchronisation with 2405 * the AP. It will attempt to synchronise by 2406 * itself, but we can make it more reliable by 2407 * scheduling a session protection time event. 2408 * 2409 * The firmware needs to receive a beacon to 2410 * catch up with synchronisation, use 110% of 2411 * the beacon interval. 2412 * 2413 * Set a large maximum delay to allow for more 2414 * than a single interface. 2415 * 2416 * For new firmware versions, rely on the 2417 * firmware. This is relevant for DCM scenarios 2418 * only anyway. 2419 */ 2420 u32 dur = (11 * vif->bss_conf.beacon_int) / 10; 2421 iwl_mvm_protect_session(mvm, vif, dur, dur, 2422 5 * dur, false); 2423 } else if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, 2424 &mvm->status) && 2425 !vif->bss_conf.dtim_period) { 2426 /* 2427 * If we're not restarting and still haven't 2428 * heard a beacon (dtim period unknown) then 2429 * make sure we still have enough minimum time 2430 * remaining in the time event, since the auth 2431 * might actually have taken quite a while 2432 * (especially for SAE) and so the remaining 2433 * time could be small without us having heard 2434 * a beacon yet. 2435 */ 2436 iwl_mvm_protect_assoc(mvm, vif, 0); 2437 } 2438 2439 iwl_mvm_sf_update(mvm, vif, false); 2440 iwl_mvm_power_vif_assoc(mvm, vif); 2441 if (vif->p2p) { 2442 iwl_mvm_update_smps(mvm, vif, 2443 IWL_MVM_SMPS_REQ_PROT, 2444 IEEE80211_SMPS_DYNAMIC); 2445 } 2446 } else if (mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) { 2447 iwl_mvm_mei_host_disassociated(mvm); 2448 /* 2449 * If update fails - SF might be running in associated 2450 * mode while disassociated - which is forbidden. 2451 */ 2452 ret = iwl_mvm_sf_update(mvm, vif, false); 2453 WARN_ONCE(ret && 2454 !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, 2455 &mvm->status), 2456 "Failed to update SF upon disassociation\n"); 2457 2458 /* 2459 * If we get an assert during the connection (after the 2460 * station has been added, but before the vif is set 2461 * to associated), mac80211 will re-add the station and 2462 * then configure the vif. Since the vif is not 2463 * associated, we would remove the station here and 2464 * this would fail the recovery. 
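 *
 * That is why the removal below is gated on not being in recovery,
 * i.e. (condensed sketch of the code that follows):
 *
 *	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
 *		iwl_mvm_sec_key_remove_ap(mvm, vif);
 *		iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id);
 *	}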
2465 */ 2466 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, 2467 &mvm->status)) { 2468 /* first remove remaining keys */ 2469 iwl_mvm_sec_key_remove_ap(mvm, vif); 2470 2471 /* 2472 * Remove AP station now that 2473 * the MAC is unassoc 2474 */ 2475 ret = iwl_mvm_rm_sta_id(mvm, vif, 2476 mvmvif->ap_sta_id); 2477 if (ret) 2478 IWL_ERR(mvm, 2479 "failed to remove AP station\n"); 2480 2481 mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; 2482 } 2483 2484 /* remove quota for this interface */ 2485 ret = iwl_mvm_update_quotas(mvm, false, NULL); 2486 if (ret) 2487 IWL_ERR(mvm, "failed to update quotas\n"); 2488 2489 /* this will take the cleared BSSID from bss_conf */ 2490 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 2491 if (ret) 2492 IWL_ERR(mvm, 2493 "failed to update MAC %pM (clear after unassoc)\n", 2494 vif->addr); 2495 } 2496 2497 /* 2498 * The firmware tracks the MU-MIMO group on its own. 2499 * However, on HW restart we should restore this data. 2500 */ 2501 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && 2502 (changes & BSS_CHANGED_MU_GROUPS) && vif->bss_conf.mu_mimo_owner) { 2503 ret = iwl_mvm_update_mu_groups(mvm, vif); 2504 if (ret) 2505 IWL_ERR(mvm, 2506 "failed to update VHT MU_MIMO groups\n"); 2507 } 2508 2509 iwl_mvm_recalc_multicast(mvm); 2510 2511 /* reset rssi values */ 2512 mvmvif->bf_data.ave_beacon_signal = 0; 2513 2514 iwl_mvm_bt_coex_vif_change(mvm); 2515 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT, 2516 IEEE80211_SMPS_AUTOMATIC); 2517 if (fw_has_capa(&mvm->fw->ucode_capa, 2518 IWL_UCODE_TLV_CAPA_UMAC_SCAN)) 2519 iwl_mvm_config_scan(mvm); 2520 } 2521 2522 if (changes & BSS_CHANGED_BEACON_INFO) { 2523 /* 2524 * We received a beacon from the associated AP so 2525 * remove the session protection. 2526 */ 2527 iwl_mvm_stop_session_protection(mvm, vif); 2528 2529 iwl_mvm_sf_update(mvm, vif, false); 2530 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); 2531 } 2532 2533 if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS | 2534 /* 2535 * Send power command on every beacon change, 2536 * because we may have not enabled beacon abort yet. 2537 */ 2538 BSS_CHANGED_BEACON_INFO)) { 2539 ret = iwl_mvm_power_update_mac(mvm); 2540 if (ret) 2541 IWL_ERR(mvm, "failed to update power mode\n"); 2542 } 2543 2544 if (changes & BSS_CHANGED_CQM) { 2545 IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n"); 2546 /* reset cqm events tracking */ 2547 mvmvif->bf_data.last_cqm_event = 0; 2548 if (mvmvif->bf_data.bf_enabled) { 2549 ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0); 2550 if (ret) 2551 IWL_ERR(mvm, 2552 "failed to update CQM thresholds\n"); 2553 } 2554 } 2555 2556 if (changes & BSS_CHANGED_BANDWIDTH) 2557 iwl_mvm_apply_fw_smps_request(vif); 2558 } 2559 2560 static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, 2561 struct ieee80211_vif *vif, 2562 struct ieee80211_bss_conf *link_conf) 2563 { 2564 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2565 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2566 int ret, i; 2567 2568 mutex_lock(&mvm->mutex); 2569 2570 /* Send the beacon template */ 2571 ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif); 2572 if (ret) 2573 goto out_unlock; 2574 2575 /* 2576 * Re-calculate the tsf id, as the leader-follower relations depend on 2577 * the beacon interval, which was not known when the AP interface 2578 * was added. 
2579 */ 2580 if (vif->type == NL80211_IFTYPE_AP) 2581 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); 2582 2583 mvmvif->ap_assoc_sta_count = 0; 2584 2585 /* Add the mac context */ 2586 ret = iwl_mvm_mac_ctxt_add(mvm, vif); 2587 if (ret) 2588 goto out_unlock; 2589 2590 /* Perform the binding */ 2591 ret = iwl_mvm_binding_add_vif(mvm, vif); 2592 if (ret) 2593 goto out_remove; 2594 2595 /* 2596 * This is not very nice, but the simplest: 2597 * For older FWs adding the mcast sta before the bcast station may 2598 * cause assert 0x2b00. 2599 * This is fixed in later FW so make the order of removal depend on 2600 * the TLV 2601 */ 2602 if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) { 2603 ret = iwl_mvm_add_mcast_sta(mvm, vif); 2604 if (ret) 2605 goto out_unbind; 2606 /* 2607 * Send the bcast station. At this stage the TBTT and DTIM time 2608 * events are added and applied to the scheduler 2609 */ 2610 ret = iwl_mvm_send_add_bcast_sta(mvm, vif); 2611 if (ret) { 2612 iwl_mvm_rm_mcast_sta(mvm, vif); 2613 goto out_unbind; 2614 } 2615 } else { 2616 /* 2617 * Send the bcast station. At this stage the TBTT and DTIM time 2618 * events are added and applied to the scheduler 2619 */ 2620 ret = iwl_mvm_send_add_bcast_sta(mvm, vif); 2621 if (ret) 2622 goto out_unbind; 2623 ret = iwl_mvm_add_mcast_sta(mvm, vif); 2624 if (ret) { 2625 iwl_mvm_send_rm_bcast_sta(mvm, vif); 2626 goto out_unbind; 2627 } 2628 } 2629 2630 /* must be set before quota calculations */ 2631 mvmvif->ap_ibss_active = true; 2632 2633 /* send all the early keys to the device now */ 2634 for (i = 0; i < ARRAY_SIZE(mvmvif->ap_early_keys); i++) { 2635 struct ieee80211_key_conf *key = mvmvif->ap_early_keys[i]; 2636 2637 if (!key) 2638 continue; 2639 2640 mvmvif->ap_early_keys[i] = NULL; 2641 2642 ret = __iwl_mvm_mac_set_key(hw, SET_KEY, vif, NULL, key); 2643 if (ret) 2644 goto out_quota_failed; 2645 } 2646 2647 if (vif->type == NL80211_IFTYPE_AP && !vif->p2p) { 2648 iwl_mvm_vif_set_low_latency(mvmvif, true, 2649 LOW_LATENCY_VIF_TYPE); 2650 iwl_mvm_send_low_latency_cmd(mvm, true, mvmvif->id); 2651 } 2652 2653 /* power updated needs to be done before quotas */ 2654 iwl_mvm_power_update_mac(mvm); 2655 2656 ret = iwl_mvm_update_quotas(mvm, false, NULL); 2657 if (ret) 2658 goto out_quota_failed; 2659 2660 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */ 2661 if (vif->p2p && mvm->p2p_device_vif) 2662 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL); 2663 2664 iwl_mvm_bt_coex_vif_change(mvm); 2665 2666 /* we don't support TDLS during DCM */ 2667 if (iwl_mvm_phy_ctx_count(mvm) > 1) 2668 iwl_mvm_teardown_tdls_peers(mvm); 2669 2670 iwl_mvm_ftm_restart_responder(mvm, vif); 2671 2672 goto out_unlock; 2673 2674 out_quota_failed: 2675 iwl_mvm_power_update_mac(mvm); 2676 mvmvif->ap_ibss_active = false; 2677 iwl_mvm_send_rm_bcast_sta(mvm, vif); 2678 iwl_mvm_rm_mcast_sta(mvm, vif); 2679 out_unbind: 2680 iwl_mvm_binding_remove_vif(mvm, vif); 2681 out_remove: 2682 iwl_mvm_mac_ctxt_remove(mvm, vif); 2683 out_unlock: 2684 mutex_unlock(&mvm->mutex); 2685 return ret; 2686 } 2687 2688 static int iwl_mvm_start_ap(struct ieee80211_hw *hw, 2689 struct ieee80211_vif *vif, 2690 struct ieee80211_bss_conf *link_conf) 2691 { 2692 return iwl_mvm_start_ap_ibss(hw, vif, link_conf); 2693 } 2694 2695 static int iwl_mvm_start_ibss(struct ieee80211_hw *hw, 2696 struct ieee80211_vif *vif) 2697 { 2698 return iwl_mvm_start_ap_ibss(hw, vif, &vif->bss_conf); 2699 } 2700 2701 static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw, 2702 
struct ieee80211_vif *vif, 2703 struct ieee80211_bss_conf *link_conf) 2704 { 2705 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2706 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2707 2708 iwl_mvm_prepare_mac_removal(mvm, vif); 2709 2710 mutex_lock(&mvm->mutex); 2711 2712 /* Handle AP stop while in CSA */ 2713 if (rcu_access_pointer(mvm->csa_vif) == vif) { 2714 iwl_mvm_remove_time_event(mvm, mvmvif, 2715 &mvmvif->time_event_data); 2716 RCU_INIT_POINTER(mvm->csa_vif, NULL); 2717 mvmvif->csa_countdown = false; 2718 } 2719 2720 if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) { 2721 RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL); 2722 mvm->csa_tx_block_bcn_timeout = 0; 2723 } 2724 2725 mvmvif->ap_ibss_active = false; 2726 mvm->ap_last_beacon_gp2 = 0; 2727 2728 if (vif->type == NL80211_IFTYPE_AP && !vif->p2p) { 2729 iwl_mvm_vif_set_low_latency(mvmvif, false, 2730 LOW_LATENCY_VIF_TYPE); 2731 iwl_mvm_send_low_latency_cmd(mvm, false, mvmvif->id); 2732 } 2733 2734 iwl_mvm_bt_coex_vif_change(mvm); 2735 2736 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */ 2737 if (vif->p2p && mvm->p2p_device_vif) 2738 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL); 2739 2740 iwl_mvm_update_quotas(mvm, false, NULL); 2741 2742 iwl_mvm_ftm_responder_clear(mvm, vif); 2743 2744 /* 2745 * This is not very nice, but the simplest: 2746 * For older FWs removing the mcast sta before the bcast station may 2747 * cause assert 0x2b00. 2748 * This is fixed in later FW (which will stop beaconing when removing 2749 * bcast station). 2750 * So make the order of removal depend on the TLV 2751 */ 2752 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) 2753 iwl_mvm_rm_mcast_sta(mvm, vif); 2754 iwl_mvm_send_rm_bcast_sta(mvm, vif); 2755 if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) 2756 iwl_mvm_rm_mcast_sta(mvm, vif); 2757 iwl_mvm_binding_remove_vif(mvm, vif); 2758 2759 iwl_mvm_power_update_mac(mvm); 2760 2761 iwl_mvm_mac_ctxt_remove(mvm, vif); 2762 2763 mutex_unlock(&mvm->mutex); 2764 } 2765 2766 static void iwl_mvm_stop_ap(struct ieee80211_hw *hw, 2767 struct ieee80211_vif *vif, 2768 struct ieee80211_bss_conf *link_conf) 2769 { 2770 iwl_mvm_stop_ap_ibss(hw, vif, link_conf); 2771 } 2772 2773 static void iwl_mvm_stop_ibss(struct ieee80211_hw *hw, 2774 struct ieee80211_vif *vif) 2775 { 2776 iwl_mvm_stop_ap_ibss(hw, vif, &vif->bss_conf); 2777 } 2778 2779 static void 2780 iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm, 2781 struct ieee80211_vif *vif, 2782 struct ieee80211_bss_conf *bss_conf, 2783 u64 changes) 2784 { 2785 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2786 2787 /* Changes will be applied when the AP/IBSS is started */ 2788 if (!mvmvif->ap_ibss_active) 2789 return; 2790 2791 if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT | 2792 BSS_CHANGED_BANDWIDTH | BSS_CHANGED_QOS) && 2793 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL)) 2794 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); 2795 2796 /* Need to send a new beacon template to the FW */ 2797 if (changes & BSS_CHANGED_BEACON && 2798 iwl_mvm_mac_ctxt_beacon_changed(mvm, vif)) 2799 IWL_WARN(mvm, "Failed updating beacon data\n"); 2800 2801 if (changes & BSS_CHANGED_FTM_RESPONDER) { 2802 int ret = iwl_mvm_ftm_start_responder(mvm, vif); 2803 2804 if (ret) 2805 IWL_WARN(mvm, "Failed to enable FTM responder (%d)\n", 2806 ret); 2807 } 2808 2809 } 2810 2811 static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw, 2812 struct ieee80211_vif *vif, 
2813 struct ieee80211_bss_conf *bss_conf, 2814 u64 changes) 2815 { 2816 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2817 2818 mutex_lock(&mvm->mutex); 2819 2820 if (changes & BSS_CHANGED_IDLE && !vif->cfg.idle) 2821 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true); 2822 2823 switch (vif->type) { 2824 case NL80211_IFTYPE_STATION: 2825 iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes); 2826 break; 2827 case NL80211_IFTYPE_AP: 2828 case NL80211_IFTYPE_ADHOC: 2829 iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes); 2830 break; 2831 case NL80211_IFTYPE_MONITOR: 2832 if (changes & BSS_CHANGED_MU_GROUPS) 2833 iwl_mvm_update_mu_groups(mvm, vif); 2834 break; 2835 default: 2836 /* shouldn't happen */ 2837 WARN_ON_ONCE(1); 2838 } 2839 2840 if (changes & BSS_CHANGED_TXPOWER) { 2841 IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d dBm\n", 2842 bss_conf->txpower); 2843 iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower); 2844 } 2845 2846 mutex_unlock(&mvm->mutex); 2847 } 2848 2849 static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw, 2850 struct ieee80211_vif *vif, 2851 struct ieee80211_scan_request *hw_req) 2852 { 2853 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2854 int ret; 2855 2856 if (hw_req->req.n_channels == 0 || 2857 hw_req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels) 2858 return -EINVAL; 2859 2860 mutex_lock(&mvm->mutex); 2861 ret = iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies); 2862 mutex_unlock(&mvm->mutex); 2863 2864 return ret; 2865 } 2866 2867 static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw, 2868 struct ieee80211_vif *vif) 2869 { 2870 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2871 2872 mutex_lock(&mvm->mutex); 2873 2874 /* Due to a race condition, it's possible that mac80211 asks 2875 * us to stop a hw_scan when it's already stopped. This can 2876 * happen, for instance, if we stopped the scan ourselves, 2877 * called ieee80211_scan_completed() and the userspace called 2878 * cancel scan scan before ieee80211_scan_work() could run. 2879 * To handle that, simply return if the scan is not running. 
2880 */ 2881 if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) 2882 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); 2883 2884 mutex_unlock(&mvm->mutex); 2885 } 2886 2887 static void 2888 iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw, 2889 struct ieee80211_sta *sta, u16 tids, 2890 int num_frames, 2891 enum ieee80211_frame_release_type reason, 2892 bool more_data) 2893 { 2894 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2895 2896 /* Called when we need to transmit (a) frame(s) from mac80211 */ 2897 2898 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames, 2899 tids, more_data, false); 2900 } 2901 2902 static void 2903 iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw, 2904 struct ieee80211_sta *sta, u16 tids, 2905 int num_frames, 2906 enum ieee80211_frame_release_type reason, 2907 bool more_data) 2908 { 2909 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2910 2911 /* Called when we need to transmit (a) frame(s) from agg or dqa queue */ 2912 2913 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames, 2914 tids, more_data, true); 2915 } 2916 2917 static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, 2918 enum sta_notify_cmd cmd, 2919 struct ieee80211_sta *sta) 2920 { 2921 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2922 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 2923 unsigned long txqs = 0, tids = 0; 2924 int tid; 2925 2926 /* 2927 * If we have TVQM then we get too high queue numbers - luckily 2928 * we really shouldn't get here with that because such hardware 2929 * should have firmware supporting buffer station offload. 2930 */ 2931 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) 2932 return; 2933 2934 spin_lock_bh(&mvmsta->lock); 2935 for (tid = 0; tid < ARRAY_SIZE(mvmsta->tid_data); tid++) { 2936 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; 2937 2938 if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE) 2939 continue; 2940 2941 __set_bit(tid_data->txq_id, &txqs); 2942 2943 if (iwl_mvm_tid_queued(mvm, tid_data) == 0) 2944 continue; 2945 2946 __set_bit(tid, &tids); 2947 } 2948 2949 switch (cmd) { 2950 case STA_NOTIFY_SLEEP: 2951 for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT) 2952 ieee80211_sta_set_buffered(sta, tid, true); 2953 2954 if (txqs) 2955 iwl_trans_freeze_txq_timer(mvm->trans, txqs, true); 2956 /* 2957 * The fw updates the STA to be asleep. Tx packets on the Tx 2958 * queues to this station will not be transmitted. The fw will 2959 * send a Tx response with TX_STATUS_FAIL_DEST_PS. 
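 *
 * The txqs/tids masks built above drive this path: e.g. a peer with
 * frames pending on TIDs 0 and 5 yields tids == BIT(0) | BIT(5), each of
 * those TIDs' queues is set in txqs, mac80211 is told the TIDs are
 * buffered via ieee80211_sta_set_buffered(), and the stuck-queue timers
 * for those queues are frozen so frames parked for a dozing peer do not
 * look like a hang (sketch of the intent, not additional behaviour).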
2960 */ 2961 break; 2962 case STA_NOTIFY_AWAKE: 2963 if (WARN_ON(mvmsta->sta_id == IWL_MVM_INVALID_STA)) 2964 break; 2965 2966 if (txqs) 2967 iwl_trans_freeze_txq_timer(mvm->trans, txqs, false); 2968 iwl_mvm_sta_modify_ps_wake(mvm, sta); 2969 break; 2970 default: 2971 break; 2972 } 2973 spin_unlock_bh(&mvmsta->lock); 2974 } 2975 2976 static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, 2977 struct ieee80211_vif *vif, 2978 enum sta_notify_cmd cmd, 2979 struct ieee80211_sta *sta) 2980 { 2981 __iwl_mvm_mac_sta_notify(hw, cmd, sta); 2982 } 2983 2984 void iwl_mvm_sta_pm_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) 2985 { 2986 struct iwl_rx_packet *pkt = rxb_addr(rxb); 2987 struct iwl_mvm_pm_state_notification *notif = (void *)pkt->data; 2988 struct ieee80211_sta *sta; 2989 struct iwl_mvm_sta *mvmsta; 2990 bool sleeping = (notif->type != IWL_MVM_PM_EVENT_AWAKE); 2991 2992 if (WARN_ON(notif->sta_id >= mvm->fw->ucode_capa.num_stations)) 2993 return; 2994 2995 rcu_read_lock(); 2996 sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]); 2997 if (WARN_ON(IS_ERR_OR_NULL(sta))) { 2998 rcu_read_unlock(); 2999 return; 3000 } 3001 3002 mvmsta = iwl_mvm_sta_from_mac80211(sta); 3003 3004 if (!mvmsta->vif || 3005 mvmsta->vif->type != NL80211_IFTYPE_AP) { 3006 rcu_read_unlock(); 3007 return; 3008 } 3009 3010 if (mvmsta->sleeping != sleeping) { 3011 mvmsta->sleeping = sleeping; 3012 __iwl_mvm_mac_sta_notify(mvm->hw, 3013 sleeping ? STA_NOTIFY_SLEEP : STA_NOTIFY_AWAKE, 3014 sta); 3015 ieee80211_sta_ps_transition(sta, sleeping); 3016 } 3017 3018 if (sleeping) { 3019 switch (notif->type) { 3020 case IWL_MVM_PM_EVENT_AWAKE: 3021 case IWL_MVM_PM_EVENT_ASLEEP: 3022 break; 3023 case IWL_MVM_PM_EVENT_UAPSD: 3024 ieee80211_sta_uapsd_trigger(sta, IEEE80211_NUM_TIDS); 3025 break; 3026 case IWL_MVM_PM_EVENT_PS_POLL: 3027 ieee80211_sta_pspoll(sta); 3028 break; 3029 default: 3030 break; 3031 } 3032 } 3033 3034 rcu_read_unlock(); 3035 } 3036 3037 static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw, 3038 struct ieee80211_vif *vif, 3039 struct ieee80211_sta *sta) 3040 { 3041 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3042 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 3043 3044 /* 3045 * This is called before mac80211 does RCU synchronisation, 3046 * so here we already invalidate our internal RCU-protected 3047 * station pointer. The rest of the code will thus no longer 3048 * be able to find the station this way, and we don't rely 3049 * on further RCU synchronisation after the sta_state() 3050 * callback deleted the station. 
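 *
 * Readers then see the poisoned pointer and back off, following the
 * pattern used throughout this file (sketch):
 *
 *	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
 *	if (IS_ERR_OR_NULL(sta))
 *		return;		poisoned or empty slot, nothing to do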
3051 */ 3052 mutex_lock(&mvm->mutex); 3053 if (sta == rcu_access_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id])) 3054 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], 3055 ERR_PTR(-ENOENT)); 3056 3057 mutex_unlock(&mvm->mutex); 3058 } 3059 3060 static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 3061 const u8 *bssid) 3062 { 3063 int i; 3064 3065 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { 3066 struct iwl_mvm_tcm_mac *mdata; 3067 3068 mdata = &mvm->tcm.data[iwl_mvm_vif_from_mac80211(vif)->id]; 3069 ewma_rate_init(&mdata->uapsd_nonagg_detect.rate); 3070 mdata->opened_rx_ba_sessions = false; 3071 } 3072 3073 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT)) 3074 return; 3075 3076 if (vif->p2p && !iwl_mvm_is_p2p_scm_uapsd_supported(mvm)) { 3077 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; 3078 return; 3079 } 3080 3081 if (!vif->p2p && 3082 (iwlwifi_mod_params.uapsd_disable & IWL_DISABLE_UAPSD_BSS)) { 3083 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; 3084 return; 3085 } 3086 3087 for (i = 0; i < IWL_MVM_UAPSD_NOAGG_LIST_LEN; i++) { 3088 if (ether_addr_equal(mvm->uapsd_noagg_bssids[i].addr, bssid)) { 3089 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; 3090 return; 3091 } 3092 } 3093 3094 vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; 3095 } 3096 3097 static void 3098 iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm, 3099 struct ieee80211_vif *vif, u8 *peer_addr, 3100 enum nl80211_tdls_operation action) 3101 { 3102 struct iwl_fw_dbg_trigger_tlv *trig; 3103 struct iwl_fw_dbg_trigger_tdls *tdls_trig; 3104 3105 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), 3106 FW_DBG_TRIGGER_TDLS); 3107 if (!trig) 3108 return; 3109 3110 tdls_trig = (void *)trig->data; 3111 3112 if (!(tdls_trig->action_bitmap & BIT(action))) 3113 return; 3114 3115 if (tdls_trig->peer_mode && 3116 memcmp(tdls_trig->peer, peer_addr, ETH_ALEN) != 0) 3117 return; 3118 3119 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, 3120 "TDLS event occurred, peer %pM, action %d", 3121 peer_addr, action); 3122 } 3123 3124 struct iwl_mvm_he_obss_narrow_bw_ru_data { 3125 bool tolerated; 3126 }; 3127 3128 static void iwl_mvm_check_he_obss_narrow_bw_ru_iter(struct wiphy *wiphy, 3129 struct cfg80211_bss *bss, 3130 void *_data) 3131 { 3132 struct iwl_mvm_he_obss_narrow_bw_ru_data *data = _data; 3133 const struct cfg80211_bss_ies *ies; 3134 const struct element *elem; 3135 3136 rcu_read_lock(); 3137 ies = rcu_dereference(bss->ies); 3138 elem = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY, ies->data, 3139 ies->len); 3140 3141 if (!elem || elem->datalen < 10 || 3142 !(elem->data[10] & 3143 WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT)) { 3144 data->tolerated = false; 3145 } 3146 rcu_read_unlock(); 3147 } 3148 3149 static void iwl_mvm_check_he_obss_narrow_bw_ru(struct ieee80211_hw *hw, 3150 struct ieee80211_vif *vif) 3151 { 3152 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3153 struct iwl_mvm_he_obss_narrow_bw_ru_data iter_data = { 3154 .tolerated = true, 3155 }; 3156 3157 if (!(vif->bss_conf.chandef.chan->flags & IEEE80211_CHAN_RADAR)) { 3158 mvmvif->he_ru_2mhz_block = false; 3159 return; 3160 } 3161 3162 cfg80211_bss_iter(hw->wiphy, &vif->bss_conf.chandef, 3163 iwl_mvm_check_he_obss_narrow_bw_ru_iter, 3164 &iter_data); 3165 3166 /* 3167 * If there is at least one AP on radar channel that cannot 3168 * tolerate 26-tone RU UL OFDMA transmissions using HE TB PPDU. 
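 * In that case the iterator above has cleared iter_data.tolerated, and
 * the result is latched just below:
 *
 *	mvmvif->he_ru_2mhz_block = !iter_data.tolerated;
 *
 * which iwl_mvm_cfg_he_sta() later maps to the STA_CTXT_HE_RU_2MHZ_BLOCK
 * flag sent to the firmware.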
3169 */ 3170 mvmvif->he_ru_2mhz_block = !iter_data.tolerated; 3171 } 3172 3173 static void iwl_mvm_reset_cca_40mhz_workaround(struct iwl_mvm *mvm, 3174 struct ieee80211_vif *vif) 3175 { 3176 struct ieee80211_supported_band *sband; 3177 const struct ieee80211_sta_he_cap *he_cap; 3178 3179 if (vif->type != NL80211_IFTYPE_STATION) 3180 return; 3181 3182 if (!mvm->cca_40mhz_workaround) 3183 return; 3184 3185 /* decrement and check that we reached zero */ 3186 mvm->cca_40mhz_workaround--; 3187 if (mvm->cca_40mhz_workaround) 3188 return; 3189 3190 sband = mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]; 3191 3192 sband->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; 3193 3194 he_cap = ieee80211_get_he_iftype_cap(sband, 3195 ieee80211_vif_type_p2p(vif)); 3196 3197 if (he_cap) { 3198 /* we know that ours is writable */ 3199 struct ieee80211_sta_he_cap *he = (void *)(uintptr_t)he_cap; 3200 3201 he->he_cap_elem.phy_cap_info[0] |= 3202 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G; 3203 } 3204 } 3205 3206 static void iwl_mvm_mei_host_associated(struct iwl_mvm *mvm, 3207 struct ieee80211_vif *vif, 3208 struct iwl_mvm_sta *mvm_sta) 3209 { 3210 #if IS_ENABLED(CONFIG_IWLMEI) 3211 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3212 struct iwl_mei_conn_info conn_info = { 3213 .ssid_len = vif->cfg.ssid_len, 3214 .channel = vif->bss_conf.chandef.chan->hw_value, 3215 }; 3216 3217 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) 3218 return; 3219 3220 if (!mvm->mei_registered) 3221 return; 3222 3223 switch (mvm_sta->pairwise_cipher) { 3224 case WLAN_CIPHER_SUITE_TKIP: 3225 conn_info.pairwise_cipher = IWL_MEI_CIPHER_TKIP; 3226 break; 3227 case WLAN_CIPHER_SUITE_CCMP: 3228 conn_info.pairwise_cipher = IWL_MEI_CIPHER_CCMP; 3229 break; 3230 case WLAN_CIPHER_SUITE_GCMP: 3231 conn_info.pairwise_cipher = IWL_MEI_CIPHER_GCMP; 3232 break; 3233 case WLAN_CIPHER_SUITE_GCMP_256: 3234 conn_info.pairwise_cipher = IWL_MEI_CIPHER_GCMP_256; 3235 break; 3236 case 0: 3237 /* open profile */ 3238 break; 3239 default: 3240 /* cipher not supported, don't send anything to iwlmei */ 3241 return; 3242 } 3243 3244 switch (mvmvif->rekey_data.akm) { 3245 case WLAN_AKM_SUITE_SAE & 0xff: 3246 conn_info.auth_mode = IWL_MEI_AKM_AUTH_SAE; 3247 break; 3248 case WLAN_AKM_SUITE_PSK & 0xff: 3249 conn_info.auth_mode = IWL_MEI_AKM_AUTH_RSNA_PSK; 3250 break; 3251 case WLAN_AKM_SUITE_8021X & 0xff: 3252 conn_info.auth_mode = IWL_MEI_AKM_AUTH_RSNA; 3253 break; 3254 case 0: 3255 /* open profile */ 3256 conn_info.auth_mode = IWL_MEI_AKM_AUTH_OPEN; 3257 break; 3258 default: 3259 /* auth method / AKM not supported */ 3260 /* TODO: All the FT vesions of these? */ 3261 return; 3262 } 3263 3264 memcpy(conn_info.ssid, vif->cfg.ssid, vif->cfg.ssid_len); 3265 memcpy(conn_info.bssid, vif->bss_conf.bssid, ETH_ALEN); 3266 3267 /* TODO: add support for collocated AP data */ 3268 iwl_mei_host_associated(&conn_info, NULL); 3269 #endif 3270 } 3271 3272 static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, 3273 struct ieee80211_vif *vif, 3274 struct ieee80211_sta *sta, 3275 enum ieee80211_sta_state old_state, 3276 enum ieee80211_sta_state new_state) 3277 { 3278 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3279 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3280 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 3281 int ret; 3282 3283 IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n", 3284 sta->addr, old_state, new_state); 3285 3286 /* this would be a mac80211 bug ... 
but don't crash */ 3287 if (WARN_ON_ONCE(!mvmvif->phy_ctxt)) 3288 return test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) ? 0 : -EINVAL; 3289 3290 /* 3291 * If we are in a STA removal flow and in DQA mode: 3292 * 3293 * This is after the sync_rcu part, so the queues have already been 3294 * flushed. No more TXs on their way in mac80211's path, and no more in 3295 * the queues. 3296 * Also, we won't be getting any new TX frames for this station. 3297 * What we might have are deferred TX frames that need to be taken care 3298 * of. 3299 * 3300 * Drop any still-queued deferred-frame before removing the STA, and 3301 * make sure the worker is no longer handling frames for this STA. 3302 */ 3303 if (old_state == IEEE80211_STA_NONE && 3304 new_state == IEEE80211_STA_NOTEXIST) { 3305 flush_work(&mvm->add_stream_wk); 3306 3307 /* 3308 * No need to make sure deferred TX indication is off since the 3309 * worker will already remove it if it was on 3310 */ 3311 3312 /* 3313 * Additionally, reset the 40 MHz capability if we disconnected 3314 * from the AP now. 3315 */ 3316 iwl_mvm_reset_cca_40mhz_workaround(mvm, vif); 3317 } 3318 3319 mutex_lock(&mvm->mutex); 3320 /* track whether or not the station is associated */ 3321 mvm_sta->sta_state = new_state; 3322 3323 if (old_state == IEEE80211_STA_NOTEXIST && 3324 new_state == IEEE80211_STA_NONE) { 3325 /* 3326 * Firmware bug - it'll crash if the beacon interval is less 3327 * than 16. We can't avoid connecting at all, so refuse the 3328 * station state change, this will cause mac80211 to abandon 3329 * attempts to connect to this AP, and eventually wpa_s will 3330 * blocklist the AP... 3331 */ 3332 if (vif->type == NL80211_IFTYPE_STATION && 3333 vif->bss_conf.beacon_int < 16) { 3334 IWL_ERR(mvm, 3335 "AP %pM beacon interval is %d, refusing due to firmware bug!\n", 3336 sta->addr, vif->bss_conf.beacon_int); 3337 ret = -EINVAL; 3338 goto out_unlock; 3339 } 3340 3341 if (vif->type == NL80211_IFTYPE_STATION) 3342 vif->bss_conf.he_support = sta->deflink.he_cap.has_he; 3343 3344 if (sta->tdls && 3345 (vif->p2p || 3346 iwl_mvm_tdls_sta_count(mvm, NULL) == 3347 IWL_MVM_TDLS_STA_COUNT || 3348 iwl_mvm_phy_ctx_count(mvm) > 1)) { 3349 IWL_DEBUG_MAC80211(mvm, "refusing TDLS sta\n"); 3350 ret = -EBUSY; 3351 goto out_unlock; 3352 } 3353 3354 ret = iwl_mvm_add_sta(mvm, vif, sta); 3355 if (sta->tdls && ret == 0) { 3356 iwl_mvm_recalc_tdls_state(mvm, vif, true); 3357 iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, 3358 NL80211_TDLS_SETUP); 3359 } 3360 3361 sta->deflink.agg.max_rc_amsdu_len = 1; 3362 } else if (old_state == IEEE80211_STA_NONE && 3363 new_state == IEEE80211_STA_AUTH) { 3364 /* 3365 * EBS may be disabled due to previous failures reported by FW. 3366 * Reset EBS status here assuming environment has been changed. 
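 *
 * last_ebs_successful feeds back into the scan code's decision whether to
 * request EBS, so the assignment below effectively re-arms EBS for the
 * next scan after a fresh authentication:
 *
 *	mvm->last_ebs_successful = true;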
3367 */ 3368 mvm->last_ebs_successful = true; 3369 iwl_mvm_check_uapsd(mvm, vif, sta->addr); 3370 ret = 0; 3371 } else if (old_state == IEEE80211_STA_AUTH && 3372 new_state == IEEE80211_STA_ASSOC) { 3373 if (vif->type == NL80211_IFTYPE_AP) { 3374 vif->bss_conf.he_support = sta->deflink.he_cap.has_he; 3375 mvmvif->ap_assoc_sta_count++; 3376 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 3377 if ((vif->bss_conf.he_support && 3378 !iwlwifi_mod_params.disable_11ax) || 3379 (vif->bss_conf.eht_support && 3380 !iwlwifi_mod_params.disable_11be)) 3381 iwl_mvm_cfg_he_sta(mvm, vif, mvm_sta->sta_id); 3382 } else if (vif->type == NL80211_IFTYPE_STATION) { 3383 vif->bss_conf.he_support = sta->deflink.he_cap.has_he; 3384 3385 mvmvif->he_ru_2mhz_block = false; 3386 if (sta->deflink.he_cap.has_he) 3387 iwl_mvm_check_he_obss_narrow_bw_ru(hw, vif); 3388 3389 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 3390 } 3391 3392 iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, 3393 false); 3394 ret = iwl_mvm_update_sta(mvm, vif, sta); 3395 } else if (old_state == IEEE80211_STA_ASSOC && 3396 new_state == IEEE80211_STA_AUTHORIZED) { 3397 ret = 0; 3398 3399 /* we don't support TDLS during DCM */ 3400 if (iwl_mvm_phy_ctx_count(mvm) > 1) 3401 iwl_mvm_teardown_tdls_peers(mvm); 3402 3403 if (sta->tdls) { 3404 iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, 3405 NL80211_TDLS_ENABLE_LINK); 3406 } else { 3407 /* enable beacon filtering */ 3408 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); 3409 3410 mvmvif->authorized = 1; 3411 3412 /* 3413 * Now that the station is authorized, i.e., keys were already 3414 * installed, need to indicate to the FW that 3415 * multicast data frames can be forwarded to the driver 3416 */ 3417 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 3418 iwl_mvm_mei_host_associated(mvm, vif, mvm_sta); 3419 } 3420 3421 iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, 3422 true); 3423 } else if (old_state == IEEE80211_STA_AUTHORIZED && 3424 new_state == IEEE80211_STA_ASSOC) { 3425 /* once we move into assoc state, need to update rate scale to 3426 * disable using wide bandwidth 3427 */ 3428 iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, 3429 false); 3430 if (!sta->tdls) { 3431 /* Multicast data frames are no longer allowed */ 3432 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 3433 3434 /* 3435 * Set this after the above iwl_mvm_mac_ctxt_changed() 3436 * to avoid sending high prio again for a little time. 
3437 */ 3438 mvmvif->authorized = 0; 3439 3440 /* disable beacon filtering */ 3441 ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); 3442 WARN_ON(ret && 3443 !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, 3444 &mvm->status)); 3445 } 3446 ret = 0; 3447 } else if (old_state == IEEE80211_STA_ASSOC && 3448 new_state == IEEE80211_STA_AUTH) { 3449 if (vif->type == NL80211_IFTYPE_AP) { 3450 mvmvif->ap_assoc_sta_count--; 3451 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 3452 } else if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) 3453 iwl_mvm_stop_session_protection(mvm, vif); 3454 ret = 0; 3455 } else if (old_state == IEEE80211_STA_AUTH && 3456 new_state == IEEE80211_STA_NONE) { 3457 ret = 0; 3458 } else if (old_state == IEEE80211_STA_NONE && 3459 new_state == IEEE80211_STA_NOTEXIST) { 3460 if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) 3461 iwl_mvm_stop_session_protection(mvm, vif); 3462 ret = iwl_mvm_rm_sta(mvm, vif, sta); 3463 if (sta->tdls) { 3464 iwl_mvm_recalc_tdls_state(mvm, vif, false); 3465 iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, 3466 NL80211_TDLS_DISABLE_LINK); 3467 } 3468 3469 if (unlikely(ret && 3470 test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, 3471 &mvm->status))) 3472 ret = 0; 3473 } else { 3474 ret = -EIO; 3475 } 3476 out_unlock: 3477 mutex_unlock(&mvm->mutex); 3478 3479 if (sta->tdls && ret == 0) { 3480 if (old_state == IEEE80211_STA_NOTEXIST && 3481 new_state == IEEE80211_STA_NONE) 3482 ieee80211_reserve_tid(sta, IWL_MVM_TDLS_FW_TID); 3483 else if (old_state == IEEE80211_STA_NONE && 3484 new_state == IEEE80211_STA_NOTEXIST) 3485 ieee80211_unreserve_tid(sta, IWL_MVM_TDLS_FW_TID); 3486 } 3487 3488 return ret; 3489 } 3490 3491 static int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value) 3492 { 3493 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3494 3495 mvm->rts_threshold = value; 3496 3497 return 0; 3498 } 3499 3500 static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw, 3501 struct ieee80211_vif *vif, 3502 struct ieee80211_sta *sta, u32 changed) 3503 { 3504 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3505 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3506 3507 if (changed & (IEEE80211_RC_BW_CHANGED | 3508 IEEE80211_RC_SUPP_RATES_CHANGED | 3509 IEEE80211_RC_NSS_CHANGED)) 3510 iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, 3511 true); 3512 3513 if (vif->type == NL80211_IFTYPE_STATION && 3514 changed & IEEE80211_RC_NSS_CHANGED) 3515 iwl_mvm_sf_update(mvm, vif, false); 3516 } 3517 3518 static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw, 3519 struct ieee80211_vif *vif, 3520 unsigned int link_id, u16 ac, 3521 const struct ieee80211_tx_queue_params *params) 3522 { 3523 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3524 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3525 3526 mvmvif->queue_params[ac] = *params; 3527 3528 /* 3529 * No need to update right away, we'll get BSS_CHANGED_QOS 3530 * The exception is P2P_DEVICE interface which needs immediate update. 
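 *
 * In other words the parameters are only cached here,
 *
 *	mvmvif->queue_params[ac] = *params;
 *
 * and reach the firmware as part of the MAC context update done when the
 * BSS_CHANGED_QOS change arrives; only a P2P_DEVICE vif (handled below)
 * pushes the MAC context immediately.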
3531 */ 3532 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { 3533 int ret; 3534 3535 mutex_lock(&mvm->mutex); 3536 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 3537 mutex_unlock(&mvm->mutex); 3538 return ret; 3539 } 3540 return 0; 3541 } 3542 3543 static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw, 3544 struct ieee80211_vif *vif, 3545 struct ieee80211_prep_tx_info *info) 3546 { 3547 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3548 3549 mutex_lock(&mvm->mutex); 3550 iwl_mvm_protect_assoc(mvm, vif, info->duration); 3551 mutex_unlock(&mvm->mutex); 3552 } 3553 3554 static void iwl_mvm_mac_mgd_complete_tx(struct ieee80211_hw *hw, 3555 struct ieee80211_vif *vif, 3556 struct ieee80211_prep_tx_info *info) 3557 { 3558 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3559 3560 /* for successful cases (auth/assoc), don't cancel session protection */ 3561 if (info->success) 3562 return; 3563 3564 mutex_lock(&mvm->mutex); 3565 iwl_mvm_stop_session_protection(mvm, vif); 3566 mutex_unlock(&mvm->mutex); 3567 } 3568 3569 static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw, 3570 struct ieee80211_vif *vif, 3571 struct cfg80211_sched_scan_request *req, 3572 struct ieee80211_scan_ies *ies) 3573 { 3574 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3575 3576 int ret; 3577 3578 mutex_lock(&mvm->mutex); 3579 3580 if (!vif->cfg.idle) { 3581 ret = -EBUSY; 3582 goto out; 3583 } 3584 3585 ret = iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED); 3586 3587 out: 3588 mutex_unlock(&mvm->mutex); 3589 return ret; 3590 } 3591 3592 static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw, 3593 struct ieee80211_vif *vif) 3594 { 3595 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3596 int ret; 3597 3598 mutex_lock(&mvm->mutex); 3599 3600 /* Due to a race condition, it's possible that mac80211 asks 3601 * us to stop a sched_scan when it's already stopped. This 3602 * can happen, for instance, if we stopped the scan ourselves, 3603 * called ieee80211_sched_scan_stopped() and the userspace called 3604 * stop sched scan scan before ieee80211_sched_scan_stopped_work() 3605 * could run. To handle this, simply return if the scan is 3606 * not running. 
3607 */ 3608 if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) { 3609 mutex_unlock(&mvm->mutex); 3610 return 0; 3611 } 3612 3613 ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, false); 3614 mutex_unlock(&mvm->mutex); 3615 iwl_mvm_wait_for_async_handlers(mvm); 3616 3617 return ret; 3618 } 3619 3620 static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, 3621 enum set_key_cmd cmd, 3622 struct ieee80211_vif *vif, 3623 struct ieee80211_sta *sta, 3624 struct ieee80211_key_conf *key) 3625 { 3626 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3627 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3628 struct iwl_mvm_sta *mvmsta = NULL; 3629 struct iwl_mvm_key_pn *ptk_pn; 3630 int keyidx = key->keyidx; 3631 u32 sec_key_id = WIDE_ID(DATA_PATH_GROUP, SEC_KEY_CMD); 3632 u8 sec_key_ver = iwl_fw_lookup_cmd_ver(mvm->fw, sec_key_id, 0); 3633 int ret, i; 3634 u8 key_offset; 3635 3636 if (sta) 3637 mvmsta = iwl_mvm_sta_from_mac80211(sta); 3638 3639 switch (key->cipher) { 3640 case WLAN_CIPHER_SUITE_TKIP: 3641 if (!mvm->trans->trans_cfg->gen2) { 3642 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; 3643 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; 3644 } else if (vif->type == NL80211_IFTYPE_STATION) { 3645 key->flags |= IEEE80211_KEY_FLAG_PUT_MIC_SPACE; 3646 } else { 3647 IWL_DEBUG_MAC80211(mvm, "Use SW encryption for TKIP\n"); 3648 return -EOPNOTSUPP; 3649 } 3650 break; 3651 case WLAN_CIPHER_SUITE_CCMP: 3652 case WLAN_CIPHER_SUITE_GCMP: 3653 case WLAN_CIPHER_SUITE_GCMP_256: 3654 if (!iwl_mvm_has_new_tx_api(mvm)) 3655 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; 3656 break; 3657 case WLAN_CIPHER_SUITE_AES_CMAC: 3658 case WLAN_CIPHER_SUITE_BIP_GMAC_128: 3659 case WLAN_CIPHER_SUITE_BIP_GMAC_256: 3660 WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE)); 3661 break; 3662 case WLAN_CIPHER_SUITE_WEP40: 3663 case WLAN_CIPHER_SUITE_WEP104: 3664 if (vif->type == NL80211_IFTYPE_STATION) 3665 break; 3666 if (iwl_mvm_has_new_tx_api(mvm)) 3667 return -EOPNOTSUPP; 3668 /* support HW crypto on TX */ 3669 return 0; 3670 default: 3671 return -EOPNOTSUPP; 3672 } 3673 3674 switch (cmd) { 3675 case SET_KEY: 3676 if (keyidx == 6 || keyidx == 7) 3677 rcu_assign_pointer(mvmvif->bcn_prot.keys[keyidx - 6], 3678 key); 3679 3680 if ((vif->type == NL80211_IFTYPE_ADHOC || 3681 vif->type == NL80211_IFTYPE_AP) && !sta) { 3682 /* 3683 * GTK on AP interface is a TX-only key, return 0; 3684 * on IBSS they're per-station and because we're lazy 3685 * we don't support them for RX, so do the same. 3686 * CMAC/GMAC in AP/IBSS modes must be done in software. 3687 */ 3688 if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC || 3689 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || 3690 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) { 3691 ret = -EOPNOTSUPP; 3692 break; 3693 } 3694 3695 if (key->cipher != WLAN_CIPHER_SUITE_GCMP && 3696 key->cipher != WLAN_CIPHER_SUITE_GCMP_256 && 3697 !iwl_mvm_has_new_tx_api(mvm)) { 3698 key->hw_key_idx = STA_KEY_IDX_INVALID; 3699 ret = 0; 3700 break; 3701 } 3702 3703 if (!mvmvif->ap_ibss_active) { 3704 for (i = 0; 3705 i < ARRAY_SIZE(mvmvif->ap_early_keys); 3706 i++) { 3707 if (!mvmvif->ap_early_keys[i]) { 3708 mvmvif->ap_early_keys[i] = key; 3709 break; 3710 } 3711 } 3712 3713 if (i >= ARRAY_SIZE(mvmvif->ap_early_keys)) 3714 ret = -ENOSPC; 3715 else 3716 ret = 0; 3717 3718 break; 3719 } 3720 } 3721 3722 /* During FW restart, in order to restore the state as it was, 3723 * don't try to reprogram keys we previously failed for. 
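 *
 * Such keys are recognizable because the SET_KEY error path below left
 * them with
 *
 *	key->hw_key_idx = STA_KEY_IDX_INVALID;
 *
 * so during restart they are acknowledged with ret = 0 but never sent to
 * the firmware again.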
3724 */ 3725 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && 3726 key->hw_key_idx == STA_KEY_IDX_INVALID) { 3727 IWL_DEBUG_MAC80211(mvm, 3728 "skip invalid idx key programming during restart\n"); 3729 ret = 0; 3730 break; 3731 } 3732 3733 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && 3734 mvmsta && iwl_mvm_has_new_rx_api(mvm) && 3735 key->flags & IEEE80211_KEY_FLAG_PAIRWISE && 3736 (key->cipher == WLAN_CIPHER_SUITE_CCMP || 3737 key->cipher == WLAN_CIPHER_SUITE_GCMP || 3738 key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) { 3739 struct ieee80211_key_seq seq; 3740 int tid, q; 3741 3742 WARN_ON(rcu_access_pointer(mvmsta->ptk_pn[keyidx])); 3743 ptk_pn = kzalloc(struct_size(ptk_pn, q, 3744 mvm->trans->num_rx_queues), 3745 GFP_KERNEL); 3746 if (!ptk_pn) { 3747 ret = -ENOMEM; 3748 break; 3749 } 3750 3751 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { 3752 ieee80211_get_key_rx_seq(key, tid, &seq); 3753 for (q = 0; q < mvm->trans->num_rx_queues; q++) 3754 memcpy(ptk_pn->q[q].pn[tid], 3755 seq.ccmp.pn, 3756 IEEE80211_CCMP_PN_LEN); 3757 } 3758 3759 rcu_assign_pointer(mvmsta->ptk_pn[keyidx], ptk_pn); 3760 } 3761 3762 /* in HW restart reuse the index, otherwise request a new one */ 3763 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) 3764 key_offset = key->hw_key_idx; 3765 else 3766 key_offset = STA_KEY_IDX_INVALID; 3767 3768 if (mvmsta && key->flags & IEEE80211_KEY_FLAG_PAIRWISE) 3769 mvmsta->pairwise_cipher = key->cipher; 3770 3771 IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n"); 3772 3773 if (sec_key_ver) 3774 ret = iwl_mvm_sec_key_add(mvm, vif, sta, key); 3775 else 3776 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset); 3777 3778 if (ret) { 3779 IWL_WARN(mvm, "set key failed\n"); 3780 key->hw_key_idx = STA_KEY_IDX_INVALID; 3781 /* 3782 * can't add key for RX, but we don't need it 3783 * in the device for TX so still return 0, 3784 * unless we have new TX API where we cannot 3785 * put key material into the TX_CMD 3786 */ 3787 if (iwl_mvm_has_new_tx_api(mvm)) 3788 ret = -EOPNOTSUPP; 3789 else 3790 ret = 0; 3791 } 3792 3793 break; 3794 case DISABLE_KEY: 3795 if (keyidx == 6 || keyidx == 7) 3796 RCU_INIT_POINTER(mvmvif->bcn_prot.keys[keyidx - 6], 3797 NULL); 3798 3799 ret = -ENOENT; 3800 for (i = 0; i < ARRAY_SIZE(mvmvif->ap_early_keys); i++) { 3801 if (mvmvif->ap_early_keys[i] == key) { 3802 mvmvif->ap_early_keys[i] = NULL; 3803 ret = 0; 3804 } 3805 } 3806 3807 /* found in pending list - don't do anything else */ 3808 if (ret == 0) 3809 break; 3810 3811 if (key->hw_key_idx == STA_KEY_IDX_INVALID) { 3812 ret = 0; 3813 break; 3814 } 3815 3816 if (mvmsta && iwl_mvm_has_new_rx_api(mvm) && 3817 key->flags & IEEE80211_KEY_FLAG_PAIRWISE && 3818 (key->cipher == WLAN_CIPHER_SUITE_CCMP || 3819 key->cipher == WLAN_CIPHER_SUITE_GCMP || 3820 key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) { 3821 ptk_pn = rcu_dereference_protected( 3822 mvmsta->ptk_pn[keyidx], 3823 lockdep_is_held(&mvm->mutex)); 3824 RCU_INIT_POINTER(mvmsta->ptk_pn[keyidx], NULL); 3825 if (ptk_pn) 3826 kfree_rcu(ptk_pn, rcu_head); 3827 } 3828 3829 IWL_DEBUG_MAC80211(mvm, "disable hwcrypto key\n"); 3830 if (sec_key_ver) 3831 ret = iwl_mvm_sec_key_del(mvm, vif, sta, key); 3832 else 3833 ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key); 3834 break; 3835 default: 3836 ret = -EINVAL; 3837 } 3838 3839 return ret; 3840 } 3841 3842 static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, 3843 enum set_key_cmd cmd, 3844 struct ieee80211_vif *vif, 3845 struct ieee80211_sta *sta, 3846 struct ieee80211_key_conf *key) 3847 { 
3848 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3849 int ret; 3850 3851 mutex_lock(&mvm->mutex); 3852 ret = __iwl_mvm_mac_set_key(hw, cmd, vif, sta, key); 3853 mutex_unlock(&mvm->mutex); 3854 3855 return ret; 3856 } 3857 3858 static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw, 3859 struct ieee80211_vif *vif, 3860 struct ieee80211_key_conf *keyconf, 3861 struct ieee80211_sta *sta, 3862 u32 iv32, u16 *phase1key) 3863 { 3864 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3865 3866 if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID) 3867 return; 3868 3869 iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key); 3870 } 3871 3872 3873 static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait, 3874 struct iwl_rx_packet *pkt, void *data) 3875 { 3876 struct iwl_mvm *mvm = 3877 container_of(notif_wait, struct iwl_mvm, notif_wait); 3878 struct iwl_hs20_roc_res *resp; 3879 int resp_len = iwl_rx_packet_payload_len(pkt); 3880 struct iwl_mvm_time_event_data *te_data = data; 3881 3882 if (WARN_ON(pkt->hdr.cmd != HOT_SPOT_CMD)) 3883 return true; 3884 3885 if (WARN_ON_ONCE(resp_len != sizeof(*resp))) { 3886 IWL_ERR(mvm, "Invalid HOT_SPOT_CMD response\n"); 3887 return true; 3888 } 3889 3890 resp = (void *)pkt->data; 3891 3892 IWL_DEBUG_TE(mvm, 3893 "Aux ROC: Received response from ucode: status=%d uid=%d\n", 3894 resp->status, resp->event_unique_id); 3895 3896 te_data->uid = le32_to_cpu(resp->event_unique_id); 3897 IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n", 3898 te_data->uid); 3899 3900 spin_lock_bh(&mvm->time_event_lock); 3901 list_add_tail(&te_data->list, &mvm->aux_roc_te_list); 3902 spin_unlock_bh(&mvm->time_event_lock); 3903 3904 return true; 3905 } 3906 3907 #define AUX_ROC_MIN_DURATION MSEC_TO_TU(100) 3908 #define AUX_ROC_MIN_DELAY MSEC_TO_TU(200) 3909 #define AUX_ROC_MAX_DELAY MSEC_TO_TU(600) 3910 #define AUX_ROC_SAFETY_BUFFER MSEC_TO_TU(20) 3911 #define AUX_ROC_MIN_SAFETY_BUFFER MSEC_TO_TU(10) 3912 static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, 3913 struct ieee80211_channel *channel, 3914 struct ieee80211_vif *vif, 3915 int duration) 3916 { 3917 int res; 3918 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3919 struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data; 3920 static const u16 time_event_response[] = { HOT_SPOT_CMD }; 3921 struct iwl_notification_wait wait_time_event; 3922 u32 dtim_interval = vif->bss_conf.dtim_period * 3923 vif->bss_conf.beacon_int; 3924 u32 req_dur, delay; 3925 struct iwl_hs20_roc_req aux_roc_req = { 3926 .action = cpu_to_le32(FW_CTXT_ACTION_ADD), 3927 .id_and_color = 3928 cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)), 3929 .sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id), 3930 }; 3931 struct iwl_hs20_roc_req_tail *tail = iwl_mvm_chan_info_cmd_tail(mvm, 3932 &aux_roc_req.channel_info); 3933 u16 len = sizeof(aux_roc_req) - iwl_mvm_chan_info_padding(mvm); 3934 3935 /* Set the channel info data */ 3936 iwl_mvm_set_chan_info(mvm, &aux_roc_req.channel_info, channel->hw_value, 3937 iwl_mvm_phy_band_from_nl80211(channel->band), 3938 IWL_PHY_CHANNEL_MODE20, 3939 0); 3940 3941 /* Set the time and duration */ 3942 tail->apply_time = cpu_to_le32(iwl_mvm_get_systime(mvm)); 3943 3944 delay = AUX_ROC_MIN_DELAY; 3945 req_dur = MSEC_TO_TU(duration); 3946 3947 /* 3948 * If we are associated we want the delay time to be at least one 3949 * dtim interval so that the FW can wait until after the DTIM and 3950 * then start the time event, this will potentially allow us to 3951 * remain 
off-channel for the max duration. 3952 * Since we want to use almost a whole dtim interval we would also 3953 * like the delay to be for 2-3 dtim intervals, in case there are 3954 * other time events with higher priority. 3955 */ 3956 if (vif->cfg.assoc) { 3957 delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY); 3958 /* We cannot remain off-channel longer than the DTIM interval */ 3959 if (dtim_interval <= req_dur) { 3960 req_dur = dtim_interval - AUX_ROC_SAFETY_BUFFER; 3961 if (req_dur <= AUX_ROC_MIN_DURATION) 3962 req_dur = dtim_interval - 3963 AUX_ROC_MIN_SAFETY_BUFFER; 3964 } 3965 } 3966 3967 tail->duration = cpu_to_le32(req_dur); 3968 tail->apply_time_max_delay = cpu_to_le32(delay); 3969 3970 IWL_DEBUG_TE(mvm, 3971 "ROC: Requesting to remain on channel %u for %ums\n", 3972 channel->hw_value, req_dur); 3973 IWL_DEBUG_TE(mvm, 3974 "\t(requested = %ums, max_delay = %ums, dtim_interval = %ums)\n", 3975 duration, delay, dtim_interval); 3976 3977 /* Set the node address */ 3978 memcpy(tail->node_addr, vif->addr, ETH_ALEN); 3979 3980 lockdep_assert_held(&mvm->mutex); 3981 3982 spin_lock_bh(&mvm->time_event_lock); 3983 3984 if (WARN_ON(te_data->id == HOT_SPOT_CMD)) { 3985 spin_unlock_bh(&mvm->time_event_lock); 3986 return -EIO; 3987 } 3988 3989 te_data->vif = vif; 3990 te_data->duration = duration; 3991 te_data->id = HOT_SPOT_CMD; 3992 3993 spin_unlock_bh(&mvm->time_event_lock); 3994 3995 /* 3996 * Use a notification wait, which really just processes the 3997 * command response and doesn't wait for anything, in order 3998 * to be able to process the response and get the UID inside 3999 * the RX path. Using CMD_WANT_SKB doesn't work because it 4000 * stores the buffer and then wakes up this thread, by which 4001 * time another notification (that the time event started) 4002 * might already be processed unsuccessfully. 4003 */ 4004 iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event, 4005 time_event_response, 4006 ARRAY_SIZE(time_event_response), 4007 iwl_mvm_rx_aux_roc, te_data); 4008 4009 res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, len, 4010 &aux_roc_req); 4011 4012 if (res) { 4013 IWL_ERR(mvm, "Couldn't send HOT_SPOT_CMD: %d\n", res); 4014 iwl_remove_notification(&mvm->notif_wait, &wait_time_event); 4015 goto out_clear_te; 4016 } 4017 4018 /* No need to wait for anything, so just pass 1 (0 isn't valid) */ 4019 res = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1); 4020 /* should never fail */ 4021 WARN_ON_ONCE(res); 4022 4023 if (res) { 4024 out_clear_te: 4025 spin_lock_bh(&mvm->time_event_lock); 4026 iwl_mvm_te_clear_data(mvm, te_data); 4027 spin_unlock_bh(&mvm->time_event_lock); 4028 } 4029 4030 return res; 4031 } 4032 4033 static int iwl_mvm_roc(struct ieee80211_hw *hw, 4034 struct ieee80211_vif *vif, 4035 struct ieee80211_channel *channel, 4036 int duration, 4037 enum ieee80211_roc_type type) 4038 { 4039 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4040 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 4041 struct cfg80211_chan_def chandef; 4042 struct iwl_mvm_phy_ctxt *phy_ctxt; 4043 bool band_change_removal; 4044 int ret, i; 4045 4046 IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value, 4047 duration, type); 4048 4049 /* 4050 * Flush the done work, just in case it's still pending, so that 4051 * the work it does can complete and we can accept new frames. 
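 * (roc_done_wk tears down the previous remain-on-channel session; it is
 * presumably queued when the previous time event ends, so flushing it here
 * keeps this new request from racing that teardown.)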
4052 */ 4053 flush_work(&mvm->roc_done_wk); 4054 4055 mutex_lock(&mvm->mutex); 4056 4057 switch (vif->type) { 4058 case NL80211_IFTYPE_STATION: 4059 if (fw_has_capa(&mvm->fw->ucode_capa, 4060 IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) { 4061 /* Use aux roc framework (HS20) */ 4062 if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12) { 4063 u32 lmac_id; 4064 4065 lmac_id = iwl_mvm_get_lmac_id(mvm->fw, 4066 channel->band); 4067 ret = iwl_mvm_add_aux_sta(mvm, lmac_id); 4068 if (WARN(ret, 4069 "Failed to allocate aux station")) 4070 goto out_unlock; 4071 } 4072 ret = iwl_mvm_send_aux_roc_cmd(mvm, channel, 4073 vif, duration); 4074 goto out_unlock; 4075 } 4076 IWL_ERR(mvm, "hotspot not supported\n"); 4077 ret = -EINVAL; 4078 goto out_unlock; 4079 case NL80211_IFTYPE_P2P_DEVICE: 4080 /* handle below */ 4081 break; 4082 default: 4083 IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type); 4084 ret = -EINVAL; 4085 goto out_unlock; 4086 } 4087 4088 for (i = 0; i < NUM_PHY_CTX; i++) { 4089 phy_ctxt = &mvm->phy_ctxts[i]; 4090 if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt) 4091 continue; 4092 4093 if (phy_ctxt->ref && channel == phy_ctxt->channel) { 4094 /* 4095 * Unbind the P2P_DEVICE from the current PHY context, 4096 * and if the PHY context is not used remove it. 4097 */ 4098 ret = iwl_mvm_binding_remove_vif(mvm, vif); 4099 if (WARN(ret, "Failed unbinding P2P_DEVICE\n")) 4100 goto out_unlock; 4101 4102 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); 4103 4104 /* Bind the P2P_DEVICE to the current PHY Context */ 4105 mvmvif->phy_ctxt = phy_ctxt; 4106 4107 ret = iwl_mvm_binding_add_vif(mvm, vif); 4108 if (WARN(ret, "Failed binding P2P_DEVICE\n")) 4109 goto out_unlock; 4110 4111 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); 4112 goto schedule_time_event; 4113 } 4114 } 4115 4116 /* Need to update the PHY context only if the ROC channel changed */ 4117 if (channel == mvmvif->phy_ctxt->channel) 4118 goto schedule_time_event; 4119 4120 cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT); 4121 4122 /* 4123 * Check if the remain-on-channel is on a different band and that 4124 * requires context removal, see iwl_mvm_phy_ctxt_changed(). If 4125 * so, we'll need to release and then re-configure here, since we 4126 * must not remove a PHY context that's part of a binding. 4127 */ 4128 band_change_removal = 4129 fw_has_capa(&mvm->fw->ucode_capa, 4130 IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) && 4131 mvmvif->phy_ctxt->channel->band != chandef.chan->band; 4132 4133 if (mvmvif->phy_ctxt->ref == 1 && !band_change_removal) { 4134 /* 4135 * Change the PHY context configuration as it is currently 4136 * referenced only by the P2P Device MAC (and we can modify it) 4137 */ 4138 ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt, 4139 &chandef, 1, 1); 4140 if (ret) 4141 goto out_unlock; 4142 } else { 4143 /* 4144 * The PHY context is shared with other MACs (or we're trying to 4145 * switch bands), so remove the P2P Device from the binding, 4146 * allocate a new PHY context and create a new binding.
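 * (The sequence below mirrors the rebind in the matching-channel case
 * above: configure a freshly allocated PHY context, unbind the vif and
 * drop the old context's reference, point mvmvif->phy_ctxt at the new
 * context, rebind and take a reference on it.)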
4147 */ 4148 phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); 4149 if (!phy_ctxt) { 4150 ret = -ENOSPC; 4151 goto out_unlock; 4152 } 4153 4154 ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef, 4155 1, 1); 4156 if (ret) { 4157 IWL_ERR(mvm, "Failed to change PHY context\n"); 4158 goto out_unlock; 4159 } 4160 4161 /* Unbind the P2P_DEVICE from the current PHY context */ 4162 ret = iwl_mvm_binding_remove_vif(mvm, vif); 4163 if (WARN(ret, "Failed unbinding P2P_DEVICE\n")) 4164 goto out_unlock; 4165 4166 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); 4167 4168 /* Bind the P2P_DEVICE to the newly allocated PHY context */ 4169 mvmvif->phy_ctxt = phy_ctxt; 4170 4171 ret = iwl_mvm_binding_add_vif(mvm, vif); 4172 if (WARN(ret, "Failed binding P2P_DEVICE\n")) 4173 goto out_unlock; 4174 4175 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); 4176 } 4177 4178 schedule_time_event: 4179 /* Schedule the time events */ 4180 ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type); 4181 4182 out_unlock: 4183 mutex_unlock(&mvm->mutex); 4184 IWL_DEBUG_MAC80211(mvm, "leave\n"); 4185 return ret; 4186 } 4187 4188 static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw, 4189 struct ieee80211_vif *vif) 4190 { 4191 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4192 4193 IWL_DEBUG_MAC80211(mvm, "enter\n"); 4194 4195 mutex_lock(&mvm->mutex); 4196 iwl_mvm_stop_roc(mvm, vif); 4197 mutex_unlock(&mvm->mutex); 4198 4199 IWL_DEBUG_MAC80211(mvm, "leave\n"); 4200 return 0; 4201 } 4202 4203 struct iwl_mvm_ftm_responder_iter_data { 4204 bool responder; 4205 struct ieee80211_chanctx_conf *ctx; 4206 }; 4207 4208 static void iwl_mvm_ftm_responder_chanctx_iter(void *_data, u8 *mac, 4209 struct ieee80211_vif *vif) 4210 { 4211 struct iwl_mvm_ftm_responder_iter_data *data = _data; 4212 4213 if (rcu_access_pointer(vif->bss_conf.chanctx_conf) == data->ctx && 4214 vif->type == NL80211_IFTYPE_AP && vif->bss_conf.ftmr_params) 4215 data->responder = true; 4216 } 4217 4218 static bool iwl_mvm_is_ftm_responder_chanctx(struct iwl_mvm *mvm, 4219 struct ieee80211_chanctx_conf *ctx) 4220 { 4221 struct iwl_mvm_ftm_responder_iter_data data = { 4222 .responder = false, 4223 .ctx = ctx, 4224 }; 4225 4226 ieee80211_iterate_active_interfaces_atomic(mvm->hw, 4227 IEEE80211_IFACE_ITER_NORMAL, 4228 iwl_mvm_ftm_responder_chanctx_iter, 4229 &data); 4230 return data.responder; 4231 } 4232 4233 static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm, 4234 struct ieee80211_chanctx_conf *ctx) 4235 { 4236 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; 4237 struct iwl_mvm_phy_ctxt *phy_ctxt; 4238 bool responder = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx); 4239 struct cfg80211_chan_def *def = responder ?
&ctx->def : &ctx->min_def; 4240 int ret; 4241 4242 lockdep_assert_held(&mvm->mutex); 4243 4244 IWL_DEBUG_MAC80211(mvm, "Add channel context\n"); 4245 4246 phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); 4247 if (!phy_ctxt) { 4248 ret = -ENOSPC; 4249 goto out; 4250 } 4251 4252 ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, def, 4253 ctx->rx_chains_static, 4254 ctx->rx_chains_dynamic); 4255 if (ret) { 4256 IWL_ERR(mvm, "Failed to add PHY context\n"); 4257 goto out; 4258 } 4259 4260 iwl_mvm_phy_ctxt_ref(mvm, phy_ctxt); 4261 *phy_ctxt_id = phy_ctxt->id; 4262 out: 4263 return ret; 4264 } 4265 4266 static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw, 4267 struct ieee80211_chanctx_conf *ctx) 4268 { 4269 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4270 int ret; 4271 4272 mutex_lock(&mvm->mutex); 4273 ret = __iwl_mvm_add_chanctx(mvm, ctx); 4274 mutex_unlock(&mvm->mutex); 4275 4276 return ret; 4277 } 4278 4279 static void __iwl_mvm_remove_chanctx(struct iwl_mvm *mvm, 4280 struct ieee80211_chanctx_conf *ctx) 4281 { 4282 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; 4283 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; 4284 4285 lockdep_assert_held(&mvm->mutex); 4286 4287 iwl_mvm_phy_ctxt_unref(mvm, phy_ctxt); 4288 } 4289 4290 static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw, 4291 struct ieee80211_chanctx_conf *ctx) 4292 { 4293 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4294 4295 mutex_lock(&mvm->mutex); 4296 __iwl_mvm_remove_chanctx(mvm, ctx); 4297 mutex_unlock(&mvm->mutex); 4298 } 4299 4300 static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw, 4301 struct ieee80211_chanctx_conf *ctx, 4302 u32 changed) 4303 { 4304 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4305 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; 4306 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; 4307 bool responder = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx); 4308 struct cfg80211_chan_def *def = responder ? &ctx->def : &ctx->min_def; 4309 4310 if (WARN_ONCE((phy_ctxt->ref > 1) && 4311 (changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH | 4312 IEEE80211_CHANCTX_CHANGE_RX_CHAINS | 4313 IEEE80211_CHANCTX_CHANGE_RADAR | 4314 IEEE80211_CHANCTX_CHANGE_MIN_WIDTH)), 4315 "Cannot change PHY. Ref=%d, changed=0x%X\n", 4316 phy_ctxt->ref, changed)) 4317 return; 4318 4319 mutex_lock(&mvm->mutex); 4320 4321 /* we are only changing the min_width, may be a noop */ 4322 if (changed == IEEE80211_CHANCTX_CHANGE_MIN_WIDTH) { 4323 if (phy_ctxt->width == def->width) 4324 goto out_unlock; 4325 4326 /* we are just toggling between 20_NOHT and 20 */ 4327 if (phy_ctxt->width <= NL80211_CHAN_WIDTH_20 && 4328 def->width <= NL80211_CHAN_WIDTH_20) 4329 goto out_unlock; 4330 } 4331 4332 iwl_mvm_bt_coex_vif_change(mvm); 4333 iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, def, 4334 ctx->rx_chains_static, 4335 ctx->rx_chains_dynamic); 4336 4337 out_unlock: 4338 mutex_unlock(&mvm->mutex); 4339 } 4340 4341 static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, 4342 struct ieee80211_vif *vif, 4343 struct ieee80211_chanctx_conf *ctx, 4344 bool switching_chanctx) 4345 { 4346 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; 4347 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; 4348 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 4349 int ret; 4350 4351 lockdep_assert_held(&mvm->mutex); 4352 4353 mvmvif->phy_ctxt = phy_ctxt; 4354 4355 switch (vif->type) { 4356 case NL80211_IFTYPE_AP: 4357 /* only needed if we're switching chanctx (i.e. 
during CSA) */ 4358 if (switching_chanctx) { 4359 mvmvif->ap_ibss_active = true; 4360 break; 4361 } 4362 fallthrough; 4363 case NL80211_IFTYPE_ADHOC: 4364 /* 4365 * The AP binding flow is handled as part of the start_ap flow 4366 * (in bss_info_changed), similarly for IBSS. 4367 */ 4368 ret = 0; 4369 goto out; 4370 case NL80211_IFTYPE_STATION: 4371 mvmvif->csa_bcn_pending = false; 4372 break; 4373 case NL80211_IFTYPE_MONITOR: 4374 /* always disable PS when a monitor interface is active */ 4375 mvmvif->ps_disabled = true; 4376 break; 4377 default: 4378 ret = -EINVAL; 4379 goto out; 4380 } 4381 4382 ret = iwl_mvm_binding_add_vif(mvm, vif); 4383 if (ret) 4384 goto out; 4385 4386 /* 4387 * Power state must be updated before quotas, 4388 * otherwise fw will complain. 4389 */ 4390 iwl_mvm_power_update_mac(mvm); 4391 4392 /* Setting the quota at this stage is only required for monitor 4393 * interfaces. For the other types, the bss_info changed flow 4394 * will handle quota settings. 4395 */ 4396 if (vif->type == NL80211_IFTYPE_MONITOR) { 4397 mvmvif->monitor_active = true; 4398 ret = iwl_mvm_update_quotas(mvm, false, NULL); 4399 if (ret) 4400 goto out_remove_binding; 4401 4402 ret = iwl_mvm_add_snif_sta(mvm, vif); 4403 if (ret) 4404 goto out_remove_binding; 4405 4406 } 4407 4408 /* Handle binding during CSA */ 4409 if (vif->type == NL80211_IFTYPE_AP) { 4410 iwl_mvm_update_quotas(mvm, false, NULL); 4411 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 4412 } 4413 4414 if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) { 4415 mvmvif->csa_bcn_pending = true; 4416 4417 if (!fw_has_capa(&mvm->fw->ucode_capa, 4418 IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) { 4419 u32 duration = 3 * vif->bss_conf.beacon_int; 4420 4421 /* Protect the session to make sure we hear the first 4422 * beacon on the new channel. 
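 * (For illustration, with a typical beacon_int of 100 TU this asks for
 * 3 * 100 = 300 TU of protection, roughly 307 ms, with a minimum of
 * beacon_int / 2 = 50 TU, assuming iwl_mvm_protect_session() takes its
 * durations in TU like beacon_int.)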
4423 */ 4424 iwl_mvm_protect_session(mvm, vif, duration, duration, 4425 vif->bss_conf.beacon_int / 2, 4426 true); 4427 } 4428 4429 iwl_mvm_update_quotas(mvm, false, NULL); 4430 } 4431 4432 goto out; 4433 4434 out_remove_binding: 4435 iwl_mvm_binding_remove_vif(mvm, vif); 4436 iwl_mvm_power_update_mac(mvm); 4437 out: 4438 if (ret) 4439 mvmvif->phy_ctxt = NULL; 4440 return ret; 4441 } 4442 static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw, 4443 struct ieee80211_vif *vif, 4444 struct ieee80211_bss_conf *link_conf, 4445 struct ieee80211_chanctx_conf *ctx) 4446 { 4447 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4448 int ret; 4449 4450 mutex_lock(&mvm->mutex); 4451 ret = __iwl_mvm_assign_vif_chanctx(mvm, vif, ctx, false); 4452 mutex_unlock(&mvm->mutex); 4453 4454 return ret; 4455 } 4456 4457 static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm, 4458 struct ieee80211_vif *vif, 4459 struct ieee80211_chanctx_conf *ctx, 4460 bool switching_chanctx) 4461 { 4462 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 4463 struct ieee80211_vif *disabled_vif = NULL; 4464 4465 lockdep_assert_held(&mvm->mutex); 4466 iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data); 4467 4468 switch (vif->type) { 4469 case NL80211_IFTYPE_ADHOC: 4470 goto out; 4471 case NL80211_IFTYPE_MONITOR: 4472 mvmvif->monitor_active = false; 4473 mvmvif->ps_disabled = false; 4474 iwl_mvm_rm_snif_sta(mvm, vif); 4475 break; 4476 case NL80211_IFTYPE_AP: 4477 /* This part is triggered only during CSA */ 4478 if (!switching_chanctx || !mvmvif->ap_ibss_active) 4479 goto out; 4480 4481 mvmvif->csa_countdown = false; 4482 4483 /* Set CS bit on all the stations */ 4484 iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true); 4485 4486 /* Save blocked iface, the timeout is set on the next beacon */ 4487 rcu_assign_pointer(mvm->csa_tx_blocked_vif, vif); 4488 4489 mvmvif->ap_ibss_active = false; 4490 break; 4491 case NL80211_IFTYPE_STATION: 4492 if (!switching_chanctx) 4493 break; 4494 4495 disabled_vif = vif; 4496 4497 if (!fw_has_capa(&mvm->fw->ucode_capa, 4498 IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) 4499 iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL); 4500 break; 4501 default: 4502 break; 4503 } 4504 4505 iwl_mvm_update_quotas(mvm, false, disabled_vif); 4506 iwl_mvm_binding_remove_vif(mvm, vif); 4507 4508 out: 4509 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD) && 4510 switching_chanctx) 4511 return; 4512 mvmvif->phy_ctxt = NULL; 4513 iwl_mvm_power_update_mac(mvm); 4514 } 4515 4516 static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw, 4517 struct ieee80211_vif *vif, 4518 struct ieee80211_bss_conf *link_conf, 4519 struct ieee80211_chanctx_conf *ctx) 4520 { 4521 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4522 4523 mutex_lock(&mvm->mutex); 4524 __iwl_mvm_unassign_vif_chanctx(mvm, vif, ctx, false); 4525 mutex_unlock(&mvm->mutex); 4526 } 4527 4528 static int 4529 iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm, 4530 struct ieee80211_vif_chanctx_switch *vifs) 4531 { 4532 int ret; 4533 4534 mutex_lock(&mvm->mutex); 4535 __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true); 4536 __iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx); 4537 4538 ret = __iwl_mvm_add_chanctx(mvm, vifs[0].new_ctx); 4539 if (ret) { 4540 IWL_ERR(mvm, "failed to add new_ctx during channel switch\n"); 4541 goto out_reassign; 4542 } 4543 4544 ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx, 4545 true); 4546 if (ret) { 4547 IWL_ERR(mvm, 4548 "failed to 
assign new_ctx during channel switch\n"); 4549 goto out_remove; 4550 } 4551 4552 /* we don't support TDLS during DCM - can be caused by channel switch */ 4553 if (iwl_mvm_phy_ctx_count(mvm) > 1) 4554 iwl_mvm_teardown_tdls_peers(mvm); 4555 4556 goto out; 4557 4558 out_remove: 4559 __iwl_mvm_remove_chanctx(mvm, vifs[0].new_ctx); 4560 4561 out_reassign: 4562 if (__iwl_mvm_add_chanctx(mvm, vifs[0].old_ctx)) { 4563 IWL_ERR(mvm, "failed to add old_ctx back after failure.\n"); 4564 goto out_restart; 4565 } 4566 4567 if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, 4568 true)) { 4569 IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n"); 4570 goto out_restart; 4571 } 4572 4573 goto out; 4574 4575 out_restart: 4576 /* things keep failing, better restart the hw */ 4577 iwl_mvm_nic_restart(mvm, false); 4578 4579 out: 4580 mutex_unlock(&mvm->mutex); 4581 4582 return ret; 4583 } 4584 4585 static int 4586 iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm, 4587 struct ieee80211_vif_chanctx_switch *vifs) 4588 { 4589 int ret; 4590 4591 mutex_lock(&mvm->mutex); 4592 __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true); 4593 4594 ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx, 4595 true); 4596 if (ret) { 4597 IWL_ERR(mvm, 4598 "failed to assign new_ctx during channel switch\n"); 4599 goto out_reassign; 4600 } 4601 4602 goto out; 4603 4604 out_reassign: 4605 if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, 4606 true)) { 4607 IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n"); 4608 goto out_restart; 4609 } 4610 4611 goto out; 4612 4613 out_restart: 4614 /* things keep failing, better restart the hw */ 4615 iwl_mvm_nic_restart(mvm, false); 4616 4617 out: 4618 mutex_unlock(&mvm->mutex); 4619 4620 return ret; 4621 } 4622 4623 static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw, 4624 struct ieee80211_vif_chanctx_switch *vifs, 4625 int n_vifs, 4626 enum ieee80211_chanctx_switch_mode mode) 4627 { 4628 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4629 int ret; 4630 4631 /* we only support a single-vif right now */ 4632 if (n_vifs > 1) 4633 return -EOPNOTSUPP; 4634 4635 switch (mode) { 4636 case CHANCTX_SWMODE_SWAP_CONTEXTS: 4637 ret = iwl_mvm_switch_vif_chanctx_swap(mvm, vifs); 4638 break; 4639 case CHANCTX_SWMODE_REASSIGN_VIF: 4640 ret = iwl_mvm_switch_vif_chanctx_reassign(mvm, vifs); 4641 break; 4642 default: 4643 ret = -EOPNOTSUPP; 4644 break; 4645 } 4646 4647 return ret; 4648 } 4649 4650 static int iwl_mvm_tx_last_beacon(struct ieee80211_hw *hw) 4651 { 4652 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4653 4654 return mvm->ibss_manager; 4655 } 4656 4657 static int iwl_mvm_set_tim(struct ieee80211_hw *hw, 4658 struct ieee80211_sta *sta, 4659 bool set) 4660 { 4661 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4662 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 4663 4664 if (!mvm_sta || !mvm_sta->vif) { 4665 IWL_ERR(mvm, "Station is not associated to a vif\n"); 4666 return -EINVAL; 4667 } 4668 4669 return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif); 4670 } 4671 4672 #ifdef CONFIG_NL80211_TESTMODE 4673 static const struct nla_policy iwl_mvm_tm_policy[IWL_MVM_TM_ATTR_MAX + 1] = { 4674 [IWL_MVM_TM_ATTR_CMD] = { .type = NLA_U32 }, 4675 [IWL_MVM_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 }, 4676 [IWL_MVM_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 }, 4677 }; 4678 4679 static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm, 4680 struct ieee80211_vif *vif, 4681 void *data, int 
len) 4682 { 4683 struct nlattr *tb[IWL_MVM_TM_ATTR_MAX + 1]; 4684 int err; 4685 u32 noa_duration; 4686 4687 err = nla_parse_deprecated(tb, IWL_MVM_TM_ATTR_MAX, data, len, 4688 iwl_mvm_tm_policy, NULL); 4689 if (err) 4690 return err; 4691 4692 if (!tb[IWL_MVM_TM_ATTR_CMD]) 4693 return -EINVAL; 4694 4695 switch (nla_get_u32(tb[IWL_MVM_TM_ATTR_CMD])) { 4696 case IWL_MVM_TM_CMD_SET_NOA: 4697 if (!vif || vif->type != NL80211_IFTYPE_AP || !vif->p2p || 4698 !vif->bss_conf.enable_beacon || 4699 !tb[IWL_MVM_TM_ATTR_NOA_DURATION]) 4700 return -EINVAL; 4701 4702 noa_duration = nla_get_u32(tb[IWL_MVM_TM_ATTR_NOA_DURATION]); 4703 if (noa_duration >= vif->bss_conf.beacon_int) 4704 return -EINVAL; 4705 4706 mvm->noa_duration = noa_duration; 4707 mvm->noa_vif = vif; 4708 4709 return iwl_mvm_update_quotas(mvm, true, NULL); 4710 case IWL_MVM_TM_CMD_SET_BEACON_FILTER: 4711 /* must be associated client vif - ignore authorized */ 4712 if (!vif || vif->type != NL80211_IFTYPE_STATION || 4713 !vif->cfg.assoc || !vif->bss_conf.dtim_period || 4714 !tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]) 4715 return -EINVAL; 4716 4717 if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])) 4718 return iwl_mvm_enable_beacon_filter(mvm, vif, 0); 4719 return iwl_mvm_disable_beacon_filter(mvm, vif, 0); 4720 } 4721 4722 return -EOPNOTSUPP; 4723 } 4724 4725 static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw, 4726 struct ieee80211_vif *vif, 4727 void *data, int len) 4728 { 4729 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4730 int err; 4731 4732 mutex_lock(&mvm->mutex); 4733 err = __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len); 4734 mutex_unlock(&mvm->mutex); 4735 4736 return err; 4737 } 4738 #endif 4739 4740 static void iwl_mvm_channel_switch(struct ieee80211_hw *hw, 4741 struct ieee80211_vif *vif, 4742 struct ieee80211_channel_switch *chsw) 4743 { 4744 /* By implementing this operation, we prevent mac80211 from 4745 * starting its own channel switch timer, so that we can call 4746 * ieee80211_chswitch_done() ourselves at the right time 4747 * (which is when the absence time event starts). 4748 */ 4749 4750 IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw), 4751 "dummy channel switch op\n"); 4752 } 4753 4754 static int iwl_mvm_schedule_client_csa(struct iwl_mvm *mvm, 4755 struct ieee80211_vif *vif, 4756 struct ieee80211_channel_switch *chsw) 4757 { 4758 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 4759 struct iwl_chan_switch_te_cmd cmd = { 4760 .mac_id = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, 4761 mvmvif->color)), 4762 .action = cpu_to_le32(FW_CTXT_ACTION_ADD), 4763 .tsf = cpu_to_le32(chsw->timestamp), 4764 .cs_count = chsw->count, 4765 .cs_mode = chsw->block_tx, 4766 }; 4767 4768 lockdep_assert_held(&mvm->mutex); 4769 4770 if (chsw->delay) 4771 cmd.cs_delayed_bcn_count = 4772 DIV_ROUND_UP(chsw->delay, vif->bss_conf.beacon_int); 4773 4774 return iwl_mvm_send_cmd_pdu(mvm, 4775 WIDE_ID(MAC_CONF_GROUP, 4776 CHANNEL_SWITCH_TIME_EVENT_CMD), 4777 0, sizeof(cmd), &cmd); 4778 } 4779 4780 static int iwl_mvm_old_pre_chan_sw_sta(struct iwl_mvm *mvm, 4781 struct ieee80211_vif *vif, 4782 struct ieee80211_channel_switch *chsw) 4783 { 4784 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 4785 u32 apply_time; 4786 4787 /* Schedule the time event to a bit before beacon 1, 4788 * to make sure we're in the new channel when the 4789 * GO/AP arrives. In case count <= 1, immediately schedule the 4790 * TE (this might result in some packet loss or connection 4791 * loss).
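 * Worked example (numbers assumed purely for illustration): with
 * beacon_int = 100 TU, count = 5 and IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT
 * = 10 TU, the event below is applied (100 * 4 - 10) * 1024 = 399360 usec
 * (~399 ms) after device_timestamp, i.e. just before the beacon in which
 * the countdown reaches 1, assuming device time is counted in usec here.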
4792 */ 4793 if (chsw->count <= 1) 4794 apply_time = 0; 4795 else 4796 apply_time = chsw->device_timestamp + 4797 ((vif->bss_conf.beacon_int * (chsw->count - 1) - 4798 IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024); 4799 4800 if (chsw->block_tx) 4801 iwl_mvm_csa_client_absent(mvm, vif); 4802 4803 if (mvmvif->bf_data.bf_enabled) { 4804 int ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); 4805 4806 if (ret) 4807 return ret; 4808 } 4809 4810 iwl_mvm_schedule_csa_period(mvm, vif, vif->bss_conf.beacon_int, 4811 apply_time); 4812 4813 return 0; 4814 } 4815 4816 #define IWL_MAX_CSA_BLOCK_TX 1500 4817 static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, 4818 struct ieee80211_vif *vif, 4819 struct ieee80211_channel_switch *chsw) 4820 { 4821 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4822 struct ieee80211_vif *csa_vif; 4823 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 4824 int ret; 4825 4826 mutex_lock(&mvm->mutex); 4827 4828 mvmvif->csa_failed = false; 4829 4830 IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n", 4831 chsw->chandef.center_freq1); 4832 4833 iwl_fw_dbg_trigger_simple_stop(&mvm->fwrt, 4834 ieee80211_vif_to_wdev(vif), 4835 FW_DBG_TRIGGER_CHANNEL_SWITCH); 4836 4837 switch (vif->type) { 4838 case NL80211_IFTYPE_AP: 4839 csa_vif = 4840 rcu_dereference_protected(mvm->csa_vif, 4841 lockdep_is_held(&mvm->mutex)); 4842 if (WARN_ONCE(csa_vif && csa_vif->bss_conf.csa_active, 4843 "Another CSA is already in progress")) { 4844 ret = -EBUSY; 4845 goto out_unlock; 4846 } 4847 4848 /* we still didn't unblock tx. prevent new CS meanwhile */ 4849 if (rcu_dereference_protected(mvm->csa_tx_blocked_vif, 4850 lockdep_is_held(&mvm->mutex))) { 4851 ret = -EBUSY; 4852 goto out_unlock; 4853 } 4854 4855 rcu_assign_pointer(mvm->csa_vif, vif); 4856 4857 if (WARN_ONCE(mvmvif->csa_countdown, 4858 "Previous CSA countdown didn't complete")) { 4859 ret = -EBUSY; 4860 goto out_unlock; 4861 } 4862 4863 mvmvif->csa_target_freq = chsw->chandef.chan->center_freq; 4864 4865 break; 4866 case NL80211_IFTYPE_STATION: 4867 /* 4868 * In the new flow FW is in charge of timing the switch so there 4869 * is no need for all of this 4870 */ 4871 if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP, 4872 CHANNEL_SWITCH_ERROR_NOTIF, 4873 0)) 4874 break; 4875 4876 /* 4877 * We haven't configured the firmware to be associated yet since 4878 * we don't know the dtim period. In this case, the firmware can't 4879 * track the beacons. 
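 * (Hence the -EBUSY below: without a known DTIM schedule there is nothing
 * to anchor a channel-switch time event to, so the switch is rejected.)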
4880 */ 4881 if (!vif->cfg.assoc || !vif->bss_conf.dtim_period) { 4882 ret = -EBUSY; 4883 goto out_unlock; 4884 } 4885 4886 if (chsw->delay > IWL_MAX_CSA_BLOCK_TX) 4887 schedule_delayed_work(&mvmvif->csa_work, 0); 4888 4889 if (chsw->block_tx) { 4890 /* 4891 * If the quiet period is undetermined or too long, schedule 4892 * the CSA work so we can gracefully disconnect 4893 */ 4894 if (!chsw->count || 4895 chsw->count * vif->bss_conf.beacon_int > 4896 IWL_MAX_CSA_BLOCK_TX) 4897 schedule_delayed_work(&mvmvif->csa_work, 4898 msecs_to_jiffies(IWL_MAX_CSA_BLOCK_TX)); 4899 } 4900 4901 if (!fw_has_capa(&mvm->fw->ucode_capa, 4902 IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) { 4903 ret = iwl_mvm_old_pre_chan_sw_sta(mvm, vif, chsw); 4904 if (ret) 4905 goto out_unlock; 4906 } else { 4907 iwl_mvm_schedule_client_csa(mvm, vif, chsw); 4908 } 4909 4910 mvmvif->csa_count = chsw->count; 4911 mvmvif->csa_misbehave = false; 4912 break; 4913 default: 4914 break; 4915 } 4916 4917 mvmvif->ps_disabled = true; 4918 4919 ret = iwl_mvm_power_update_ps(mvm); 4920 if (ret) 4921 goto out_unlock; 4922 4923 /* we won't be on this channel any longer */ 4924 iwl_mvm_teardown_tdls_peers(mvm); 4925 4926 out_unlock: 4927 mutex_unlock(&mvm->mutex); 4928 4929 return ret; 4930 } 4931 4932 static void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw, 4933 struct ieee80211_vif *vif, 4934 struct ieee80211_channel_switch *chsw) 4935 { 4936 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4937 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 4938 struct iwl_chan_switch_te_cmd cmd = { 4939 .mac_id = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, 4940 mvmvif->color)), 4941 .action = cpu_to_le32(FW_CTXT_ACTION_MODIFY), 4942 .tsf = cpu_to_le32(chsw->timestamp), 4943 .cs_count = chsw->count, 4944 .cs_mode = chsw->block_tx, 4945 }; 4946 4947 /* 4948 * In the new flow FW is in charge of timing the switch so there is no 4949 * need for all of this 4950 */ 4951 if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP, 4952 CHANNEL_SWITCH_ERROR_NOTIF, 0)) 4953 return; 4954 4955 if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CS_MODIFY)) 4956 return; 4957 4958 IWL_DEBUG_MAC80211(mvm, "Modify CSA on mac %d count = %d (old %d) mode = %d\n", 4959 mvmvif->id, chsw->count, mvmvif->csa_count, chsw->block_tx); 4960 4961 if (chsw->count >= mvmvif->csa_count && chsw->block_tx) { 4962 if (mvmvif->csa_misbehave) { 4963 /* Second time, give up on this AP */ 4964 iwl_mvm_abort_channel_switch(hw, vif); 4965 ieee80211_chswitch_done(vif, false); 4966 mvmvif->csa_misbehave = false; 4967 return; 4968 } 4969 mvmvif->csa_misbehave = true; 4970 } 4971 mvmvif->csa_count = chsw->count; 4972 4973 mutex_lock(&mvm->mutex); 4974 if (mvmvif->csa_failed) 4975 goto out_unlock; 4976 4977 WARN_ON(iwl_mvm_send_cmd_pdu(mvm, 4978 WIDE_ID(MAC_CONF_GROUP, 4979 CHANNEL_SWITCH_TIME_EVENT_CMD), 4980 0, sizeof(cmd), &cmd)); 4981 out_unlock: 4982 mutex_unlock(&mvm->mutex); 4983 } 4984 4985 static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop) 4986 { 4987 int i; 4988 4989 if (!iwl_mvm_has_new_tx_api(mvm)) { 4990 if (drop) { 4991 mutex_lock(&mvm->mutex); 4992 iwl_mvm_flush_tx_path(mvm, 4993 iwl_mvm_flushable_queues(mvm) & queues); 4994 mutex_unlock(&mvm->mutex); 4995 } else { 4996 iwl_trans_wait_tx_queues_empty(mvm->trans, queues); 4997 } 4998 return; 4999 } 5000 5001 mutex_lock(&mvm->mutex); 5002 for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) { 5003 struct ieee80211_sta *sta; 5004 5005 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], 5006
lockdep_is_held(&mvm->mutex)); 5007 if (IS_ERR_OR_NULL(sta)) 5008 continue; 5009 5010 if (drop) 5011 iwl_mvm_flush_sta_tids(mvm, i, 0xFFFF); 5012 else 5013 iwl_mvm_wait_sta_queues_empty(mvm, 5014 iwl_mvm_sta_from_mac80211(sta)); 5015 } 5016 mutex_unlock(&mvm->mutex); 5017 } 5018 5019 static void iwl_mvm_mac_flush(struct ieee80211_hw *hw, 5020 struct ieee80211_vif *vif, u32 queues, bool drop) 5021 { 5022 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 5023 struct iwl_mvm_vif *mvmvif; 5024 struct iwl_mvm_sta *mvmsta; 5025 struct ieee80211_sta *sta; 5026 int i; 5027 u32 msk = 0; 5028 5029 if (!vif) { 5030 iwl_mvm_flush_no_vif(mvm, queues, drop); 5031 return; 5032 } 5033 5034 if (vif->type != NL80211_IFTYPE_STATION) 5035 return; 5036 5037 /* Make sure we're done with the deferred traffic before flushing */ 5038 flush_work(&mvm->add_stream_wk); 5039 5040 mutex_lock(&mvm->mutex); 5041 mvmvif = iwl_mvm_vif_from_mac80211(vif); 5042 5043 /* flush the AP-station and all TDLS peers */ 5044 for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) { 5045 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], 5046 lockdep_is_held(&mvm->mutex)); 5047 if (IS_ERR_OR_NULL(sta)) 5048 continue; 5049 5050 mvmsta = iwl_mvm_sta_from_mac80211(sta); 5051 if (mvmsta->vif != vif) 5052 continue; 5053 5054 /* make sure only TDLS peers or the AP are flushed */ 5055 WARN_ON(i != mvmvif->ap_sta_id && !sta->tdls); 5056 5057 if (drop) { 5058 if (iwl_mvm_flush_sta(mvm, mvmsta, false)) 5059 IWL_ERR(mvm, "flush request fail\n"); 5060 } else { 5061 if (iwl_mvm_has_new_tx_api(mvm)) 5062 iwl_mvm_wait_sta_queues_empty(mvm, mvmsta); 5063 else /* only used for !iwl_mvm_has_new_tx_api() below */ 5064 msk |= mvmsta->tfd_queue_msk; 5065 } 5066 } 5067 5068 mutex_unlock(&mvm->mutex); 5069 5070 /* this can take a while, and we may need/want other operations 5071 * to succeed while doing this, so do it without the mutex held 5072 */ 5073 if (!drop && !iwl_mvm_has_new_tx_api(mvm)) 5074 iwl_trans_wait_tx_queues_empty(mvm->trans, msk); 5075 } 5076 5077 static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx, 5078 struct survey_info *survey) 5079 { 5080 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 5081 int ret; 5082 5083 memset(survey, 0, sizeof(*survey)); 5084 5085 /* only support global statistics right now */ 5086 if (idx != 0) 5087 return -ENOENT; 5088 5089 if (!fw_has_capa(&mvm->fw->ucode_capa, 5090 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS)) 5091 return -ENOENT; 5092 5093 mutex_lock(&mvm->mutex); 5094 5095 if (iwl_mvm_firmware_running(mvm)) { 5096 ret = iwl_mvm_request_statistics(mvm, false); 5097 if (ret) 5098 goto out; 5099 } 5100 5101 survey->filled = SURVEY_INFO_TIME | 5102 SURVEY_INFO_TIME_RX | 5103 SURVEY_INFO_TIME_TX | 5104 SURVEY_INFO_TIME_SCAN; 5105 survey->time = mvm->accu_radio_stats.on_time_rf + 5106 mvm->radio_stats.on_time_rf; 5107 do_div(survey->time, USEC_PER_MSEC); 5108 5109 survey->time_rx = mvm->accu_radio_stats.rx_time + 5110 mvm->radio_stats.rx_time; 5111 do_div(survey->time_rx, USEC_PER_MSEC); 5112 5113 survey->time_tx = mvm->accu_radio_stats.tx_time + 5114 mvm->radio_stats.tx_time; 5115 do_div(survey->time_tx, USEC_PER_MSEC); 5116 5117 survey->time_scan = mvm->accu_radio_stats.on_time_scan + 5118 mvm->radio_stats.on_time_scan; 5119 do_div(survey->time_scan, USEC_PER_MSEC); 5120 5121 ret = 0; 5122 out: 5123 mutex_unlock(&mvm->mutex); 5124 return ret; 5125 } 5126 5127 static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo) 5128 { 5129 u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK; 
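/*
 * Worked example (field layout inferred from the masks used below, so
 * treat it as illustrative only): a VHT, 80 MHz, 2-spatial-stream,
 * MCS 7, short-GI entry carries RATE_MCS_VHT_MSK in the modulation
 * type bits, RATE_MCS_CHAN_WIDTH_80, a 0-based NSS field of 1 and an
 * MCS code of 7; this function then reports bw = RATE_INFO_BW_80,
 * nss = 2, mcs = 7 and sets RATE_INFO_FLAGS_VHT_MCS plus
 * RATE_INFO_FLAGS_SHORT_GI.
 */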
5130 u32 gi_ltf; 5131 5132 switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) { 5133 case RATE_MCS_CHAN_WIDTH_20: 5134 rinfo->bw = RATE_INFO_BW_20; 5135 break; 5136 case RATE_MCS_CHAN_WIDTH_40: 5137 rinfo->bw = RATE_INFO_BW_40; 5138 break; 5139 case RATE_MCS_CHAN_WIDTH_80: 5140 rinfo->bw = RATE_INFO_BW_80; 5141 break; 5142 case RATE_MCS_CHAN_WIDTH_160: 5143 rinfo->bw = RATE_INFO_BW_160; 5144 break; 5145 case RATE_MCS_CHAN_WIDTH_320: 5146 rinfo->bw = RATE_INFO_BW_320; 5147 break; 5148 } 5149 5150 if (format == RATE_MCS_CCK_MSK || 5151 format == RATE_MCS_LEGACY_OFDM_MSK) { 5152 int rate = u32_get_bits(rate_n_flags, RATE_LEGACY_RATE_MSK); 5153 5154 /* add the offset needed to get to the legacy ofdm indices */ 5155 if (format == RATE_MCS_LEGACY_OFDM_MSK) 5156 rate += IWL_FIRST_OFDM_RATE; 5157 5158 switch (rate) { 5159 case IWL_RATE_1M_INDEX: 5160 rinfo->legacy = 10; 5161 break; 5162 case IWL_RATE_2M_INDEX: 5163 rinfo->legacy = 20; 5164 break; 5165 case IWL_RATE_5M_INDEX: 5166 rinfo->legacy = 55; 5167 break; 5168 case IWL_RATE_11M_INDEX: 5169 rinfo->legacy = 110; 5170 break; 5171 case IWL_RATE_6M_INDEX: 5172 rinfo->legacy = 60; 5173 break; 5174 case IWL_RATE_9M_INDEX: 5175 rinfo->legacy = 90; 5176 break; 5177 case IWL_RATE_12M_INDEX: 5178 rinfo->legacy = 120; 5179 break; 5180 case IWL_RATE_18M_INDEX: 5181 rinfo->legacy = 180; 5182 break; 5183 case IWL_RATE_24M_INDEX: 5184 rinfo->legacy = 240; 5185 break; 5186 case IWL_RATE_36M_INDEX: 5187 rinfo->legacy = 360; 5188 break; 5189 case IWL_RATE_48M_INDEX: 5190 rinfo->legacy = 480; 5191 break; 5192 case IWL_RATE_54M_INDEX: 5193 rinfo->legacy = 540; 5194 } 5195 return; 5196 } 5197 5198 rinfo->nss = u32_get_bits(rate_n_flags, 5199 RATE_MCS_NSS_MSK) + 1; 5200 rinfo->mcs = format == RATE_MCS_HT_MSK ? 5201 RATE_HT_MCS_INDEX(rate_n_flags) : 5202 u32_get_bits(rate_n_flags, RATE_MCS_CODE_MSK); 5203 5204 if (rate_n_flags & RATE_MCS_SGI_MSK) 5205 rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; 5206 5207 switch (format) { 5208 case RATE_MCS_EHT_MSK: 5209 /* TODO: GI/LTF/RU. How does the firmware encode them? 
*/ 5210 rinfo->flags |= RATE_INFO_FLAGS_EHT_MCS; 5211 break; 5212 case RATE_MCS_HE_MSK: 5213 gi_ltf = u32_get_bits(rate_n_flags, RATE_MCS_HE_GI_LTF_MSK); 5214 5215 rinfo->flags |= RATE_INFO_FLAGS_HE_MCS; 5216 5217 if (rate_n_flags & RATE_MCS_HE_106T_MSK) { 5218 rinfo->bw = RATE_INFO_BW_HE_RU; 5219 rinfo->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_106; 5220 } 5221 5222 switch (rate_n_flags & RATE_MCS_HE_TYPE_MSK) { 5223 case RATE_MCS_HE_TYPE_SU: 5224 case RATE_MCS_HE_TYPE_EXT_SU: 5225 if (gi_ltf == 0 || gi_ltf == 1) 5226 rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8; 5227 else if (gi_ltf == 2) 5228 rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6; 5229 else if (gi_ltf == 3) 5230 rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2; 5231 else 5232 rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8; 5233 break; 5234 case RATE_MCS_HE_TYPE_MU: 5235 if (gi_ltf == 0 || gi_ltf == 1) 5236 rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8; 5237 else if (gi_ltf == 2) 5238 rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6; 5239 else 5240 rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2; 5241 break; 5242 case RATE_MCS_HE_TYPE_TRIG: 5243 if (gi_ltf == 0 || gi_ltf == 1) 5244 rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6; 5245 else 5246 rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2; 5247 break; 5248 } 5249 5250 if (rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK) 5251 rinfo->he_dcm = 1; 5252 break; 5253 case RATE_MCS_HT_MSK: 5254 rinfo->flags |= RATE_INFO_FLAGS_MCS; 5255 break; 5256 case RATE_MCS_VHT_MSK: 5257 rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS; 5258 break; 5259 } 5260 } 5261 5262 static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, 5263 struct ieee80211_vif *vif, 5264 struct ieee80211_sta *sta, 5265 struct station_info *sinfo) 5266 { 5267 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 5268 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 5269 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 5270 5271 if (mvmsta->avg_energy) { 5272 sinfo->signal_avg = -(s8)mvmsta->avg_energy; 5273 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); 5274 } 5275 5276 if (iwl_mvm_has_tlc_offload(mvm)) { 5277 struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw; 5278 5279 iwl_mvm_set_sta_rate(lq_sta->last_rate_n_flags, &sinfo->txrate); 5280 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); 5281 } 5282 5283 /* if beacon filtering isn't on mac80211 does it anyway */ 5284 if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER)) 5285 return; 5286 5287 if (!vif->cfg.assoc) 5288 return; 5289 5290 mutex_lock(&mvm->mutex); 5291 5292 if (mvmvif->ap_sta_id != mvmsta->sta_id) 5293 goto unlock; 5294 5295 if (iwl_mvm_request_statistics(mvm, false)) 5296 goto unlock; 5297 5298 sinfo->rx_beacon = mvmvif->beacon_stats.num_beacons + 5299 mvmvif->beacon_stats.accu_num_beacons; 5300 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX); 5301 if (mvmvif->beacon_stats.avg_signal) { 5302 /* firmware only reports a value after RXing a few beacons */ 5303 sinfo->rx_beacon_signal_avg = mvmvif->beacon_stats.avg_signal; 5304 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG); 5305 } 5306 unlock: 5307 mutex_unlock(&mvm->mutex); 5308 } 5309 5310 static void iwl_mvm_event_mlme_callback_ini(struct iwl_mvm *mvm, 5311 struct ieee80211_vif *vif, 5312 const struct ieee80211_mlme_event *mlme) 5313 { 5314 if ((mlme->data == ASSOC_EVENT || mlme->data == AUTH_EVENT) && 5315 (mlme->status == MLME_DENIED || mlme->status == MLME_TIMEOUT)) { 5316 iwl_dbg_tlv_time_point(&mvm->fwrt, 5317 IWL_FW_INI_TIME_POINT_ASSOC_FAILED, 5318 NULL); 5319 return; 5320 } 5321 5322 
if (mlme->data == DEAUTH_RX_EVENT || mlme->data == DEAUTH_TX_EVENT) { 5323 iwl_dbg_tlv_time_point(&mvm->fwrt, 5324 IWL_FW_INI_TIME_POINT_DEASSOC, 5325 NULL); 5326 return; 5327 } 5328 } 5329 5330 static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm, 5331 struct ieee80211_vif *vif, 5332 const struct ieee80211_event *event) 5333 { 5334 #define CHECK_MLME_TRIGGER(_cnt, _fmt...) \ 5335 do { \ 5336 if ((trig_mlme->_cnt) && --(trig_mlme->_cnt)) \ 5337 break; \ 5338 iwl_fw_dbg_collect_trig(&(mvm)->fwrt, trig, _fmt); \ 5339 } while (0) 5340 5341 struct iwl_fw_dbg_trigger_tlv *trig; 5342 struct iwl_fw_dbg_trigger_mlme *trig_mlme; 5343 5344 if (iwl_trans_dbg_ini_valid(mvm->trans)) { 5345 iwl_mvm_event_mlme_callback_ini(mvm, vif, &event->u.mlme); 5346 return; 5347 } 5348 5349 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), 5350 FW_DBG_TRIGGER_MLME); 5351 if (!trig) 5352 return; 5353 5354 trig_mlme = (void *)trig->data; 5355 5356 if (event->u.mlme.data == ASSOC_EVENT) { 5357 if (event->u.mlme.status == MLME_DENIED) 5358 CHECK_MLME_TRIGGER(stop_assoc_denied, 5359 "DENIED ASSOC: reason %d", 5360 event->u.mlme.reason); 5361 else if (event->u.mlme.status == MLME_TIMEOUT) 5362 CHECK_MLME_TRIGGER(stop_assoc_timeout, 5363 "ASSOC TIMEOUT"); 5364 } else if (event->u.mlme.data == AUTH_EVENT) { 5365 if (event->u.mlme.status == MLME_DENIED) 5366 CHECK_MLME_TRIGGER(stop_auth_denied, 5367 "DENIED AUTH: reason %d", 5368 event->u.mlme.reason); 5369 else if (event->u.mlme.status == MLME_TIMEOUT) 5370 CHECK_MLME_TRIGGER(stop_auth_timeout, 5371 "AUTH TIMEOUT"); 5372 } else if (event->u.mlme.data == DEAUTH_RX_EVENT) { 5373 CHECK_MLME_TRIGGER(stop_rx_deauth, 5374 "DEAUTH RX %d", event->u.mlme.reason); 5375 } else if (event->u.mlme.data == DEAUTH_TX_EVENT) { 5376 CHECK_MLME_TRIGGER(stop_tx_deauth, 5377 "DEAUTH TX %d", event->u.mlme.reason); 5378 } 5379 #undef CHECK_MLME_TRIGGER 5380 } 5381 5382 static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm, 5383 struct ieee80211_vif *vif, 5384 const struct ieee80211_event *event) 5385 { 5386 struct iwl_fw_dbg_trigger_tlv *trig; 5387 struct iwl_fw_dbg_trigger_ba *ba_trig; 5388 5389 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), 5390 FW_DBG_TRIGGER_BA); 5391 if (!trig) 5392 return; 5393 5394 ba_trig = (void *)trig->data; 5395 5396 if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid))) 5397 return; 5398 5399 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, 5400 "BAR received from %pM, tid %d, ssn %d", 5401 event->u.ba.sta->addr, event->u.ba.tid, 5402 event->u.ba.ssn); 5403 } 5404 5405 static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw, 5406 struct ieee80211_vif *vif, 5407 const struct ieee80211_event *event) 5408 { 5409 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 5410 5411 switch (event->type) { 5412 case MLME_EVENT: 5413 iwl_mvm_event_mlme_callback(mvm, vif, event); 5414 break; 5415 case BAR_RX_EVENT: 5416 iwl_mvm_event_bar_rx_callback(mvm, vif, event); 5417 break; 5418 case BA_FRAME_TIMEOUT: 5419 iwl_mvm_event_frame_timeout_callback(mvm, vif, event->u.ba.sta, 5420 event->u.ba.tid); 5421 break; 5422 default: 5423 break; 5424 } 5425 } 5426 5427 void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, 5428 enum iwl_mvm_rxq_notif_type type, 5429 bool sync, 5430 const void *data, u32 size) 5431 { 5432 struct { 5433 struct iwl_rxq_sync_cmd cmd; 5434 struct iwl_mvm_internal_rxq_notif notif; 5435 } __packed cmd = { 5436 .cmd.rxq_mask = cpu_to_le32(BIT(mvm->trans->num_rx_queues) - 1), 5437 .cmd.count = 5438 
cpu_to_le32(sizeof(struct iwl_mvm_internal_rxq_notif) + 5439 size), 5440 .notif.type = type, 5441 .notif.sync = sync, 5442 }; 5443 struct iwl_host_cmd hcmd = { 5444 .id = WIDE_ID(DATA_PATH_GROUP, TRIGGER_RX_QUEUES_NOTIF_CMD), 5445 .data[0] = &cmd, 5446 .len[0] = sizeof(cmd), 5447 .data[1] = data, 5448 .len[1] = size, 5449 .flags = sync ? 0 : CMD_ASYNC, 5450 }; 5451 int ret; 5452 5453 /* size must be a multiple of DWORD */ 5454 if (WARN_ON(cmd.cmd.count & cpu_to_le32(3))) 5455 return; 5456 5457 if (!iwl_mvm_has_new_rx_api(mvm)) 5458 return; 5459 5460 if (sync) { 5461 cmd.notif.cookie = mvm->queue_sync_cookie; 5462 mvm->queue_sync_state = (1 << mvm->trans->num_rx_queues) - 1; 5463 } 5464 5465 ret = iwl_mvm_send_cmd(mvm, &hcmd); 5466 if (ret) { 5467 IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret); 5468 goto out; 5469 } 5470 5471 if (sync) { 5472 lockdep_assert_held(&mvm->mutex); 5473 ret = wait_event_timeout(mvm->rx_sync_waitq, 5474 READ_ONCE(mvm->queue_sync_state) == 0 || 5475 iwl_mvm_is_radio_killed(mvm), 5476 HZ); 5477 WARN_ONCE(!ret && !iwl_mvm_is_radio_killed(mvm), 5478 "queue sync: failed to sync, state is 0x%lx\n", 5479 mvm->queue_sync_state); 5480 } 5481 5482 out: 5483 if (sync) { 5484 mvm->queue_sync_state = 0; 5485 mvm->queue_sync_cookie++; 5486 } 5487 } 5488 5489 static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw) 5490 { 5491 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 5492 5493 mutex_lock(&mvm->mutex); 5494 iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_EMPTY, true, NULL, 0); 5495 mutex_unlock(&mvm->mutex); 5496 } 5497 5498 static int 5499 iwl_mvm_mac_get_ftm_responder_stats(struct ieee80211_hw *hw, 5500 struct ieee80211_vif *vif, 5501 struct cfg80211_ftm_responder_stats *stats) 5502 { 5503 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 5504 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 5505 5506 if (vif->p2p || vif->type != NL80211_IFTYPE_AP || 5507 !mvmvif->ap_ibss_active || !vif->bss_conf.ftm_responder) 5508 return -EINVAL; 5509 5510 mutex_lock(&mvm->mutex); 5511 *stats = mvm->ftm_resp_stats; 5512 mutex_unlock(&mvm->mutex); 5513 5514 stats->filled = BIT(NL80211_FTM_STATS_SUCCESS_NUM) | 5515 BIT(NL80211_FTM_STATS_PARTIAL_NUM) | 5516 BIT(NL80211_FTM_STATS_FAILED_NUM) | 5517 BIT(NL80211_FTM_STATS_ASAP_NUM) | 5518 BIT(NL80211_FTM_STATS_NON_ASAP_NUM) | 5519 BIT(NL80211_FTM_STATS_TOTAL_DURATION_MSEC) | 5520 BIT(NL80211_FTM_STATS_UNKNOWN_TRIGGERS_NUM) | 5521 BIT(NL80211_FTM_STATS_RESCHEDULE_REQUESTS_NUM) | 5522 BIT(NL80211_FTM_STATS_OUT_OF_WINDOW_TRIGGERS_NUM); 5523 5524 return 0; 5525 } 5526 5527 static int iwl_mvm_start_pmsr(struct ieee80211_hw *hw, 5528 struct ieee80211_vif *vif, 5529 struct cfg80211_pmsr_request *request) 5530 { 5531 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 5532 int ret; 5533 5534 mutex_lock(&mvm->mutex); 5535 ret = iwl_mvm_ftm_start(mvm, vif, request); 5536 mutex_unlock(&mvm->mutex); 5537 5538 return ret; 5539 } 5540 5541 static void iwl_mvm_abort_pmsr(struct ieee80211_hw *hw, 5542 struct ieee80211_vif *vif, 5543 struct cfg80211_pmsr_request *request) 5544 { 5545 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 5546 5547 mutex_lock(&mvm->mutex); 5548 iwl_mvm_ftm_abort(mvm, request); 5549 mutex_unlock(&mvm->mutex); 5550 } 5551 5552 static bool iwl_mvm_can_hw_csum(struct sk_buff *skb) 5553 { 5554 u8 protocol = ip_hdr(skb)->protocol; 5555 5556 if (!IS_ENABLED(CONFIG_INET)) 5557 return false; 5558 5559 return protocol == IPPROTO_TCP || protocol == IPPROTO_UDP; 5560 } 5561 5562 static bool 
iwl_mvm_mac_can_aggregate(struct ieee80211_hw *hw, 5563 struct sk_buff *head, 5564 struct sk_buff *skb) 5565 { 5566 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 5567 5568 if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) 5569 return iwl_mvm_tx_csum_bz(mvm, head, true) == 5570 iwl_mvm_tx_csum_bz(mvm, skb, true); 5571 5572 /* For now don't aggregate IPv6 in AMSDU */ 5573 if (skb->protocol != htons(ETH_P_IP)) 5574 return false; 5575 5576 if (!iwl_mvm_is_csum_supported(mvm)) 5577 return true; 5578 5579 return iwl_mvm_can_hw_csum(skb) == iwl_mvm_can_hw_csum(head); 5580 } 5581 5582 const struct ieee80211_ops iwl_mvm_hw_ops = { 5583 .tx = iwl_mvm_mac_tx, 5584 .wake_tx_queue = iwl_mvm_mac_wake_tx_queue, 5585 .ampdu_action = iwl_mvm_mac_ampdu_action, 5586 .get_antenna = iwl_mvm_op_get_antenna, 5587 .start = iwl_mvm_mac_start, 5588 .reconfig_complete = iwl_mvm_mac_reconfig_complete, 5589 .stop = iwl_mvm_mac_stop, 5590 .add_interface = iwl_mvm_mac_add_interface, 5591 .remove_interface = iwl_mvm_mac_remove_interface, 5592 .config = iwl_mvm_mac_config, 5593 .prepare_multicast = iwl_mvm_prepare_multicast, 5594 .configure_filter = iwl_mvm_configure_filter, 5595 .config_iface_filter = iwl_mvm_config_iface_filter, 5596 .bss_info_changed = iwl_mvm_bss_info_changed, 5597 .hw_scan = iwl_mvm_mac_hw_scan, 5598 .cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan, 5599 .sta_pre_rcu_remove = iwl_mvm_sta_pre_rcu_remove, 5600 .sta_state = iwl_mvm_mac_sta_state, 5601 .sta_notify = iwl_mvm_mac_sta_notify, 5602 .allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames, 5603 .release_buffered_frames = iwl_mvm_mac_release_buffered_frames, 5604 .set_rts_threshold = iwl_mvm_mac_set_rts_threshold, 5605 .sta_rc_update = iwl_mvm_sta_rc_update, 5606 .conf_tx = iwl_mvm_mac_conf_tx, 5607 .mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx, 5608 .mgd_complete_tx = iwl_mvm_mac_mgd_complete_tx, 5609 .mgd_protect_tdls_discover = iwl_mvm_mac_mgd_protect_tdls_discover, 5610 .flush = iwl_mvm_mac_flush, 5611 .sched_scan_start = iwl_mvm_mac_sched_scan_start, 5612 .sched_scan_stop = iwl_mvm_mac_sched_scan_stop, 5613 .set_key = iwl_mvm_mac_set_key, 5614 .update_tkip_key = iwl_mvm_mac_update_tkip_key, 5615 .remain_on_channel = iwl_mvm_roc, 5616 .cancel_remain_on_channel = iwl_mvm_cancel_roc, 5617 .add_chanctx = iwl_mvm_add_chanctx, 5618 .remove_chanctx = iwl_mvm_remove_chanctx, 5619 .change_chanctx = iwl_mvm_change_chanctx, 5620 .assign_vif_chanctx = iwl_mvm_assign_vif_chanctx, 5621 .unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx, 5622 .switch_vif_chanctx = iwl_mvm_switch_vif_chanctx, 5623 5624 .start_ap = iwl_mvm_start_ap, 5625 .stop_ap = iwl_mvm_stop_ap, 5626 .join_ibss = iwl_mvm_start_ibss, 5627 .leave_ibss = iwl_mvm_stop_ibss, 5628 5629 .tx_last_beacon = iwl_mvm_tx_last_beacon, 5630 5631 .set_tim = iwl_mvm_set_tim, 5632 5633 .channel_switch = iwl_mvm_channel_switch, 5634 .pre_channel_switch = iwl_mvm_pre_channel_switch, 5635 .post_channel_switch = iwl_mvm_post_channel_switch, 5636 .abort_channel_switch = iwl_mvm_abort_channel_switch, 5637 .channel_switch_rx_beacon = iwl_mvm_channel_switch_rx_beacon, 5638 5639 .tdls_channel_switch = iwl_mvm_tdls_channel_switch, 5640 .tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch, 5641 .tdls_recv_channel_switch = iwl_mvm_tdls_recv_channel_switch, 5642 5643 .event_callback = iwl_mvm_mac_event_callback, 5644 5645 .sync_rx_queues = iwl_mvm_sync_rx_queues, 5646 5647 CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd) 5648 5649 #ifdef CONFIG_PM_SLEEP 5650 /* look at d3.c */ 5651 
.suspend = iwl_mvm_suspend, 5652 .resume = iwl_mvm_resume, 5653 .set_wakeup = iwl_mvm_set_wakeup, 5654 .set_rekey_data = iwl_mvm_set_rekey_data, 5655 #if IS_ENABLED(CONFIG_IPV6) 5656 .ipv6_addr_change = iwl_mvm_ipv6_addr_change, 5657 #endif 5658 .set_default_unicast_key = iwl_mvm_set_default_unicast_key, 5659 #endif 5660 .get_survey = iwl_mvm_mac_get_survey, 5661 .sta_statistics = iwl_mvm_mac_sta_statistics, 5662 .get_ftm_responder_stats = iwl_mvm_mac_get_ftm_responder_stats, 5663 .start_pmsr = iwl_mvm_start_pmsr, 5664 .abort_pmsr = iwl_mvm_abort_pmsr, 5665 5666 .can_aggregate_in_amsdu = iwl_mvm_mac_can_aggregate, 5667 #ifdef CONFIG_IWLWIFI_DEBUGFS 5668 .sta_add_debugfs = iwl_mvm_sta_add_debugfs, 5669 #endif 5670 }; 5671
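/*
 * Editorial sketch (not driver code): mac80211 calls back into the driver
 * through this ops table; it is presumably passed to ieee80211_alloc_hw()
 * when the op mode is created and becomes live once
 * iwl_mvm_mac_setup_register() calls ieee80211_register_hw(), so e.g. a
 * cfg80211 scheduled-scan request ends up in iwl_mvm_mac_sched_scan_start().
 */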