1 /****************************************************************************** 2 * 3 * This file is provided under a dual BSD/GPLv2 license. When using or 4 * redistributing this file, you may do so under either license. 5 * 6 * GPL LICENSE SUMMARY 7 * 8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 11 * 12 * This program is free software; you can redistribute it and/or modify 13 * it under the terms of version 2 of the GNU General Public License as 14 * published by the Free Software Foundation. 15 * 16 * This program is distributed in the hope that it will be useful, but 17 * WITHOUT ANY WARRANTY; without even the implied warranty of 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 19 * General Public License for more details. 20 * 21 * You should have received a copy of the GNU General Public License 22 * along with this program; if not, write to the Free Software 23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, 24 * USA 25 * 26 * The full GNU General Public License is included in this distribution 27 * in the file called COPYING. 28 * 29 * Contact Information: 30 * Intel Linux Wireless <linuxwifi@intel.com> 31 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 32 * 33 * BSD LICENSE 34 * 35 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 36 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 37 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 38 * All rights reserved. 39 * 40 * Redistribution and use in source and binary forms, with or without 41 * modification, are permitted provided that the following conditions 42 * are met: 43 * 44 * * Redistributions of source code must retain the above copyright 45 * notice, this list of conditions and the following disclaimer. 46 * * Redistributions in binary form must reproduce the above copyright 47 * notice, this list of conditions and the following disclaimer in 48 * the documentation and/or other materials provided with the 49 * distribution. 50 * * Neither the name Intel Corporation nor the names of its 51 * contributors may be used to endorse or promote products derived 52 * from this software without specific prior written permission. 53 * 54 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 55 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 56 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 57 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 58 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 59 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 60 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 64 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
65 * 66 *****************************************************************************/ 67 #include <linux/kernel.h> 68 #include <linux/slab.h> 69 #include <linux/skbuff.h> 70 #include <linux/netdevice.h> 71 #include <linux/etherdevice.h> 72 #include <linux/ip.h> 73 #include <linux/if_arp.h> 74 #include <linux/time.h> 75 #include <net/mac80211.h> 76 #include <net/ieee80211_radiotap.h> 77 #include <net/tcp.h> 78 79 #include "iwl-op-mode.h" 80 #include "iwl-io.h" 81 #include "mvm.h" 82 #include "sta.h" 83 #include "time-event.h" 84 #include "iwl-eeprom-parse.h" 85 #include "iwl-phy-db.h" 86 #include "testmode.h" 87 #include "fw/error-dump.h" 88 #include "iwl-prph.h" 89 #include "iwl-nvm-parse.h" 90 91 static const struct ieee80211_iface_limit iwl_mvm_limits[] = { 92 { 93 .max = 1, 94 .types = BIT(NL80211_IFTYPE_STATION), 95 }, 96 { 97 .max = 1, 98 .types = BIT(NL80211_IFTYPE_AP) | 99 BIT(NL80211_IFTYPE_P2P_CLIENT) | 100 BIT(NL80211_IFTYPE_P2P_GO), 101 }, 102 { 103 .max = 1, 104 .types = BIT(NL80211_IFTYPE_P2P_DEVICE), 105 }, 106 }; 107 108 static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = { 109 { 110 .num_different_channels = 2, 111 .max_interfaces = 3, 112 .limits = iwl_mvm_limits, 113 .n_limits = ARRAY_SIZE(iwl_mvm_limits), 114 }, 115 }; 116 117 #ifdef CONFIG_PM_SLEEP 118 static const struct nl80211_wowlan_tcp_data_token_feature 119 iwl_mvm_wowlan_tcp_token_feature = { 120 .min_len = 0, 121 .max_len = 255, 122 .bufsize = IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS, 123 }; 124 125 static const struct wiphy_wowlan_tcp_support iwl_mvm_wowlan_tcp_support = { 126 .tok = &iwl_mvm_wowlan_tcp_token_feature, 127 .data_payload_max = IWL_WOWLAN_TCP_MAX_PACKET_LEN - 128 sizeof(struct ethhdr) - 129 sizeof(struct iphdr) - 130 sizeof(struct tcphdr), 131 .data_interval_max = 65535, /* __le16 in API */ 132 .wake_payload_max = IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN - 133 sizeof(struct ethhdr) - 134 sizeof(struct iphdr) - 135 sizeof(struct tcphdr), 136 .seq = true, 137 }; 138 #endif 139 140 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING 141 /* 142 * Use the reserved field to indicate magic values. 143 * these values will only be used internally by the driver, 144 * and won't make it to the fw (reserved will be 0). 145 * BC_FILTER_MAGIC_IP - configure the val of this attribute to 146 * be the vif's ip address. in case there is not a single 147 * ip address (0, or more than 1), this attribute will 148 * be skipped. 
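 *	(the actual substitution is done by iwl_mvm_set_bcast_filter() below,
 *	when the filter command is built)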
149 * BC_FILTER_MAGIC_MAC - set the val of this attribute to 150 * the LSB bytes of the vif's mac address 151 */ 152 enum { 153 BC_FILTER_MAGIC_NONE = 0, 154 BC_FILTER_MAGIC_IP, 155 BC_FILTER_MAGIC_MAC, 156 }; 157 158 static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = { 159 { 160 /* arp */ 161 .discard = 0, 162 .frame_type = BCAST_FILTER_FRAME_TYPE_ALL, 163 .attrs = { 164 { 165 /* frame type - arp, hw type - ethernet */ 166 .offset_type = 167 BCAST_FILTER_OFFSET_PAYLOAD_START, 168 .offset = sizeof(rfc1042_header), 169 .val = cpu_to_be32(0x08060001), 170 .mask = cpu_to_be32(0xffffffff), 171 }, 172 { 173 /* arp dest ip */ 174 .offset_type = 175 BCAST_FILTER_OFFSET_PAYLOAD_START, 176 .offset = sizeof(rfc1042_header) + 2 + 177 sizeof(struct arphdr) + 178 ETH_ALEN + sizeof(__be32) + 179 ETH_ALEN, 180 .mask = cpu_to_be32(0xffffffff), 181 /* mark it as special field */ 182 .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_IP), 183 }, 184 }, 185 }, 186 { 187 /* dhcp offer bcast */ 188 .discard = 0, 189 .frame_type = BCAST_FILTER_FRAME_TYPE_IPV4, 190 .attrs = { 191 { 192 /* udp dest port - 68 (bootp client)*/ 193 .offset_type = BCAST_FILTER_OFFSET_IP_END, 194 .offset = offsetof(struct udphdr, dest), 195 .val = cpu_to_be32(0x00440000), 196 .mask = cpu_to_be32(0xffff0000), 197 }, 198 { 199 /* dhcp - lsb bytes of client hw address */ 200 .offset_type = BCAST_FILTER_OFFSET_IP_END, 201 .offset = 38, 202 .mask = cpu_to_be32(0xffffffff), 203 /* mark it as special field */ 204 .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_MAC), 205 }, 206 }, 207 }, 208 /* last filter must be empty */ 209 {}, 210 }; 211 #endif 212 213 void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type) 214 { 215 if (!iwl_mvm_is_d0i3_supported(mvm)) 216 return; 217 218 IWL_DEBUG_RPM(mvm, "Take mvm reference - type %d\n", ref_type); 219 spin_lock_bh(&mvm->refs_lock); 220 mvm->refs[ref_type]++; 221 spin_unlock_bh(&mvm->refs_lock); 222 iwl_trans_ref(mvm->trans); 223 } 224 225 void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type) 226 { 227 if (!iwl_mvm_is_d0i3_supported(mvm)) 228 return; 229 230 IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type); 231 spin_lock_bh(&mvm->refs_lock); 232 if (WARN_ON(!mvm->refs[ref_type])) { 233 spin_unlock_bh(&mvm->refs_lock); 234 return; 235 } 236 mvm->refs[ref_type]--; 237 spin_unlock_bh(&mvm->refs_lock); 238 iwl_trans_unref(mvm->trans); 239 } 240 241 static void iwl_mvm_unref_all_except(struct iwl_mvm *mvm, 242 enum iwl_mvm_ref_type except_ref) 243 { 244 int i, j; 245 246 if (!iwl_mvm_is_d0i3_supported(mvm)) 247 return; 248 249 spin_lock_bh(&mvm->refs_lock); 250 for (i = 0; i < IWL_MVM_REF_COUNT; i++) { 251 if (except_ref == i || !mvm->refs[i]) 252 continue; 253 254 IWL_DEBUG_RPM(mvm, "Cleanup: remove mvm ref type %d (%d)\n", 255 i, mvm->refs[i]); 256 for (j = 0; j < mvm->refs[i]; j++) 257 iwl_trans_unref(mvm->trans); 258 mvm->refs[i] = 0; 259 } 260 spin_unlock_bh(&mvm->refs_lock); 261 } 262 263 bool iwl_mvm_ref_taken(struct iwl_mvm *mvm) 264 { 265 int i; 266 bool taken = false; 267 268 if (!iwl_mvm_is_d0i3_supported(mvm)) 269 return true; 270 271 spin_lock_bh(&mvm->refs_lock); 272 for (i = 0; i < IWL_MVM_REF_COUNT; i++) { 273 if (mvm->refs[i]) { 274 taken = true; 275 break; 276 } 277 } 278 spin_unlock_bh(&mvm->refs_lock); 279 280 return taken; 281 } 282 283 int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type) 284 { 285 iwl_mvm_ref(mvm, ref_type); 286 287 if (!wait_event_timeout(mvm->d0i3_exit_waitq, 288 
!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status), 289 HZ)) { 290 WARN_ON_ONCE(1); 291 iwl_mvm_unref(mvm, ref_type); 292 return -EIO; 293 } 294 295 return 0; 296 } 297 298 static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm) 299 { 300 int i; 301 302 memset(mvm->phy_ctxts, 0, sizeof(mvm->phy_ctxts)); 303 for (i = 0; i < NUM_PHY_CTX; i++) { 304 mvm->phy_ctxts[i].id = i; 305 mvm->phy_ctxts[i].ref = 0; 306 } 307 } 308 309 struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, 310 const char *alpha2, 311 enum iwl_mcc_source src_id, 312 bool *changed) 313 { 314 struct ieee80211_regdomain *regd = NULL; 315 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); 316 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 317 struct iwl_mcc_update_resp *resp; 318 319 IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2); 320 321 lockdep_assert_held(&mvm->mutex); 322 323 resp = iwl_mvm_update_mcc(mvm, alpha2, src_id); 324 if (IS_ERR_OR_NULL(resp)) { 325 IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n", 326 PTR_ERR_OR_ZERO(resp)); 327 goto out; 328 } 329 330 if (changed) 331 *changed = (resp->status == MCC_RESP_NEW_CHAN_PROFILE); 332 333 regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg, 334 __le32_to_cpu(resp->n_channels), 335 resp->channels, 336 __le16_to_cpu(resp->mcc)); 337 /* Store the return source id */ 338 src_id = resp->source_id; 339 kfree(resp); 340 if (IS_ERR_OR_NULL(regd)) { 341 IWL_DEBUG_LAR(mvm, "Could not get parse update from FW %d\n", 342 PTR_ERR_OR_ZERO(regd)); 343 goto out; 344 } 345 346 IWL_DEBUG_LAR(mvm, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n", 347 regd->alpha2, regd->alpha2[0], regd->alpha2[1], src_id); 348 mvm->lar_regdom_set = true; 349 mvm->mcc_src = src_id; 350 351 out: 352 return regd; 353 } 354 355 void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm) 356 { 357 bool changed; 358 struct ieee80211_regdomain *regd; 359 360 if (!iwl_mvm_is_lar_supported(mvm)) 361 return; 362 363 regd = iwl_mvm_get_current_regdomain(mvm, &changed); 364 if (!IS_ERR_OR_NULL(regd)) { 365 /* only update the regulatory core if changed */ 366 if (changed) 367 regulatory_set_wiphy_regd(mvm->hw->wiphy, regd); 368 369 kfree(regd); 370 } 371 } 372 373 struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm, 374 bool *changed) 375 { 376 return iwl_mvm_get_regdomain(mvm->hw->wiphy, "ZZ", 377 iwl_mvm_is_wifi_mcc_supported(mvm) ? 
378 MCC_SOURCE_GET_CURRENT : 379 MCC_SOURCE_OLD_FW, changed); 380 } 381 382 int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm) 383 { 384 enum iwl_mcc_source used_src; 385 struct ieee80211_regdomain *regd; 386 int ret; 387 bool changed; 388 const struct ieee80211_regdomain *r = 389 rtnl_dereference(mvm->hw->wiphy->regd); 390 391 if (!r) 392 return -ENOENT; 393 394 /* save the last source in case we overwrite it below */ 395 used_src = mvm->mcc_src; 396 if (iwl_mvm_is_wifi_mcc_supported(mvm)) { 397 /* Notify the firmware we support wifi location updates */ 398 regd = iwl_mvm_get_current_regdomain(mvm, NULL); 399 if (!IS_ERR_OR_NULL(regd)) 400 kfree(regd); 401 } 402 403 /* Now set our last stored MCC and source */ 404 regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, r->alpha2, used_src, 405 &changed); 406 if (IS_ERR_OR_NULL(regd)) 407 return -EIO; 408 409 /* update cfg80211 if the regdomain was changed */ 410 if (changed) 411 ret = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd); 412 else 413 ret = 0; 414 415 kfree(regd); 416 return ret; 417 } 418 419 int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) 420 { 421 struct ieee80211_hw *hw = mvm->hw; 422 int num_mac, ret, i; 423 static const u32 mvm_ciphers[] = { 424 WLAN_CIPHER_SUITE_WEP40, 425 WLAN_CIPHER_SUITE_WEP104, 426 WLAN_CIPHER_SUITE_TKIP, 427 WLAN_CIPHER_SUITE_CCMP, 428 }; 429 430 /* Tell mac80211 our characteristics */ 431 ieee80211_hw_set(hw, SIGNAL_DBM); 432 ieee80211_hw_set(hw, SPECTRUM_MGMT); 433 ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS); 434 ieee80211_hw_set(hw, QUEUE_CONTROL); 435 ieee80211_hw_set(hw, WANT_MONITOR_VIF); 436 ieee80211_hw_set(hw, SUPPORTS_PS); 437 ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS); 438 ieee80211_hw_set(hw, AMPDU_AGGREGATION); 439 ieee80211_hw_set(hw, TIMING_BEACON_ONLY); 440 ieee80211_hw_set(hw, CONNECTION_MONITOR); 441 ieee80211_hw_set(hw, CHANCTX_STA_CSA); 442 ieee80211_hw_set(hw, SUPPORT_FAST_XMIT); 443 ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS); 444 ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU); 445 ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR); 446 if (iwl_mvm_has_new_rx_api(mvm)) 447 ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER); 448 449 if (fw_has_capa(&mvm->fw->ucode_capa, 450 IWL_UCODE_TLV_CAPA_STA_PM_NOTIF)) { 451 ieee80211_hw_set(hw, AP_LINK_PS); 452 } else if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) { 453 /* 454 * we absolutely need this for the new TX API since that comes 455 * with many more queues than the current code can deal with 456 * for station powersave 457 */ 458 return -EINVAL; 459 } 460 461 if (mvm->trans->num_rx_queues > 1) 462 ieee80211_hw_set(hw, USES_RSS); 463 464 if (mvm->trans->max_skb_frags) 465 hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG; 466 467 hw->queues = IEEE80211_MAX_QUEUES; 468 hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE; 469 hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC | 470 IEEE80211_RADIOTAP_MCS_HAVE_STBC; 471 hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC | 472 IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED; 473 474 hw->radiotap_timestamp.units_pos = 475 IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US | 476 IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ; 477 /* this is the case for CCK frames, it's better (only 8) for OFDM */ 478 hw->radiotap_timestamp.accuracy = 22; 479 480 hw->rate_control_algorithm = "iwl-mvm-rs"; 481 hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES; 482 hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP; 483 484 BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 6); 485 memcpy(mvm->ciphers, mvm_ciphers, 
sizeof(mvm_ciphers)); 486 hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers); 487 hw->wiphy->cipher_suites = mvm->ciphers; 488 489 if (iwl_mvm_has_new_rx_api(mvm)) { 490 mvm->ciphers[hw->wiphy->n_cipher_suites] = 491 WLAN_CIPHER_SUITE_GCMP; 492 hw->wiphy->n_cipher_suites++; 493 mvm->ciphers[hw->wiphy->n_cipher_suites] = 494 WLAN_CIPHER_SUITE_GCMP_256; 495 hw->wiphy->n_cipher_suites++; 496 } 497 498 /* Enable 11w if software crypto is not enabled (as the 499 * firmware will interpret some mgmt packets, so enabling it 500 * with software crypto isn't safe). 501 */ 502 if (!iwlwifi_mod_params.swcrypto) { 503 ieee80211_hw_set(hw, MFP_CAPABLE); 504 mvm->ciphers[hw->wiphy->n_cipher_suites] = 505 WLAN_CIPHER_SUITE_AES_CMAC; 506 hw->wiphy->n_cipher_suites++; 507 if (iwl_mvm_has_new_rx_api(mvm)) { 508 mvm->ciphers[hw->wiphy->n_cipher_suites] = 509 WLAN_CIPHER_SUITE_BIP_GMAC_128; 510 hw->wiphy->n_cipher_suites++; 511 mvm->ciphers[hw->wiphy->n_cipher_suites] = 512 WLAN_CIPHER_SUITE_BIP_GMAC_256; 513 hw->wiphy->n_cipher_suites++; 514 } 515 } 516 517 /* currently FW API supports only one optional cipher scheme */ 518 if (mvm->fw->cs[0].cipher) { 519 const struct iwl_fw_cipher_scheme *fwcs = &mvm->fw->cs[0]; 520 struct ieee80211_cipher_scheme *cs = &mvm->cs[0]; 521 522 mvm->hw->n_cipher_schemes = 1; 523 524 cs->cipher = le32_to_cpu(fwcs->cipher); 525 cs->iftype = BIT(NL80211_IFTYPE_STATION); 526 cs->hdr_len = fwcs->hdr_len; 527 cs->pn_len = fwcs->pn_len; 528 cs->pn_off = fwcs->pn_off; 529 cs->key_idx_off = fwcs->key_idx_off; 530 cs->key_idx_mask = fwcs->key_idx_mask; 531 cs->key_idx_shift = fwcs->key_idx_shift; 532 cs->mic_len = fwcs->mic_len; 533 534 mvm->hw->cipher_schemes = mvm->cs; 535 mvm->ciphers[hw->wiphy->n_cipher_suites] = cs->cipher; 536 hw->wiphy->n_cipher_suites++; 537 } 538 539 ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS); 540 hw->wiphy->features |= 541 NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR | 542 NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR | 543 NL80211_FEATURE_ND_RANDOM_MAC_ADDR; 544 545 hw->sta_data_size = sizeof(struct iwl_mvm_sta); 546 hw->vif_data_size = sizeof(struct iwl_mvm_vif); 547 hw->chanctx_data_size = sizeof(u16); 548 549 hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | 550 BIT(NL80211_IFTYPE_P2P_CLIENT) | 551 BIT(NL80211_IFTYPE_AP) | 552 BIT(NL80211_IFTYPE_P2P_GO) | 553 BIT(NL80211_IFTYPE_P2P_DEVICE) | 554 BIT(NL80211_IFTYPE_ADHOC); 555 556 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; 557 hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR; 558 if (iwl_mvm_is_lar_supported(mvm)) 559 hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED; 560 else 561 hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG | 562 REGULATORY_DISABLE_BEACON_HINTS; 563 564 hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; 565 hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; 566 567 hw->wiphy->iface_combinations = iwl_mvm_iface_combinations; 568 hw->wiphy->n_iface_combinations = 569 ARRAY_SIZE(iwl_mvm_iface_combinations); 570 571 hw->wiphy->max_remain_on_channel_duration = 10000; 572 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL; 573 /* we can compensate an offset of up to 3 channels = 15 MHz */ 574 hw->wiphy->max_adj_channel_rssi_comp = 3 * 5; 575 576 /* Extract MAC address */ 577 memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN); 578 hw->wiphy->addresses = mvm->addresses; 579 hw->wiphy->n_addresses = 1; 580 581 /* Extract additional MAC addresses if available */ 582 num_mac = (mvm->nvm_data->n_hw_addrs > 1) ? 
583 min(IWL_MVM_MAX_ADDRESSES, mvm->nvm_data->n_hw_addrs) : 1; 584 585 for (i = 1; i < num_mac; i++) { 586 memcpy(mvm->addresses[i].addr, mvm->addresses[i-1].addr, 587 ETH_ALEN); 588 mvm->addresses[i].addr[5]++; 589 hw->wiphy->n_addresses++; 590 } 591 592 iwl_mvm_reset_phy_ctxts(mvm); 593 594 hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm); 595 596 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX; 597 598 BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK); 599 BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) || 600 IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK)); 601 602 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) 603 mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS; 604 else 605 mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS; 606 607 if (mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels) 608 hw->wiphy->bands[NL80211_BAND_2GHZ] = 609 &mvm->nvm_data->bands[NL80211_BAND_2GHZ]; 610 if (mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels) { 611 hw->wiphy->bands[NL80211_BAND_5GHZ] = 612 &mvm->nvm_data->bands[NL80211_BAND_5GHZ]; 613 614 if (fw_has_capa(&mvm->fw->ucode_capa, 615 IWL_UCODE_TLV_CAPA_BEAMFORMER) && 616 fw_has_api(&mvm->fw->ucode_capa, 617 IWL_UCODE_TLV_API_LQ_SS_PARAMS)) 618 hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap.cap |= 619 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE; 620 } 621 622 hw->wiphy->hw_version = mvm->trans->hw_id; 623 624 if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM) 625 hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; 626 else 627 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; 628 629 hw->wiphy->max_sched_scan_reqs = 1; 630 hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX; 631 hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES; 632 /* we create the 802.11 header and zero length SSID IE. */ 633 hw->wiphy->max_sched_scan_ie_len = 634 SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2; 635 hw->wiphy->max_sched_scan_plans = IWL_MAX_SCHED_SCAN_PLANS; 636 hw->wiphy->max_sched_scan_plan_interval = U16_MAX; 637 638 /* 639 * the firmware uses u8 for num of iterations, but 0xff is saved for 640 * infinite loop, so the maximum number of iterations is actually 254. 
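 * Advertising 254 keeps mac80211 from ever requesting a value the
 * firmware would read as infinite.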
641 */ 642 hw->wiphy->max_sched_scan_plan_iterations = 254; 643 644 hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN | 645 NL80211_FEATURE_LOW_PRIORITY_SCAN | 646 NL80211_FEATURE_P2P_GO_OPPPS | 647 NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE | 648 NL80211_FEATURE_DYNAMIC_SMPS | 649 NL80211_FEATURE_STATIC_SMPS | 650 NL80211_FEATURE_SUPPORTS_WMM_ADMISSION; 651 652 if (fw_has_capa(&mvm->fw->ucode_capa, 653 IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT)) 654 hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION; 655 if (fw_has_capa(&mvm->fw->ucode_capa, 656 IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT)) 657 hw->wiphy->features |= NL80211_FEATURE_QUIET; 658 659 if (fw_has_capa(&mvm->fw->ucode_capa, 660 IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) 661 hw->wiphy->features |= 662 NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES; 663 664 if (fw_has_capa(&mvm->fw->ucode_capa, 665 IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)) 666 hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES; 667 668 if (fw_has_api(&mvm->fw->ucode_capa, 669 IWL_UCODE_TLV_API_SCAN_TSF_REPORT)) { 670 wiphy_ext_feature_set(hw->wiphy, 671 NL80211_EXT_FEATURE_SCAN_START_TIME); 672 wiphy_ext_feature_set(hw->wiphy, 673 NL80211_EXT_FEATURE_BSS_PARENT_TSF); 674 wiphy_ext_feature_set(hw->wiphy, 675 NL80211_EXT_FEATURE_SET_SCAN_DWELL); 676 } 677 678 mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; 679 680 #ifdef CONFIG_PM_SLEEP 681 if (iwl_mvm_is_d0i3_supported(mvm) && 682 device_can_wakeup(mvm->trans->dev)) { 683 mvm->wowlan.flags = WIPHY_WOWLAN_ANY; 684 hw->wiphy->wowlan = &mvm->wowlan; 685 } 686 687 if (mvm->fw->img[IWL_UCODE_WOWLAN].num_sec && 688 mvm->trans->ops->d3_suspend && 689 mvm->trans->ops->d3_resume && 690 device_can_wakeup(mvm->trans->dev)) { 691 mvm->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT | 692 WIPHY_WOWLAN_DISCONNECT | 693 WIPHY_WOWLAN_EAP_IDENTITY_REQ | 694 WIPHY_WOWLAN_RFKILL_RELEASE | 695 WIPHY_WOWLAN_NET_DETECT; 696 if (!iwlwifi_mod_params.swcrypto) 697 mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | 698 WIPHY_WOWLAN_GTK_REKEY_FAILURE | 699 WIPHY_WOWLAN_4WAY_HANDSHAKE; 700 701 mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS; 702 mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN; 703 mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN; 704 mvm->wowlan.max_nd_match_sets = IWL_SCAN_MAX_PROFILES; 705 mvm->wowlan.tcp = &iwl_mvm_wowlan_tcp_support; 706 hw->wiphy->wowlan = &mvm->wowlan; 707 } 708 #endif 709 710 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING 711 /* assign default bcast filtering configuration */ 712 mvm->bcast_filters = iwl_mvm_default_bcast_filters; 713 #endif 714 715 ret = iwl_mvm_leds_init(mvm); 716 if (ret) 717 return ret; 718 719 if (fw_has_capa(&mvm->fw->ucode_capa, 720 IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) { 721 IWL_DEBUG_TDLS(mvm, "TDLS supported\n"); 722 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; 723 ieee80211_hw_set(hw, TDLS_WIDER_BW); 724 } 725 726 if (fw_has_capa(&mvm->fw->ucode_capa, 727 IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) { 728 IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n"); 729 hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH; 730 } 731 732 hw->netdev_features |= mvm->cfg->features; 733 if (!iwl_mvm_is_csum_supported(mvm)) { 734 hw->netdev_features &= ~(IWL_TX_CSUM_NETIF_FLAGS | 735 NETIF_F_RXCSUM); 736 /* We may support SW TX CSUM */ 737 if (IWL_MVM_SW_TX_CSUM_OFFLOAD) 738 hw->netdev_features |= IWL_TX_CSUM_NETIF_FLAGS; 739 } 740 741 ret = ieee80211_register_hw(mvm->hw); 742 if (ret) 743 iwl_mvm_leds_exit(mvm); 744 mvm->init_status |= 
IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE; 745 746 if (mvm->cfg->vht_mu_mimo_supported) 747 wiphy_ext_feature_set(hw->wiphy, 748 NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER); 749 750 return ret; 751 } 752 753 static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm, 754 struct ieee80211_sta *sta, 755 struct sk_buff *skb) 756 { 757 struct iwl_mvm_sta *mvmsta; 758 bool defer = false; 759 760 /* 761 * double check the IN_D0I3 flag both before and after 762 * taking the spinlock, in order to prevent taking 763 * the spinlock when not needed. 764 */ 765 if (likely(!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))) 766 return false; 767 768 spin_lock(&mvm->d0i3_tx_lock); 769 /* 770 * testing the flag again ensures the skb dequeue 771 * loop (on d0i3 exit) hasn't run yet. 772 */ 773 if (!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) 774 goto out; 775 776 mvmsta = iwl_mvm_sta_from_mac80211(sta); 777 if (mvmsta->sta_id == IWL_MVM_INVALID_STA || 778 mvmsta->sta_id != mvm->d0i3_ap_sta_id) 779 goto out; 780 781 __skb_queue_tail(&mvm->d0i3_tx, skb); 782 ieee80211_stop_queues(mvm->hw); 783 784 /* trigger wakeup */ 785 iwl_mvm_ref(mvm, IWL_MVM_REF_TX); 786 iwl_mvm_unref(mvm, IWL_MVM_REF_TX); 787 788 defer = true; 789 out: 790 spin_unlock(&mvm->d0i3_tx_lock); 791 return defer; 792 } 793 794 static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, 795 struct ieee80211_tx_control *control, 796 struct sk_buff *skb) 797 { 798 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 799 struct ieee80211_sta *sta = control->sta; 800 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 801 struct ieee80211_hdr *hdr = (void *)skb->data; 802 803 if (iwl_mvm_is_radio_killed(mvm)) { 804 IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n"); 805 goto drop; 806 } 807 808 if (info->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE && 809 !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) && 810 !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) 811 goto drop; 812 813 /* treat non-bufferable MMPDUs on AP interfaces as broadcast */ 814 if ((info->control.vif->type == NL80211_IFTYPE_AP || 815 info->control.vif->type == NL80211_IFTYPE_ADHOC) && 816 ieee80211_is_mgmt(hdr->frame_control) && 817 !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) 818 sta = NULL; 819 820 if (sta) { 821 if (iwl_mvm_defer_tx(mvm, sta, skb)) 822 return; 823 if (iwl_mvm_tx_skb(mvm, skb, sta)) 824 goto drop; 825 return; 826 } 827 828 if (iwl_mvm_tx_skb_non_sta(mvm, skb)) 829 goto drop; 830 return; 831 drop: 832 ieee80211_free_txskb(hw, skb); 833 } 834 835 static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg) 836 { 837 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) 838 return false; 839 return true; 840 } 841 842 static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg) 843 { 844 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) 845 return false; 846 if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG) 847 return true; 848 849 /* enabled by default */ 850 return true; 851 } 852 853 #define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) 
\ 854 do { \ 855 if (!(le16_to_cpu(_tid_bm) & BIT(_tid))) \ 856 break; \ 857 iwl_fw_dbg_collect_trig(&(_mvm)->fwrt, _trig, _fmt); \ 858 } while (0) 859 860 static void 861 iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 862 struct ieee80211_sta *sta, u16 tid, u16 rx_ba_ssn, 863 enum ieee80211_ampdu_mlme_action action) 864 { 865 struct iwl_fw_dbg_trigger_tlv *trig; 866 struct iwl_fw_dbg_trigger_ba *ba_trig; 867 868 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA)) 869 return; 870 871 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA); 872 ba_trig = (void *)trig->data; 873 874 if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, 875 ieee80211_vif_to_wdev(vif), trig)) 876 return; 877 878 switch (action) { 879 case IEEE80211_AMPDU_TX_OPERATIONAL: { 880 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 881 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; 882 883 CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_start, tid, 884 "TX AGG START: MAC %pM tid %d ssn %d\n", 885 sta->addr, tid, tid_data->ssn); 886 break; 887 } 888 case IEEE80211_AMPDU_TX_STOP_CONT: 889 CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_stop, tid, 890 "TX AGG STOP: MAC %pM tid %d\n", 891 sta->addr, tid); 892 break; 893 case IEEE80211_AMPDU_RX_START: 894 CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_start, tid, 895 "RX AGG START: MAC %pM tid %d ssn %d\n", 896 sta->addr, tid, rx_ba_ssn); 897 break; 898 case IEEE80211_AMPDU_RX_STOP: 899 CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_stop, tid, 900 "RX AGG STOP: MAC %pM tid %d\n", 901 sta->addr, tid); 902 break; 903 default: 904 break; 905 } 906 } 907 908 static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, 909 struct ieee80211_vif *vif, 910 struct ieee80211_ampdu_params *params) 911 { 912 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 913 int ret; 914 bool tx_agg_ref = false; 915 struct ieee80211_sta *sta = params->sta; 916 enum ieee80211_ampdu_mlme_action action = params->action; 917 u16 tid = params->tid; 918 u16 *ssn = ¶ms->ssn; 919 u8 buf_size = params->buf_size; 920 bool amsdu = params->amsdu; 921 u16 timeout = params->timeout; 922 923 IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n", 924 sta->addr, tid, action); 925 926 if (!(mvm->nvm_data->sku_cap_11n_enable)) 927 return -EACCES; 928 929 /* return from D0i3 before starting a new Tx aggregation */ 930 switch (action) { 931 case IEEE80211_AMPDU_TX_START: 932 case IEEE80211_AMPDU_TX_STOP_CONT: 933 case IEEE80211_AMPDU_TX_STOP_FLUSH: 934 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: 935 case IEEE80211_AMPDU_TX_OPERATIONAL: 936 /* 937 * for tx start, wait synchronously until D0i3 exit to 938 * get the correct sequence number for the tid. 939 * additionally, some other ampdu actions use direct 940 * target access, which is not handled automatically 941 * by the trans layer (unlike commands), so wait for 942 * d0i3 exit in these cases as well. 
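	 * iwl_mvm_ref_sync() takes the reference and then blocks (for up
	 * to a second) until the d0i3 exit has completed.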
943 */ 944 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_TX_AGG); 945 if (ret) 946 return ret; 947 948 tx_agg_ref = true; 949 break; 950 default: 951 break; 952 } 953 954 mutex_lock(&mvm->mutex); 955 956 switch (action) { 957 case IEEE80211_AMPDU_RX_START: 958 if (!iwl_enable_rx_ampdu(mvm->cfg)) { 959 ret = -EINVAL; 960 break; 961 } 962 ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true, buf_size, 963 timeout); 964 break; 965 case IEEE80211_AMPDU_RX_STOP: 966 ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, buf_size, 967 timeout); 968 break; 969 case IEEE80211_AMPDU_TX_START: 970 if (!iwl_enable_tx_ampdu(mvm->cfg)) { 971 ret = -EINVAL; 972 break; 973 } 974 ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn); 975 break; 976 case IEEE80211_AMPDU_TX_STOP_CONT: 977 ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid); 978 break; 979 case IEEE80211_AMPDU_TX_STOP_FLUSH: 980 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: 981 ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid); 982 break; 983 case IEEE80211_AMPDU_TX_OPERATIONAL: 984 ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid, 985 buf_size, amsdu); 986 break; 987 default: 988 WARN_ON_ONCE(1); 989 ret = -EINVAL; 990 break; 991 } 992 993 if (!ret) { 994 u16 rx_ba_ssn = 0; 995 996 if (action == IEEE80211_AMPDU_RX_START) 997 rx_ba_ssn = *ssn; 998 999 iwl_mvm_ampdu_check_trigger(mvm, vif, sta, tid, 1000 rx_ba_ssn, action); 1001 } 1002 mutex_unlock(&mvm->mutex); 1003 1004 /* 1005 * If the tid is marked as started, we won't use it for offloaded 1006 * traffic on the next D0i3 entry. It's safe to unref. 1007 */ 1008 if (tx_agg_ref) 1009 iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG); 1010 1011 return ret; 1012 } 1013 1014 static void iwl_mvm_cleanup_iterator(void *data, u8 *mac, 1015 struct ieee80211_vif *vif) 1016 { 1017 struct iwl_mvm *mvm = data; 1018 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1019 1020 mvmvif->uploaded = false; 1021 mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; 1022 1023 spin_lock_bh(&mvm->time_event_lock); 1024 iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data); 1025 spin_unlock_bh(&mvm->time_event_lock); 1026 1027 mvmvif->phy_ctxt = NULL; 1028 memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data)); 1029 } 1030 1031 static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) 1032 { 1033 /* clear the D3 reconfig, we only need it to avoid dumping a 1034 * firmware coredump on reconfiguration, we shouldn't do that 1035 * on D3->D0 transition 1036 */ 1037 if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status)) { 1038 mvm->fwrt.dump.desc = &iwl_dump_desc_assert; 1039 iwl_fw_error_dump(&mvm->fwrt); 1040 } 1041 1042 /* cleanup all stale references (scan, roc), but keep the 1043 * ucode_down ref until reconfig is complete 1044 */ 1045 iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN); 1046 1047 iwl_mvm_stop_device(mvm); 1048 1049 mvm->scan_status = 0; 1050 mvm->ps_disabled = false; 1051 mvm->calibrating = false; 1052 1053 /* just in case one was running */ 1054 iwl_mvm_cleanup_roc_te(mvm); 1055 ieee80211_remain_on_channel_expired(mvm->hw); 1056 1057 /* 1058 * cleanup all interfaces, even inactive ones, as some might have 1059 * gone down during the HW restart 1060 */ 1061 ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm); 1062 1063 mvm->p2p_device_vif = NULL; 1064 mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA; 1065 1066 iwl_mvm_reset_phy_ctxts(mvm); 1067 memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table)); 1068 memset(mvm->sta_deferred_frames, 0, sizeof(mvm->sta_deferred_frames)); 1069 
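	/* drop the BT coex state cached from before the restart */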
memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif)); 1070 memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd)); 1071 1072 ieee80211_wake_queues(mvm->hw); 1073 1074 /* clear any stale d0i3 state */ 1075 clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status); 1076 1077 mvm->vif_count = 0; 1078 mvm->rx_ba_sessions = 0; 1079 mvm->fwrt.dump.conf = FW_DBG_INVALID; 1080 1081 /* keep statistics ticking */ 1082 iwl_mvm_accu_radio_stats(mvm); 1083 } 1084 1085 int __iwl_mvm_mac_start(struct iwl_mvm *mvm) 1086 { 1087 int ret; 1088 1089 lockdep_assert_held(&mvm->mutex); 1090 1091 if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) { 1092 /* 1093 * Now convert the HW_RESTART_REQUESTED flag to IN_HW_RESTART 1094 * so later code will - from now on - see that we're doing it. 1095 */ 1096 set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); 1097 clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status); 1098 /* Clean up some internal and mac80211 state on restart */ 1099 iwl_mvm_restart_cleanup(mvm); 1100 } else { 1101 /* Hold the reference to prevent runtime suspend while 1102 * the start procedure runs. It's a bit confusing 1103 * that the UCODE_DOWN reference is taken, but it just 1104 * means "UCODE is not UP yet". ( TODO: rename this 1105 * reference). 1106 */ 1107 iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); 1108 } 1109 ret = iwl_mvm_up(mvm); 1110 1111 if (ret && test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { 1112 /* Something went wrong - we need to finish some cleanup 1113 * that normally iwl_mvm_mac_restart_complete() below 1114 * would do. 1115 */ 1116 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); 1117 iwl_mvm_d0i3_enable_tx(mvm, NULL); 1118 } 1119 1120 return ret; 1121 } 1122 1123 static int iwl_mvm_mac_start(struct ieee80211_hw *hw) 1124 { 1125 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1126 int ret; 1127 1128 /* Some hw restart cleanups must not hold the mutex */ 1129 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { 1130 /* 1131 * Make sure we are out of d0i3. This is needed 1132 * to make sure the reference accounting is correct 1133 * (and there is no stale d0i3_exit_work). 1134 */ 1135 wait_event_timeout(mvm->d0i3_exit_waitq, 1136 !test_bit(IWL_MVM_STATUS_IN_D0I3, 1137 &mvm->status), 1138 HZ); 1139 } 1140 1141 mutex_lock(&mvm->mutex); 1142 ret = __iwl_mvm_mac_start(mvm); 1143 mutex_unlock(&mvm->mutex); 1144 1145 return ret; 1146 } 1147 1148 static void iwl_mvm_restart_complete(struct iwl_mvm *mvm) 1149 { 1150 int ret; 1151 1152 mutex_lock(&mvm->mutex); 1153 1154 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); 1155 iwl_mvm_d0i3_enable_tx(mvm, NULL); 1156 ret = iwl_mvm_update_quotas(mvm, true, NULL); 1157 if (ret) 1158 IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n", 1159 ret); 1160 1161 /* allow transport/FW low power modes */ 1162 iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN); 1163 1164 /* 1165 * If we have TDLS peers, remove them. We don't know the last seqno/PN 1166 * of packets the FW sent out, so we must reconnect. 
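	 * Reusing stale counters could trip the peers' replay protection,
	 * so tear the links down and let them be re-established.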
1167 */ 1168 iwl_mvm_teardown_tdls_peers(mvm); 1169 1170 mutex_unlock(&mvm->mutex); 1171 } 1172 1173 static void iwl_mvm_resume_complete(struct iwl_mvm *mvm) 1174 { 1175 if (iwl_mvm_is_d0i3_supported(mvm) && 1176 iwl_mvm_enter_d0i3_on_suspend(mvm)) 1177 WARN_ONCE(!wait_event_timeout(mvm->d0i3_exit_waitq, 1178 !test_bit(IWL_MVM_STATUS_IN_D0I3, 1179 &mvm->status), 1180 HZ), 1181 "D0i3 exit on resume timed out\n"); 1182 } 1183 1184 static void 1185 iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw, 1186 enum ieee80211_reconfig_type reconfig_type) 1187 { 1188 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1189 1190 switch (reconfig_type) { 1191 case IEEE80211_RECONFIG_TYPE_RESTART: 1192 iwl_mvm_restart_complete(mvm); 1193 break; 1194 case IEEE80211_RECONFIG_TYPE_SUSPEND: 1195 iwl_mvm_resume_complete(mvm); 1196 break; 1197 } 1198 } 1199 1200 void __iwl_mvm_mac_stop(struct iwl_mvm *mvm) 1201 { 1202 lockdep_assert_held(&mvm->mutex); 1203 1204 /* firmware counters are obviously reset now, but we shouldn't 1205 * partially track so also clear the fw_reset_accu counters. 1206 */ 1207 memset(&mvm->accu_radio_stats, 0, sizeof(mvm->accu_radio_stats)); 1208 1209 /* async_handlers_wk is now blocked */ 1210 1211 /* 1212 * The work item could be running or queued if the 1213 * ROC time event stops just as we get here. 1214 */ 1215 flush_work(&mvm->roc_done_wk); 1216 1217 iwl_mvm_stop_device(mvm); 1218 1219 iwl_mvm_async_handlers_purge(mvm); 1220 /* async_handlers_list is empty and will stay empty: HW is stopped */ 1221 1222 /* the fw is stopped, the aux sta is dead: clean up driver state */ 1223 iwl_mvm_del_aux_sta(mvm); 1224 1225 /* 1226 * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete() 1227 * won't be called in this case). 1228 * But make sure to cleanup interfaces that have gone down before/during 1229 * HW restart was requested. 1230 */ 1231 if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) 1232 ieee80211_iterate_interfaces(mvm->hw, 0, 1233 iwl_mvm_cleanup_iterator, mvm); 1234 1235 /* We shouldn't have any UIDs still set. Loop over all the UIDs to 1236 * make sure there's nothing left there and warn if any is found. 1237 */ 1238 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { 1239 int i; 1240 1241 for (i = 0; i < mvm->max_scans; i++) { 1242 if (WARN_ONCE(mvm->scan_uid_status[i], 1243 "UMAC scan UID %d status was not cleaned\n", 1244 i)) 1245 mvm->scan_uid_status[i] = 0; 1246 } 1247 } 1248 } 1249 1250 static void iwl_mvm_mac_stop(struct ieee80211_hw *hw) 1251 { 1252 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1253 1254 flush_work(&mvm->d0i3_exit_work); 1255 flush_work(&mvm->async_handlers_wk); 1256 flush_work(&mvm->add_stream_wk); 1257 1258 /* 1259 * Lock and clear the firmware running bit here already, so that 1260 * new commands coming in elsewhere, e.g. from debugfs, will not 1261 * be able to proceed. This is important here because one of those 1262 * debugfs files causes the firmware dump to be triggered, and if we 1263 * don't stop debugfs accesses before canceling that it could be 1264 * retriggered after we flush it but before we've cleared the bit. 
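	 * Clearing the bit before canceling the dump makes any such late
	 * access bail out instead of poking the stopped firmware.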
1265 */ 1266 clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); 1267 1268 iwl_fw_cancel_dump(&mvm->fwrt); 1269 cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork); 1270 cancel_delayed_work_sync(&mvm->scan_timeout_dwork); 1271 iwl_fw_free_dump_desc(&mvm->fwrt); 1272 1273 mutex_lock(&mvm->mutex); 1274 __iwl_mvm_mac_stop(mvm); 1275 mutex_unlock(&mvm->mutex); 1276 1277 /* 1278 * The worker might have been waiting for the mutex, let it run and 1279 * discover that its list is now empty. 1280 */ 1281 cancel_work_sync(&mvm->async_handlers_wk); 1282 } 1283 1284 static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm) 1285 { 1286 u16 i; 1287 1288 lockdep_assert_held(&mvm->mutex); 1289 1290 for (i = 0; i < NUM_PHY_CTX; i++) 1291 if (!mvm->phy_ctxts[i].ref) 1292 return &mvm->phy_ctxts[i]; 1293 1294 IWL_ERR(mvm, "No available PHY context\n"); 1295 return NULL; 1296 } 1297 1298 static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 1299 s16 tx_power) 1300 { 1301 struct iwl_dev_tx_power_cmd cmd = { 1302 .v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC), 1303 .v3.mac_context_id = 1304 cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id), 1305 .v3.pwr_restriction = cpu_to_le16(8 * tx_power), 1306 }; 1307 int len = sizeof(cmd); 1308 1309 if (tx_power == IWL_DEFAULT_MAX_TX_POWER) 1310 cmd.v3.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER); 1311 1312 if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK)) 1313 len = sizeof(cmd.v3); 1314 1315 return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd); 1316 } 1317 1318 static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, 1319 struct ieee80211_vif *vif) 1320 { 1321 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1322 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1323 int ret; 1324 1325 mvmvif->mvm = mvm; 1326 1327 /* 1328 * make sure D0i3 exit is completed, otherwise a target access 1329 * during tx queue configuration could be done when still in 1330 * D0i3 state. 1331 */ 1332 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_ADD_IF); 1333 if (ret) 1334 return ret; 1335 1336 /* 1337 * Not much to do here. The stack will not allow interface 1338 * types or combinations that we didn't advertise, so we 1339 * don't really have to check the types. 1340 */ 1341 1342 mutex_lock(&mvm->mutex); 1343 1344 /* make sure that beacon statistics don't go backwards with FW reset */ 1345 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) 1346 mvmvif->beacon_stats.accu_num_beacons += 1347 mvmvif->beacon_stats.num_beacons; 1348 1349 /* Allocate resources for the MAC context, and add it to the fw */ 1350 ret = iwl_mvm_mac_ctxt_init(mvm, vif); 1351 if (ret) 1352 goto out_unlock; 1353 1354 /* Counting number of interfaces is needed for legacy PM */ 1355 if (vif->type != NL80211_IFTYPE_P2P_DEVICE) 1356 mvm->vif_count++; 1357 1358 /* 1359 * The AP binding flow can be done only after the beacon 1360 * template is configured (which happens only in the mac80211 1361 * start_ap() flow), and adding the broadcast station can happen 1362 * only after the binding. 1363 * In addition, since modifying the MAC before adding a bcast 1364 * station is not allowed by the FW, delay the adding of MAC context to 1365 * the point where we can also add the bcast station. 
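	 * All of that is done later, from iwl_mvm_start_ap_ibss(), once
	 * mac80211 has provided the beacon template.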
1366 * In short: there's not much we can do at this point, other than 1367 * allocating resources :) 1368 */ 1369 if (vif->type == NL80211_IFTYPE_AP || 1370 vif->type == NL80211_IFTYPE_ADHOC) { 1371 ret = iwl_mvm_alloc_bcast_sta(mvm, vif); 1372 if (ret) { 1373 IWL_ERR(mvm, "Failed to allocate bcast sta\n"); 1374 goto out_release; 1375 } 1376 1377 /* 1378 * Only queue for this station is the mcast queue, 1379 * which shouldn't be in TFD mask anyway 1380 */ 1381 ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->mcast_sta, 1382 0, vif->type, 1383 IWL_STA_MULTICAST); 1384 if (ret) 1385 goto out_release; 1386 1387 iwl_mvm_vif_dbgfs_register(mvm, vif); 1388 goto out_unlock; 1389 } 1390 1391 mvmvif->features |= hw->netdev_features; 1392 1393 ret = iwl_mvm_mac_ctxt_add(mvm, vif); 1394 if (ret) 1395 goto out_release; 1396 1397 ret = iwl_mvm_power_update_mac(mvm); 1398 if (ret) 1399 goto out_remove_mac; 1400 1401 /* beacon filtering */ 1402 ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); 1403 if (ret) 1404 goto out_remove_mac; 1405 1406 if (!mvm->bf_allowed_vif && 1407 vif->type == NL80211_IFTYPE_STATION && !vif->p2p) { 1408 mvm->bf_allowed_vif = mvmvif; 1409 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER | 1410 IEEE80211_VIF_SUPPORTS_CQM_RSSI; 1411 } 1412 1413 /* 1414 * P2P_DEVICE interface does not have a channel context assigned to it, 1415 * so a dedicated PHY context is allocated to it and the corresponding 1416 * MAC context is bound to it at this stage. 1417 */ 1418 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { 1419 1420 mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); 1421 if (!mvmvif->phy_ctxt) { 1422 ret = -ENOSPC; 1423 goto out_free_bf; 1424 } 1425 1426 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); 1427 ret = iwl_mvm_binding_add_vif(mvm, vif); 1428 if (ret) 1429 goto out_unref_phy; 1430 1431 ret = iwl_mvm_add_p2p_bcast_sta(mvm, vif); 1432 if (ret) 1433 goto out_unbind; 1434 1435 /* Save a pointer to p2p device vif, so it can later be used to 1436 * update the p2p device MAC when a GO is started/stopped */ 1437 mvm->p2p_device_vif = vif; 1438 } 1439 1440 iwl_mvm_vif_dbgfs_register(mvm, vif); 1441 goto out_unlock; 1442 1443 out_unbind: 1444 iwl_mvm_binding_remove_vif(mvm, vif); 1445 out_unref_phy: 1446 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); 1447 out_free_bf: 1448 if (mvm->bf_allowed_vif == mvmvif) { 1449 mvm->bf_allowed_vif = NULL; 1450 vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER | 1451 IEEE80211_VIF_SUPPORTS_CQM_RSSI); 1452 } 1453 out_remove_mac: 1454 mvmvif->phy_ctxt = NULL; 1455 iwl_mvm_mac_ctxt_remove(mvm, vif); 1456 out_release: 1457 if (vif->type != NL80211_IFTYPE_P2P_DEVICE) 1458 mvm->vif_count--; 1459 out_unlock: 1460 mutex_unlock(&mvm->mutex); 1461 1462 iwl_mvm_unref(mvm, IWL_MVM_REF_ADD_IF); 1463 1464 return ret; 1465 } 1466 1467 static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm, 1468 struct ieee80211_vif *vif) 1469 { 1470 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { 1471 /* 1472 * Flush the ROC worker which will flush the OFFCHANNEL queue. 1473 * We assume here that all the packets sent to the OFFCHANNEL 1474 * queue are sent in ROC session. 
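		 * Once the ROC-done worker has run, that queue is
		 * therefore empty.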
1475 */ 1476 flush_work(&mvm->roc_done_wk); 1477 } 1478 } 1479 1480 static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, 1481 struct ieee80211_vif *vif) 1482 { 1483 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1484 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1485 1486 iwl_mvm_prepare_mac_removal(mvm, vif); 1487 1488 mutex_lock(&mvm->mutex); 1489 1490 if (mvm->bf_allowed_vif == mvmvif) { 1491 mvm->bf_allowed_vif = NULL; 1492 vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER | 1493 IEEE80211_VIF_SUPPORTS_CQM_RSSI); 1494 } 1495 1496 iwl_mvm_vif_dbgfs_clean(mvm, vif); 1497 1498 /* 1499 * For AP/GO interface, the tear down of the resources allocated to the 1500 * interface is be handled as part of the stop_ap flow. 1501 */ 1502 if (vif->type == NL80211_IFTYPE_AP || 1503 vif->type == NL80211_IFTYPE_ADHOC) { 1504 #ifdef CONFIG_NL80211_TESTMODE 1505 if (vif == mvm->noa_vif) { 1506 mvm->noa_vif = NULL; 1507 mvm->noa_duration = 0; 1508 } 1509 #endif 1510 iwl_mvm_dealloc_int_sta(mvm, &mvmvif->mcast_sta); 1511 iwl_mvm_dealloc_bcast_sta(mvm, vif); 1512 goto out_release; 1513 } 1514 1515 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { 1516 mvm->p2p_device_vif = NULL; 1517 iwl_mvm_rm_p2p_bcast_sta(mvm, vif); 1518 iwl_mvm_binding_remove_vif(mvm, vif); 1519 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); 1520 mvmvif->phy_ctxt = NULL; 1521 } 1522 1523 if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE) 1524 mvm->vif_count--; 1525 1526 iwl_mvm_power_update_mac(mvm); 1527 iwl_mvm_mac_ctxt_remove(mvm, vif); 1528 1529 out_release: 1530 mutex_unlock(&mvm->mutex); 1531 } 1532 1533 static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed) 1534 { 1535 return 0; 1536 } 1537 1538 struct iwl_mvm_mc_iter_data { 1539 struct iwl_mvm *mvm; 1540 int port_id; 1541 }; 1542 1543 static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac, 1544 struct ieee80211_vif *vif) 1545 { 1546 struct iwl_mvm_mc_iter_data *data = _data; 1547 struct iwl_mvm *mvm = data->mvm; 1548 struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd; 1549 int ret, len; 1550 1551 /* if we don't have free ports, mcast frames will be dropped */ 1552 if (WARN_ON_ONCE(data->port_id >= MAX_PORT_ID_NUM)) 1553 return; 1554 1555 if (vif->type != NL80211_IFTYPE_STATION || 1556 !vif->bss_conf.assoc) 1557 return; 1558 1559 cmd->port_id = data->port_id++; 1560 memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN); 1561 len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4); 1562 1563 ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd); 1564 if (ret) 1565 IWL_ERR(mvm, "mcast filter cmd error. 
ret=%d\n", ret); 1566 } 1567 1568 static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm) 1569 { 1570 struct iwl_mvm_mc_iter_data iter_data = { 1571 .mvm = mvm, 1572 }; 1573 1574 lockdep_assert_held(&mvm->mutex); 1575 1576 if (WARN_ON_ONCE(!mvm->mcast_filter_cmd)) 1577 return; 1578 1579 ieee80211_iterate_active_interfaces_atomic( 1580 mvm->hw, IEEE80211_IFACE_ITER_NORMAL, 1581 iwl_mvm_mc_iface_iterator, &iter_data); 1582 } 1583 1584 static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw, 1585 struct netdev_hw_addr_list *mc_list) 1586 { 1587 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1588 struct iwl_mcast_filter_cmd *cmd; 1589 struct netdev_hw_addr *addr; 1590 int addr_count; 1591 bool pass_all; 1592 int len; 1593 1594 addr_count = netdev_hw_addr_list_count(mc_list); 1595 pass_all = addr_count > MAX_MCAST_FILTERING_ADDRESSES || 1596 IWL_MVM_FW_MCAST_FILTER_PASS_ALL; 1597 if (pass_all) 1598 addr_count = 0; 1599 1600 len = roundup(sizeof(*cmd) + addr_count * ETH_ALEN, 4); 1601 cmd = kzalloc(len, GFP_ATOMIC); 1602 if (!cmd) 1603 return 0; 1604 1605 if (pass_all) { 1606 cmd->pass_all = 1; 1607 return (u64)(unsigned long)cmd; 1608 } 1609 1610 netdev_hw_addr_list_for_each(addr, mc_list) { 1611 IWL_DEBUG_MAC80211(mvm, "mcast addr (%d): %pM\n", 1612 cmd->count, addr->addr); 1613 memcpy(&cmd->addr_list[cmd->count * ETH_ALEN], 1614 addr->addr, ETH_ALEN); 1615 cmd->count++; 1616 } 1617 1618 return (u64)(unsigned long)cmd; 1619 } 1620 1621 static void iwl_mvm_configure_filter(struct ieee80211_hw *hw, 1622 unsigned int changed_flags, 1623 unsigned int *total_flags, 1624 u64 multicast) 1625 { 1626 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1627 struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast; 1628 1629 mutex_lock(&mvm->mutex); 1630 1631 /* replace previous configuration */ 1632 kfree(mvm->mcast_filter_cmd); 1633 mvm->mcast_filter_cmd = cmd; 1634 1635 if (!cmd) 1636 goto out; 1637 1638 iwl_mvm_recalc_multicast(mvm); 1639 out: 1640 mutex_unlock(&mvm->mutex); 1641 *total_flags = 0; 1642 } 1643 1644 static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw, 1645 struct ieee80211_vif *vif, 1646 unsigned int filter_flags, 1647 unsigned int changed_flags) 1648 { 1649 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1650 1651 /* We support only filter for probe requests */ 1652 if (!(changed_flags & FIF_PROBE_REQ)) 1653 return; 1654 1655 /* Supported only for p2p client interfaces */ 1656 if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc || 1657 !vif->p2p) 1658 return; 1659 1660 mutex_lock(&mvm->mutex); 1661 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 1662 mutex_unlock(&mvm->mutex); 1663 } 1664 1665 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING 1666 struct iwl_bcast_iter_data { 1667 struct iwl_mvm *mvm; 1668 struct iwl_bcast_filter_cmd *cmd; 1669 u8 current_filter; 1670 }; 1671 1672 static void 1673 iwl_mvm_set_bcast_filter(struct ieee80211_vif *vif, 1674 const struct iwl_fw_bcast_filter *in_filter, 1675 struct iwl_fw_bcast_filter *out_filter) 1676 { 1677 struct iwl_fw_bcast_filter_attr *attr; 1678 int i; 1679 1680 memcpy(out_filter, in_filter, sizeof(*out_filter)); 1681 1682 for (i = 0; i < ARRAY_SIZE(out_filter->attrs); i++) { 1683 attr = &out_filter->attrs[i]; 1684 1685 if (!attr->mask) 1686 break; 1687 1688 switch (attr->reserved1) { 1689 case cpu_to_le16(BC_FILTER_MAGIC_IP): 1690 if (vif->bss_conf.arp_addr_cnt != 1) { 1691 attr->mask = 0; 1692 continue; 1693 } 1694 1695 attr->val = vif->bss_conf.arp_addr_list[0]; 1696 break; 1697 case 
cpu_to_le16(BC_FILTER_MAGIC_MAC): 1698 attr->val = *(__be32 *)&vif->addr[2]; 1699 break; 1700 default: 1701 break; 1702 } 1703 attr->reserved1 = 0; 1704 out_filter->num_attrs++; 1705 } 1706 } 1707 1708 static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac, 1709 struct ieee80211_vif *vif) 1710 { 1711 struct iwl_bcast_iter_data *data = _data; 1712 struct iwl_mvm *mvm = data->mvm; 1713 struct iwl_bcast_filter_cmd *cmd = data->cmd; 1714 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1715 struct iwl_fw_bcast_mac *bcast_mac; 1716 int i; 1717 1718 if (WARN_ON(mvmvif->id >= ARRAY_SIZE(cmd->macs))) 1719 return; 1720 1721 bcast_mac = &cmd->macs[mvmvif->id]; 1722 1723 /* 1724 * enable filtering only for associated stations, but not for P2P 1725 * Clients 1726 */ 1727 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p || 1728 !vif->bss_conf.assoc) 1729 return; 1730 1731 bcast_mac->default_discard = 1; 1732 1733 /* copy all configured filters */ 1734 for (i = 0; mvm->bcast_filters[i].attrs[0].mask; i++) { 1735 /* 1736 * Make sure we don't exceed our filters limit. 1737 * if there is still a valid filter to be configured, 1738 * be on the safe side and just allow bcast for this mac. 1739 */ 1740 if (WARN_ON_ONCE(data->current_filter >= 1741 ARRAY_SIZE(cmd->filters))) { 1742 bcast_mac->default_discard = 0; 1743 bcast_mac->attached_filters = 0; 1744 break; 1745 } 1746 1747 iwl_mvm_set_bcast_filter(vif, 1748 &mvm->bcast_filters[i], 1749 &cmd->filters[data->current_filter]); 1750 1751 /* skip current filter if it contains no attributes */ 1752 if (!cmd->filters[data->current_filter].num_attrs) 1753 continue; 1754 1755 /* attach the filter to current mac */ 1756 bcast_mac->attached_filters |= 1757 cpu_to_le16(BIT(data->current_filter)); 1758 1759 data->current_filter++; 1760 } 1761 } 1762 1763 bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm, 1764 struct iwl_bcast_filter_cmd *cmd) 1765 { 1766 struct iwl_bcast_iter_data iter_data = { 1767 .mvm = mvm, 1768 .cmd = cmd, 1769 }; 1770 1771 if (IWL_MVM_FW_BCAST_FILTER_PASS_ALL) 1772 return false; 1773 1774 memset(cmd, 0, sizeof(*cmd)); 1775 cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters); 1776 cmd->max_macs = ARRAY_SIZE(cmd->macs); 1777 1778 #ifdef CONFIG_IWLWIFI_DEBUGFS 1779 /* use debugfs filters/macs if override is configured */ 1780 if (mvm->dbgfs_bcast_filtering.override) { 1781 memcpy(cmd->filters, &mvm->dbgfs_bcast_filtering.cmd.filters, 1782 sizeof(cmd->filters)); 1783 memcpy(cmd->macs, &mvm->dbgfs_bcast_filtering.cmd.macs, 1784 sizeof(cmd->macs)); 1785 return true; 1786 } 1787 #endif 1788 1789 /* if no filters are configured, do nothing */ 1790 if (!mvm->bcast_filters) 1791 return false; 1792 1793 /* configure and attach these filters for each associated sta vif */ 1794 ieee80211_iterate_active_interfaces( 1795 mvm->hw, IEEE80211_IFACE_ITER_NORMAL, 1796 iwl_mvm_bcast_filter_iterator, &iter_data); 1797 1798 return true; 1799 } 1800 1801 static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm) 1802 { 1803 struct iwl_bcast_filter_cmd cmd; 1804 1805 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING)) 1806 return 0; 1807 1808 if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) 1809 return 0; 1810 1811 return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0, 1812 sizeof(cmd), &cmd); 1813 } 1814 #else 1815 static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm) 1816 { 1817 return 0; 1818 } 1819 #endif 1820 1821 static int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm, 1822 struct ieee80211_vif *vif) 
1823 { 1824 struct iwl_mu_group_mgmt_cmd cmd = {}; 1825 1826 memcpy(cmd.membership_status, vif->bss_conf.mu_group.membership, 1827 WLAN_MEMBERSHIP_LEN); 1828 memcpy(cmd.user_position, vif->bss_conf.mu_group.position, 1829 WLAN_USER_POSITION_LEN); 1830 1831 return iwl_mvm_send_cmd_pdu(mvm, 1832 WIDE_ID(DATA_PATH_GROUP, 1833 UPDATE_MU_GROUPS_CMD), 1834 0, sizeof(cmd), &cmd); 1835 } 1836 1837 static void iwl_mvm_mu_mimo_iface_iterator(void *_data, u8 *mac, 1838 struct ieee80211_vif *vif) 1839 { 1840 if (vif->mu_mimo_owner) { 1841 struct iwl_mu_group_mgmt_notif *notif = _data; 1842 1843 /* 1844 * MU-MIMO Group Id action frame is little endian. We treat 1845 * the data received from firmware as if it came from the 1846 * action frame, so no conversion is needed. 1847 */ 1848 ieee80211_update_mu_groups(vif, 1849 (u8 *)¬if->membership_status, 1850 (u8 *)¬if->user_position); 1851 } 1852 } 1853 1854 void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm, 1855 struct iwl_rx_cmd_buffer *rxb) 1856 { 1857 struct iwl_rx_packet *pkt = rxb_addr(rxb); 1858 struct iwl_mu_group_mgmt_notif *notif = (void *)pkt->data; 1859 1860 ieee80211_iterate_active_interfaces_atomic( 1861 mvm->hw, IEEE80211_IFACE_ITER_NORMAL, 1862 iwl_mvm_mu_mimo_iface_iterator, notif); 1863 } 1864 1865 static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, 1866 struct ieee80211_vif *vif, 1867 struct ieee80211_bss_conf *bss_conf, 1868 u32 changes) 1869 { 1870 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1871 int ret; 1872 1873 /* 1874 * Re-calculate the tsf id, as the master-slave relations depend on the 1875 * beacon interval, which was not known when the station interface was 1876 * added. 1877 */ 1878 if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) 1879 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); 1880 1881 if (changes & BSS_CHANGED_ASSOC && !bss_conf->assoc && 1882 mvmvif->lqm_active) 1883 iwl_mvm_send_lqm_cmd(vif, LQM_CMD_OPERATION_STOP_MEASUREMENT, 1884 0, 0); 1885 1886 /* 1887 * If we're not associated yet, take the (new) BSSID before associating 1888 * so the firmware knows. If we're already associated, then use the old 1889 * BSSID here, and we'll send a cleared one later in the CHANGED_ASSOC 1890 * branch for disassociation below. 1891 */ 1892 if (changes & BSS_CHANGED_BSSID && !mvmvif->associated) 1893 memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN); 1894 1895 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, mvmvif->bssid); 1896 if (ret) 1897 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); 1898 1899 /* after sending it once, adopt mac80211 data */ 1900 memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN); 1901 mvmvif->associated = bss_conf->assoc; 1902 1903 if (changes & BSS_CHANGED_ASSOC) { 1904 if (bss_conf->assoc) { 1905 /* clear statistics to get clean beacon counter */ 1906 iwl_mvm_request_statistics(mvm, true); 1907 memset(&mvmvif->beacon_stats, 0, 1908 sizeof(mvmvif->beacon_stats)); 1909 1910 /* add quota for this interface */ 1911 ret = iwl_mvm_update_quotas(mvm, true, NULL); 1912 if (ret) { 1913 IWL_ERR(mvm, "failed to update quotas\n"); 1914 return; 1915 } 1916 1917 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, 1918 &mvm->status)) { 1919 /* 1920 * If we're restarting then the firmware will 1921 * obviously have lost synchronisation with 1922 * the AP. It will attempt to synchronise by 1923 * itself, but we can make it more reliable by 1924 * scheduling a session protection time event. 
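				 * (the time event guarantees the firmware
				 * stays on the channel for its duration).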
1925 * 1926 * The firmware needs to receive a beacon to 1927 * catch up with synchronisation, use 110% of 1928 * the beacon interval. 1929 * 1930 * Set a large maximum delay to allow for more 1931 * than a single interface. 1932 */ 1933 u32 dur = (11 * vif->bss_conf.beacon_int) / 10; 1934 iwl_mvm_protect_session(mvm, vif, dur, dur, 1935 5 * dur, false); 1936 } 1937 1938 iwl_mvm_sf_update(mvm, vif, false); 1939 iwl_mvm_power_vif_assoc(mvm, vif); 1940 if (vif->p2p) { 1941 iwl_mvm_ref(mvm, IWL_MVM_REF_P2P_CLIENT); 1942 iwl_mvm_update_smps(mvm, vif, 1943 IWL_MVM_SMPS_REQ_PROT, 1944 IEEE80211_SMPS_DYNAMIC); 1945 } 1946 } else if (mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) { 1947 /* 1948 * If update fails - SF might be running in associated 1949 * mode while disassociated - which is forbidden. 1950 */ 1951 WARN_ONCE(iwl_mvm_sf_update(mvm, vif, false), 1952 "Failed to update SF upon disassociation\n"); 1953 1954 /* 1955 * If we get an assert during the connection (after the 1956 * station has been added, but before the vif is set 1957 * to associated), mac80211 will re-add the station and 1958 * then configure the vif. Since the vif is not 1959 * associated, we would remove the station here and 1960 * this would fail the recovery. 1961 */ 1962 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, 1963 &mvm->status)) { 1964 /* 1965 * Remove AP station now that 1966 * the MAC is unassoc 1967 */ 1968 ret = iwl_mvm_rm_sta_id(mvm, vif, 1969 mvmvif->ap_sta_id); 1970 if (ret) 1971 IWL_ERR(mvm, 1972 "failed to remove AP station\n"); 1973 1974 if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id) 1975 mvm->d0i3_ap_sta_id = 1976 IWL_MVM_INVALID_STA; 1977 mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; 1978 } 1979 1980 /* remove quota for this interface */ 1981 ret = iwl_mvm_update_quotas(mvm, false, NULL); 1982 if (ret) 1983 IWL_ERR(mvm, "failed to update quotas\n"); 1984 1985 if (vif->p2p) 1986 iwl_mvm_unref(mvm, IWL_MVM_REF_P2P_CLIENT); 1987 1988 /* this will take the cleared BSSID from bss_conf */ 1989 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 1990 if (ret) 1991 IWL_ERR(mvm, 1992 "failed to update MAC %pM (clear after unassoc)\n", 1993 vif->addr); 1994 } 1995 1996 /* 1997 * The firmware tracks the MU-MIMO group on its own. 1998 * However, on HW restart we should restore this data. 1999 */ 2000 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && 2001 (changes & BSS_CHANGED_MU_GROUPS) && vif->mu_mimo_owner) { 2002 ret = iwl_mvm_update_mu_groups(mvm, vif); 2003 if (ret) 2004 IWL_ERR(mvm, 2005 "failed to update VHT MU_MIMO groups\n"); 2006 } 2007 2008 iwl_mvm_recalc_multicast(mvm); 2009 iwl_mvm_configure_bcast_filter(mvm); 2010 2011 /* reset rssi values */ 2012 mvmvif->bf_data.ave_beacon_signal = 0; 2013 2014 iwl_mvm_bt_coex_vif_change(mvm); 2015 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT, 2016 IEEE80211_SMPS_AUTOMATIC); 2017 if (fw_has_capa(&mvm->fw->ucode_capa, 2018 IWL_UCODE_TLV_CAPA_UMAC_SCAN)) 2019 iwl_mvm_config_scan(mvm); 2020 } 2021 2022 if (changes & BSS_CHANGED_BEACON_INFO) { 2023 /* 2024 * We received a beacon from the associated AP so 2025 * remove the session protection. 2026 */ 2027 iwl_mvm_stop_session_protection(mvm, vif); 2028 2029 iwl_mvm_sf_update(mvm, vif, false); 2030 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); 2031 } 2032 2033 if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS | 2034 /* 2035 * Send power command on every beacon change, 2036 * because we may have not enabled beacon abort yet. 
2037 */ 2038 BSS_CHANGED_BEACON_INFO)) { 2039 ret = iwl_mvm_power_update_mac(mvm); 2040 if (ret) 2041 IWL_ERR(mvm, "failed to update power mode\n"); 2042 } 2043 2044 if (changes & BSS_CHANGED_TXPOWER) { 2045 IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n", 2046 bss_conf->txpower); 2047 iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower); 2048 } 2049 2050 if (changes & BSS_CHANGED_CQM) { 2051 IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n"); 2052 /* reset cqm events tracking */ 2053 mvmvif->bf_data.last_cqm_event = 0; 2054 if (mvmvif->bf_data.bf_enabled) { 2055 ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0); 2056 if (ret) 2057 IWL_ERR(mvm, 2058 "failed to update CQM thresholds\n"); 2059 } 2060 } 2061 2062 if (changes & BSS_CHANGED_ARP_FILTER) { 2063 IWL_DEBUG_MAC80211(mvm, "arp filter changed\n"); 2064 iwl_mvm_configure_bcast_filter(mvm); 2065 } 2066 } 2067 2068 static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, 2069 struct ieee80211_vif *vif) 2070 { 2071 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2072 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2073 int ret; 2074 2075 /* 2076 * iwl_mvm_mac_ctxt_add() might read directly from the device 2077 * (the system time), so make sure it is available. 2078 */ 2079 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_START_AP); 2080 if (ret) 2081 return ret; 2082 2083 mutex_lock(&mvm->mutex); 2084 2085 /* Send the beacon template */ 2086 ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif); 2087 if (ret) 2088 goto out_unlock; 2089 2090 /* 2091 * Re-calculate the tsf id, as the master-slave relations depend on the 2092 * beacon interval, which was not known when the AP interface was added. 2093 */ 2094 if (vif->type == NL80211_IFTYPE_AP) 2095 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); 2096 2097 mvmvif->ap_assoc_sta_count = 0; 2098 2099 /* Add the mac context */ 2100 ret = iwl_mvm_mac_ctxt_add(mvm, vif); 2101 if (ret) 2102 goto out_unlock; 2103 2104 /* Perform the binding */ 2105 ret = iwl_mvm_binding_add_vif(mvm, vif); 2106 if (ret) 2107 goto out_remove; 2108 2109 ret = iwl_mvm_add_mcast_sta(mvm, vif); 2110 if (ret) 2111 goto out_unbind; 2112 2113 /* Send the bcast station. 
At this stage the TBTT and DTIM time events 2114 * are added and applied to the scheduler */ 2115 ret = iwl_mvm_send_add_bcast_sta(mvm, vif); 2116 if (ret) 2117 goto out_rm_mcast; 2118 2119 /* must be set before quota calculations */ 2120 mvmvif->ap_ibss_active = true; 2121 2122 /* power update needs to be done before quotas */ 2123 iwl_mvm_power_update_mac(mvm); 2124 2125 ret = iwl_mvm_update_quotas(mvm, false, NULL); 2126 if (ret) 2127 goto out_quota_failed; 2128 2129 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */ 2130 if (vif->p2p && mvm->p2p_device_vif) 2131 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL); 2132 2133 iwl_mvm_ref(mvm, IWL_MVM_REF_AP_IBSS); 2134 2135 iwl_mvm_bt_coex_vif_change(mvm); 2136 2137 /* we don't support TDLS during DCM */ 2138 if (iwl_mvm_phy_ctx_count(mvm) > 1) 2139 iwl_mvm_teardown_tdls_peers(mvm); 2140 2141 goto out_unlock; 2142 2143 out_quota_failed: 2144 iwl_mvm_power_update_mac(mvm); 2145 mvmvif->ap_ibss_active = false; 2146 iwl_mvm_send_rm_bcast_sta(mvm, vif); 2147 out_rm_mcast: 2148 iwl_mvm_rm_mcast_sta(mvm, vif); 2149 out_unbind: 2150 iwl_mvm_binding_remove_vif(mvm, vif); 2151 out_remove: 2152 iwl_mvm_mac_ctxt_remove(mvm, vif); 2153 out_unlock: 2154 mutex_unlock(&mvm->mutex); 2155 iwl_mvm_unref(mvm, IWL_MVM_REF_START_AP); 2156 return ret; 2157 } 2158 2159 static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw, 2160 struct ieee80211_vif *vif) 2161 { 2162 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2163 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2164 2165 iwl_mvm_prepare_mac_removal(mvm, vif); 2166 2167 mutex_lock(&mvm->mutex); 2168 2169 /* Handle AP stop while in CSA */ 2170 if (rcu_access_pointer(mvm->csa_vif) == vif) { 2171 iwl_mvm_remove_time_event(mvm, mvmvif, 2172 &mvmvif->time_event_data); 2173 RCU_INIT_POINTER(mvm->csa_vif, NULL); 2174 mvmvif->csa_countdown = false; 2175 } 2176 2177 if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) { 2178 RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL); 2179 mvm->csa_tx_block_bcn_timeout = 0; 2180 } 2181 2182 mvmvif->ap_ibss_active = false; 2183 mvm->ap_last_beacon_gp2 = 0; 2184 2185 iwl_mvm_bt_coex_vif_change(mvm); 2186 2187 iwl_mvm_unref(mvm, IWL_MVM_REF_AP_IBSS); 2188 2189 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */ 2190 if (vif->p2p && mvm->p2p_device_vif) 2191 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL); 2192 2193 iwl_mvm_update_quotas(mvm, false, NULL); 2194 2195 /* 2196 * This is not very nice, but the simplest: 2197 * For older FWs removing the mcast sta before the bcast station may 2198 * cause assert 0x2b00. 2199 * This is fixed in later FW (which will stop beaconing when removing 2200 * bcast station).
2201 * So make the order of removal depend on the TLV 2202 */ 2203 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) 2204 iwl_mvm_rm_mcast_sta(mvm, vif); 2205 iwl_mvm_send_rm_bcast_sta(mvm, vif); 2206 if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) 2207 iwl_mvm_rm_mcast_sta(mvm, vif); 2208 iwl_mvm_binding_remove_vif(mvm, vif); 2209 2210 iwl_mvm_power_update_mac(mvm); 2211 2212 iwl_mvm_mac_ctxt_remove(mvm, vif); 2213 2214 mutex_unlock(&mvm->mutex); 2215 } 2216 2217 static void 2218 iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm, 2219 struct ieee80211_vif *vif, 2220 struct ieee80211_bss_conf *bss_conf, 2221 u32 changes) 2222 { 2223 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2224 2225 /* Changes will be applied when the AP/IBSS is started */ 2226 if (!mvmvif->ap_ibss_active) 2227 return; 2228 2229 if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT | 2230 BSS_CHANGED_BANDWIDTH | BSS_CHANGED_QOS) && 2231 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL)) 2232 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); 2233 2234 /* Need to send a new beacon template to the FW */ 2235 if (changes & BSS_CHANGED_BEACON && 2236 iwl_mvm_mac_ctxt_beacon_changed(mvm, vif)) 2237 IWL_WARN(mvm, "Failed updating beacon data\n"); 2238 2239 if (changes & BSS_CHANGED_TXPOWER) { 2240 IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n", 2241 bss_conf->txpower); 2242 iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower); 2243 } 2244 } 2245 2246 static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw, 2247 struct ieee80211_vif *vif, 2248 struct ieee80211_bss_conf *bss_conf, 2249 u32 changes) 2250 { 2251 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2252 2253 /* 2254 * iwl_mvm_bss_info_changed_station() might call 2255 * iwl_mvm_protect_session(), which reads directly from 2256 * the device (the system time), so make sure it is available. 
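 * The IWL_MVM_REF_BSS_CHANGED reference taken just below is dropped by
 * the matching iwl_mvm_unref() at the end of this function, so the
 * device stays available for the whole callback.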
2257 */ 2258 if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_BSS_CHANGED)) 2259 return; 2260 2261 mutex_lock(&mvm->mutex); 2262 2263 if (changes & BSS_CHANGED_IDLE && !bss_conf->idle) 2264 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true); 2265 2266 switch (vif->type) { 2267 case NL80211_IFTYPE_STATION: 2268 iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes); 2269 break; 2270 case NL80211_IFTYPE_AP: 2271 case NL80211_IFTYPE_ADHOC: 2272 iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes); 2273 break; 2274 case NL80211_IFTYPE_MONITOR: 2275 if (changes & BSS_CHANGED_MU_GROUPS) 2276 iwl_mvm_update_mu_groups(mvm, vif); 2277 break; 2278 default: 2279 /* shouldn't happen */ 2280 WARN_ON_ONCE(1); 2281 } 2282 2283 mutex_unlock(&mvm->mutex); 2284 iwl_mvm_unref(mvm, IWL_MVM_REF_BSS_CHANGED); 2285 } 2286 2287 static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw, 2288 struct ieee80211_vif *vif, 2289 struct ieee80211_scan_request *hw_req) 2290 { 2291 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2292 int ret; 2293 2294 if (hw_req->req.n_channels == 0 || 2295 hw_req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels) 2296 return -EINVAL; 2297 2298 mutex_lock(&mvm->mutex); 2299 ret = iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies); 2300 mutex_unlock(&mvm->mutex); 2301 2302 return ret; 2303 } 2304 2305 static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw, 2306 struct ieee80211_vif *vif) 2307 { 2308 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2309 2310 mutex_lock(&mvm->mutex); 2311 2312 /* Due to a race condition, it's possible that mac80211 asks 2313 * us to stop a hw_scan when it's already stopped. This can 2314 * happen, for instance, if we stopped the scan ourselves, 2315 * called ieee80211_scan_completed() and the userspace called 2316 * cancel scan before ieee80211_scan_work() could run. 2317 * To handle that, simply return if the scan is not running. 2318 */ 2319 if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) 2320 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); 2321 2322 mutex_unlock(&mvm->mutex); 2323 } 2324 2325 static void 2326 iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw, 2327 struct ieee80211_sta *sta, u16 tids, 2328 int num_frames, 2329 enum ieee80211_frame_release_type reason, 2330 bool more_data) 2331 { 2332 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2333 2334 /* Called when we need to transmit (a) frame(s) from mac80211 */ 2335 2336 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames, 2337 tids, more_data, false); 2338 } 2339 2340 static void 2341 iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw, 2342 struct ieee80211_sta *sta, u16 tids, 2343 int num_frames, 2344 enum ieee80211_frame_release_type reason, 2345 bool more_data) 2346 { 2347 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2348 2349 /* Called when we need to transmit (a) frame(s) from agg or dqa queue */ 2350 2351 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames, 2352 tids, more_data, true); 2353 } 2354 2355 static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, 2356 enum sta_notify_cmd cmd, 2357 struct ieee80211_sta *sta) 2358 { 2359 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2360 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 2361 unsigned long txqs = 0, tids = 0; 2362 int tid; 2363 2364 /* 2365 * If we have TVQM then we get too high queue numbers - luckily 2366 * we really shouldn't get here with that because such hardware 2367 * should have firmware supporting buffer station offload.
2368 */ 2369 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) 2370 return; 2371 2372 spin_lock_bh(&mvmsta->lock); 2373 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { 2374 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; 2375 2376 if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE) 2377 continue; 2378 2379 __set_bit(tid_data->txq_id, &txqs); 2380 2381 if (iwl_mvm_tid_queued(mvm, tid_data) == 0) 2382 continue; 2383 2384 __set_bit(tid, &tids); 2385 } 2386 2387 switch (cmd) { 2388 case STA_NOTIFY_SLEEP: 2389 for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT) 2390 ieee80211_sta_set_buffered(sta, tid, true); 2391 2392 if (txqs) 2393 iwl_trans_freeze_txq_timer(mvm->trans, txqs, true); 2394 /* 2395 * The fw updates the STA to be asleep. Tx packets on the Tx 2396 * queues to this station will not be transmitted. The fw will 2397 * send a Tx response with TX_STATUS_FAIL_DEST_PS. 2398 */ 2399 break; 2400 case STA_NOTIFY_AWAKE: 2401 if (WARN_ON(mvmsta->sta_id == IWL_MVM_INVALID_STA)) 2402 break; 2403 2404 if (txqs) 2405 iwl_trans_freeze_txq_timer(mvm->trans, txqs, false); 2406 iwl_mvm_sta_modify_ps_wake(mvm, sta); 2407 break; 2408 default: 2409 break; 2410 } 2411 spin_unlock_bh(&mvmsta->lock); 2412 } 2413 2414 static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, 2415 struct ieee80211_vif *vif, 2416 enum sta_notify_cmd cmd, 2417 struct ieee80211_sta *sta) 2418 { 2419 __iwl_mvm_mac_sta_notify(hw, cmd, sta); 2420 } 2421 2422 void iwl_mvm_sta_pm_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) 2423 { 2424 struct iwl_rx_packet *pkt = rxb_addr(rxb); 2425 struct iwl_mvm_pm_state_notification *notif = (void *)pkt->data; 2426 struct ieee80211_sta *sta; 2427 struct iwl_mvm_sta *mvmsta; 2428 bool sleeping = (notif->type != IWL_MVM_PM_EVENT_AWAKE); 2429 2430 if (WARN_ON(notif->sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) 2431 return; 2432 2433 rcu_read_lock(); 2434 sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]); 2435 if (WARN_ON(IS_ERR_OR_NULL(sta))) { 2436 rcu_read_unlock(); 2437 return; 2438 } 2439 2440 mvmsta = iwl_mvm_sta_from_mac80211(sta); 2441 2442 if (!mvmsta->vif || 2443 mvmsta->vif->type != NL80211_IFTYPE_AP) { 2444 rcu_read_unlock(); 2445 return; 2446 } 2447 2448 if (mvmsta->sleeping != sleeping) { 2449 mvmsta->sleeping = sleeping; 2450 __iwl_mvm_mac_sta_notify(mvm->hw, 2451 sleeping ? STA_NOTIFY_SLEEP : STA_NOTIFY_AWAKE, 2452 sta); 2453 ieee80211_sta_ps_transition(sta, sleeping); 2454 } 2455 2456 if (sleeping) { 2457 switch (notif->type) { 2458 case IWL_MVM_PM_EVENT_AWAKE: 2459 case IWL_MVM_PM_EVENT_ASLEEP: 2460 break; 2461 case IWL_MVM_PM_EVENT_UAPSD: 2462 ieee80211_sta_uapsd_trigger(sta, IEEE80211_NUM_TIDS); 2463 break; 2464 case IWL_MVM_PM_EVENT_PS_POLL: 2465 ieee80211_sta_pspoll(sta); 2466 break; 2467 default: 2468 break; 2469 } 2470 } 2471 2472 rcu_read_unlock(); 2473 } 2474 2475 static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw, 2476 struct ieee80211_vif *vif, 2477 struct ieee80211_sta *sta) 2478 { 2479 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2480 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 2481 2482 /* 2483 * This is called before mac80211 does RCU synchronisation, 2484 * so here we already invalidate our internal RCU-protected 2485 * station pointer. The rest of the code will thus no longer 2486 * be able to find the station this way, and we don't rely 2487 * on further RCU synchronisation after the sta_state() 2488 * callback deleted the station. 
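 * The entry is poisoned with ERR_PTR(-ENOENT) rather than cleared to
 * NULL, so IS_ERR_OR_NULL() checks elsewhere stop using this station.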
2489 */ 2490 mutex_lock(&mvm->mutex); 2491 if (sta == rcu_access_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id])) 2492 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], 2493 ERR_PTR(-ENOENT)); 2494 2495 mutex_unlock(&mvm->mutex); 2496 } 2497 2498 static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 2499 const u8 *bssid) 2500 { 2501 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT)) 2502 return; 2503 2504 if (vif->p2p && !iwl_mvm_is_p2p_scm_uapsd_supported(mvm)) { 2505 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; 2506 return; 2507 } 2508 2509 if (!vif->p2p && 2510 (iwlwifi_mod_params.uapsd_disable & IWL_DISABLE_UAPSD_BSS)) { 2511 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; 2512 return; 2513 } 2514 2515 vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; 2516 } 2517 2518 static void 2519 iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm, 2520 struct ieee80211_vif *vif, u8 *peer_addr, 2521 enum nl80211_tdls_operation action) 2522 { 2523 struct iwl_fw_dbg_trigger_tlv *trig; 2524 struct iwl_fw_dbg_trigger_tdls *tdls_trig; 2525 2526 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TDLS)) 2527 return; 2528 2529 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TDLS); 2530 tdls_trig = (void *)trig->data; 2531 if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, 2532 ieee80211_vif_to_wdev(vif), trig)) 2533 return; 2534 2535 if (!(tdls_trig->action_bitmap & BIT(action))) 2536 return; 2537 2538 if (tdls_trig->peer_mode && 2539 memcmp(tdls_trig->peer, peer_addr, ETH_ALEN) != 0) 2540 return; 2541 2542 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, 2543 "TDLS event occurred, peer %pM, action %d", 2544 peer_addr, action); 2545 } 2546 2547 static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm, 2548 struct iwl_mvm_sta *mvm_sta) 2549 { 2550 struct iwl_mvm_tid_data *tid_data; 2551 struct sk_buff *skb; 2552 int i; 2553 2554 spin_lock_bh(&mvm_sta->lock); 2555 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) { 2556 tid_data = &mvm_sta->tid_data[i]; 2557 2558 while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames))) { 2559 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 2560 2561 /* 2562 * The first deferred frame should've stopped the MAC 2563 * queues, so we should never get a second deferred 2564 * frame for the RA/TID. 2565 */ 2566 iwl_mvm_start_mac_queues(mvm, info->hw_queue); 2567 ieee80211_free_txskb(mvm->hw, skb); 2568 } 2569 } 2570 spin_unlock_bh(&mvm_sta->lock); 2571 } 2572 2573 static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, 2574 struct ieee80211_vif *vif, 2575 struct ieee80211_sta *sta, 2576 enum ieee80211_sta_state old_state, 2577 enum ieee80211_sta_state new_state) 2578 { 2579 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2580 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2581 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 2582 int ret; 2583 2584 IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n", 2585 sta->addr, old_state, new_state); 2586 2587 /* this would be a mac80211 bug ... but don't crash */ 2588 if (WARN_ON_ONCE(!mvmvif->phy_ctxt)) 2589 return -EINVAL; 2590 2591 /* 2592 * If we are in a STA removal flow and in DQA mode: 2593 * 2594 * This is after the sync_rcu part, so the queues have already been 2595 * flushed. No more TXs on their way in mac80211's path, and no more in 2596 * the queues. 2597 * Also, we won't be getting any new TX frames for this station. 2598 * What we might have are deferred TX frames that need to be taken care 2599 * of. 
2600 * 2601 * Drop any still-queued deferred-frame before removing the STA, and 2602 * make sure the worker is no longer handling frames for this STA. 2603 */ 2604 if (old_state == IEEE80211_STA_NONE && 2605 new_state == IEEE80211_STA_NOTEXIST) { 2606 iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta); 2607 flush_work(&mvm->add_stream_wk); 2608 2609 /* 2610 * No need to make sure deferred TX indication is off since the 2611 * worker will already remove it if it was on 2612 */ 2613 } 2614 2615 mutex_lock(&mvm->mutex); 2616 /* track whether or not the station is associated */ 2617 mvm_sta->associated = new_state >= IEEE80211_STA_ASSOC; 2618 2619 if (old_state == IEEE80211_STA_NOTEXIST && 2620 new_state == IEEE80211_STA_NONE) { 2621 /* 2622 * Firmware bug - it'll crash if the beacon interval is less 2623 * than 16. We can't avoid connecting at all, so refuse the 2624 * station state change, this will cause mac80211 to abandon 2625 * attempts to connect to this AP, and eventually wpa_s will 2626 * blacklist the AP... 2627 */ 2628 if (vif->type == NL80211_IFTYPE_STATION && 2629 vif->bss_conf.beacon_int < 16) { 2630 IWL_ERR(mvm, 2631 "AP %pM beacon interval is %d, refusing due to firmware bug!\n", 2632 sta->addr, vif->bss_conf.beacon_int); 2633 ret = -EINVAL; 2634 goto out_unlock; 2635 } 2636 2637 if (sta->tdls && 2638 (vif->p2p || 2639 iwl_mvm_tdls_sta_count(mvm, NULL) == 2640 IWL_MVM_TDLS_STA_COUNT || 2641 iwl_mvm_phy_ctx_count(mvm) > 1)) { 2642 IWL_DEBUG_MAC80211(mvm, "refusing TDLS sta\n"); 2643 ret = -EBUSY; 2644 goto out_unlock; 2645 } 2646 2647 ret = iwl_mvm_add_sta(mvm, vif, sta); 2648 if (sta->tdls && ret == 0) { 2649 iwl_mvm_recalc_tdls_state(mvm, vif, true); 2650 iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, 2651 NL80211_TDLS_SETUP); 2652 } 2653 } else if (old_state == IEEE80211_STA_NONE && 2654 new_state == IEEE80211_STA_AUTH) { 2655 /* 2656 * EBS may be disabled due to previous failures reported by FW. 2657 * Reset EBS status here assuming environment has been changed. 
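 * (mvm->last_ebs_successful is what later scan requests consult when
 * deciding whether to ask the firmware for EBS again.)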
2658 */ 2659 mvm->last_ebs_successful = true; 2660 iwl_mvm_check_uapsd(mvm, vif, sta->addr); 2661 ret = 0; 2662 } else if (old_state == IEEE80211_STA_AUTH && 2663 new_state == IEEE80211_STA_ASSOC) { 2664 if (vif->type == NL80211_IFTYPE_AP) { 2665 mvmvif->ap_assoc_sta_count++; 2666 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 2667 } 2668 2669 iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, 2670 true); 2671 ret = iwl_mvm_update_sta(mvm, vif, sta); 2672 } else if (old_state == IEEE80211_STA_ASSOC && 2673 new_state == IEEE80211_STA_AUTHORIZED) { 2674 2675 /* we don't support TDLS during DCM */ 2676 if (iwl_mvm_phy_ctx_count(mvm) > 1) 2677 iwl_mvm_teardown_tdls_peers(mvm); 2678 2679 if (sta->tdls) 2680 iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, 2681 NL80211_TDLS_ENABLE_LINK); 2682 2683 /* enable beacon filtering */ 2684 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); 2685 ret = 0; 2686 } else if (old_state == IEEE80211_STA_AUTHORIZED && 2687 new_state == IEEE80211_STA_ASSOC) { 2688 /* disable beacon filtering */ 2689 WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif, 0)); 2690 ret = 0; 2691 } else if (old_state == IEEE80211_STA_ASSOC && 2692 new_state == IEEE80211_STA_AUTH) { 2693 if (vif->type == NL80211_IFTYPE_AP) { 2694 mvmvif->ap_assoc_sta_count--; 2695 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 2696 } 2697 ret = 0; 2698 } else if (old_state == IEEE80211_STA_AUTH && 2699 new_state == IEEE80211_STA_NONE) { 2700 ret = 0; 2701 } else if (old_state == IEEE80211_STA_NONE && 2702 new_state == IEEE80211_STA_NOTEXIST) { 2703 ret = iwl_mvm_rm_sta(mvm, vif, sta); 2704 if (sta->tdls) { 2705 iwl_mvm_recalc_tdls_state(mvm, vif, false); 2706 iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, 2707 NL80211_TDLS_DISABLE_LINK); 2708 } 2709 } else { 2710 ret = -EIO; 2711 } 2712 out_unlock: 2713 mutex_unlock(&mvm->mutex); 2714 2715 if (sta->tdls && ret == 0) { 2716 if (old_state == IEEE80211_STA_NOTEXIST && 2717 new_state == IEEE80211_STA_NONE) 2718 ieee80211_reserve_tid(sta, IWL_MVM_TDLS_FW_TID); 2719 else if (old_state == IEEE80211_STA_NONE && 2720 new_state == IEEE80211_STA_NOTEXIST) 2721 ieee80211_unreserve_tid(sta, IWL_MVM_TDLS_FW_TID); 2722 } 2723 2724 return ret; 2725 } 2726 2727 static int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value) 2728 { 2729 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2730 2731 mvm->rts_threshold = value; 2732 2733 return 0; 2734 } 2735 2736 static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw, 2737 struct ieee80211_vif *vif, 2738 struct ieee80211_sta *sta, u32 changed) 2739 { 2740 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2741 2742 if (vif->type == NL80211_IFTYPE_STATION && 2743 changed & IEEE80211_RC_NSS_CHANGED) 2744 iwl_mvm_sf_update(mvm, vif, false); 2745 } 2746 2747 static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw, 2748 struct ieee80211_vif *vif, u16 ac, 2749 const struct ieee80211_tx_queue_params *params) 2750 { 2751 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2752 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2753 2754 mvmvif->queue_params[ac] = *params; 2755 2756 /* 2757 * No need to update right away, we'll get BSS_CHANGED_QOS 2758 * The exception is P2P_DEVICE interface which needs immediate update. 
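 * (a P2P Device interface has no BSS, so it would never get the
 * BSS_CHANGED_QOS update.)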
2759 */ 2760 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { 2761 int ret; 2762 2763 mutex_lock(&mvm->mutex); 2764 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 2765 mutex_unlock(&mvm->mutex); 2766 return ret; 2767 } 2768 return 0; 2769 } 2770 2771 static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw, 2772 struct ieee80211_vif *vif) 2773 { 2774 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2775 u32 duration = IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS; 2776 u32 min_duration = IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS; 2777 2778 if (WARN_ON_ONCE(vif->bss_conf.assoc)) 2779 return; 2780 2781 /* 2782 * iwl_mvm_protect_session() reads directly from the device 2783 * (the system time), so make sure it is available. 2784 */ 2785 if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PREPARE_TX)) 2786 return; 2787 2788 mutex_lock(&mvm->mutex); 2789 /* Try really hard to protect the session and hear a beacon */ 2790 iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500, false); 2791 mutex_unlock(&mvm->mutex); 2792 2793 iwl_mvm_unref(mvm, IWL_MVM_REF_PREPARE_TX); 2794 } 2795 2796 static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw, 2797 struct ieee80211_vif *vif, 2798 struct cfg80211_sched_scan_request *req, 2799 struct ieee80211_scan_ies *ies) 2800 { 2801 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2802 2803 int ret; 2804 2805 mutex_lock(&mvm->mutex); 2806 2807 if (!vif->bss_conf.idle) { 2808 ret = -EBUSY; 2809 goto out; 2810 } 2811 2812 ret = iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED); 2813 2814 out: 2815 mutex_unlock(&mvm->mutex); 2816 return ret; 2817 } 2818 2819 static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw, 2820 struct ieee80211_vif *vif) 2821 { 2822 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2823 int ret; 2824 2825 mutex_lock(&mvm->mutex); 2826 2827 /* Due to a race condition, it's possible that mac80211 asks 2828 * us to stop a sched_scan when it's already stopped. This 2829 * can happen, for instance, if we stopped the scan ourselves, 2830 * called ieee80211_sched_scan_stopped() and the userspace called 2831 * stop sched scan before ieee80211_sched_scan_stopped_work() 2832 * could run. To handle this, simply return if the scan is 2833 * not running.
2834 */ 2835 if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) { 2836 mutex_unlock(&mvm->mutex); 2837 return 0; 2838 } 2839 2840 ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, false); 2841 mutex_unlock(&mvm->mutex); 2842 iwl_mvm_wait_for_async_handlers(mvm); 2843 2844 return ret; 2845 } 2846 2847 static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, 2848 enum set_key_cmd cmd, 2849 struct ieee80211_vif *vif, 2850 struct ieee80211_sta *sta, 2851 struct ieee80211_key_conf *key) 2852 { 2853 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2854 struct iwl_mvm_sta *mvmsta; 2855 struct iwl_mvm_key_pn *ptk_pn; 2856 int keyidx = key->keyidx; 2857 int ret; 2858 u8 key_offset; 2859 2860 if (iwlwifi_mod_params.swcrypto) { 2861 IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n"); 2862 return -EOPNOTSUPP; 2863 } 2864 2865 switch (key->cipher) { 2866 case WLAN_CIPHER_SUITE_TKIP: 2867 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; 2868 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; 2869 break; 2870 case WLAN_CIPHER_SUITE_CCMP: 2871 case WLAN_CIPHER_SUITE_GCMP: 2872 case WLAN_CIPHER_SUITE_GCMP_256: 2873 if (!iwl_mvm_has_new_tx_api(mvm)) 2874 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; 2875 break; 2876 case WLAN_CIPHER_SUITE_AES_CMAC: 2877 case WLAN_CIPHER_SUITE_BIP_GMAC_128: 2878 case WLAN_CIPHER_SUITE_BIP_GMAC_256: 2879 WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE)); 2880 break; 2881 case WLAN_CIPHER_SUITE_WEP40: 2882 case WLAN_CIPHER_SUITE_WEP104: 2883 /* For non-client mode, only use WEP keys for TX as we probably 2884 * don't have a station yet anyway and would then have to keep 2885 * track of the keys, linking them to each of the clients/peers 2886 * as they appear. For now, don't do that, for performance WEP 2887 * offload doesn't really matter much, but we need it for some 2888 * other offload features in client mode. 2889 */ 2890 if (vif->type != NL80211_IFTYPE_STATION) 2891 return 0; 2892 break; 2893 default: 2894 /* currently FW supports only one optional cipher scheme */ 2895 if (hw->n_cipher_schemes && 2896 hw->cipher_schemes->cipher == key->cipher) 2897 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; 2898 else 2899 return -EOPNOTSUPP; 2900 } 2901 2902 mutex_lock(&mvm->mutex); 2903 2904 switch (cmd) { 2905 case SET_KEY: 2906 if ((vif->type == NL80211_IFTYPE_ADHOC || 2907 vif->type == NL80211_IFTYPE_AP) && !sta) { 2908 /* 2909 * GTK on AP interface is a TX-only key, return 0; 2910 * on IBSS they're per-station and because we're lazy 2911 * we don't support them for RX, so do the same. 2912 * CMAC/GMAC in AP/IBSS modes must be done in software. 2913 */ 2914 if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC || 2915 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || 2916 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) 2917 ret = -EOPNOTSUPP; 2918 else 2919 ret = 0; 2920 2921 if (key->cipher != WLAN_CIPHER_SUITE_GCMP && 2922 key->cipher != WLAN_CIPHER_SUITE_GCMP_256 && 2923 !iwl_mvm_has_new_tx_api(mvm)) { 2924 key->hw_key_idx = STA_KEY_IDX_INVALID; 2925 break; 2926 } 2927 } 2928 2929 /* During FW restart, in order to restore the state as it was, 2930 * don't try to reprogram keys we previously failed for. 
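 * Such keys were left with hw_key_idx == STA_KEY_IDX_INVALID by the
 * earlier failure, which is exactly what the check below catches.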
2931 */ 2932 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && 2933 key->hw_key_idx == STA_KEY_IDX_INVALID) { 2934 IWL_DEBUG_MAC80211(mvm, 2935 "skip invalid idx key programming during restart\n"); 2936 ret = 0; 2937 break; 2938 } 2939 2940 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && 2941 sta && iwl_mvm_has_new_rx_api(mvm) && 2942 key->flags & IEEE80211_KEY_FLAG_PAIRWISE && 2943 (key->cipher == WLAN_CIPHER_SUITE_CCMP || 2944 key->cipher == WLAN_CIPHER_SUITE_GCMP || 2945 key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) { 2946 struct ieee80211_key_seq seq; 2947 int tid, q; 2948 2949 mvmsta = iwl_mvm_sta_from_mac80211(sta); 2950 WARN_ON(rcu_access_pointer(mvmsta->ptk_pn[keyidx])); 2951 ptk_pn = kzalloc(sizeof(*ptk_pn) + 2952 mvm->trans->num_rx_queues * 2953 sizeof(ptk_pn->q[0]), 2954 GFP_KERNEL); 2955 if (!ptk_pn) { 2956 ret = -ENOMEM; 2957 break; 2958 } 2959 2960 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { 2961 ieee80211_get_key_rx_seq(key, tid, &seq); 2962 for (q = 0; q < mvm->trans->num_rx_queues; q++) 2963 memcpy(ptk_pn->q[q].pn[tid], 2964 seq.ccmp.pn, 2965 IEEE80211_CCMP_PN_LEN); 2966 } 2967 2968 rcu_assign_pointer(mvmsta->ptk_pn[keyidx], ptk_pn); 2969 } 2970 2971 /* in HW restart reuse the index, otherwise request a new one */ 2972 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) 2973 key_offset = key->hw_key_idx; 2974 else 2975 key_offset = STA_KEY_IDX_INVALID; 2976 2977 IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n"); 2978 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset); 2979 if (ret) { 2980 IWL_WARN(mvm, "set key failed\n"); 2981 /* 2982 * can't add key for RX, but we don't need it 2983 * in the device for TX so still return 0 2984 */ 2985 key->hw_key_idx = STA_KEY_IDX_INVALID; 2986 ret = 0; 2987 } 2988 2989 break; 2990 case DISABLE_KEY: 2991 if (key->hw_key_idx == STA_KEY_IDX_INVALID) { 2992 ret = 0; 2993 break; 2994 } 2995 2996 if (sta && iwl_mvm_has_new_rx_api(mvm) && 2997 key->flags & IEEE80211_KEY_FLAG_PAIRWISE && 2998 (key->cipher == WLAN_CIPHER_SUITE_CCMP || 2999 key->cipher == WLAN_CIPHER_SUITE_GCMP || 3000 key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) { 3001 mvmsta = iwl_mvm_sta_from_mac80211(sta); 3002 ptk_pn = rcu_dereference_protected( 3003 mvmsta->ptk_pn[keyidx], 3004 lockdep_is_held(&mvm->mutex)); 3005 RCU_INIT_POINTER(mvmsta->ptk_pn[keyidx], NULL); 3006 if (ptk_pn) 3007 kfree_rcu(ptk_pn, rcu_head); 3008 } 3009 3010 IWL_DEBUG_MAC80211(mvm, "disable hwcrypto key\n"); 3011 ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key); 3012 break; 3013 default: 3014 ret = -EINVAL; 3015 } 3016 3017 mutex_unlock(&mvm->mutex); 3018 return ret; 3019 } 3020 3021 static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw, 3022 struct ieee80211_vif *vif, 3023 struct ieee80211_key_conf *keyconf, 3024 struct ieee80211_sta *sta, 3025 u32 iv32, u16 *phase1key) 3026 { 3027 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3028 3029 if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID) 3030 return; 3031 3032 iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key); 3033 } 3034 3035 3036 static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait, 3037 struct iwl_rx_packet *pkt, void *data) 3038 { 3039 struct iwl_mvm *mvm = 3040 container_of(notif_wait, struct iwl_mvm, notif_wait); 3041 struct iwl_hs20_roc_res *resp; 3042 int resp_len = iwl_rx_packet_payload_len(pkt); 3043 struct iwl_mvm_time_event_data *te_data = data; 3044 3045 if (WARN_ON(pkt->hdr.cmd != HOT_SPOT_CMD)) 3046 return true; 3047 3048 if (WARN_ON_ONCE(resp_len != sizeof(*resp))) 
{ 3049 IWL_ERR(mvm, "Invalid HOT_SPOT_CMD response\n"); 3050 return true; 3051 } 3052 3053 resp = (void *)pkt->data; 3054 3055 IWL_DEBUG_TE(mvm, 3056 "Aux ROC: Received response from ucode: status=%d uid=%d\n", 3057 resp->status, resp->event_unique_id); 3058 3059 te_data->uid = le32_to_cpu(resp->event_unique_id); 3060 IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n", 3061 te_data->uid); 3062 3063 spin_lock_bh(&mvm->time_event_lock); 3064 list_add_tail(&te_data->list, &mvm->aux_roc_te_list); 3065 spin_unlock_bh(&mvm->time_event_lock); 3066 3067 return true; 3068 } 3069 3070 #define AUX_ROC_MIN_DURATION MSEC_TO_TU(100) 3071 #define AUX_ROC_MIN_DELAY MSEC_TO_TU(200) 3072 #define AUX_ROC_MAX_DELAY MSEC_TO_TU(600) 3073 #define AUX_ROC_SAFETY_BUFFER MSEC_TO_TU(20) 3074 #define AUX_ROC_MIN_SAFETY_BUFFER MSEC_TO_TU(10) 3075 static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, 3076 struct ieee80211_channel *channel, 3077 struct ieee80211_vif *vif, 3078 int duration) 3079 { 3080 int res, time_reg = DEVICE_SYSTEM_TIME_REG; 3081 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3082 struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data; 3083 static const u16 time_event_response[] = { HOT_SPOT_CMD }; 3084 struct iwl_notification_wait wait_time_event; 3085 u32 dtim_interval = vif->bss_conf.dtim_period * 3086 vif->bss_conf.beacon_int; 3087 u32 req_dur, delay; 3088 struct iwl_hs20_roc_req aux_roc_req = { 3089 .action = cpu_to_le32(FW_CTXT_ACTION_ADD), 3090 .id_and_color = 3091 cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)), 3092 .sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id), 3093 /* Set the channel info data */ 3094 .channel_info.band = (channel->band == NL80211_BAND_2GHZ) ? 3095 PHY_BAND_24 : PHY_BAND_5, 3096 .channel_info.channel = channel->hw_value, 3097 .channel_info.width = PHY_VHT_CHANNEL_MODE20, 3098 /* Set the time and duration */ 3099 .apply_time = cpu_to_le32(iwl_read_prph(mvm->trans, time_reg)), 3100 }; 3101 3102 delay = AUX_ROC_MIN_DELAY; 3103 req_dur = MSEC_TO_TU(duration); 3104 3105 /* 3106 * If we are associated we want the delay time to be at least one 3107 * dtim interval so that the FW can wait until after the DTIM and 3108 * then start the time event, this will potentially allow us to 3109 * remain off-channel for the max duration. 3110 * Since we want to use almost a whole dtim interval we would also 3111 * like the delay to be for 2-3 dtim intervals, in case there are 3112 * other time events with higher priority.
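 * (Illustration: when associated with beacon_int = 100 TU and
 * dtim_period = 1, the dtim interval is 100 TU and the delay becomes
 * min(3 * 100, AUX_ROC_MAX_DELAY) = 300 TU.)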
3113 */ 3114 if (vif->bss_conf.assoc) { 3115 delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY); 3116 /* We cannot remain off-channel longer than the DTIM interval */ 3117 if (dtim_interval <= req_dur) { 3118 req_dur = dtim_interval - AUX_ROC_SAFETY_BUFFER; 3119 if (req_dur <= AUX_ROC_MIN_DURATION) 3120 req_dur = dtim_interval - 3121 AUX_ROC_MIN_SAFETY_BUFFER; 3122 } 3123 } 3124 3125 aux_roc_req.duration = cpu_to_le32(req_dur); 3126 aux_roc_req.apply_time_max_delay = cpu_to_le32(delay); 3127 3128 IWL_DEBUG_TE(mvm, 3129 "ROC: Requesting to remain on channel %u for %ums (requested = %ums, max_delay = %ums, dtim_interval = %ums)\n", 3130 channel->hw_value, req_dur, duration, delay, 3131 dtim_interval); 3132 /* Set the node address */ 3133 memcpy(aux_roc_req.node_addr, vif->addr, ETH_ALEN); 3134 3135 lockdep_assert_held(&mvm->mutex); 3136 3137 spin_lock_bh(&mvm->time_event_lock); 3138 3139 if (WARN_ON(te_data->id == HOT_SPOT_CMD)) { 3140 spin_unlock_bh(&mvm->time_event_lock); 3141 return -EIO; 3142 } 3143 3144 te_data->vif = vif; 3145 te_data->duration = duration; 3146 te_data->id = HOT_SPOT_CMD; 3147 3148 spin_unlock_bh(&mvm->time_event_lock); 3149 3150 /* 3151 * Use a notification wait, which really just processes the 3152 * command response and doesn't wait for anything, in order 3153 * to be able to process the response and get the UID inside 3154 * the RX path. Using CMD_WANT_SKB doesn't work because it 3155 * stores the buffer and then wakes up this thread, by which 3156 * time another notification (that the time event started) 3157 * might already be processed unsuccessfully. 3158 */ 3159 iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event, 3160 time_event_response, 3161 ARRAY_SIZE(time_event_response), 3162 iwl_mvm_rx_aux_roc, te_data); 3163 3164 res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, sizeof(aux_roc_req), 3165 &aux_roc_req); 3166 3167 if (res) { 3168 IWL_ERR(mvm, "Couldn't send HOT_SPOT_CMD: %d\n", res); 3169 iwl_remove_notification(&mvm->notif_wait, &wait_time_event); 3170 goto out_clear_te; 3171 } 3172 3173 /* No need to wait for anything, so just pass 1 (0 isn't valid) */ 3174 res = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1); 3175 /* should never fail */ 3176 WARN_ON_ONCE(res); 3177 3178 if (res) { 3179 out_clear_te: 3180 spin_lock_bh(&mvm->time_event_lock); 3181 iwl_mvm_te_clear_data(mvm, te_data); 3182 spin_unlock_bh(&mvm->time_event_lock); 3183 } 3184 3185 return res; 3186 } 3187 3188 static int iwl_mvm_roc(struct ieee80211_hw *hw, 3189 struct ieee80211_vif *vif, 3190 struct ieee80211_channel *channel, 3191 int duration, 3192 enum ieee80211_roc_type type) 3193 { 3194 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3195 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3196 struct cfg80211_chan_def chandef; 3197 struct iwl_mvm_phy_ctxt *phy_ctxt; 3198 int ret, i; 3199 3200 IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value, 3201 duration, type); 3202 3203 flush_work(&mvm->roc_done_wk); 3204 3205 mutex_lock(&mvm->mutex); 3206 3207 switch (vif->type) { 3208 case NL80211_IFTYPE_STATION: 3209 if (fw_has_capa(&mvm->fw->ucode_capa, 3210 IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) { 3211 /* Use aux roc framework (HS20) */ 3212 ret = iwl_mvm_send_aux_roc_cmd(mvm, channel, 3213 vif, duration); 3214 goto out_unlock; 3215 } 3216 IWL_ERR(mvm, "hotspot not supported\n"); 3217 ret = -EINVAL; 3218 goto out_unlock; 3219 case NL80211_IFTYPE_P2P_DEVICE: 3220 /* handle below */ 3221 break; 3222 default: 3223 IWL_ERR(mvm, "vif isn't 
P2P_DEVICE: %d\n", vif->type); 3224 ret = -EINVAL; 3225 goto out_unlock; 3226 } 3227 3228 for (i = 0; i < NUM_PHY_CTX; i++) { 3229 phy_ctxt = &mvm->phy_ctxts[i]; 3230 if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt) 3231 continue; 3232 3233 if (phy_ctxt->ref && channel == phy_ctxt->channel) { 3234 /* 3235 * Unbind the P2P_DEVICE from the current PHY context, 3236 * and if the PHY context is not used remove it. 3237 */ 3238 ret = iwl_mvm_binding_remove_vif(mvm, vif); 3239 if (WARN(ret, "Failed unbinding P2P_DEVICE\n")) 3240 goto out_unlock; 3241 3242 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); 3243 3244 /* Bind the P2P_DEVICE to the current PHY Context */ 3245 mvmvif->phy_ctxt = phy_ctxt; 3246 3247 ret = iwl_mvm_binding_add_vif(mvm, vif); 3248 if (WARN(ret, "Failed binding P2P_DEVICE\n")) 3249 goto out_unlock; 3250 3251 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); 3252 goto schedule_time_event; 3253 } 3254 } 3255 3256 /* Need to update the PHY context only if the ROC channel changed */ 3257 if (channel == mvmvif->phy_ctxt->channel) 3258 goto schedule_time_event; 3259 3260 cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT); 3261 3262 /* 3263 * Change the PHY context configuration as it is currently referenced 3264 * only by the P2P Device MAC 3265 */ 3266 if (mvmvif->phy_ctxt->ref == 1) { 3267 ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt, 3268 &chandef, 1, 1); 3269 if (ret) 3270 goto out_unlock; 3271 } else { 3272 /* 3273 * The PHY context is shared with other MACs. Need to remove the 3274 * P2P Device from the binding, allocate a new PHY context and 3275 * create a new binding 3276 */ 3277 phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); 3278 if (!phy_ctxt) { 3279 ret = -ENOSPC; 3280 goto out_unlock; 3281 } 3282 3283 ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef, 3284 1, 1); 3285 if (ret) { 3286 IWL_ERR(mvm, "Failed to change PHY context\n"); 3287 goto out_unlock; 3288 } 3289 3290 /* Unbind the P2P_DEVICE from the current PHY context */ 3291 ret = iwl_mvm_binding_remove_vif(mvm, vif); 3292 if (WARN(ret, "Failed unbinding P2P_DEVICE\n")) 3293 goto out_unlock; 3294 3295 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); 3296 3297 /* Bind the P2P_DEVICE to the newly allocated PHY context */ 3298 mvmvif->phy_ctxt = phy_ctxt; 3299 3300 ret = iwl_mvm_binding_add_vif(mvm, vif); 3301 if (WARN(ret, "Failed binding P2P_DEVICE\n")) 3302 goto out_unlock; 3303 3304 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); 3305 } 3306 3307 schedule_time_event: 3308 /* Schedule the time events */ 3309 ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type); 3310 3311 out_unlock: 3312 mutex_unlock(&mvm->mutex); 3313 IWL_DEBUG_MAC80211(mvm, "leave\n"); 3314 return ret; 3315 } 3316 3317 static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw) 3318 { 3319 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3320 3321 IWL_DEBUG_MAC80211(mvm, "enter\n"); 3322 3323 mutex_lock(&mvm->mutex); 3324 iwl_mvm_stop_roc(mvm); 3325 mutex_unlock(&mvm->mutex); 3326 3327 IWL_DEBUG_MAC80211(mvm, "leave\n"); 3328 return 0; 3329 } 3330 3331 static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm, 3332 struct ieee80211_chanctx_conf *ctx) 3333 { 3334 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; 3335 struct iwl_mvm_phy_ctxt *phy_ctxt; 3336 int ret; 3337 3338 lockdep_assert_held(&mvm->mutex); 3339 3340 IWL_DEBUG_MAC80211(mvm, "Add channel context\n"); 3341 3342 phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); 3343 if (!phy_ctxt) { 3344 ret = -ENOSPC; 3345 goto out; 3346 } 3347 3348 ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt,
&ctx->min_def, 3349 ctx->rx_chains_static, 3350 ctx->rx_chains_dynamic); 3351 if (ret) { 3352 IWL_ERR(mvm, "Failed to add PHY context\n"); 3353 goto out; 3354 } 3355 3356 iwl_mvm_phy_ctxt_ref(mvm, phy_ctxt); 3357 *phy_ctxt_id = phy_ctxt->id; 3358 out: 3359 return ret; 3360 } 3361 3362 static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw, 3363 struct ieee80211_chanctx_conf *ctx) 3364 { 3365 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3366 int ret; 3367 3368 mutex_lock(&mvm->mutex); 3369 ret = __iwl_mvm_add_chanctx(mvm, ctx); 3370 mutex_unlock(&mvm->mutex); 3371 3372 return ret; 3373 } 3374 3375 static void __iwl_mvm_remove_chanctx(struct iwl_mvm *mvm, 3376 struct ieee80211_chanctx_conf *ctx) 3377 { 3378 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; 3379 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; 3380 3381 lockdep_assert_held(&mvm->mutex); 3382 3383 iwl_mvm_phy_ctxt_unref(mvm, phy_ctxt); 3384 } 3385 3386 static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw, 3387 struct ieee80211_chanctx_conf *ctx) 3388 { 3389 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3390 3391 mutex_lock(&mvm->mutex); 3392 __iwl_mvm_remove_chanctx(mvm, ctx); 3393 mutex_unlock(&mvm->mutex); 3394 } 3395 3396 static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw, 3397 struct ieee80211_chanctx_conf *ctx, 3398 u32 changed) 3399 { 3400 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3401 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; 3402 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; 3403 3404 if (WARN_ONCE((phy_ctxt->ref > 1) && 3405 (changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH | 3406 IEEE80211_CHANCTX_CHANGE_RX_CHAINS | 3407 IEEE80211_CHANCTX_CHANGE_RADAR | 3408 IEEE80211_CHANCTX_CHANGE_MIN_WIDTH)), 3409 "Cannot change PHY. Ref=%d, changed=0x%X\n", 3410 phy_ctxt->ref, changed)) 3411 return; 3412 3413 mutex_lock(&mvm->mutex); 3414 iwl_mvm_bt_coex_vif_change(mvm); 3415 iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def, 3416 ctx->rx_chains_static, 3417 ctx->rx_chains_dynamic); 3418 mutex_unlock(&mvm->mutex); 3419 } 3420 3421 static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, 3422 struct ieee80211_vif *vif, 3423 struct ieee80211_chanctx_conf *ctx, 3424 bool switching_chanctx) 3425 { 3426 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; 3427 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; 3428 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3429 int ret; 3430 3431 lockdep_assert_held(&mvm->mutex); 3432 3433 mvmvif->phy_ctxt = phy_ctxt; 3434 3435 switch (vif->type) { 3436 case NL80211_IFTYPE_AP: 3437 /* only needed if we're switching chanctx (i.e. during CSA) */ 3438 if (switching_chanctx) { 3439 mvmvif->ap_ibss_active = true; 3440 break; 3441 } 3442 case NL80211_IFTYPE_ADHOC: 3443 /* 3444 * The AP binding flow is handled as part of the start_ap flow 3445 * (in bss_info_changed), similarly for IBSS. 3446 */ 3447 ret = 0; 3448 goto out; 3449 case NL80211_IFTYPE_STATION: 3450 break; 3451 case NL80211_IFTYPE_MONITOR: 3452 /* always disable PS when a monitor interface is active */ 3453 mvmvif->ps_disabled = true; 3454 break; 3455 default: 3456 ret = -EINVAL; 3457 goto out; 3458 } 3459 3460 ret = iwl_mvm_binding_add_vif(mvm, vif); 3461 if (ret) 3462 goto out; 3463 3464 /* 3465 * Power state must be updated before quotas, 3466 * otherwise fw will complain. 3467 */ 3468 iwl_mvm_power_update_mac(mvm); 3469 3470 /* Setting the quota at this stage is only required for monitor 3471 * interfaces. 
For the other types, the bss_info changed flow 3472 * will handle quota settings. 3473 */ 3474 if (vif->type == NL80211_IFTYPE_MONITOR) { 3475 mvmvif->monitor_active = true; 3476 ret = iwl_mvm_update_quotas(mvm, false, NULL); 3477 if (ret) 3478 goto out_remove_binding; 3479 3480 ret = iwl_mvm_add_snif_sta(mvm, vif); 3481 if (ret) 3482 goto out_remove_binding; 3483 3484 } 3485 3486 /* Handle binding during CSA */ 3487 if (vif->type == NL80211_IFTYPE_AP) { 3488 iwl_mvm_update_quotas(mvm, false, NULL); 3489 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 3490 } 3491 3492 if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) { 3493 u32 duration = 2 * vif->bss_conf.beacon_int; 3494 3495 /* iwl_mvm_protect_session() reads directly from the 3496 * device (the system time), so make sure it is 3497 * available. 3498 */ 3499 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_CSA); 3500 if (ret) 3501 goto out_remove_binding; 3502 3503 /* Protect the session to make sure we hear the first 3504 * beacon on the new channel. 3505 */ 3506 iwl_mvm_protect_session(mvm, vif, duration, duration, 3507 vif->bss_conf.beacon_int / 2, 3508 true); 3509 3510 iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_CSA); 3511 3512 iwl_mvm_update_quotas(mvm, false, NULL); 3513 } 3514 3515 goto out; 3516 3517 out_remove_binding: 3518 iwl_mvm_binding_remove_vif(mvm, vif); 3519 iwl_mvm_power_update_mac(mvm); 3520 out: 3521 if (ret) 3522 mvmvif->phy_ctxt = NULL; 3523 return ret; 3524 } 3525 static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw, 3526 struct ieee80211_vif *vif, 3527 struct ieee80211_chanctx_conf *ctx) 3528 { 3529 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3530 int ret; 3531 3532 mutex_lock(&mvm->mutex); 3533 ret = __iwl_mvm_assign_vif_chanctx(mvm, vif, ctx, false); 3534 mutex_unlock(&mvm->mutex); 3535 3536 return ret; 3537 } 3538 3539 static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm, 3540 struct ieee80211_vif *vif, 3541 struct ieee80211_chanctx_conf *ctx, 3542 bool switching_chanctx) 3543 { 3544 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3545 struct ieee80211_vif *disabled_vif = NULL; 3546 3547 lockdep_assert_held(&mvm->mutex); 3548 3549 iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data); 3550 3551 switch (vif->type) { 3552 case NL80211_IFTYPE_ADHOC: 3553 goto out; 3554 case NL80211_IFTYPE_MONITOR: 3555 mvmvif->monitor_active = false; 3556 mvmvif->ps_disabled = false; 3557 iwl_mvm_rm_snif_sta(mvm, vif); 3558 break; 3559 case NL80211_IFTYPE_AP: 3560 /* This part is triggered only during CSA */ 3561 if (!switching_chanctx || !mvmvif->ap_ibss_active) 3562 goto out; 3563 3564 mvmvif->csa_countdown = false; 3565 3566 /* Set CS bit on all the stations */ 3567 iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true); 3568 3569 /* Save blocked iface, the timeout is set on the next beacon */ 3570 rcu_assign_pointer(mvm->csa_tx_blocked_vif, vif); 3571 3572 mvmvif->ap_ibss_active = false; 3573 break; 3574 case NL80211_IFTYPE_STATION: 3575 if (!switching_chanctx) 3576 break; 3577 3578 disabled_vif = vif; 3579 3580 iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL); 3581 break; 3582 default: 3583 break; 3584 } 3585 3586 iwl_mvm_update_quotas(mvm, false, disabled_vif); 3587 iwl_mvm_binding_remove_vif(mvm, vif); 3588 3589 out: 3590 mvmvif->phy_ctxt = NULL; 3591 iwl_mvm_power_update_mac(mvm); 3592 } 3593 3594 static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw, 3595 struct ieee80211_vif *vif, 3596 struct ieee80211_chanctx_conf *ctx) 3597 { 3598 struct iwl_mvm *mvm = 
IWL_MAC80211_GET_MVM(hw); 3599 3600 mutex_lock(&mvm->mutex); 3601 __iwl_mvm_unassign_vif_chanctx(mvm, vif, ctx, false); 3602 mutex_unlock(&mvm->mutex); 3603 } 3604 3605 static int 3606 iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm, 3607 struct ieee80211_vif_chanctx_switch *vifs) 3608 { 3609 int ret; 3610 3611 mutex_lock(&mvm->mutex); 3612 __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true); 3613 __iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx); 3614 3615 ret = __iwl_mvm_add_chanctx(mvm, vifs[0].new_ctx); 3616 if (ret) { 3617 IWL_ERR(mvm, "failed to add new_ctx during channel switch\n"); 3618 goto out_reassign; 3619 } 3620 3621 ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx, 3622 true); 3623 if (ret) { 3624 IWL_ERR(mvm, 3625 "failed to assign new_ctx during channel switch\n"); 3626 goto out_remove; 3627 } 3628 3629 /* we don't support TDLS during DCM - can be caused by channel switch */ 3630 if (iwl_mvm_phy_ctx_count(mvm) > 1) 3631 iwl_mvm_teardown_tdls_peers(mvm); 3632 3633 goto out; 3634 3635 out_remove: 3636 __iwl_mvm_remove_chanctx(mvm, vifs[0].new_ctx); 3637 3638 out_reassign: 3639 if (__iwl_mvm_add_chanctx(mvm, vifs[0].old_ctx)) { 3640 IWL_ERR(mvm, "failed to add old_ctx back after failure.\n"); 3641 goto out_restart; 3642 } 3643 3644 if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, 3645 true)) { 3646 IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n"); 3647 goto out_restart; 3648 } 3649 3650 goto out; 3651 3652 out_restart: 3653 /* things keep failing, better restart the hw */ 3654 iwl_mvm_nic_restart(mvm, false); 3655 3656 out: 3657 mutex_unlock(&mvm->mutex); 3658 3659 return ret; 3660 } 3661 3662 static int 3663 iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm, 3664 struct ieee80211_vif_chanctx_switch *vifs) 3665 { 3666 int ret; 3667 3668 mutex_lock(&mvm->mutex); 3669 __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true); 3670 3671 ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx, 3672 true); 3673 if (ret) { 3674 IWL_ERR(mvm, 3675 "failed to assign new_ctx during channel switch\n"); 3676 goto out_reassign; 3677 } 3678 3679 goto out; 3680 3681 out_reassign: 3682 if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, 3683 true)) { 3684 IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n"); 3685 goto out_restart; 3686 } 3687 3688 goto out; 3689 3690 out_restart: 3691 /* things keep failing, better restart the hw */ 3692 iwl_mvm_nic_restart(mvm, false); 3693 3694 out: 3695 mutex_unlock(&mvm->mutex); 3696 3697 return ret; 3698 } 3699 3700 static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw, 3701 struct ieee80211_vif_chanctx_switch *vifs, 3702 int n_vifs, 3703 enum ieee80211_chanctx_switch_mode mode) 3704 { 3705 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3706 int ret; 3707 3708 /* we only support a single-vif right now */ 3709 if (n_vifs > 1) 3710 return -EOPNOTSUPP; 3711 3712 switch (mode) { 3713 case CHANCTX_SWMODE_SWAP_CONTEXTS: 3714 ret = iwl_mvm_switch_vif_chanctx_swap(mvm, vifs); 3715 break; 3716 case CHANCTX_SWMODE_REASSIGN_VIF: 3717 ret = iwl_mvm_switch_vif_chanctx_reassign(mvm, vifs); 3718 break; 3719 default: 3720 ret = -EOPNOTSUPP; 3721 break; 3722 } 3723 3724 return ret; 3725 } 3726 3727 static int iwl_mvm_tx_last_beacon(struct ieee80211_hw *hw) 3728 { 3729 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3730 3731 return mvm->ibss_manager; 3732 } 3733 3734 static int iwl_mvm_set_tim(struct ieee80211_hw *hw, 3735 struct 
ieee80211_sta *sta, 3736 bool set) 3737 { 3738 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3739 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 3740 3741 if (!mvm_sta || !mvm_sta->vif) { 3742 IWL_ERR(mvm, "Station is not associated to a vif\n"); 3743 return -EINVAL; 3744 } 3745 3746 return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif); 3747 } 3748 3749 #ifdef CONFIG_NL80211_TESTMODE 3750 static const struct nla_policy iwl_mvm_tm_policy[IWL_MVM_TM_ATTR_MAX + 1] = { 3751 [IWL_MVM_TM_ATTR_CMD] = { .type = NLA_U32 }, 3752 [IWL_MVM_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 }, 3753 [IWL_MVM_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 }, 3754 }; 3755 3756 static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm, 3757 struct ieee80211_vif *vif, 3758 void *data, int len) 3759 { 3760 struct nlattr *tb[IWL_MVM_TM_ATTR_MAX + 1]; 3761 int err; 3762 u32 noa_duration; 3763 3764 err = nla_parse(tb, IWL_MVM_TM_ATTR_MAX, data, len, iwl_mvm_tm_policy, 3765 NULL); 3766 if (err) 3767 return err; 3768 3769 if (!tb[IWL_MVM_TM_ATTR_CMD]) 3770 return -EINVAL; 3771 3772 switch (nla_get_u32(tb[IWL_MVM_TM_ATTR_CMD])) { 3773 case IWL_MVM_TM_CMD_SET_NOA: 3774 if (!vif || vif->type != NL80211_IFTYPE_AP || !vif->p2p || 3775 !vif->bss_conf.enable_beacon || 3776 !tb[IWL_MVM_TM_ATTR_NOA_DURATION]) 3777 return -EINVAL; 3778 3779 noa_duration = nla_get_u32(tb[IWL_MVM_TM_ATTR_NOA_DURATION]); 3780 if (noa_duration >= vif->bss_conf.beacon_int) 3781 return -EINVAL; 3782 3783 mvm->noa_duration = noa_duration; 3784 mvm->noa_vif = vif; 3785 3786 return iwl_mvm_update_quotas(mvm, false, NULL); 3787 case IWL_MVM_TM_CMD_SET_BEACON_FILTER: 3788 /* must be associated client vif - ignore authorized */ 3789 if (!vif || vif->type != NL80211_IFTYPE_STATION || 3790 !vif->bss_conf.assoc || !vif->bss_conf.dtim_period || 3791 !tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]) 3792 return -EINVAL; 3793 3794 if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])) 3795 return iwl_mvm_enable_beacon_filter(mvm, vif, 0); 3796 return iwl_mvm_disable_beacon_filter(mvm, vif, 0); 3797 } 3798 3799 return -EOPNOTSUPP; 3800 } 3801 3802 static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw, 3803 struct ieee80211_vif *vif, 3804 void *data, int len) 3805 { 3806 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3807 int err; 3808 3809 mutex_lock(&mvm->mutex); 3810 err = __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len); 3811 mutex_unlock(&mvm->mutex); 3812 3813 return err; 3814 } 3815 #endif 3816 3817 static void iwl_mvm_channel_switch(struct ieee80211_hw *hw, 3818 struct ieee80211_vif *vif, 3819 struct ieee80211_channel_switch *chsw) 3820 { 3821 /* By implementing this operation, we prevent mac80211 from 3822 * starting its own channel switch timer, so that we can call 3823 * ieee80211_chswitch_done() ourselves at the right time 3824 * (which is when the absence time event starts). 
         */

        IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw),
                           "dummy channel switch op\n");
}

static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
                                      struct ieee80211_vif *vif,
                                      struct ieee80211_channel_switch *chsw)
{
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
        struct ieee80211_vif *csa_vif;
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        u32 apply_time;
        int ret;

        mutex_lock(&mvm->mutex);

        mvmvif->csa_failed = false;

        IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n",
                           chsw->chandef.center_freq1);

        iwl_fw_dbg_trigger_simple_stop(&mvm->fwrt,
                                       ieee80211_vif_to_wdev(vif),
                                       FW_DBG_TRIGGER_CHANNEL_SWITCH);

        switch (vif->type) {
        case NL80211_IFTYPE_AP:
                csa_vif =
                        rcu_dereference_protected(mvm->csa_vif,
                                                  lockdep_is_held(&mvm->mutex));
                if (WARN_ONCE(csa_vif && csa_vif->csa_active,
                              "Another CSA is already in progress")) {
                        ret = -EBUSY;
                        goto out_unlock;
                }

                /* we still haven't unblocked tx, so prevent a new CS meanwhile */
                if (rcu_dereference_protected(mvm->csa_tx_blocked_vif,
                                              lockdep_is_held(&mvm->mutex))) {
                        ret = -EBUSY;
                        goto out_unlock;
                }

                rcu_assign_pointer(mvm->csa_vif, vif);

                if (WARN_ONCE(mvmvif->csa_countdown,
                              "Previous CSA countdown didn't complete")) {
                        ret = -EBUSY;
                        goto out_unlock;
                }

                mvmvif->csa_target_freq = chsw->chandef.chan->center_freq;

                break;
        case NL80211_IFTYPE_STATION:
                if (mvmvif->lqm_active)
                        iwl_mvm_send_lqm_cmd(vif,
                                             LQM_CMD_OPERATION_STOP_MEASUREMENT,
                                             0, 0);

                /* Schedule the time event to a bit before beacon 1,
                 * to make sure we're in the new channel when the
                 * GO/AP arrives. In case count <= 1 immediately schedule the
                 * TE (this might result in some packet loss or connection
                 * loss).
                 */
                if (chsw->count <= 1)
                        apply_time = 0;
                else
                        apply_time = chsw->device_timestamp +
                                ((vif->bss_conf.beacon_int * (chsw->count - 1) -
                                  IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024);

                if (chsw->block_tx)
                        iwl_mvm_csa_client_absent(mvm, vif);

                iwl_mvm_schedule_csa_period(mvm, vif, vif->bss_conf.beacon_int,
                                            apply_time);
                if (mvmvif->bf_data.bf_enabled) {
                        ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
                        if (ret)
                                goto out_unlock;
                }

                break;
        default:
                break;
        }

        mvmvif->ps_disabled = true;

        ret = iwl_mvm_power_update_ps(mvm);
        if (ret)
                goto out_unlock;

        /* we won't be on this channel any longer */
        iwl_mvm_teardown_tdls_peers(mvm);

out_unlock:
        mutex_unlock(&mvm->mutex);

        return ret;
}

static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
                                       struct ieee80211_vif *vif)
{
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
        int ret;

        mutex_lock(&mvm->mutex);

        if (mvmvif->csa_failed) {
                mvmvif->csa_failed = false;
                ret = -EIO;
                goto out_unlock;
        }

        if (vif->type == NL80211_IFTYPE_STATION) {
                struct iwl_mvm_sta *mvmsta;

                mvmsta = iwl_mvm_sta_from_staid_protected(mvm,
                                                          mvmvif->ap_sta_id);

                if (WARN_ON(!mvmsta)) {
                        ret = -EIO;
                        goto out_unlock;
                }

                iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);

                iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);

                ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
                if (ret)
                        goto out_unlock;

                iwl_mvm_stop_session_protection(mvm, vif);
        }

        mvmvif->ps_disabled = false;

        ret = iwl_mvm_power_update_ps(mvm);

out_unlock:
        mutex_unlock(&mvm->mutex);

        return ret;
}

static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
                              struct ieee80211_vif *vif, u32 queues, bool drop)
{
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
        struct iwl_mvm_vif *mvmvif;
        struct iwl_mvm_sta *mvmsta;
        struct ieee80211_sta *sta;
        int i;
        u32 msk = 0;

        if (!vif || vif->type != NL80211_IFTYPE_STATION)
                return;

        /* Make sure we're done with the deferred traffic before flushing */
        flush_work(&mvm->add_stream_wk);

        mutex_lock(&mvm->mutex);
        mvmvif = iwl_mvm_vif_from_mac80211(vif);

        /* flush the AP-station and all TDLS peers */
        for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
                sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
                                                lockdep_is_held(&mvm->mutex));
                if (IS_ERR_OR_NULL(sta))
                        continue;

                mvmsta = iwl_mvm_sta_from_mac80211(sta);
                if (mvmsta->vif != vif)
                        continue;

                /* make sure only TDLS peers or the AP are flushed */
                WARN_ON(i != mvmvif->ap_sta_id && !sta->tdls);

                if (drop) {
                        if (iwl_mvm_flush_sta(mvm, mvmsta, false, 0))
                                IWL_ERR(mvm, "flush request fail\n");
                } else {
                        msk |= mvmsta->tfd_queue_msk;
                        if (iwl_mvm_has_new_tx_api(mvm))
                                iwl_mvm_wait_sta_queues_empty(mvm, mvmsta);
                }
        }

        mutex_unlock(&mvm->mutex);

        /* this can take a while, and we may need/want other operations
         * to succeed while doing this, so do it without the mutex held
         */
        if (!drop && !iwl_mvm_has_new_tx_api(mvm))
                iwl_trans_wait_tx_queues_empty(mvm->trans, msk);
}

static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
                                  struct survey_info *survey)
{
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
        int ret;

        memset(survey, 0, sizeof(*survey));

        /* only support global statistics right now */
        if (idx != 0)
                return -ENOENT;

        if (!fw_has_capa(&mvm->fw->ucode_capa,
                         IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
                return -ENOENT;

        mutex_lock(&mvm->mutex);

        if (iwl_mvm_firmware_running(mvm)) {
                ret = iwl_mvm_request_statistics(mvm, false);
                if (ret)
                        goto out;
        }

        survey->filled = SURVEY_INFO_TIME |
                         SURVEY_INFO_TIME_RX |
                         SURVEY_INFO_TIME_TX |
                         SURVEY_INFO_TIME_SCAN;
        survey->time = mvm->accu_radio_stats.on_time_rf +
                       mvm->radio_stats.on_time_rf;
        do_div(survey->time, USEC_PER_MSEC);

        survey->time_rx = mvm->accu_radio_stats.rx_time +
                          mvm->radio_stats.rx_time;
        do_div(survey->time_rx, USEC_PER_MSEC);

        survey->time_tx = mvm->accu_radio_stats.tx_time +
                          mvm->radio_stats.tx_time;
        do_div(survey->time_tx, USEC_PER_MSEC);

        survey->time_scan = mvm->accu_radio_stats.on_time_scan +
                            mvm->radio_stats.on_time_scan;
        do_div(survey->time_scan, USEC_PER_MSEC);

        ret = 0;
 out:
        mutex_unlock(&mvm->mutex);
        return ret;
}

static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
                                       struct ieee80211_vif *vif,
                                       struct ieee80211_sta *sta,
                                       struct station_info *sinfo)
{
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);

        if (mvmsta->avg_energy) {
                sinfo->signal_avg = mvmsta->avg_energy;
                sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG);
        }

        if (!fw_has_capa(&mvm->fw->ucode_capa,
                         IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
                return;

        /* if beacon filtering isn't on, mac80211 does it anyway */
        if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
                return;

        if (!vif->bss_conf.assoc)
                return;

        mutex_lock(&mvm->mutex);

        if (mvmvif->ap_sta_id != mvmsta->sta_id)
                goto unlock;

        if (iwl_mvm_request_statistics(mvm, false))
                goto unlock;

        sinfo->rx_beacon = mvmvif->beacon_stats.num_beacons +
                           mvmvif->beacon_stats.accu_num_beacons;
        sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX);
        if (mvmvif->beacon_stats.avg_signal) {
                /* firmware only reports a value after RXing a few beacons */
                sinfo->rx_beacon_signal_avg = mvmvif->beacon_stats.avg_signal;
                sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
        }
 unlock:
        mutex_unlock(&mvm->mutex);
}

static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm,
                                        struct ieee80211_vif *vif,
                                        const struct ieee80211_event *event)
{
#define CHECK_MLME_TRIGGER(_cnt, _fmt...)                               \
        do {                                                            \
                if ((trig_mlme->_cnt) && --(trig_mlme->_cnt))           \
                        break;                                          \
                iwl_fw_dbg_collect_trig(&(mvm)->fwrt, trig, _fmt);      \
        } while (0)

        struct iwl_fw_dbg_trigger_tlv *trig;
        struct iwl_fw_dbg_trigger_mlme *trig_mlme;

        if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
                return;

        trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
        trig_mlme = (void *)trig->data;
        if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
                                           ieee80211_vif_to_wdev(vif), trig))
                return;

        if (event->u.mlme.data == ASSOC_EVENT) {
                if (event->u.mlme.status == MLME_DENIED)
                        CHECK_MLME_TRIGGER(stop_assoc_denied,
                                           "DENIED ASSOC: reason %d",
                                           event->u.mlme.reason);
                else if (event->u.mlme.status == MLME_TIMEOUT)
                        CHECK_MLME_TRIGGER(stop_assoc_timeout,
                                           "ASSOC TIMEOUT");
        } else if (event->u.mlme.data == AUTH_EVENT) {
                if (event->u.mlme.status == MLME_DENIED)
                        CHECK_MLME_TRIGGER(stop_auth_denied,
                                           "DENIED AUTH: reason %d",
                                           event->u.mlme.reason);
                else if (event->u.mlme.status == MLME_TIMEOUT)
                        CHECK_MLME_TRIGGER(stop_auth_timeout,
                                           "AUTH TIMEOUT");
        } else if (event->u.mlme.data == DEAUTH_RX_EVENT) {
                CHECK_MLME_TRIGGER(stop_rx_deauth,
                                   "DEAUTH RX %d", event->u.mlme.reason);
        } else if (event->u.mlme.data == DEAUTH_TX_EVENT) {
                CHECK_MLME_TRIGGER(stop_tx_deauth,
                                   "DEAUTH TX %d", event->u.mlme.reason);
        }
#undef CHECK_MLME_TRIGGER
}

static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm,
                                          struct ieee80211_vif *vif,
                                          const struct ieee80211_event *event)
{
        struct iwl_fw_dbg_trigger_tlv *trig;
        struct iwl_fw_dbg_trigger_ba *ba_trig;

        if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
                return;

        trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
        ba_trig = (void *)trig->data;
        if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
                                           ieee80211_vif_to_wdev(vif), trig))
                return;

        if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid)))
                return;

        iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
                                "BAR received from %pM, tid %d, ssn %d",
                                event->u.ba.sta->addr, event->u.ba.tid,
                                event->u.ba.ssn);
}

static void
iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
                                     struct ieee80211_vif *vif,
                                     const struct ieee80211_event *event)
{
        struct iwl_fw_dbg_trigger_tlv *trig;
        struct iwl_fw_dbg_trigger_ba *ba_trig;

        if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
                return;

        trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
        ba_trig = (void *)trig->data;
        if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
                                           ieee80211_vif_to_wdev(vif), trig))
                return;

        if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(event->u.ba.tid)))
                return;

        iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
                                "Frame from %pM timed out, tid %d",
                                event->u.ba.sta->addr, event->u.ba.tid);
}

static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
                                       struct ieee80211_vif *vif,
                                       const struct ieee80211_event *event)
{
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

        switch (event->type) {
        case MLME_EVENT:
                iwl_mvm_event_mlme_callback(mvm, vif, event);
                break;
        case BAR_RX_EVENT:
                iwl_mvm_event_bar_rx_callback(mvm, vif, event);
                break;
        case BA_FRAME_TIMEOUT:
                iwl_mvm_event_frame_timeout_callback(mvm, vif, event);
                break;
        default:
                break;
        }
}

void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
                                     struct iwl_mvm_internal_rxq_notif *notif,
                                     u32 size)
{
        u32 qmask = BIT(mvm->trans->num_rx_queues) - 1;
        int ret;

        lockdep_assert_held(&mvm->mutex);

        /* TODO - remove a000 disablement when we have RXQ config API */
        if (!iwl_mvm_has_new_rx_api(mvm) ||
            mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_A000)
                return;

        notif->cookie = mvm->queue_sync_cookie;

        if (notif->sync)
                atomic_set(&mvm->queue_sync_counter,
                           mvm->trans->num_rx_queues);

        ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif, size);
        if (ret) {
                IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret);
                goto out;
        }

        if (notif->sync) {
                ret = wait_event_timeout(mvm->rx_sync_waitq,
                                         atomic_read(&mvm->queue_sync_counter) == 0 ||
                                         iwl_mvm_is_radio_killed(mvm),
                                         HZ);
                WARN_ON_ONCE(!ret && !iwl_mvm_is_radio_killed(mvm));
        }

out:
        atomic_set(&mvm->queue_sync_counter, 0);
        mvm->queue_sync_cookie++;
}

static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw)
{
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
        struct iwl_mvm_internal_rxq_notif data = {
                .type = IWL_MVM_RXQ_EMPTY,
                .sync = 1,
        };

        mutex_lock(&mvm->mutex);
        iwl_mvm_sync_rx_queues_internal(mvm, &data, sizeof(data));
        mutex_unlock(&mvm->mutex);
}

const struct ieee80211_ops iwl_mvm_hw_ops = {
        .tx = iwl_mvm_mac_tx,
        .ampdu_action = iwl_mvm_mac_ampdu_action,
        .start = iwl_mvm_mac_start,
        .reconfig_complete = iwl_mvm_mac_reconfig_complete,
        .stop = iwl_mvm_mac_stop,
        .add_interface = iwl_mvm_mac_add_interface,
        .remove_interface = iwl_mvm_mac_remove_interface,
        .config = iwl_mvm_mac_config,
        .prepare_multicast = iwl_mvm_prepare_multicast,
        .configure_filter = iwl_mvm_configure_filter,
        .config_iface_filter = iwl_mvm_config_iface_filter,
        .bss_info_changed = iwl_mvm_bss_info_changed,
        .hw_scan = iwl_mvm_mac_hw_scan,
        .cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan,
        .sta_pre_rcu_remove = iwl_mvm_sta_pre_rcu_remove,
        .sta_state = iwl_mvm_mac_sta_state,
        .sta_notify = iwl_mvm_mac_sta_notify,
        .allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames,
        .release_buffered_frames = iwl_mvm_mac_release_buffered_frames,
        .set_rts_threshold = iwl_mvm_mac_set_rts_threshold,
        .sta_rc_update = iwl_mvm_sta_rc_update,
        .conf_tx = iwl_mvm_mac_conf_tx,
        .mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx,
        .mgd_protect_tdls_discover = iwl_mvm_mac_mgd_protect_tdls_discover,
        .flush = iwl_mvm_mac_flush,
        .sched_scan_start = iwl_mvm_mac_sched_scan_start,
        .sched_scan_stop = iwl_mvm_mac_sched_scan_stop,
        .set_key = iwl_mvm_mac_set_key,
        .update_tkip_key = iwl_mvm_mac_update_tkip_key,
        .remain_on_channel = iwl_mvm_roc,
        .cancel_remain_on_channel = iwl_mvm_cancel_roc,
        .add_chanctx = iwl_mvm_add_chanctx,
        .remove_chanctx = iwl_mvm_remove_chanctx,
        .change_chanctx = iwl_mvm_change_chanctx,
        .assign_vif_chanctx = iwl_mvm_assign_vif_chanctx,
        .unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx,
        .switch_vif_chanctx = iwl_mvm_switch_vif_chanctx,

        .start_ap = iwl_mvm_start_ap_ibss,
        .stop_ap = iwl_mvm_stop_ap_ibss,
        .join_ibss = iwl_mvm_start_ap_ibss,
        .leave_ibss = iwl_mvm_stop_ap_ibss,

        .tx_last_beacon = iwl_mvm_tx_last_beacon,

        .set_tim = iwl_mvm_set_tim,

        .channel_switch = iwl_mvm_channel_switch,
        .pre_channel_switch = iwl_mvm_pre_channel_switch,
        .post_channel_switch = iwl_mvm_post_channel_switch,

        .tdls_channel_switch = iwl_mvm_tdls_channel_switch,
        .tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch,
        .tdls_recv_channel_switch = iwl_mvm_tdls_recv_channel_switch,

        .event_callback = iwl_mvm_mac_event_callback,

        .sync_rx_queues = iwl_mvm_sync_rx_queues,

        CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)

#ifdef CONFIG_PM_SLEEP
        /* look at d3.c */
        .suspend = iwl_mvm_suspend,
        .resume = iwl_mvm_resume,
        .set_wakeup = iwl_mvm_set_wakeup,
        .set_rekey_data = iwl_mvm_set_rekey_data,
#if IS_ENABLED(CONFIG_IPV6)
        .ipv6_addr_change = iwl_mvm_ipv6_addr_change,
#endif
        .set_default_unicast_key = iwl_mvm_set_default_unicast_key,
#endif
        .get_survey = iwl_mvm_mac_get_survey,
        .sta_statistics = iwl_mvm_mac_sta_statistics,
};