/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/if_arp.h>
#include <linux/time.h>
#include <net/mac80211.h>
#include <net/ieee80211_radiotap.h>
#include <net/tcp.h>

#include "iwl-op-mode.h"
#include "iwl-io.h"
#include "mvm.h"
#include "sta.h"
#include "time-event.h"
#include "iwl-eeprom-parse.h"
#include "iwl-phy-db.h"
#include "testmode.h"
#include "fw/error-dump.h"
#include "iwl-prph.h"
#include "iwl-nvm-parse.h"
#include "fw-dbg.h"

static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_AP) |
			BIT(NL80211_IFTYPE_P2P_CLIENT) |
			BIT(NL80211_IFTYPE_P2P_GO),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
	},
};

static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
	{
		.num_different_channels = 2,
		.max_interfaces = 3,
		.limits = iwl_mvm_limits,
		.n_limits = ARRAY_SIZE(iwl_mvm_limits),
	},
};

#ifdef CONFIG_PM_SLEEP
static const struct nl80211_wowlan_tcp_data_token_feature
iwl_mvm_wowlan_tcp_token_feature = {
	.min_len = 0,
	.max_len = 255,
	.bufsize = IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS,
};

static const struct wiphy_wowlan_tcp_support iwl_mvm_wowlan_tcp_support = {
	.tok = &iwl_mvm_wowlan_tcp_token_feature,
	.data_payload_max = IWL_WOWLAN_TCP_MAX_PACKET_LEN -
			    sizeof(struct ethhdr) -
			    sizeof(struct iphdr) -
			    sizeof(struct tcphdr),
	.data_interval_max = 65535, /* __le16 in API */
	.wake_payload_max = IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN -
			    sizeof(struct ethhdr) -
			    sizeof(struct iphdr) -
			    sizeof(struct tcphdr),
	.seq = true,
};
#endif

#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
/*
 * Use the reserved field to indicate magic values.
 * these values will only be used internally by the driver,
 * and won't make it to the fw (reserved will be 0).
 * BC_FILTER_MAGIC_IP - configure the val of this attribute to
 *	be the vif's ip address. in case there is not a single
 *	ip address (0, or more than 1), this attribute will
 *	be skipped.
 * BC_FILTER_MAGIC_MAC - set the val of this attribute to
 *	the LSB bytes of the vif's mac address
 */
enum {
	BC_FILTER_MAGIC_NONE = 0,
	BC_FILTER_MAGIC_IP,
	BC_FILTER_MAGIC_MAC,
};

static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = {
	{
		/* arp */
		.discard = 0,
		.frame_type = BCAST_FILTER_FRAME_TYPE_ALL,
		.attrs = {
			{
				/* frame type - arp, hw type - ethernet */
				.offset_type =
					BCAST_FILTER_OFFSET_PAYLOAD_START,
				.offset = sizeof(rfc1042_header),
				.val = cpu_to_be32(0x08060001),
				.mask = cpu_to_be32(0xffffffff),
			},
			{
				/* arp dest ip */
				.offset_type =
					BCAST_FILTER_OFFSET_PAYLOAD_START,
				.offset = sizeof(rfc1042_header) + 2 +
					  sizeof(struct arphdr) +
					  ETH_ALEN + sizeof(__be32) +
					  ETH_ALEN,
				.mask = cpu_to_be32(0xffffffff),
				/* mark it as special field */
				.reserved1 = cpu_to_le16(BC_FILTER_MAGIC_IP),
			},
		},
	},
	{
		/* dhcp offer bcast */
		.discard = 0,
		.frame_type = BCAST_FILTER_FRAME_TYPE_IPV4,
		.attrs = {
			{
				/* udp dest port - 68 (bootp client)*/
				.offset_type = BCAST_FILTER_OFFSET_IP_END,
				.offset = offsetof(struct udphdr, dest),
				.val = cpu_to_be32(0x00440000),
				.mask = cpu_to_be32(0xffff0000),
			},
			{
				/* dhcp - lsb bytes of client hw address */
				.offset_type = BCAST_FILTER_OFFSET_IP_END,
				.offset = 38,
				.mask = cpu_to_be32(0xffffffff),
				/* mark it as special field */
				.reserved1 = cpu_to_le16(BC_FILTER_MAGIC_MAC),
			},
		},
	},
	/* last filter must be empty */
	{},
};
#endif

void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
{
	if (!iwl_mvm_is_d0i3_supported(mvm))
		return;

	IWL_DEBUG_RPM(mvm, "Take mvm reference - type %d\n", ref_type);
	spin_lock_bh(&mvm->refs_lock);
	mvm->refs[ref_type]++;
	spin_unlock_bh(&mvm->refs_lock);
	iwl_trans_ref(mvm->trans);
}

void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
{
	if (!iwl_mvm_is_d0i3_supported(mvm))
		return;

	IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type);
	spin_lock_bh(&mvm->refs_lock);
	if (WARN_ON(!mvm->refs[ref_type])) {
		spin_unlock_bh(&mvm->refs_lock);
		return;
	}
	mvm->refs[ref_type]--;
	spin_unlock_bh(&mvm->refs_lock);
	iwl_trans_unref(mvm->trans);
}

static void iwl_mvm_unref_all_except(struct iwl_mvm *mvm,
				     enum iwl_mvm_ref_type except_ref)
{
	int i, j;

	if (!iwl_mvm_is_d0i3_supported(mvm))
		return;

	spin_lock_bh(&mvm->refs_lock);
	for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
		if (except_ref == i || !mvm->refs[i])
			continue;

		IWL_DEBUG_RPM(mvm, "Cleanup: remove mvm ref type %d (%d)\n",
			      i, mvm->refs[i]);
		for (j = 0; j < mvm->refs[i]; j++)
			iwl_trans_unref(mvm->trans);
		mvm->refs[i] = 0;
	}
	spin_unlock_bh(&mvm->refs_lock);
}

bool iwl_mvm_ref_taken(struct iwl_mvm *mvm)
{
	int i;
	bool taken = false;

	if (!iwl_mvm_is_d0i3_supported(mvm))
		return true;

	spin_lock_bh(&mvm->refs_lock);
	for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
		if (mvm->refs[i]) {
			taken = true;
			break;
		}
	}
	spin_unlock_bh(&mvm->refs_lock);

	return taken;
}

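/*
 * iwl_mvm_ref_sync - take a D0i3 reference and wait for D0i3 exit
 *
 * Takes a reference of the given type and then waits (up to one second)
 * for the IN_D0I3 status bit to clear. Returns 0 on success; on timeout
 * the reference is dropped again and -EIO is returned.
 */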
int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
{
	iwl_mvm_ref(mvm, ref_type);

	if (!wait_event_timeout(mvm->d0i3_exit_waitq,
				!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status),
				HZ)) {
		WARN_ON_ONCE(1);
		iwl_mvm_unref(mvm, ref_type);
		return -EIO;
	}

	return 0;
}

static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
{
	int i;

	memset(mvm->phy_ctxts, 0, sizeof(mvm->phy_ctxts));
	for (i = 0; i < NUM_PHY_CTX; i++) {
		mvm->phy_ctxts[i].id = i;
		mvm->phy_ctxts[i].ref = 0;
	}
}

struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
						  const char *alpha2,
						  enum iwl_mcc_source src_id,
						  bool *changed)
{
	struct ieee80211_regdomain *regd = NULL;
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mcc_update_resp *resp;

	IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2);

	lockdep_assert_held(&mvm->mutex);

	resp = iwl_mvm_update_mcc(mvm, alpha2, src_id);
	if (IS_ERR_OR_NULL(resp)) {
		IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n",
			      PTR_ERR_OR_ZERO(resp));
		goto out;
	}

	if (changed)
		*changed = (resp->status == MCC_RESP_NEW_CHAN_PROFILE);

	regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg,
				      __le32_to_cpu(resp->n_channels),
				      resp->channels,
				      __le16_to_cpu(resp->mcc));
	/* Store the return source id */
	src_id = resp->source_id;
	kfree(resp);
	if (IS_ERR_OR_NULL(regd)) {
		IWL_DEBUG_LAR(mvm, "Could not get parse update from FW %d\n",
			      PTR_ERR_OR_ZERO(regd));
		goto out;
	}

	IWL_DEBUG_LAR(mvm, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n",
		      regd->alpha2, regd->alpha2[0], regd->alpha2[1], src_id);
	mvm->lar_regdom_set = true;
	mvm->mcc_src = src_id;

out:
	return regd;
}

void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm)
{
	bool changed;
	struct ieee80211_regdomain *regd;

	if (!iwl_mvm_is_lar_supported(mvm))
		return;

	regd = iwl_mvm_get_current_regdomain(mvm, &changed);
	if (!IS_ERR_OR_NULL(regd)) {
		/* only update the regulatory core if changed */
		if (changed)
			regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);

		kfree(regd);
	}
}

struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm,
							   bool *changed)
{
	return iwl_mvm_get_regdomain(mvm->hw->wiphy, "ZZ",
				     iwl_mvm_is_wifi_mcc_supported(mvm) ?
				     MCC_SOURCE_GET_CURRENT :
				     MCC_SOURCE_OLD_FW, changed);
}

int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm)
{
	enum iwl_mcc_source used_src;
	struct ieee80211_regdomain *regd;
	int ret;
	bool changed;
	const struct ieee80211_regdomain *r =
			rtnl_dereference(mvm->hw->wiphy->regd);

	if (!r)
		return -ENOENT;

	/* save the last source in case we overwrite it below */
	used_src = mvm->mcc_src;
	if (iwl_mvm_is_wifi_mcc_supported(mvm)) {
		/* Notify the firmware we support wifi location updates */
		regd = iwl_mvm_get_current_regdomain(mvm, NULL);
		if (!IS_ERR_OR_NULL(regd))
			kfree(regd);
	}

	/* Now set our last stored MCC and source */
	regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, r->alpha2, used_src,
				     &changed);
	if (IS_ERR_OR_NULL(regd))
		return -EIO;

	/* update cfg80211 if the regdomain was changed */
	if (changed)
		ret = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd);
	else
		ret = 0;

	kfree(regd);
	return ret;
}

int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
{
	struct ieee80211_hw *hw = mvm->hw;
	int num_mac, ret, i;
	static const u32 mvm_ciphers[] = {
		WLAN_CIPHER_SUITE_WEP40,
		WLAN_CIPHER_SUITE_WEP104,
		WLAN_CIPHER_SUITE_TKIP,
		WLAN_CIPHER_SUITE_CCMP,
	};

	/* Tell mac80211 our characteristics */
	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, SPECTRUM_MGMT);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
	ieee80211_hw_set(hw, QUEUE_CONTROL);
	ieee80211_hw_set(hw, WANT_MONITOR_VIF);
	ieee80211_hw_set(hw, SUPPORTS_PS);
	ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
	ieee80211_hw_set(hw, TIMING_BEACON_ONLY);
	ieee80211_hw_set(hw, CONNECTION_MONITOR);
	ieee80211_hw_set(hw, CHANCTX_STA_CSA);
	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
	ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
	if (iwl_mvm_has_new_rx_api(mvm))
		ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_STA_PM_NOTIF))
		ieee80211_hw_set(hw, AP_LINK_PS);

	if (mvm->trans->num_rx_queues > 1)
		ieee80211_hw_set(hw, USES_RSS);

	if (mvm->trans->max_skb_frags)
		hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG;

	if (!iwl_mvm_is_dqa_supported(mvm))
		hw->queues = mvm->first_agg_queue;
	else
		hw->queues = IEEE80211_MAX_QUEUES;
	hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
	hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
				    IEEE80211_RADIOTAP_MCS_HAVE_STBC;
	hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
				    IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED;

	hw->radiotap_timestamp.units_pos =
		IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US |
		IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ;
	/* this is the case for CCK frames, it's better (only 8) for OFDM */
	hw->radiotap_timestamp.accuracy = 22;

	hw->rate_control_algorithm = "iwl-mvm-rs";
	hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
	hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;

	BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 6);
	memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers));
	hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers);
	hw->wiphy->cipher_suites = mvm->ciphers;

	if (iwl_mvm_has_new_rx_api(mvm)) {
		mvm->ciphers[hw->wiphy->n_cipher_suites] =
			WLAN_CIPHER_SUITE_GCMP;
		hw->wiphy->n_cipher_suites++;
		mvm->ciphers[hw->wiphy->n_cipher_suites] =
			WLAN_CIPHER_SUITE_GCMP_256;
		hw->wiphy->n_cipher_suites++;
	}

	/* Enable 11w if software crypto is not enabled (as the
	 * firmware will interpret some mgmt packets, so enabling it
	 * with software crypto isn't safe).
	 */
	if (!iwlwifi_mod_params.swcrypto) {
		ieee80211_hw_set(hw, MFP_CAPABLE);
		mvm->ciphers[hw->wiphy->n_cipher_suites] =
			WLAN_CIPHER_SUITE_AES_CMAC;
		hw->wiphy->n_cipher_suites++;
		if (iwl_mvm_has_new_rx_api(mvm)) {
			mvm->ciphers[hw->wiphy->n_cipher_suites] =
				WLAN_CIPHER_SUITE_BIP_GMAC_128;
			hw->wiphy->n_cipher_suites++;
			mvm->ciphers[hw->wiphy->n_cipher_suites] =
				WLAN_CIPHER_SUITE_BIP_GMAC_256;
			hw->wiphy->n_cipher_suites++;
		}
	}

	/* currently FW API supports only one optional cipher scheme */
	if (mvm->fw->cs[0].cipher) {
		const struct iwl_fw_cipher_scheme *fwcs = &mvm->fw->cs[0];
		struct ieee80211_cipher_scheme *cs = &mvm->cs[0];

		mvm->hw->n_cipher_schemes = 1;

		cs->cipher = le32_to_cpu(fwcs->cipher);
		cs->iftype = BIT(NL80211_IFTYPE_STATION);
		cs->hdr_len = fwcs->hdr_len;
		cs->pn_len = fwcs->pn_len;
		cs->pn_off = fwcs->pn_off;
		cs->key_idx_off = fwcs->key_idx_off;
		cs->key_idx_mask = fwcs->key_idx_mask;
		cs->key_idx_shift = fwcs->key_idx_shift;
		cs->mic_len = fwcs->mic_len;

		mvm->hw->cipher_schemes = mvm->cs;
		mvm->ciphers[hw->wiphy->n_cipher_suites] = cs->cipher;
		hw->wiphy->n_cipher_suites++;
	}

	ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
	hw->wiphy->features |=
		NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
		NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
		NL80211_FEATURE_ND_RANDOM_MAC_ADDR;

	hw->sta_data_size = sizeof(struct iwl_mvm_sta);
	hw->vif_data_size = sizeof(struct iwl_mvm_vif);
	hw->chanctx_data_size = sizeof(u16);

	hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_P2P_DEVICE) |
		BIT(NL80211_IFTYPE_ADHOC);

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR;
	if (iwl_mvm_is_lar_supported(mvm))
		hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
	else
		hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
					       REGULATORY_DISABLE_BEACON_HINTS;

	hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
	hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;

	hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
	hw->wiphy->n_iface_combinations =
		ARRAY_SIZE(iwl_mvm_iface_combinations);

	hw->wiphy->max_remain_on_channel_duration = 10000;
	hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
	/* we can compensate an offset of up to 3 channels = 15 MHz */
	hw->wiphy->max_adj_channel_rssi_comp = 3 * 5;

	/* Extract MAC address */
	memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
	hw->wiphy->addresses = mvm->addresses;
	hw->wiphy->n_addresses = 1;

	/* Extract additional MAC addresses if available */
	num_mac = (mvm->nvm_data->n_hw_addrs > 1) ?
		min(IWL_MVM_MAX_ADDRESSES, mvm->nvm_data->n_hw_addrs) : 1;

	for (i = 1; i < num_mac; i++) {
		memcpy(mvm->addresses[i].addr, mvm->addresses[i-1].addr,
		       ETH_ALEN);
		mvm->addresses[i].addr[5]++;
		hw->wiphy->n_addresses++;
	}

	iwl_mvm_reset_phy_ctxts(mvm);

	hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm);

	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;

	BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK);
	BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) ||
		     IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK));

	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
		mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS;
	else
		mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS;

	if (mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels)
		hw->wiphy->bands[NL80211_BAND_2GHZ] =
			&mvm->nvm_data->bands[NL80211_BAND_2GHZ];
	if (mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels) {
		hw->wiphy->bands[NL80211_BAND_5GHZ] =
			&mvm->nvm_data->bands[NL80211_BAND_5GHZ];

		if (fw_has_capa(&mvm->fw->ucode_capa,
				IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
		    fw_has_api(&mvm->fw->ucode_capa,
			       IWL_UCODE_TLV_API_LQ_SS_PARAMS))
			hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap.cap |=
				IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
	}

	hw->wiphy->hw_version = mvm->trans->hw_id;

	if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
		hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
	else
		hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	hw->wiphy->max_sched_scan_reqs = 1;
	hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
	hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
	/* we create the 802.11 header and zero length SSID IE. */
	hw->wiphy->max_sched_scan_ie_len =
		SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
	hw->wiphy->max_sched_scan_plans = IWL_MAX_SCHED_SCAN_PLANS;
	hw->wiphy->max_sched_scan_plan_interval = U16_MAX;

	/*
	 * the firmware uses u8 for num of iterations, but 0xff is saved for
	 * infinite loop, so the maximum number of iterations is actually 254.
	 */
	hw->wiphy->max_sched_scan_plan_iterations = 254;

	hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
			       NL80211_FEATURE_LOW_PRIORITY_SCAN |
			       NL80211_FEATURE_P2P_GO_OPPPS |
			       NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
			       NL80211_FEATURE_DYNAMIC_SMPS |
			       NL80211_FEATURE_STATIC_SMPS |
			       NL80211_FEATURE_SUPPORTS_WMM_ADMISSION;

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT))
		hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION;
	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT))
		hw->wiphy->features |= NL80211_FEATURE_QUIET;

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
		hw->wiphy->features |=
			NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES;

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
		hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;

	if (fw_has_api(&mvm->fw->ucode_capa,
		       IWL_UCODE_TLV_API_SCAN_TSF_REPORT)) {
		wiphy_ext_feature_set(hw->wiphy,
				      NL80211_EXT_FEATURE_SCAN_START_TIME);
		wiphy_ext_feature_set(hw->wiphy,
				      NL80211_EXT_FEATURE_BSS_PARENT_TSF);
		wiphy_ext_feature_set(hw->wiphy,
				      NL80211_EXT_FEATURE_SET_SCAN_DWELL);
	}

	mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;

#ifdef CONFIG_PM_SLEEP
	if (iwl_mvm_is_d0i3_supported(mvm) &&
	    device_can_wakeup(mvm->trans->dev)) {
		mvm->wowlan.flags = WIPHY_WOWLAN_ANY;
		hw->wiphy->wowlan = &mvm->wowlan;
	}

	if (mvm->fw->img[IWL_UCODE_WOWLAN].num_sec &&
	    mvm->trans->ops->d3_suspend &&
	    mvm->trans->ops->d3_resume &&
	    device_can_wakeup(mvm->trans->dev)) {
		mvm->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT |
				     WIPHY_WOWLAN_DISCONNECT |
				     WIPHY_WOWLAN_EAP_IDENTITY_REQ |
				     WIPHY_WOWLAN_RFKILL_RELEASE |
				     WIPHY_WOWLAN_NET_DETECT;
		if (!iwlwifi_mod_params.swcrypto)
			mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
					     WIPHY_WOWLAN_GTK_REKEY_FAILURE |
					     WIPHY_WOWLAN_4WAY_HANDSHAKE;

		mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS;
		mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
		mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
		mvm->wowlan.max_nd_match_sets = IWL_SCAN_MAX_PROFILES;
		mvm->wowlan.tcp = &iwl_mvm_wowlan_tcp_support;
		hw->wiphy->wowlan = &mvm->wowlan;
	}
#endif

#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
	/* assign default bcast filtering configuration */
	mvm->bcast_filters = iwl_mvm_default_bcast_filters;
#endif

	ret = iwl_mvm_leds_init(mvm);
	if (ret)
		return ret;

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) {
		IWL_DEBUG_TDLS(mvm, "TDLS supported\n");
		hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
		ieee80211_hw_set(hw, TDLS_WIDER_BW);
	}

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) {
		IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n");
		hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
	}

	hw->netdev_features |= mvm->cfg->features;
	if (!iwl_mvm_is_csum_supported(mvm)) {
		hw->netdev_features &= ~(IWL_TX_CSUM_NETIF_FLAGS |
					 NETIF_F_RXCSUM);
		/* We may support SW TX CSUM */
		if (IWL_MVM_SW_TX_CSUM_OFFLOAD)
			hw->netdev_features |= IWL_TX_CSUM_NETIF_FLAGS;
	}

	ret = ieee80211_register_hw(mvm->hw);
	if (ret)
		iwl_mvm_leds_exit(mvm);
	mvm->init_status |= IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE;

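	/* advertise MU-MIMO air-sniffer only when the device supports VHT MU-MIMO */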
	if (mvm->cfg->vht_mu_mimo_supported)
		wiphy_ext_feature_set(hw->wiphy,
				      NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER);

	return ret;
}

static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm,
			     struct ieee80211_sta *sta,
			     struct sk_buff *skb)
{
	struct iwl_mvm_sta *mvmsta;
	bool defer = false;

	/*
	 * double check the IN_D0I3 flag both before and after
	 * taking the spinlock, in order to prevent taking
	 * the spinlock when not needed.
	 */
	if (likely(!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)))
		return false;

	spin_lock(&mvm->d0i3_tx_lock);
	/*
	 * testing the flag again ensures the skb dequeue
	 * loop (on d0i3 exit) hasn't run yet.
	 */
	if (!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
		goto out;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	if (mvmsta->sta_id == IWL_MVM_INVALID_STA ||
	    mvmsta->sta_id != mvm->d0i3_ap_sta_id)
		goto out;

	__skb_queue_tail(&mvm->d0i3_tx, skb);
	ieee80211_stop_queues(mvm->hw);

	/* trigger wakeup */
	iwl_mvm_ref(mvm, IWL_MVM_REF_TX);
	iwl_mvm_unref(mvm, IWL_MVM_REF_TX);

	defer = true;
out:
	spin_unlock(&mvm->d0i3_tx_lock);
	return defer;
}

static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
			   struct ieee80211_tx_control *control,
			   struct sk_buff *skb)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct ieee80211_sta *sta = control->sta;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (void *)skb->data;

	if (iwl_mvm_is_radio_killed(mvm)) {
		IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n");
		goto drop;
	}

	if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
	    !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) &&
	    !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
		goto drop;

	/* treat non-bufferable MMPDUs as broadcast if sta is sleeping */
	if (unlikely(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER &&
		     ieee80211_is_mgmt(hdr->frame_control) &&
		     !ieee80211_is_deauth(hdr->frame_control) &&
		     !ieee80211_is_disassoc(hdr->frame_control) &&
		     !ieee80211_is_action(hdr->frame_control)))
		sta = NULL;

	if (sta) {
		if (iwl_mvm_defer_tx(mvm, sta, skb))
			return;
		if (iwl_mvm_tx_skb(mvm, skb, sta))
			goto drop;
		return;
	}

	if (iwl_mvm_tx_skb_non_sta(mvm, skb))
		goto drop;
	return;
drop:
	ieee80211_free_txskb(hw, skb);
}

static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
{
	if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
		return false;
	return true;
}

static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
{
	if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
		return false;
	if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
		return true;

	/* enabled by default */
	return true;
}

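/*
 * Fire the block-ack firmware debug trigger only if the trigger's TID
 * bitmap includes the TID the A-MPDU action refers to.
 */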
#define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...)		\
	do {								\
		if (!(le16_to_cpu(_tid_bm) & BIT(_tid)))		\
			break;						\
		iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt);		\
	} while (0)

static void
iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid, u16 rx_ba_ssn,
			    enum ieee80211_ampdu_mlme_action action)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_ba *ba_trig;

	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
		return;

	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
	ba_trig = (void *)trig->data;

	if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
		return;

	switch (action) {
	case IEEE80211_AMPDU_TX_OPERATIONAL: {
		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
		struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];

		CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_start, tid,
				 "TX AGG START: MAC %pM tid %d ssn %d\n",
				 sta->addr, tid, tid_data->ssn);
		break;
		}
	case IEEE80211_AMPDU_TX_STOP_CONT:
		CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_stop, tid,
				 "TX AGG STOP: MAC %pM tid %d\n",
				 sta->addr, tid);
		break;
	case IEEE80211_AMPDU_RX_START:
		CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_start, tid,
				 "RX AGG START: MAC %pM tid %d ssn %d\n",
				 sta->addr, tid, rx_ba_ssn);
		break;
	case IEEE80211_AMPDU_RX_STOP:
		CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_stop, tid,
				 "RX AGG STOP: MAC %pM tid %d\n",
				 sta->addr, tid);
		break;
	default:
		break;
	}
}

static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    struct ieee80211_ampdu_params *params)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	int ret;
	bool tx_agg_ref = false;
	struct ieee80211_sta *sta = params->sta;
	enum ieee80211_ampdu_mlme_action action = params->action;
	u16 tid = params->tid;
	u16 *ssn = &params->ssn;
	u8 buf_size = params->buf_size;
	bool amsdu = params->amsdu;
	u16 timeout = params->timeout;

	IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
		     sta->addr, tid, action);

	if (!(mvm->nvm_data->sku_cap_11n_enable))
		return -EACCES;

	/* return from D0i3 before starting a new Tx aggregation */
	switch (action) {
	case IEEE80211_AMPDU_TX_START:
	case IEEE80211_AMPDU_TX_STOP_CONT:
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		/*
		 * for tx start, wait synchronously until D0i3 exit to
		 * get the correct sequence number for the tid.
		 * additionally, some other ampdu actions use direct
		 * target access, which is not handled automatically
		 * by the trans layer (unlike commands), so wait for
		 * d0i3 exit in these cases as well.
		 */
		ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_TX_AGG);
		if (ret)
			return ret;

		tx_agg_ref = true;
		break;
	default:
		break;
	}

	mutex_lock(&mvm->mutex);

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		if (!iwl_enable_rx_ampdu(mvm->cfg)) {
			ret = -EINVAL;
			break;
		}
		ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true, buf_size,
					 timeout);
		break;
	case IEEE80211_AMPDU_RX_STOP:
		ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, buf_size,
					 timeout);
		break;
	case IEEE80211_AMPDU_TX_START:
		if (!iwl_enable_tx_ampdu(mvm->cfg)) {
			ret = -EINVAL;
			break;
		}
		ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn);
		break;
	case IEEE80211_AMPDU_TX_STOP_CONT:
		ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid);
		break;
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
		ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid);
		break;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid,
					      buf_size, amsdu);
		break;
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		break;
	}

	if (!ret) {
		u16 rx_ba_ssn = 0;

		if (action == IEEE80211_AMPDU_RX_START)
			rx_ba_ssn = *ssn;

		iwl_mvm_ampdu_check_trigger(mvm, vif, sta, tid,
					    rx_ba_ssn, action);
	}
	mutex_unlock(&mvm->mutex);

	/*
	 * If the tid is marked as started, we won't use it for offloaded
	 * traffic on the next D0i3 entry. It's safe to unref.
	 */
	if (tx_agg_ref)
		iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG);

	return ret;
}

static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
				     struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = data;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	mvmvif->uploaded = false;
	mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;

	spin_lock_bh(&mvm->time_event_lock);
	iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
	spin_unlock_bh(&mvm->time_event_lock);

	mvmvif->phy_ctxt = NULL;
	memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data));
}

static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
{
	/* clear the D3 reconfig, we only need it to avoid dumping a
	 * firmware coredump on reconfiguration, we shouldn't do that
	 * on D3->D0 transition
	 */
	if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status)) {
		mvm->fw_dump_desc = &iwl_mvm_dump_desc_assert;
		iwl_mvm_fw_error_dump(mvm);
	}

	/* cleanup all stale references (scan, roc), but keep the
	 * ucode_down ref until reconfig is complete
	 */
	iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN);

	iwl_mvm_stop_device(mvm);

	mvm->scan_status = 0;
	mvm->ps_disabled = false;
	mvm->calibrating = false;

	/* just in case one was running */
	iwl_mvm_cleanup_roc_te(mvm);
	ieee80211_remain_on_channel_expired(mvm->hw);

	/*
	 * cleanup all interfaces, even inactive ones, as some might have
	 * gone down during the HW restart
	 */
	ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm);

	mvm->p2p_device_vif = NULL;
	mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;

	iwl_mvm_reset_phy_ctxts(mvm);
	memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
	memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained));
	memset(mvm->sta_deferred_frames, 0,
	       sizeof(mvm->sta_deferred_frames));
	memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained));
	memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
	memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));

	ieee80211_wake_queues(mvm->hw);

	/* clear any stale d0i3 state */
	clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);

	mvm->vif_count = 0;
	mvm->rx_ba_sessions = 0;
	mvm->fw_dbg_conf = FW_DBG_INVALID;

	/* keep statistics ticking */
	iwl_mvm_accu_radio_stats(mvm);
}

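/*
 * __iwl_mvm_mac_start - bring the firmware up (mvm->mutex must be held)
 *
 * Handles both the normal start path and the HW-restart path; in the
 * latter case the stale driver and mac80211 state is cleaned up first.
 */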
int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) {
		/*
		 * Now convert the HW_RESTART_REQUESTED flag to IN_HW_RESTART
		 * so later code will - from now on - see that we're doing it.
		 */
		set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
		clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
		/* Clean up some internal and mac80211 state on restart */
		iwl_mvm_restart_cleanup(mvm);
	} else {
		/* Hold the reference to prevent runtime suspend while
		 * the start procedure runs.  It's a bit confusing
		 * that the UCODE_DOWN reference is taken, but it just
		 * means "UCODE is not UP yet". (TODO: rename this
		 * reference).
		 */
		iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
	}
	ret = iwl_mvm_up(mvm);

	if (ret && test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		/* Something went wrong - we need to finish some cleanup
		 * that normally iwl_mvm_mac_restart_complete() below
		 * would do.
		 */
		clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
		iwl_mvm_d0i3_enable_tx(mvm, NULL);
	}

	return ret;
}

static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	int ret;

	/* Some hw restart cleanups must not hold the mutex */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		/*
		 * Make sure we are out of d0i3. This is needed
		 * to make sure the reference accounting is correct
		 * (and there is no stale d0i3_exit_work).
		 */
		wait_event_timeout(mvm->d0i3_exit_waitq,
				   !test_bit(IWL_MVM_STATUS_IN_D0I3,
					     &mvm->status),
				   HZ);
	}

	mutex_lock(&mvm->mutex);
	ret = __iwl_mvm_mac_start(mvm);
	mutex_unlock(&mvm->mutex);

	return ret;
}

static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
{
	int ret;

	mutex_lock(&mvm->mutex);

	clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
	iwl_mvm_d0i3_enable_tx(mvm, NULL);
	ret = iwl_mvm_update_quotas(mvm, true, NULL);
	if (ret)
		IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
			ret);

	/* allow transport/FW low power modes */
	iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);

	/*
	 * If we have TDLS peers, remove them. We don't know the last seqno/PN
	 * of packets the FW sent out, so we must reconnect.
	 */
	iwl_mvm_teardown_tdls_peers(mvm);

	mutex_unlock(&mvm->mutex);
}

static void iwl_mvm_resume_complete(struct iwl_mvm *mvm)
{
	if (iwl_mvm_is_d0i3_supported(mvm) &&
	    iwl_mvm_enter_d0i3_on_suspend(mvm))
		WARN_ONCE(!wait_event_timeout(mvm->d0i3_exit_waitq,
					      !test_bit(IWL_MVM_STATUS_IN_D0I3,
							&mvm->status),
					      HZ),
			  "D0i3 exit on resume timed out\n");
}

static void
iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw,
			      enum ieee80211_reconfig_type reconfig_type)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	switch (reconfig_type) {
	case IEEE80211_RECONFIG_TYPE_RESTART:
		iwl_mvm_restart_complete(mvm);
		break;
	case IEEE80211_RECONFIG_TYPE_SUSPEND:
		iwl_mvm_resume_complete(mvm);
		break;
	}
}

void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
{
	lockdep_assert_held(&mvm->mutex);

	/* firmware counters are obviously reset now, but we shouldn't
	 * partially track so also clear the fw_reset_accu counters.
	 */
	memset(&mvm->accu_radio_stats, 0, sizeof(mvm->accu_radio_stats));

	/* async_handlers_wk is now blocked */

	/*
	 * The work item could be running or queued if the
	 * ROC time event stops just as we get here.
	 */
	flush_work(&mvm->roc_done_wk);

	iwl_mvm_stop_device(mvm);

	iwl_mvm_async_handlers_purge(mvm);
	/* async_handlers_list is empty and will stay empty: HW is stopped */

	/* the fw is stopped, the aux sta is dead: clean up driver state */
	iwl_mvm_del_aux_sta(mvm);

	/*
	 * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
	 * won't be called in this case).
	 * But make sure to cleanup interfaces that have gone down before/during
	 * HW restart was requested.
	 */
	if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		ieee80211_iterate_interfaces(mvm->hw, 0,
					     iwl_mvm_cleanup_iterator, mvm);

	/* We shouldn't have any UIDs still set. Loop over all the UIDs to
	 * make sure there's nothing left there and warn if any is found.
	 */
	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
		int i;

		for (i = 0; i < mvm->max_scans; i++) {
			if (WARN_ONCE(mvm->scan_uid_status[i],
				      "UMAC scan UID %d status was not cleaned\n",
				      i))
				mvm->scan_uid_status[i] = 0;
		}
	}
}

static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	flush_work(&mvm->d0i3_exit_work);
	flush_work(&mvm->async_handlers_wk);
	flush_work(&mvm->add_stream_wk);

	/*
	 * Lock and clear the firmware running bit here already, so that
	 * new commands coming in elsewhere, e.g. from debugfs, will not
	 * be able to proceed. This is important here because one of those
	 * debugfs files causes the fw_dump_wk to be triggered, and if we
	 * don't stop debugfs accesses before canceling that it could be
	 * retriggered after we flush it but before we've cleared the bit.
	 */
	clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);

	cancel_delayed_work_sync(&mvm->fw_dump_wk);
	cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork);
	cancel_delayed_work_sync(&mvm->scan_timeout_dwork);
	iwl_mvm_free_fw_dump_desc(mvm);

	mutex_lock(&mvm->mutex);
	__iwl_mvm_mac_stop(mvm);
	mutex_unlock(&mvm->mutex);

	/*
	 * The worker might have been waiting for the mutex, let it run and
	 * discover that its list is now empty.
	 */
	cancel_work_sync(&mvm->async_handlers_wk);
}

static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
{
	u16 i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < NUM_PHY_CTX; i++)
		if (!mvm->phy_ctxts[i].ref)
			return &mvm->phy_ctxts[i];

	IWL_ERR(mvm, "No available PHY context\n");
	return NULL;
}

static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				s16 tx_power)
{
	struct iwl_dev_tx_power_cmd cmd = {
		.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
		.v3.mac_context_id =
			cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
		.v3.pwr_restriction = cpu_to_le16(8 * tx_power),
	};
	int len = sizeof(cmd);

	if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
		cmd.v3.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);

	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
		len = sizeof(cmd.v3);

	return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
}

static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	mvmvif->mvm = mvm;

	/*
	 * make sure D0i3 exit is completed, otherwise a target access
	 * during tx queue configuration could be done when still in
	 * D0i3 state.
	 */
	ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_ADD_IF);
	if (ret)
		return ret;

	/*
	 * Not much to do here. The stack will not allow interface
	 * types or combinations that we didn't advertise, so we
	 * don't really have to check the types.
	 */

	mutex_lock(&mvm->mutex);

	/* make sure that beacon statistics don't go backwards with FW reset */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		mvmvif->beacon_stats.accu_num_beacons +=
			mvmvif->beacon_stats.num_beacons;

	/* Allocate resources for the MAC context, and add it to the fw */
	ret = iwl_mvm_mac_ctxt_init(mvm, vif);
	if (ret)
		goto out_unlock;

	/* Counting number of interfaces is needed for legacy PM */
	if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
		mvm->vif_count++;

	/*
	 * The AP binding flow can be done only after the beacon
	 * template is configured (which happens only in the mac80211
	 * start_ap() flow), and adding the broadcast station can happen
	 * only after the binding.
	 * In addition, since modifying the MAC before adding a bcast
	 * station is not allowed by the FW, delay the adding of MAC context to
	 * the point where we can also add the bcast station.
	 * In short: there's not much we can do at this point, other than
	 * allocating resources :)
	 */
	if (vif->type == NL80211_IFTYPE_AP ||
	    vif->type == NL80211_IFTYPE_ADHOC) {
		ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
		if (ret) {
			IWL_ERR(mvm, "Failed to allocate bcast sta\n");
			goto out_release;
		}

		if (iwl_mvm_is_dqa_supported(mvm)) {
			/*
			 * Only queue for this station is the mcast queue,
			 * which shouldn't be in TFD mask anyway
			 */
			ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->mcast_sta,
						       0, vif->type,
						       IWL_STA_MULTICAST);
			if (ret)
				goto out_release;
		}

		iwl_mvm_vif_dbgfs_register(mvm, vif);
		goto out_unlock;
	}

	mvmvif->features |= hw->netdev_features;

	ret = iwl_mvm_mac_ctxt_add(mvm, vif);
	if (ret)
		goto out_release;

	ret = iwl_mvm_power_update_mac(mvm);
	if (ret)
		goto out_remove_mac;

	/* beacon filtering */
	ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
	if (ret)
		goto out_remove_mac;

	if (!mvm->bf_allowed_vif &&
	    vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
		mvm->bf_allowed_vif = mvmvif;
		vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
				     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
	}

	/*
	 * P2P_DEVICE interface does not have a channel context assigned to it,
	 * so a dedicated PHY context is allocated to it and the corresponding
	 * MAC context is bound to it at this stage.
	 */
	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {

		mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
		if (!mvmvif->phy_ctxt) {
			ret = -ENOSPC;
			goto out_free_bf;
		}

		iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
		ret = iwl_mvm_binding_add_vif(mvm, vif);
		if (ret)
			goto out_unref_phy;

		ret = iwl_mvm_add_bcast_sta(mvm, vif);
		if (ret)
			goto out_unbind;

		/* Save a pointer to p2p device vif, so it can later be used to
		 * update the p2p device MAC when a GO is started/stopped */
		mvm->p2p_device_vif = vif;
	}

	iwl_mvm_vif_dbgfs_register(mvm, vif);
	goto out_unlock;

 out_unbind:
	iwl_mvm_binding_remove_vif(mvm, vif);
 out_unref_phy:
	iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
 out_free_bf:
	if (mvm->bf_allowed_vif == mvmvif) {
		mvm->bf_allowed_vif = NULL;
		vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
				       IEEE80211_VIF_SUPPORTS_CQM_RSSI);
	}
 out_remove_mac:
	mvmvif->phy_ctxt = NULL;
	iwl_mvm_mac_ctxt_remove(mvm, vif);
 out_release:
	if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
		mvm->vif_count--;

	iwl_mvm_mac_ctxt_release(mvm, vif);
 out_unlock:
	mutex_unlock(&mvm->mutex);

	iwl_mvm_unref(mvm, IWL_MVM_REF_ADD_IF);

	return ret;
}

static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
					struct ieee80211_vif *vif)
{
	u32 tfd_msk = iwl_mvm_mac_get_queues_mask(vif);

	if (tfd_msk && !iwl_mvm_is_dqa_supported(mvm)) {
		/*
		 * mac80211 first removes all the stations of the vif and
		 * then removes the vif. When it removes a station it also
		 * flushes the AMPDU session. So by now, all the AMPDU sessions
		 * of all the stations of this vif are closed, and the queues
		 * of these AMPDU sessions are properly closed.
		 * We still need to take care of the shared queues of the vif.
		 * Flush them here.
		 * For DQA mode there is no need - broadcast and multicast
		 * queues are flushed separately.
		 */
		mutex_lock(&mvm->mutex);
		iwl_mvm_flush_tx_path(mvm, tfd_msk, 0);
		mutex_unlock(&mvm->mutex);

		/*
		 * There are transports that buffer a few frames in the host.
		 * For these, the flush above isn't enough since while we were
		 * flushing, the transport might have sent more frames to the
		 * device. To solve this, wait here until the transport is
		 * empty. Technically, this could have replaced the flush
		 * above, but flush is much faster than draining. So flush
		 * first, and drain to make sure we have no frames in the
		 * transport anymore.
		 * If a station still had frames on the shared queues, it is
		 * already marked as draining, so to complete the draining, we
		 * just need to wait until the transport is empty.
		 */
		iwl_trans_wait_tx_queues_empty(mvm->trans, tfd_msk);
	}

	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
		/*
		 * Flush the ROC worker which will flush the OFFCHANNEL queue.
		 * We assume here that all the packets sent to the OFFCHANNEL
		 * queue are sent in ROC session.
		 */
		flush_work(&mvm->roc_done_wk);
	} else {
		/*
		 * By now, all the AC queues are empty. The AGG queues are
		 * empty too. We already got all the Tx responses for all the
		 * packets in the queues. The drain work can have been
		 * triggered. Flush it.
		 */
		flush_work(&mvm->sta_drained_wk);
	}
}

static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
					 struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	iwl_mvm_prepare_mac_removal(mvm, vif);

	mutex_lock(&mvm->mutex);

	if (mvm->bf_allowed_vif == mvmvif) {
		mvm->bf_allowed_vif = NULL;
		vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
				       IEEE80211_VIF_SUPPORTS_CQM_RSSI);
	}

	iwl_mvm_vif_dbgfs_clean(mvm, vif);

	/*
	 * For AP/GO interface, the tear down of the resources allocated to the
	 * interface is handled as part of the stop_ap flow.
	 */
	if (vif->type == NL80211_IFTYPE_AP ||
	    vif->type == NL80211_IFTYPE_ADHOC) {
#ifdef CONFIG_NL80211_TESTMODE
		if (vif == mvm->noa_vif) {
			mvm->noa_vif = NULL;
			mvm->noa_duration = 0;
		}
#endif
		iwl_mvm_dealloc_int_sta(mvm, &mvmvif->mcast_sta);
		iwl_mvm_dealloc_bcast_sta(mvm, vif);
		goto out_release;
	}

	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
		mvm->p2p_device_vif = NULL;
		iwl_mvm_rm_bcast_sta(mvm, vif);
		iwl_mvm_binding_remove_vif(mvm, vif);
		iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
		mvmvif->phy_ctxt = NULL;
	}

	if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE)
		mvm->vif_count--;

	iwl_mvm_power_update_mac(mvm);
	iwl_mvm_mac_ctxt_remove(mvm, vif);

out_release:
	iwl_mvm_mac_ctxt_release(mvm, vif);
	mutex_unlock(&mvm->mutex);
}

static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed)
{
	return 0;
}

struct iwl_mvm_mc_iter_data {
	struct iwl_mvm *mvm;
	int port_id;
};

static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
				      struct ieee80211_vif *vif)
{
	struct iwl_mvm_mc_iter_data *data = _data;
	struct iwl_mvm *mvm = data->mvm;
	struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd;
	int ret, len;

	/* if we don't have free ports, mcast frames will be dropped */
	if (WARN_ON_ONCE(data->port_id >= MAX_PORT_ID_NUM))
		return;

	if (vif->type != NL80211_IFTYPE_STATION ||
	    !vif->bss_conf.assoc)
		return;

	cmd->port_id = data->port_id++;
	memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
	len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);

	ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
	if (ret)
		IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
}

static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
{
	struct iwl_mvm_mc_iter_data iter_data = {
		.mvm = mvm,
	};

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
		return;

	ieee80211_iterate_active_interfaces_atomic(
		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
		iwl_mvm_mc_iface_iterator, &iter_data);
}

static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw,
				     struct netdev_hw_addr_list *mc_list)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mcast_filter_cmd *cmd;
	struct netdev_hw_addr *addr;
	int addr_count;
	bool pass_all;
	int len;

	addr_count = netdev_hw_addr_list_count(mc_list);
	pass_all = addr_count > MAX_MCAST_FILTERING_ADDRESSES ||
		   IWL_MVM_FW_MCAST_FILTER_PASS_ALL;
	if (pass_all)
		addr_count = 0;

	len = roundup(sizeof(*cmd) + addr_count * ETH_ALEN, 4);
	cmd = kzalloc(len, GFP_ATOMIC);
	if (!cmd)
		return 0;

	if (pass_all) {
		cmd->pass_all = 1;
		return (u64)(unsigned long)cmd;
	}

	netdev_hw_addr_list_for_each(addr, mc_list) {
		IWL_DEBUG_MAC80211(mvm, "mcast addr (%d): %pM\n",
				   cmd->count, addr->addr);
		memcpy(&cmd->addr_list[cmd->count * ETH_ALEN],
		       addr->addr, ETH_ALEN);
		cmd->count++;
	}

	return (u64)(unsigned long)cmd;
}

static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
				     unsigned int changed_flags,
				     unsigned int *total_flags,
				     u64 multicast)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast;

	mutex_lock(&mvm->mutex);

	/* replace previous configuration */
	kfree(mvm->mcast_filter_cmd);
	mvm->mcast_filter_cmd = cmd;

	if (!cmd)
		goto out;

	iwl_mvm_recalc_multicast(mvm);
out:
	mutex_unlock(&mvm->mutex);
	*total_flags = 0;
}

static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					unsigned int filter_flags,
					unsigned int changed_flags)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	/* We support only filter for probe requests */
	if (!(changed_flags & FIF_PROBE_REQ))
		return;

	/* Supported only for p2p client interfaces */
	if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc ||
	    !vif->p2p)
		return;

	mutex_lock(&mvm->mutex);
	iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
	mutex_unlock(&mvm->mutex);
}

#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
struct iwl_bcast_iter_data {
	struct iwl_mvm *mvm;
	struct iwl_bcast_filter_cmd *cmd;
	u8 current_filter;
};

static void
iwl_mvm_set_bcast_filter(struct ieee80211_vif *vif,
			 const struct iwl_fw_bcast_filter *in_filter,
			 struct iwl_fw_bcast_filter *out_filter)
{
	struct iwl_fw_bcast_filter_attr *attr;
	int i;

	memcpy(out_filter, in_filter, sizeof(*out_filter));

	for (i = 0; i < ARRAY_SIZE(out_filter->attrs); i++) {
		attr = &out_filter->attrs[i];

		if (!attr->mask)
			break;

		switch (attr->reserved1) {
		case cpu_to_le16(BC_FILTER_MAGIC_IP):
			if (vif->bss_conf.arp_addr_cnt != 1) {
				attr->mask = 0;
				continue;
			}

			attr->val = vif->bss_conf.arp_addr_list[0];
			break;
		case cpu_to_le16(BC_FILTER_MAGIC_MAC):
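			/* use the last four bytes of our MAC address */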
			attr->val = *(__be32 *)&vif->addr[2];
			break;
		default:
			break;
		}
		attr->reserved1 = 0;
		out_filter->num_attrs++;
	}
}

static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac,
					  struct ieee80211_vif *vif)
{
	struct iwl_bcast_iter_data *data = _data;
	struct iwl_mvm *mvm = data->mvm;
	struct iwl_bcast_filter_cmd *cmd = data->cmd;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_fw_bcast_mac *bcast_mac;
	int i;

	if (WARN_ON(mvmvif->id >= ARRAY_SIZE(cmd->macs)))
		return;

	bcast_mac = &cmd->macs[mvmvif->id];

	/*
	 * enable filtering only for associated stations, but not for P2P
	 * Clients
	 */
	if (vif->type != NL80211_IFTYPE_STATION || vif->p2p ||
	    !vif->bss_conf.assoc)
		return;

	bcast_mac->default_discard = 1;

	/* copy all configured filters */
	for (i = 0; mvm->bcast_filters[i].attrs[0].mask; i++) {
		/*
		 * Make sure we don't exceed our filters limit.
		 * if there is still a valid filter to be configured,
		 * be on the safe side and just allow bcast for this mac.
		 */
		if (WARN_ON_ONCE(data->current_filter >=
				 ARRAY_SIZE(cmd->filters))) {
			bcast_mac->default_discard = 0;
			bcast_mac->attached_filters = 0;
			break;
		}

		iwl_mvm_set_bcast_filter(vif,
					 &mvm->bcast_filters[i],
					 &cmd->filters[data->current_filter]);

		/* skip current filter if it contains no attributes */
		if (!cmd->filters[data->current_filter].num_attrs)
			continue;

		/* attach the filter to current mac */
		bcast_mac->attached_filters |=
				cpu_to_le16(BIT(data->current_filter));

		data->current_filter++;
	}
}

bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
				    struct iwl_bcast_filter_cmd *cmd)
{
	struct iwl_bcast_iter_data iter_data = {
		.mvm = mvm,
		.cmd = cmd,
	};

	if (IWL_MVM_FW_BCAST_FILTER_PASS_ALL)
		return false;

	memset(cmd, 0, sizeof(*cmd));
	cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters);
	cmd->max_macs = ARRAY_SIZE(cmd->macs);

#ifdef CONFIG_IWLWIFI_DEBUGFS
	/* use debugfs filters/macs if override is configured */
	if (mvm->dbgfs_bcast_filtering.override) {
		memcpy(cmd->filters, &mvm->dbgfs_bcast_filtering.cmd.filters,
		       sizeof(cmd->filters));
		memcpy(cmd->macs, &mvm->dbgfs_bcast_filtering.cmd.macs,
		       sizeof(cmd->macs));
		return true;
	}
#endif

	/* if no filters are configured, do nothing */
	if (!mvm->bcast_filters)
		return false;

	/* configure and attach these filters for each associated sta vif */
	ieee80211_iterate_active_interfaces(
		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
		iwl_mvm_bcast_filter_iterator, &iter_data);

	return true;
}

static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm)
{
	struct iwl_bcast_filter_cmd cmd;

	if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
		return 0;

	if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
		return 0;

	return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
				    sizeof(cmd), &cmd);
}
#else
static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm)
{
	return 0;
}
#endif

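/*
 * Push the VHT MU-MIMO group membership and user position data from
 * bss_conf down to the firmware.
 */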
1866 { 1867 struct iwl_mu_group_mgmt_cmd cmd = {}; 1868 1869 memcpy(cmd.membership_status, vif->bss_conf.mu_group.membership, 1870 WLAN_MEMBERSHIP_LEN); 1871 memcpy(cmd.user_position, vif->bss_conf.mu_group.position, 1872 WLAN_USER_POSITION_LEN); 1873 1874 return iwl_mvm_send_cmd_pdu(mvm, 1875 WIDE_ID(DATA_PATH_GROUP, 1876 UPDATE_MU_GROUPS_CMD), 1877 0, sizeof(cmd), &cmd); 1878 } 1879 1880 static void iwl_mvm_mu_mimo_iface_iterator(void *_data, u8 *mac, 1881 struct ieee80211_vif *vif) 1882 { 1883 if (vif->mu_mimo_owner) { 1884 struct iwl_mu_group_mgmt_notif *notif = _data; 1885 1886 /* 1887 * MU-MIMO Group Id action frame is little endian. We treat 1888 * the data received from firmware as if it came from the 1889 * action frame, so no conversion is needed. 1890 */ 1891 ieee80211_update_mu_groups(vif, 1892 (u8 *)¬if->membership_status, 1893 (u8 *)¬if->user_position); 1894 } 1895 } 1896 1897 void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm, 1898 struct iwl_rx_cmd_buffer *rxb) 1899 { 1900 struct iwl_rx_packet *pkt = rxb_addr(rxb); 1901 struct iwl_mu_group_mgmt_notif *notif = (void *)pkt->data; 1902 1903 ieee80211_iterate_active_interfaces_atomic( 1904 mvm->hw, IEEE80211_IFACE_ITER_NORMAL, 1905 iwl_mvm_mu_mimo_iface_iterator, notif); 1906 } 1907 1908 static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, 1909 struct ieee80211_vif *vif, 1910 struct ieee80211_bss_conf *bss_conf, 1911 u32 changes) 1912 { 1913 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1914 int ret; 1915 1916 /* 1917 * Re-calculate the tsf id, as the master-slave relations depend on the 1918 * beacon interval, which was not known when the station interface was 1919 * added. 1920 */ 1921 if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) 1922 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); 1923 1924 if (changes & BSS_CHANGED_ASSOC && !bss_conf->assoc && 1925 mvmvif->lqm_active) 1926 iwl_mvm_send_lqm_cmd(vif, LQM_CMD_OPERATION_STOP_MEASUREMENT, 1927 0, 0); 1928 1929 /* 1930 * If we're not associated yet, take the (new) BSSID before associating 1931 * so the firmware knows. If we're already associated, then use the old 1932 * BSSID here, and we'll send a cleared one later in the CHANGED_ASSOC 1933 * branch for disassociation below. 1934 */ 1935 if (changes & BSS_CHANGED_BSSID && !mvmvif->associated) 1936 memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN); 1937 1938 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, mvmvif->bssid); 1939 if (ret) 1940 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); 1941 1942 /* after sending it once, adopt mac80211 data */ 1943 memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN); 1944 mvmvif->associated = bss_conf->assoc; 1945 1946 if (changes & BSS_CHANGED_ASSOC) { 1947 if (bss_conf->assoc) { 1948 /* clear statistics to get clean beacon counter */ 1949 iwl_mvm_request_statistics(mvm, true); 1950 memset(&mvmvif->beacon_stats, 0, 1951 sizeof(mvmvif->beacon_stats)); 1952 1953 /* add quota for this interface */ 1954 ret = iwl_mvm_update_quotas(mvm, true, NULL); 1955 if (ret) { 1956 IWL_ERR(mvm, "failed to update quotas\n"); 1957 return; 1958 } 1959 1960 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, 1961 &mvm->status)) { 1962 /* 1963 * If we're restarting then the firmware will 1964 * obviously have lost synchronisation with 1965 * the AP. It will attempt to synchronise by 1966 * itself, but we can make it more reliable by 1967 * scheduling a session protection time event. 
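* The duration and max delay computed below follow directly from the beacon interval; for example, with a typical 100 TU beacon interval, dur works out to (11 * 100) / 10 = 110 TU and the max delay to 5 * 110 = 550 TU.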
1968 * 1969 * The firmware needs to receive a beacon to 1970 * catch up with synchronisation, use 110% of 1971 * the beacon interval. 1972 * 1973 * Set a large maximum delay to allow for more 1974 * than a single interface. 1975 */ 1976 u32 dur = (11 * vif->bss_conf.beacon_int) / 10; 1977 iwl_mvm_protect_session(mvm, vif, dur, dur, 1978 5 * dur, false); 1979 } 1980 1981 iwl_mvm_sf_update(mvm, vif, false); 1982 iwl_mvm_power_vif_assoc(mvm, vif); 1983 if (vif->p2p) { 1984 iwl_mvm_ref(mvm, IWL_MVM_REF_P2P_CLIENT); 1985 iwl_mvm_update_smps(mvm, vif, 1986 IWL_MVM_SMPS_REQ_PROT, 1987 IEEE80211_SMPS_DYNAMIC); 1988 } 1989 } else if (mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) { 1990 /* 1991 * If update fails - SF might be running in associated 1992 * mode while disassociated - which is forbidden. 1993 */ 1994 WARN_ONCE(iwl_mvm_sf_update(mvm, vif, false), 1995 "Failed to update SF upon disassociation\n"); 1996 1997 /* 1998 * If we get an assert during the connection (after the 1999 * station has been added, but before the vif is set 2000 * to associated), mac80211 will re-add the station and 2001 * then configure the vif. Since the vif is not 2002 * associated, we would remove the station here and 2003 * this would fail the recovery. 2004 */ 2005 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, 2006 &mvm->status)) { 2007 /* 2008 * Remove AP station now that 2009 * the MAC is unassoc 2010 */ 2011 ret = iwl_mvm_rm_sta_id(mvm, vif, 2012 mvmvif->ap_sta_id); 2013 if (ret) 2014 IWL_ERR(mvm, 2015 "failed to remove AP station\n"); 2016 2017 if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id) 2018 mvm->d0i3_ap_sta_id = 2019 IWL_MVM_INVALID_STA; 2020 mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; 2021 } 2022 2023 /* remove quota for this interface */ 2024 ret = iwl_mvm_update_quotas(mvm, false, NULL); 2025 if (ret) 2026 IWL_ERR(mvm, "failed to update quotas\n"); 2027 2028 if (vif->p2p) 2029 iwl_mvm_unref(mvm, IWL_MVM_REF_P2P_CLIENT); 2030 2031 /* this will take the cleared BSSID from bss_conf */ 2032 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 2033 if (ret) 2034 IWL_ERR(mvm, 2035 "failed to update MAC %pM (clear after unassoc)\n", 2036 vif->addr); 2037 } 2038 2039 /* 2040 * The firmware tracks the MU-MIMO group on its own. 2041 * However, on HW restart we should restore this data. 2042 */ 2043 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && 2044 (changes & BSS_CHANGED_MU_GROUPS) && vif->mu_mimo_owner) { 2045 ret = iwl_mvm_update_mu_groups(mvm, vif); 2046 if (ret) 2047 IWL_ERR(mvm, 2048 "failed to update VHT MU_MIMO groups\n"); 2049 } 2050 2051 iwl_mvm_recalc_multicast(mvm); 2052 iwl_mvm_configure_bcast_filter(mvm); 2053 2054 /* reset rssi values */ 2055 mvmvif->bf_data.ave_beacon_signal = 0; 2056 2057 iwl_mvm_bt_coex_vif_change(mvm); 2058 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT, 2059 IEEE80211_SMPS_AUTOMATIC); 2060 if (fw_has_capa(&mvm->fw->ucode_capa, 2061 IWL_UCODE_TLV_CAPA_UMAC_SCAN)) 2062 iwl_mvm_config_scan(mvm); 2063 } 2064 2065 if (changes & BSS_CHANGED_BEACON_INFO) { 2066 /* 2067 * We received a beacon from the associated AP so 2068 * remove the session protection. 2069 */ 2070 iwl_mvm_remove_time_event(mvm, mvmvif, 2071 &mvmvif->time_event_data); 2072 2073 iwl_mvm_sf_update(mvm, vif, false); 2074 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); 2075 } 2076 2077 if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS | 2078 /* 2079 * Send power command on every beacon change, 2080 * because we may have not enabled beacon abort yet. 
2081 */ 2082 BSS_CHANGED_BEACON_INFO)) { 2083 ret = iwl_mvm_power_update_mac(mvm); 2084 if (ret) 2085 IWL_ERR(mvm, "failed to update power mode\n"); 2086 } 2087 2088 if (changes & BSS_CHANGED_TXPOWER) { 2089 IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n", 2090 bss_conf->txpower); 2091 iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower); 2092 } 2093 2094 if (changes & BSS_CHANGED_CQM) { 2095 IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n"); 2096 /* reset cqm events tracking */ 2097 mvmvif->bf_data.last_cqm_event = 0; 2098 if (mvmvif->bf_data.bf_enabled) { 2099 ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0); 2100 if (ret) 2101 IWL_ERR(mvm, 2102 "failed to update CQM thresholds\n"); 2103 } 2104 } 2105 2106 if (changes & BSS_CHANGED_ARP_FILTER) { 2107 IWL_DEBUG_MAC80211(mvm, "arp filter changed\n"); 2108 iwl_mvm_configure_bcast_filter(mvm); 2109 } 2110 } 2111 2112 static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, 2113 struct ieee80211_vif *vif) 2114 { 2115 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2116 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2117 int ret; 2118 2119 /* 2120 * iwl_mvm_mac_ctxt_add() might read directly from the device 2121 * (the system time), so make sure it is available. 2122 */ 2123 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_START_AP); 2124 if (ret) 2125 return ret; 2126 2127 mutex_lock(&mvm->mutex); 2128 2129 /* Send the beacon template */ 2130 ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif); 2131 if (ret) 2132 goto out_unlock; 2133 2134 /* 2135 * Re-calculate the tsf id, as the master-slave relations depend on the 2136 * beacon interval, which was not known when the AP interface was added. 2137 */ 2138 if (vif->type == NL80211_IFTYPE_AP) 2139 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); 2140 2141 mvmvif->ap_assoc_sta_count = 0; 2142 2143 /* Add the mac context */ 2144 ret = iwl_mvm_mac_ctxt_add(mvm, vif); 2145 if (ret) 2146 goto out_unlock; 2147 2148 /* Perform the binding */ 2149 ret = iwl_mvm_binding_add_vif(mvm, vif); 2150 if (ret) 2151 goto out_remove; 2152 2153 ret = iwl_mvm_add_mcast_sta(mvm, vif); 2154 if (ret) 2155 goto out_unbind; 2156 2157 /* Send the bcast station. 
At this stage the TBTT and DTIM time events 2158 * are added and applied to the scheduler */ 2159 ret = iwl_mvm_send_add_bcast_sta(mvm, vif); 2160 if (ret) 2161 goto out_rm_mcast; 2162 2163 /* must be set before quota calculations */ 2164 mvmvif->ap_ibss_active = true; 2165 2166 /* power update needs to be done before quotas */ 2167 iwl_mvm_power_update_mac(mvm); 2168 2169 ret = iwl_mvm_update_quotas(mvm, false, NULL); 2170 if (ret) 2171 goto out_quota_failed; 2172 2173 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */ 2174 if (vif->p2p && mvm->p2p_device_vif) 2175 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL); 2176 2177 iwl_mvm_ref(mvm, IWL_MVM_REF_AP_IBSS); 2178 2179 iwl_mvm_bt_coex_vif_change(mvm); 2180 2181 /* we don't support TDLS during DCM */ 2182 if (iwl_mvm_phy_ctx_count(mvm) > 1) 2183 iwl_mvm_teardown_tdls_peers(mvm); 2184 2185 goto out_unlock; 2186 2187 out_quota_failed: 2188 iwl_mvm_power_update_mac(mvm); 2189 mvmvif->ap_ibss_active = false; 2190 iwl_mvm_send_rm_bcast_sta(mvm, vif); 2191 out_rm_mcast: 2192 iwl_mvm_rm_mcast_sta(mvm, vif); 2193 out_unbind: 2194 iwl_mvm_binding_remove_vif(mvm, vif); 2195 out_remove: 2196 iwl_mvm_mac_ctxt_remove(mvm, vif); 2197 out_unlock: 2198 mutex_unlock(&mvm->mutex); 2199 iwl_mvm_unref(mvm, IWL_MVM_REF_START_AP); 2200 return ret; 2201 } 2202 2203 static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw, 2204 struct ieee80211_vif *vif) 2205 { 2206 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2207 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2208 2209 iwl_mvm_prepare_mac_removal(mvm, vif); 2210 2211 mutex_lock(&mvm->mutex); 2212 2213 /* Handle AP stop while in CSA */ 2214 if (rcu_access_pointer(mvm->csa_vif) == vif) { 2215 iwl_mvm_remove_time_event(mvm, mvmvif, 2216 &mvmvif->time_event_data); 2217 RCU_INIT_POINTER(mvm->csa_vif, NULL); 2218 mvmvif->csa_countdown = false; 2219 } 2220 2221 if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) { 2222 RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL); 2223 mvm->csa_tx_block_bcn_timeout = 0; 2224 } 2225 2226 mvmvif->ap_ibss_active = false; 2227 mvm->ap_last_beacon_gp2 = 0; 2228 2229 iwl_mvm_bt_coex_vif_change(mvm); 2230 2231 iwl_mvm_unref(mvm, IWL_MVM_REF_AP_IBSS); 2232 2233 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */ 2234 if (vif->p2p && mvm->p2p_device_vif) 2235 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL); 2236 2237 iwl_mvm_update_quotas(mvm, false, NULL); 2238 2239 /* 2240 * This is not very nice, but the simplest: 2241 * For older FWs removing the mcast sta before the bcast station may 2242 * cause assert 0x2b00. 2243 * This is fixed in later FW (which will stop beaconing when removing 2244 * bcast station).
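* (Concretely, in the code below: without the STA_TYPE TLV the mcast station is removed first and the bcast station second; with the TLV the bcast station is removed first and the mcast station afterwards.)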
2245 * So make the order of removal depend on the TLV 2246 */ 2247 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) 2248 iwl_mvm_rm_mcast_sta(mvm, vif); 2249 iwl_mvm_send_rm_bcast_sta(mvm, vif); 2250 if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) 2251 iwl_mvm_rm_mcast_sta(mvm, vif); 2252 iwl_mvm_binding_remove_vif(mvm, vif); 2253 2254 iwl_mvm_power_update_mac(mvm); 2255 2256 iwl_mvm_mac_ctxt_remove(mvm, vif); 2257 2258 mutex_unlock(&mvm->mutex); 2259 } 2260 2261 static void 2262 iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm, 2263 struct ieee80211_vif *vif, 2264 struct ieee80211_bss_conf *bss_conf, 2265 u32 changes) 2266 { 2267 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2268 2269 /* Changes will be applied when the AP/IBSS is started */ 2270 if (!mvmvif->ap_ibss_active) 2271 return; 2272 2273 if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT | 2274 BSS_CHANGED_BANDWIDTH | BSS_CHANGED_QOS) && 2275 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL)) 2276 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); 2277 2278 /* Need to send a new beacon template to the FW */ 2279 if (changes & BSS_CHANGED_BEACON && 2280 iwl_mvm_mac_ctxt_beacon_changed(mvm, vif)) 2281 IWL_WARN(mvm, "Failed updating beacon data\n"); 2282 2283 if (changes & BSS_CHANGED_TXPOWER) { 2284 IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n", 2285 bss_conf->txpower); 2286 iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower); 2287 } 2288 } 2289 2290 static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw, 2291 struct ieee80211_vif *vif, 2292 struct ieee80211_bss_conf *bss_conf, 2293 u32 changes) 2294 { 2295 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2296 2297 /* 2298 * iwl_mvm_bss_info_changed_station() might call 2299 * iwl_mvm_protect_session(), which reads directly from 2300 * the device (the system time), so make sure it is available. 
*/ 2302 if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_BSS_CHANGED)) 2303 return; 2304 2305 mutex_lock(&mvm->mutex); 2306 2307 if (changes & BSS_CHANGED_IDLE && !bss_conf->idle) 2308 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true); 2309 2310 switch (vif->type) { 2311 case NL80211_IFTYPE_STATION: 2312 iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes); 2313 break; 2314 case NL80211_IFTYPE_AP: 2315 case NL80211_IFTYPE_ADHOC: 2316 iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes); 2317 break; 2318 case NL80211_IFTYPE_MONITOR: 2319 if (changes & BSS_CHANGED_MU_GROUPS) 2320 iwl_mvm_update_mu_groups(mvm, vif); 2321 break; 2322 default: 2323 /* shouldn't happen */ 2324 WARN_ON_ONCE(1); 2325 } 2326 2327 mutex_unlock(&mvm->mutex); 2328 iwl_mvm_unref(mvm, IWL_MVM_REF_BSS_CHANGED); 2329 } 2330 2331 static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw, 2332 struct ieee80211_vif *vif, 2333 struct ieee80211_scan_request *hw_req) 2334 { 2335 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2336 int ret; 2337 2338 if (hw_req->req.n_channels == 0 || 2339 hw_req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels) 2340 return -EINVAL; 2341 2342 mutex_lock(&mvm->mutex); 2343 ret = iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies); 2344 mutex_unlock(&mvm->mutex); 2345 2346 return ret; 2347 } 2348 2349 static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw, 2350 struct ieee80211_vif *vif) 2351 { 2352 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2353 2354 mutex_lock(&mvm->mutex); 2355 2356 /* Due to a race condition, it's possible that mac80211 asks 2357 * us to stop a hw_scan when it's already stopped. This can 2358 * happen, for instance, if we stopped the scan ourselves, 2359 * called ieee80211_scan_completed() and the userspace called 2360 * cancel scan before ieee80211_scan_work() could run. 2361 * To handle that, simply return if the scan is not running.
2362 */ 2363 if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) 2364 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); 2365 2366 mutex_unlock(&mvm->mutex); 2367 } 2368 2369 static void 2370 iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw, 2371 struct ieee80211_sta *sta, u16 tids, 2372 int num_frames, 2373 enum ieee80211_frame_release_type reason, 2374 bool more_data) 2375 { 2376 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2377 2378 /* Called when we need to transmit (a) frame(s) from mac80211 */ 2379 2380 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames, 2381 tids, more_data, false); 2382 } 2383 2384 static void 2385 iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw, 2386 struct ieee80211_sta *sta, u16 tids, 2387 int num_frames, 2388 enum ieee80211_frame_release_type reason, 2389 bool more_data) 2390 { 2391 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2392 2393 /* Called when we need to transmit (a) frame(s) from agg or dqa queue */ 2394 2395 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames, 2396 tids, more_data, true); 2397 } 2398 2399 static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, 2400 enum sta_notify_cmd cmd, 2401 struct ieee80211_sta *sta) 2402 { 2403 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2404 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 2405 unsigned long txqs = 0, tids = 0; 2406 int tid; 2407 2408 spin_lock_bh(&mvmsta->lock); 2409 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { 2410 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; 2411 2412 if (!iwl_mvm_is_dqa_supported(mvm) && 2413 tid_data->state != IWL_AGG_ON && 2414 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA) 2415 continue; 2416 2417 if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE) 2418 continue; 2419 2420 __set_bit(tid_data->txq_id, &txqs); 2421 2422 if (iwl_mvm_tid_queued(mvm, tid_data) == 0) 2423 continue; 2424 2425 __set_bit(tid, &tids); 2426 } 2427 2428 switch (cmd) { 2429 case STA_NOTIFY_SLEEP: 2430 if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0) 2431 ieee80211_sta_block_awake(hw, sta, true); 2432 2433 for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT) 2434 ieee80211_sta_set_buffered(sta, tid, true); 2435 2436 if (txqs) 2437 iwl_trans_freeze_txq_timer(mvm->trans, txqs, true); 2438 /* 2439 * The fw updates the STA to be asleep. Tx packets on the Tx 2440 * queues to this station will not be transmitted. The fw will 2441 * send a Tx response with TX_STATUS_FAIL_DEST_PS. 
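* The TX queue timers are frozen above so that queues legitimately holding frames for the sleeping station are not flagged as stuck in the meantime.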
2442 */ 2443 break; 2444 case STA_NOTIFY_AWAKE: 2445 if (WARN_ON(mvmsta->sta_id == IWL_MVM_INVALID_STA)) 2446 break; 2447 2448 if (txqs) 2449 iwl_trans_freeze_txq_timer(mvm->trans, txqs, false); 2450 iwl_mvm_sta_modify_ps_wake(mvm, sta); 2451 break; 2452 default: 2453 break; 2454 } 2455 spin_unlock_bh(&mvmsta->lock); 2456 } 2457 2458 static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, 2459 struct ieee80211_vif *vif, 2460 enum sta_notify_cmd cmd, 2461 struct ieee80211_sta *sta) 2462 { 2463 __iwl_mvm_mac_sta_notify(hw, cmd, sta); 2464 } 2465 2466 void iwl_mvm_sta_pm_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) 2467 { 2468 struct iwl_rx_packet *pkt = rxb_addr(rxb); 2469 struct iwl_mvm_pm_state_notification *notif = (void *)pkt->data; 2470 struct ieee80211_sta *sta; 2471 struct iwl_mvm_sta *mvmsta; 2472 bool sleeping = (notif->type != IWL_MVM_PM_EVENT_AWAKE); 2473 2474 if (WARN_ON(notif->sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) 2475 return; 2476 2477 rcu_read_lock(); 2478 sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]); 2479 if (WARN_ON(IS_ERR_OR_NULL(sta))) { 2480 rcu_read_unlock(); 2481 return; 2482 } 2483 2484 mvmsta = iwl_mvm_sta_from_mac80211(sta); 2485 2486 if (!mvmsta->vif || 2487 mvmsta->vif->type != NL80211_IFTYPE_AP) { 2488 rcu_read_unlock(); 2489 return; 2490 } 2491 2492 if (mvmsta->sleeping != sleeping) { 2493 mvmsta->sleeping = sleeping; 2494 __iwl_mvm_mac_sta_notify(mvm->hw, 2495 sleeping ? STA_NOTIFY_SLEEP : STA_NOTIFY_AWAKE, 2496 sta); 2497 ieee80211_sta_ps_transition(sta, sleeping); 2498 } 2499 2500 if (sleeping) { 2501 switch (notif->type) { 2502 case IWL_MVM_PM_EVENT_AWAKE: 2503 case IWL_MVM_PM_EVENT_ASLEEP: 2504 break; 2505 case IWL_MVM_PM_EVENT_UAPSD: 2506 ieee80211_sta_uapsd_trigger(sta, IEEE80211_NUM_TIDS); 2507 break; 2508 case IWL_MVM_PM_EVENT_PS_POLL: 2509 ieee80211_sta_pspoll(sta); 2510 break; 2511 default: 2512 break; 2513 } 2514 } 2515 2516 rcu_read_unlock(); 2517 } 2518 2519 static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw, 2520 struct ieee80211_vif *vif, 2521 struct ieee80211_sta *sta) 2522 { 2523 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2524 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 2525 2526 /* 2527 * This is called before mac80211 does RCU synchronisation, 2528 * so here we already invalidate our internal RCU-protected 2529 * station pointer. The rest of the code will thus no longer 2530 * be able to find the station this way, and we don't rely 2531 * on further RCU synchronisation after the sta_state() 2532 * callback deleted the station. 
2533 */ 2534 mutex_lock(&mvm->mutex); 2535 if (sta == rcu_access_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id])) 2536 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], 2537 ERR_PTR(-ENOENT)); 2538 2539 mutex_unlock(&mvm->mutex); 2540 } 2541 2542 static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 2543 const u8 *bssid) 2544 { 2545 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT)) 2546 return; 2547 2548 if (vif->p2p && !iwl_mvm_is_p2p_scm_uapsd_supported(mvm)) { 2549 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; 2550 return; 2551 } 2552 2553 if (!vif->p2p && 2554 (iwlwifi_mod_params.uapsd_disable & IWL_DISABLE_UAPSD_BSS)) { 2555 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; 2556 return; 2557 } 2558 2559 vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; 2560 } 2561 2562 static void 2563 iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm, 2564 struct ieee80211_vif *vif, u8 *peer_addr, 2565 enum nl80211_tdls_operation action) 2566 { 2567 struct iwl_fw_dbg_trigger_tlv *trig; 2568 struct iwl_fw_dbg_trigger_tdls *tdls_trig; 2569 2570 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TDLS)) 2571 return; 2572 2573 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TDLS); 2574 tdls_trig = (void *)trig->data; 2575 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig)) 2576 return; 2577 2578 if (!(tdls_trig->action_bitmap & BIT(action))) 2579 return; 2580 2581 if (tdls_trig->peer_mode && 2582 memcmp(tdls_trig->peer, peer_addr, ETH_ALEN) != 0) 2583 return; 2584 2585 iwl_mvm_fw_dbg_collect_trig(mvm, trig, 2586 "TDLS event occurred, peer %pM, action %d", 2587 peer_addr, action); 2588 } 2589 2590 static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm, 2591 struct iwl_mvm_sta *mvm_sta) 2592 { 2593 struct iwl_mvm_tid_data *tid_data; 2594 struct sk_buff *skb; 2595 int i; 2596 2597 spin_lock_bh(&mvm_sta->lock); 2598 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) { 2599 tid_data = &mvm_sta->tid_data[i]; 2600 2601 while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames))) { 2602 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 2603 2604 /* 2605 * The first deferred frame should've stopped the MAC 2606 * queues, so we should never get a second deferred 2607 * frame for the RA/TID. 2608 */ 2609 iwl_mvm_start_mac_queues(mvm, info->hw_queue); 2610 ieee80211_free_txskb(mvm->hw, skb); 2611 } 2612 } 2613 spin_unlock_bh(&mvm_sta->lock); 2614 } 2615 2616 static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, 2617 struct ieee80211_vif *vif, 2618 struct ieee80211_sta *sta, 2619 enum ieee80211_sta_state old_state, 2620 enum ieee80211_sta_state new_state) 2621 { 2622 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2623 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2624 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 2625 int ret; 2626 2627 IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n", 2628 sta->addr, old_state, new_state); 2629 2630 /* this would be a mac80211 bug ... but don't crash */ 2631 if (WARN_ON_ONCE(!mvmvif->phy_ctxt)) 2632 return -EINVAL; 2633 2634 /* if a STA is being removed, reuse its ID */ 2635 flush_work(&mvm->sta_drained_wk); 2636 2637 /* 2638 * If we are in a STA removal flow and in DQA mode: 2639 * 2640 * This is after the sync_rcu part, so the queues have already been 2641 * flushed. No more TXs on their way in mac80211's path, and no more in 2642 * the queues. 2643 * Also, we won't be getting any new TX frames for this station. 
2644 * What we might have are deferred TX frames that need to be taken care 2645 * of. 2646 * 2647 * Drop any still-queued deferred-frame before removing the STA, and 2648 * make sure the worker is no longer handling frames for this STA. 2649 */ 2650 if (old_state == IEEE80211_STA_NONE && 2651 new_state == IEEE80211_STA_NOTEXIST && 2652 iwl_mvm_is_dqa_supported(mvm)) { 2653 iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta); 2654 flush_work(&mvm->add_stream_wk); 2655 2656 /* 2657 * No need to make sure deferred TX indication is off since the 2658 * worker will already remove it if it was on 2659 */ 2660 } 2661 2662 mutex_lock(&mvm->mutex); 2663 /* track whether or not the station is associated */ 2664 mvm_sta->associated = new_state >= IEEE80211_STA_ASSOC; 2665 2666 if (old_state == IEEE80211_STA_NOTEXIST && 2667 new_state == IEEE80211_STA_NONE) { 2668 /* 2669 * Firmware bug - it'll crash if the beacon interval is less 2670 * than 16. We can't avoid connecting at all, so refuse the 2671 * station state change, this will cause mac80211 to abandon 2672 * attempts to connect to this AP, and eventually wpa_s will 2673 * blacklist the AP... 2674 */ 2675 if (vif->type == NL80211_IFTYPE_STATION && 2676 vif->bss_conf.beacon_int < 16) { 2677 IWL_ERR(mvm, 2678 "AP %pM beacon interval is %d, refusing due to firmware bug!\n", 2679 sta->addr, vif->bss_conf.beacon_int); 2680 ret = -EINVAL; 2681 goto out_unlock; 2682 } 2683 2684 if (sta->tdls && 2685 (vif->p2p || 2686 iwl_mvm_tdls_sta_count(mvm, NULL) == 2687 IWL_MVM_TDLS_STA_COUNT || 2688 iwl_mvm_phy_ctx_count(mvm) > 1)) { 2689 IWL_DEBUG_MAC80211(mvm, "refusing TDLS sta\n"); 2690 ret = -EBUSY; 2691 goto out_unlock; 2692 } 2693 2694 ret = iwl_mvm_add_sta(mvm, vif, sta); 2695 if (sta->tdls && ret == 0) { 2696 iwl_mvm_recalc_tdls_state(mvm, vif, true); 2697 iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, 2698 NL80211_TDLS_SETUP); 2699 } 2700 } else if (old_state == IEEE80211_STA_NONE && 2701 new_state == IEEE80211_STA_AUTH) { 2702 /* 2703 * EBS may be disabled due to previous failures reported by FW. 2704 * Reset EBS status here assuming environment has been changed. 
2705 */ 2706 mvm->last_ebs_successful = true; 2707 iwl_mvm_check_uapsd(mvm, vif, sta->addr); 2708 ret = 0; 2709 } else if (old_state == IEEE80211_STA_AUTH && 2710 new_state == IEEE80211_STA_ASSOC) { 2711 if (vif->type == NL80211_IFTYPE_AP) { 2712 mvmvif->ap_assoc_sta_count++; 2713 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 2714 } 2715 2716 iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, 2717 true); 2718 ret = iwl_mvm_update_sta(mvm, vif, sta); 2719 } else if (old_state == IEEE80211_STA_ASSOC && 2720 new_state == IEEE80211_STA_AUTHORIZED) { 2721 2722 /* we don't support TDLS during DCM */ 2723 if (iwl_mvm_phy_ctx_count(mvm) > 1) 2724 iwl_mvm_teardown_tdls_peers(mvm); 2725 2726 if (sta->tdls) 2727 iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, 2728 NL80211_TDLS_ENABLE_LINK); 2729 2730 /* enable beacon filtering */ 2731 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); 2732 ret = 0; 2733 } else if (old_state == IEEE80211_STA_AUTHORIZED && 2734 new_state == IEEE80211_STA_ASSOC) { 2735 /* disable beacon filtering */ 2736 WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif, 0)); 2737 ret = 0; 2738 } else if (old_state == IEEE80211_STA_ASSOC && 2739 new_state == IEEE80211_STA_AUTH) { 2740 if (vif->type == NL80211_IFTYPE_AP) { 2741 mvmvif->ap_assoc_sta_count--; 2742 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 2743 } 2744 ret = 0; 2745 } else if (old_state == IEEE80211_STA_AUTH && 2746 new_state == IEEE80211_STA_NONE) { 2747 ret = 0; 2748 } else if (old_state == IEEE80211_STA_NONE && 2749 new_state == IEEE80211_STA_NOTEXIST) { 2750 ret = iwl_mvm_rm_sta(mvm, vif, sta); 2751 if (sta->tdls) { 2752 iwl_mvm_recalc_tdls_state(mvm, vif, false); 2753 iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, 2754 NL80211_TDLS_DISABLE_LINK); 2755 } 2756 } else { 2757 ret = -EIO; 2758 } 2759 out_unlock: 2760 mutex_unlock(&mvm->mutex); 2761 2762 if (sta->tdls && ret == 0) { 2763 if (old_state == IEEE80211_STA_NOTEXIST && 2764 new_state == IEEE80211_STA_NONE) 2765 ieee80211_reserve_tid(sta, IWL_MVM_TDLS_FW_TID); 2766 else if (old_state == IEEE80211_STA_NONE && 2767 new_state == IEEE80211_STA_NOTEXIST) 2768 ieee80211_unreserve_tid(sta, IWL_MVM_TDLS_FW_TID); 2769 } 2770 2771 return ret; 2772 } 2773 2774 static int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value) 2775 { 2776 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2777 2778 mvm->rts_threshold = value; 2779 2780 return 0; 2781 } 2782 2783 static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw, 2784 struct ieee80211_vif *vif, 2785 struct ieee80211_sta *sta, u32 changed) 2786 { 2787 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2788 2789 if (vif->type == NL80211_IFTYPE_STATION && 2790 changed & IEEE80211_RC_NSS_CHANGED) 2791 iwl_mvm_sf_update(mvm, vif, false); 2792 } 2793 2794 static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw, 2795 struct ieee80211_vif *vif, u16 ac, 2796 const struct ieee80211_tx_queue_params *params) 2797 { 2798 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2799 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2800 2801 mvmvif->queue_params[ac] = *params; 2802 2803 /* 2804 * No need to update right away, we'll get BSS_CHANGED_QOS 2805 * The exception is P2P_DEVICE interface which needs immediate update. 
*/ 2807 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { 2808 int ret; 2809 2810 mutex_lock(&mvm->mutex); 2811 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 2812 mutex_unlock(&mvm->mutex); 2813 return ret; 2814 } 2815 return 0; 2816 } 2817 2818 static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw, 2819 struct ieee80211_vif *vif) 2820 { 2821 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2822 u32 duration = IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS; 2823 u32 min_duration = IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS; 2824 2825 if (WARN_ON_ONCE(vif->bss_conf.assoc)) 2826 return; 2827 2828 /* 2829 * iwl_mvm_protect_session() reads directly from the device 2830 * (the system time), so make sure it is available. 2831 */ 2832 if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PREPARE_TX)) 2833 return; 2834 2835 mutex_lock(&mvm->mutex); 2836 /* Try really hard to protect the session and hear a beacon */ 2837 iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500, false); 2838 mutex_unlock(&mvm->mutex); 2839 2840 iwl_mvm_unref(mvm, IWL_MVM_REF_PREPARE_TX); 2841 } 2842 2843 static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw, 2844 struct ieee80211_vif *vif, 2845 struct cfg80211_sched_scan_request *req, 2846 struct ieee80211_scan_ies *ies) 2847 { 2848 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2849 2850 int ret; 2851 2852 mutex_lock(&mvm->mutex); 2853 2854 if (!vif->bss_conf.idle) { 2855 ret = -EBUSY; 2856 goto out; 2857 } 2858 2859 ret = iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED); 2860 2861 out: 2862 mutex_unlock(&mvm->mutex); 2863 return ret; 2864 } 2865 2866 static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw, 2867 struct ieee80211_vif *vif) 2868 { 2869 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2870 int ret; 2871 2872 mutex_lock(&mvm->mutex); 2873 2874 /* Due to a race condition, it's possible that mac80211 asks 2875 * us to stop a sched_scan when it's already stopped. This 2876 * can happen, for instance, if we stopped the scan ourselves, 2877 * called ieee80211_sched_scan_stopped() and the userspace called 2878 * stop sched scan before ieee80211_sched_scan_stopped_work() 2879 * could run. To handle this, simply return if the scan is 2880 * not running.
2881 */ 2882 if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) { 2883 mutex_unlock(&mvm->mutex); 2884 return 0; 2885 } 2886 2887 ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, false); 2888 mutex_unlock(&mvm->mutex); 2889 iwl_mvm_wait_for_async_handlers(mvm); 2890 2891 return ret; 2892 } 2893 2894 static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, 2895 enum set_key_cmd cmd, 2896 struct ieee80211_vif *vif, 2897 struct ieee80211_sta *sta, 2898 struct ieee80211_key_conf *key) 2899 { 2900 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2901 struct iwl_mvm_sta *mvmsta; 2902 struct iwl_mvm_key_pn *ptk_pn; 2903 int keyidx = key->keyidx; 2904 int ret; 2905 u8 key_offset; 2906 2907 if (iwlwifi_mod_params.swcrypto) { 2908 IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n"); 2909 return -EOPNOTSUPP; 2910 } 2911 2912 switch (key->cipher) { 2913 case WLAN_CIPHER_SUITE_TKIP: 2914 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; 2915 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; 2916 break; 2917 case WLAN_CIPHER_SUITE_CCMP: 2918 case WLAN_CIPHER_SUITE_GCMP: 2919 case WLAN_CIPHER_SUITE_GCMP_256: 2920 if (!iwl_mvm_has_new_tx_api(mvm)) 2921 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; 2922 break; 2923 case WLAN_CIPHER_SUITE_AES_CMAC: 2924 case WLAN_CIPHER_SUITE_BIP_GMAC_128: 2925 case WLAN_CIPHER_SUITE_BIP_GMAC_256: 2926 WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE)); 2927 break; 2928 case WLAN_CIPHER_SUITE_WEP40: 2929 case WLAN_CIPHER_SUITE_WEP104: 2930 /* For non-client mode, only use WEP keys for TX as we probably 2931 * don't have a station yet anyway and would then have to keep 2932 * track of the keys, linking them to each of the clients/peers 2933 * as they appear. For now, don't do that, for performance WEP 2934 * offload doesn't really matter much, but we need it for some 2935 * other offload features in client mode. 2936 */ 2937 if (vif->type != NL80211_IFTYPE_STATION) 2938 return 0; 2939 break; 2940 default: 2941 /* currently FW supports only one optional cipher scheme */ 2942 if (hw->n_cipher_schemes && 2943 hw->cipher_schemes->cipher == key->cipher) 2944 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; 2945 else 2946 return -EOPNOTSUPP; 2947 } 2948 2949 mutex_lock(&mvm->mutex); 2950 2951 switch (cmd) { 2952 case SET_KEY: 2953 if ((vif->type == NL80211_IFTYPE_ADHOC || 2954 vif->type == NL80211_IFTYPE_AP) && !sta) { 2955 /* 2956 * GTK on AP interface is a TX-only key, return 0; 2957 * on IBSS they're per-station and because we're lazy 2958 * we don't support them for RX, so do the same. 2959 * CMAC/GMAC in AP/IBSS modes must be done in software. 2960 */ 2961 if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC || 2962 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || 2963 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) 2964 ret = -EOPNOTSUPP; 2965 else 2966 ret = 0; 2967 2968 if (key->cipher != WLAN_CIPHER_SUITE_GCMP && 2969 key->cipher != WLAN_CIPHER_SUITE_GCMP_256 && 2970 !iwl_mvm_has_new_tx_api(mvm)) { 2971 key->hw_key_idx = STA_KEY_IDX_INVALID; 2972 break; 2973 } 2974 } 2975 2976 /* During FW restart, in order to restore the state as it was, 2977 * don't try to reprogram keys we previously failed for. 
2978 */ 2979 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && 2980 key->hw_key_idx == STA_KEY_IDX_INVALID) { 2981 IWL_DEBUG_MAC80211(mvm, 2982 "skip invalid idx key programming during restart\n"); 2983 ret = 0; 2984 break; 2985 } 2986 2987 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && 2988 sta && iwl_mvm_has_new_rx_api(mvm) && 2989 key->flags & IEEE80211_KEY_FLAG_PAIRWISE && 2990 (key->cipher == WLAN_CIPHER_SUITE_CCMP || 2991 key->cipher == WLAN_CIPHER_SUITE_GCMP || 2992 key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) { 2993 struct ieee80211_key_seq seq; 2994 int tid, q; 2995 2996 mvmsta = iwl_mvm_sta_from_mac80211(sta); 2997 WARN_ON(rcu_access_pointer(mvmsta->ptk_pn[keyidx])); 2998 ptk_pn = kzalloc(sizeof(*ptk_pn) + 2999 mvm->trans->num_rx_queues * 3000 sizeof(ptk_pn->q[0]), 3001 GFP_KERNEL); 3002 if (!ptk_pn) { 3003 ret = -ENOMEM; 3004 break; 3005 } 3006 3007 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { 3008 ieee80211_get_key_rx_seq(key, tid, &seq); 3009 for (q = 0; q < mvm->trans->num_rx_queues; q++) 3010 memcpy(ptk_pn->q[q].pn[tid], 3011 seq.ccmp.pn, 3012 IEEE80211_CCMP_PN_LEN); 3013 } 3014 3015 rcu_assign_pointer(mvmsta->ptk_pn[keyidx], ptk_pn); 3016 } 3017 3018 /* in HW restart reuse the index, otherwise request a new one */ 3019 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) 3020 key_offset = key->hw_key_idx; 3021 else 3022 key_offset = STA_KEY_IDX_INVALID; 3023 3024 IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n"); 3025 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset); 3026 if (ret) { 3027 IWL_WARN(mvm, "set key failed\n"); 3028 /* 3029 * can't add key for RX, but we don't need it 3030 * in the device for TX so still return 0 3031 */ 3032 key->hw_key_idx = STA_KEY_IDX_INVALID; 3033 ret = 0; 3034 } 3035 3036 break; 3037 case DISABLE_KEY: 3038 if (key->hw_key_idx == STA_KEY_IDX_INVALID) { 3039 ret = 0; 3040 break; 3041 } 3042 3043 if (sta && iwl_mvm_has_new_rx_api(mvm) && 3044 key->flags & IEEE80211_KEY_FLAG_PAIRWISE && 3045 (key->cipher == WLAN_CIPHER_SUITE_CCMP || 3046 key->cipher == WLAN_CIPHER_SUITE_GCMP || 3047 key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) { 3048 mvmsta = iwl_mvm_sta_from_mac80211(sta); 3049 ptk_pn = rcu_dereference_protected( 3050 mvmsta->ptk_pn[keyidx], 3051 lockdep_is_held(&mvm->mutex)); 3052 RCU_INIT_POINTER(mvmsta->ptk_pn[keyidx], NULL); 3053 if (ptk_pn) 3054 kfree_rcu(ptk_pn, rcu_head); 3055 } 3056 3057 IWL_DEBUG_MAC80211(mvm, "disable hwcrypto key\n"); 3058 ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key); 3059 break; 3060 default: 3061 ret = -EINVAL; 3062 } 3063 3064 mutex_unlock(&mvm->mutex); 3065 return ret; 3066 } 3067 3068 static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw, 3069 struct ieee80211_vif *vif, 3070 struct ieee80211_key_conf *keyconf, 3071 struct ieee80211_sta *sta, 3072 u32 iv32, u16 *phase1key) 3073 { 3074 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3075 3076 if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID) 3077 return; 3078 3079 iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key); 3080 } 3081 3082 3083 static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait, 3084 struct iwl_rx_packet *pkt, void *data) 3085 { 3086 struct iwl_mvm *mvm = 3087 container_of(notif_wait, struct iwl_mvm, notif_wait); 3088 struct iwl_hs20_roc_res *resp; 3089 int resp_len = iwl_rx_packet_payload_len(pkt); 3090 struct iwl_mvm_time_event_data *te_data = data; 3091 3092 if (WARN_ON(pkt->hdr.cmd != HOT_SPOT_CMD)) 3093 return true; 3094 3095 if (WARN_ON_ONCE(resp_len != sizeof(*resp))) 
{ 3096 IWL_ERR(mvm, "Invalid HOT_SPOT_CMD response\n"); 3097 return true; 3098 } 3099 3100 resp = (void *)pkt->data; 3101 3102 IWL_DEBUG_TE(mvm, 3103 "Aux ROC: Received response from ucode: status=%d uid=%d\n", 3104 resp->status, resp->event_unique_id); 3105 3106 te_data->uid = le32_to_cpu(resp->event_unique_id); 3107 IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n", 3108 te_data->uid); 3109 3110 spin_lock_bh(&mvm->time_event_lock); 3111 list_add_tail(&te_data->list, &mvm->aux_roc_te_list); 3112 spin_unlock_bh(&mvm->time_event_lock); 3113 3114 return true; 3115 } 3116 3117 #define AUX_ROC_MIN_DURATION MSEC_TO_TU(100) 3118 #define AUX_ROC_MIN_DELAY MSEC_TO_TU(200) 3119 #define AUX_ROC_MAX_DELAY MSEC_TO_TU(600) 3120 #define AUX_ROC_SAFETY_BUFFER MSEC_TO_TU(20) 3121 #define AUX_ROC_MIN_SAFETY_BUFFER MSEC_TO_TU(10) 3122 static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, 3123 struct ieee80211_channel *channel, 3124 struct ieee80211_vif *vif, 3125 int duration) 3126 { 3127 int res, time_reg = DEVICE_SYSTEM_TIME_REG; 3128 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3129 struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data; 3130 static const u16 time_event_response[] = { HOT_SPOT_CMD }; 3131 struct iwl_notification_wait wait_time_event; 3132 u32 dtim_interval = vif->bss_conf.dtim_period * 3133 vif->bss_conf.beacon_int; 3134 u32 req_dur, delay; 3135 struct iwl_hs20_roc_req aux_roc_req = { 3136 .action = cpu_to_le32(FW_CTXT_ACTION_ADD), 3137 .id_and_color = 3138 cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)), 3139 .sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id), 3140 /* Set the channel info data */ 3141 .channel_info.band = (channel->band == NL80211_BAND_2GHZ) ? 3142 PHY_BAND_24 : PHY_BAND_5, 3143 .channel_info.channel = channel->hw_value, 3144 .channel_info.width = PHY_VHT_CHANNEL_MODE20, 3145 /* Set the time and duration */ 3146 .apply_time = cpu_to_le32(iwl_read_prph(mvm->trans, time_reg)), 3147 }; 3148 3149 delay = AUX_ROC_MIN_DELAY; 3150 req_dur = MSEC_TO_TU(duration); 3151 3152 /* 3153 * If we are associated we want the delay time to be at least one 3154 * dtim interval so that the FW can wait until after the DTIM and 3155 * then start the time event, this will potentially allow us to 3156 * remain off-channel for the max duration. 3157 * Since we want to use almost a whole dtim interval we would also 3158 * like the delay to be for 2-3 dtim intervals, in case there are 3159 * other time events with higher priority.
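* E.g. with beacon_int = 100 TU and dtim_period = 3, dtim_interval is 300 TU: the delay below becomes min(3 * 300 TU, AUX_ROC_MAX_DELAY) and a requested duration of 300 TU or more is clamped to 300 TU minus the safety buffer.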
3160 */ 3161 if (vif->bss_conf.assoc) { 3162 delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY); 3163 /* We cannot remain off-channel longer than the DTIM interval */ 3164 if (dtim_interval <= req_dur) { 3165 req_dur = dtim_interval - AUX_ROC_SAFETY_BUFFER; 3166 if (req_dur <= AUX_ROC_MIN_DURATION) 3167 req_dur = dtim_interval - 3168 AUX_ROC_MIN_SAFETY_BUFFER; 3169 } 3170 } 3171 3172 aux_roc_req.duration = cpu_to_le32(req_dur); 3173 aux_roc_req.apply_time_max_delay = cpu_to_le32(delay); 3174 3175 IWL_DEBUG_TE(mvm, 3176 "ROC: Requesting to remain on channel %u for %ums (requested = %ums, max_delay = %ums, dtim_interval = %ums)\n", 3177 channel->hw_value, req_dur, duration, delay, 3178 dtim_interval); 3179 /* Set the node address */ 3180 memcpy(aux_roc_req.node_addr, vif->addr, ETH_ALEN); 3181 3182 lockdep_assert_held(&mvm->mutex); 3183 3184 spin_lock_bh(&mvm->time_event_lock); 3185 3186 if (WARN_ON(te_data->id == HOT_SPOT_CMD)) { 3187 spin_unlock_bh(&mvm->time_event_lock); 3188 return -EIO; 3189 } 3190 3191 te_data->vif = vif; 3192 te_data->duration = duration; 3193 te_data->id = HOT_SPOT_CMD; 3194 3195 spin_unlock_bh(&mvm->time_event_lock); 3196 3197 /* 3198 * Use a notification wait, which really just processes the 3199 * command response and doesn't wait for anything, in order 3200 * to be able to process the response and get the UID inside 3201 * the RX path. Using CMD_WANT_SKB doesn't work because it 3202 * stores the buffer and then wakes up this thread, by which 3203 * time another notification (that the time event started) 3204 * might already be processed unsuccessfully. 3205 */ 3206 iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event, 3207 time_event_response, 3208 ARRAY_SIZE(time_event_response), 3209 iwl_mvm_rx_aux_roc, te_data); 3210 3211 res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, sizeof(aux_roc_req), 3212 &aux_roc_req); 3213 3214 if (res) { 3215 IWL_ERR(mvm, "Couldn't send HOT_SPOT_CMD: %d\n", res); 3216 iwl_remove_notification(&mvm->notif_wait, &wait_time_event); 3217 goto out_clear_te; 3218 } 3219 3220 /* No need to wait for anything, so just pass 1 (0 isn't valid) */ 3221 res = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1); 3222 /* should never fail */ 3223 WARN_ON_ONCE(res); 3224 3225 if (res) { 3226 out_clear_te: 3227 spin_lock_bh(&mvm->time_event_lock); 3228 iwl_mvm_te_clear_data(mvm, te_data); 3229 spin_unlock_bh(&mvm->time_event_lock); 3230 } 3231 3232 return res; 3233 } 3234 3235 static int iwl_mvm_roc(struct ieee80211_hw *hw, 3236 struct ieee80211_vif *vif, 3237 struct ieee80211_channel *channel, 3238 int duration, 3239 enum ieee80211_roc_type type) 3240 { 3241 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3242 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3243 struct cfg80211_chan_def chandef; 3244 struct iwl_mvm_phy_ctxt *phy_ctxt; 3245 int ret, i; 3246 3247 IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value, 3248 duration, type); 3249 3250 flush_work(&mvm->roc_done_wk); 3251 3252 mutex_lock(&mvm->mutex); 3253 3254 switch (vif->type) { 3255 case NL80211_IFTYPE_STATION: 3256 if (fw_has_capa(&mvm->fw->ucode_capa, 3257 IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) { 3258 /* Use aux roc framework (HS20) */ 3259 ret = iwl_mvm_send_aux_roc_cmd(mvm, channel, 3260 vif, duration); 3261 goto out_unlock; 3262 } 3263 IWL_ERR(mvm, "hotspot not supported\n"); 3264 ret = -EINVAL; 3265 goto out_unlock; 3266 case NL80211_IFTYPE_P2P_DEVICE: 3267 /* handle below */ 3268 break; 3269 default: 3270 IWL_ERR(mvm, "vif isn't 
P2P_DEVICE: %d\n", vif->type); 3271 ret = -EINVAL; 3272 goto out_unlock; 3273 } 3274 3275 for (i = 0; i < NUM_PHY_CTX; i++) { 3276 phy_ctxt = &mvm->phy_ctxts[i]; 3277 if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt) 3278 continue; 3279 3280 if (phy_ctxt->ref && channel == phy_ctxt->channel) { 3281 /* 3282 * Unbind the P2P_DEVICE from the current PHY context, 3283 * and if the PHY context is not used remove it. 3284 */ 3285 ret = iwl_mvm_binding_remove_vif(mvm, vif); 3286 if (WARN(ret, "Failed unbinding P2P_DEVICE\n")) 3287 goto out_unlock; 3288 3289 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); 3290 3291 /* Bind the P2P_DEVICE to the current PHY Context */ 3292 mvmvif->phy_ctxt = phy_ctxt; 3293 3294 ret = iwl_mvm_binding_add_vif(mvm, vif); 3295 if (WARN(ret, "Failed binding P2P_DEVICE\n")) 3296 goto out_unlock; 3297 3298 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); 3299 goto schedule_time_event; 3300 } 3301 } 3302 3303 /* Need to update the PHY context only if the ROC channel changed */ 3304 if (channel == mvmvif->phy_ctxt->channel) 3305 goto schedule_time_event; 3306 3307 cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT); 3308 3309 /* 3310 * Change the PHY context configuration as it is currently referenced 3311 * only by the P2P Device MAC 3312 */ 3313 if (mvmvif->phy_ctxt->ref == 1) { 3314 ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt, 3315 &chandef, 1, 1); 3316 if (ret) 3317 goto out_unlock; 3318 } else { 3319 /* 3320 * The PHY context is shared with other MACs. Need to remove the 3321 * P2P Device from the binding, allocate an new PHY context and 3322 * create a new binding 3323 */ 3324 phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); 3325 if (!phy_ctxt) { 3326 ret = -ENOSPC; 3327 goto out_unlock; 3328 } 3329 3330 ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef, 3331 1, 1); 3332 if (ret) { 3333 IWL_ERR(mvm, "Failed to change PHY context\n"); 3334 goto out_unlock; 3335 } 3336 3337 /* Unbind the P2P_DEVICE from the current PHY context */ 3338 ret = iwl_mvm_binding_remove_vif(mvm, vif); 3339 if (WARN(ret, "Failed unbinding P2P_DEVICE\n")) 3340 goto out_unlock; 3341 3342 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); 3343 3344 /* Bind the P2P_DEVICE to the new allocated PHY context */ 3345 mvmvif->phy_ctxt = phy_ctxt; 3346 3347 ret = iwl_mvm_binding_add_vif(mvm, vif); 3348 if (WARN(ret, "Failed binding P2P_DEVICE\n")) 3349 goto out_unlock; 3350 3351 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); 3352 } 3353 3354 schedule_time_event: 3355 /* Schedule the time events */ 3356 ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type); 3357 3358 out_unlock: 3359 mutex_unlock(&mvm->mutex); 3360 IWL_DEBUG_MAC80211(mvm, "leave\n"); 3361 return ret; 3362 } 3363 3364 static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw) 3365 { 3366 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3367 3368 IWL_DEBUG_MAC80211(mvm, "enter\n"); 3369 3370 mutex_lock(&mvm->mutex); 3371 iwl_mvm_stop_roc(mvm); 3372 mutex_unlock(&mvm->mutex); 3373 3374 IWL_DEBUG_MAC80211(mvm, "leave\n"); 3375 return 0; 3376 } 3377 3378 static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm, 3379 struct ieee80211_chanctx_conf *ctx) 3380 { 3381 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; 3382 struct iwl_mvm_phy_ctxt *phy_ctxt; 3383 int ret; 3384 3385 lockdep_assert_held(&mvm->mutex); 3386 3387 IWL_DEBUG_MAC80211(mvm, "Add channel context\n"); 3388 3389 phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); 3390 if (!phy_ctxt) { 3391 ret = -ENOSPC; 3392 goto out; 3393 } 3394 3395 ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, 
&ctx->min_def, 3396 ctx->rx_chains_static, 3397 ctx->rx_chains_dynamic); 3398 if (ret) { 3399 IWL_ERR(mvm, "Failed to add PHY context\n"); 3400 goto out; 3401 } 3402 3403 iwl_mvm_phy_ctxt_ref(mvm, phy_ctxt); 3404 *phy_ctxt_id = phy_ctxt->id; 3405 out: 3406 return ret; 3407 } 3408 3409 static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw, 3410 struct ieee80211_chanctx_conf *ctx) 3411 { 3412 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3413 int ret; 3414 3415 mutex_lock(&mvm->mutex); 3416 ret = __iwl_mvm_add_chanctx(mvm, ctx); 3417 mutex_unlock(&mvm->mutex); 3418 3419 return ret; 3420 } 3421 3422 static void __iwl_mvm_remove_chanctx(struct iwl_mvm *mvm, 3423 struct ieee80211_chanctx_conf *ctx) 3424 { 3425 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; 3426 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; 3427 3428 lockdep_assert_held(&mvm->mutex); 3429 3430 iwl_mvm_phy_ctxt_unref(mvm, phy_ctxt); 3431 } 3432 3433 static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw, 3434 struct ieee80211_chanctx_conf *ctx) 3435 { 3436 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3437 3438 mutex_lock(&mvm->mutex); 3439 __iwl_mvm_remove_chanctx(mvm, ctx); 3440 mutex_unlock(&mvm->mutex); 3441 } 3442 3443 static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw, 3444 struct ieee80211_chanctx_conf *ctx, 3445 u32 changed) 3446 { 3447 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3448 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; 3449 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; 3450 3451 if (WARN_ONCE((phy_ctxt->ref > 1) && 3452 (changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH | 3453 IEEE80211_CHANCTX_CHANGE_RX_CHAINS | 3454 IEEE80211_CHANCTX_CHANGE_RADAR | 3455 IEEE80211_CHANCTX_CHANGE_MIN_WIDTH)), 3456 "Cannot change PHY. Ref=%d, changed=0x%X\n", 3457 phy_ctxt->ref, changed)) 3458 return; 3459 3460 mutex_lock(&mvm->mutex); 3461 iwl_mvm_bt_coex_vif_change(mvm); 3462 iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def, 3463 ctx->rx_chains_static, 3464 ctx->rx_chains_dynamic); 3465 mutex_unlock(&mvm->mutex); 3466 } 3467 3468 static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, 3469 struct ieee80211_vif *vif, 3470 struct ieee80211_chanctx_conf *ctx, 3471 bool switching_chanctx) 3472 { 3473 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; 3474 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; 3475 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3476 int ret; 3477 3478 lockdep_assert_held(&mvm->mutex); 3479 3480 mvmvif->phy_ctxt = phy_ctxt; 3481 3482 switch (vif->type) { 3483 case NL80211_IFTYPE_AP: 3484 /* only needed if we're switching chanctx (i.e. during CSA) */ 3485 if (switching_chanctx) { 3486 mvmvif->ap_ibss_active = true; 3487 break; 3488 } 3489 case NL80211_IFTYPE_ADHOC: 3490 /* 3491 * The AP binding flow is handled as part of the start_ap flow 3492 * (in bss_info_changed), similarly for IBSS. 3493 */ 3494 ret = 0; 3495 goto out; 3496 case NL80211_IFTYPE_STATION: 3497 break; 3498 case NL80211_IFTYPE_MONITOR: 3499 /* always disable PS when a monitor interface is active */ 3500 mvmvif->ps_disabled = true; 3501 break; 3502 default: 3503 ret = -EINVAL; 3504 goto out; 3505 } 3506 3507 ret = iwl_mvm_binding_add_vif(mvm, vif); 3508 if (ret) 3509 goto out; 3510 3511 /* 3512 * Power state must be updated before quotas, 3513 * otherwise fw will complain. 3514 */ 3515 iwl_mvm_power_update_mac(mvm); 3516 3517 /* Setting the quota at this stage is only required for monitor 3518 * interfaces. 
For the other types, the bss_info changed flow 3519 * will handle quota settings. 3520 */ 3521 if (vif->type == NL80211_IFTYPE_MONITOR) { 3522 mvmvif->monitor_active = true; 3523 ret = iwl_mvm_update_quotas(mvm, false, NULL); 3524 if (ret) 3525 goto out_remove_binding; 3526 3527 ret = iwl_mvm_add_snif_sta(mvm, vif); 3528 if (ret) 3529 goto out_remove_binding; 3530 3531 } 3532 3533 /* Handle binding during CSA */ 3534 if (vif->type == NL80211_IFTYPE_AP) { 3535 iwl_mvm_update_quotas(mvm, false, NULL); 3536 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 3537 } 3538 3539 if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) { 3540 u32 duration = 2 * vif->bss_conf.beacon_int; 3541 3542 /* iwl_mvm_protect_session() reads directly from the 3543 * device (the system time), so make sure it is 3544 * available. 3545 */ 3546 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_CSA); 3547 if (ret) 3548 goto out_remove_binding; 3549 3550 /* Protect the session to make sure we hear the first 3551 * beacon on the new channel. 3552 */ 3553 iwl_mvm_protect_session(mvm, vif, duration, duration, 3554 vif->bss_conf.beacon_int / 2, 3555 true); 3556 3557 iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_CSA); 3558 3559 iwl_mvm_update_quotas(mvm, false, NULL); 3560 } 3561 3562 goto out; 3563 3564 out_remove_binding: 3565 iwl_mvm_binding_remove_vif(mvm, vif); 3566 iwl_mvm_power_update_mac(mvm); 3567 out: 3568 if (ret) 3569 mvmvif->phy_ctxt = NULL; 3570 return ret; 3571 } 3572 static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw, 3573 struct ieee80211_vif *vif, 3574 struct ieee80211_chanctx_conf *ctx) 3575 { 3576 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3577 int ret; 3578 3579 mutex_lock(&mvm->mutex); 3580 ret = __iwl_mvm_assign_vif_chanctx(mvm, vif, ctx, false); 3581 mutex_unlock(&mvm->mutex); 3582 3583 return ret; 3584 } 3585 3586 static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm, 3587 struct ieee80211_vif *vif, 3588 struct ieee80211_chanctx_conf *ctx, 3589 bool switching_chanctx) 3590 { 3591 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3592 struct ieee80211_vif *disabled_vif = NULL; 3593 3594 lockdep_assert_held(&mvm->mutex); 3595 3596 iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data); 3597 3598 switch (vif->type) { 3599 case NL80211_IFTYPE_ADHOC: 3600 goto out; 3601 case NL80211_IFTYPE_MONITOR: 3602 mvmvif->monitor_active = false; 3603 mvmvif->ps_disabled = false; 3604 iwl_mvm_rm_snif_sta(mvm, vif); 3605 break; 3606 case NL80211_IFTYPE_AP: 3607 /* This part is triggered only during CSA */ 3608 if (!switching_chanctx || !mvmvif->ap_ibss_active) 3609 goto out; 3610 3611 mvmvif->csa_countdown = false; 3612 3613 /* Set CS bit on all the stations */ 3614 iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true); 3615 3616 /* Save blocked iface, the timeout is set on the next beacon */ 3617 rcu_assign_pointer(mvm->csa_tx_blocked_vif, vif); 3618 3619 mvmvif->ap_ibss_active = false; 3620 break; 3621 case NL80211_IFTYPE_STATION: 3622 if (!switching_chanctx) 3623 break; 3624 3625 disabled_vif = vif; 3626 3627 iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL); 3628 break; 3629 default: 3630 break; 3631 } 3632 3633 iwl_mvm_update_quotas(mvm, false, disabled_vif); 3634 iwl_mvm_binding_remove_vif(mvm, vif); 3635 3636 out: 3637 mvmvif->phy_ctxt = NULL; 3638 iwl_mvm_power_update_mac(mvm); 3639 } 3640 3641 static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw, 3642 struct ieee80211_vif *vif, 3643 struct ieee80211_chanctx_conf *ctx) 3644 { 3645 struct iwl_mvm *mvm = 
IWL_MAC80211_GET_MVM(hw); 3646 3647 mutex_lock(&mvm->mutex); 3648 __iwl_mvm_unassign_vif_chanctx(mvm, vif, ctx, false); 3649 mutex_unlock(&mvm->mutex); 3650 } 3651 3652 static int 3653 iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm, 3654 struct ieee80211_vif_chanctx_switch *vifs) 3655 { 3656 int ret; 3657 3658 mutex_lock(&mvm->mutex); 3659 __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true); 3660 __iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx); 3661 3662 ret = __iwl_mvm_add_chanctx(mvm, vifs[0].new_ctx); 3663 if (ret) { 3664 IWL_ERR(mvm, "failed to add new_ctx during channel switch\n"); 3665 goto out_reassign; 3666 } 3667 3668 ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx, 3669 true); 3670 if (ret) { 3671 IWL_ERR(mvm, 3672 "failed to assign new_ctx during channel switch\n"); 3673 goto out_remove; 3674 } 3675 3676 /* we don't support TDLS during DCM - can be caused by channel switch */ 3677 if (iwl_mvm_phy_ctx_count(mvm) > 1) 3678 iwl_mvm_teardown_tdls_peers(mvm); 3679 3680 goto out; 3681 3682 out_remove: 3683 __iwl_mvm_remove_chanctx(mvm, vifs[0].new_ctx); 3684 3685 out_reassign: 3686 if (__iwl_mvm_add_chanctx(mvm, vifs[0].old_ctx)) { 3687 IWL_ERR(mvm, "failed to add old_ctx back after failure.\n"); 3688 goto out_restart; 3689 } 3690 3691 if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, 3692 true)) { 3693 IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n"); 3694 goto out_restart; 3695 } 3696 3697 goto out; 3698 3699 out_restart: 3700 /* things keep failing, better restart the hw */ 3701 iwl_mvm_nic_restart(mvm, false); 3702 3703 out: 3704 mutex_unlock(&mvm->mutex); 3705 3706 return ret; 3707 } 3708 3709 static int 3710 iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm, 3711 struct ieee80211_vif_chanctx_switch *vifs) 3712 { 3713 int ret; 3714 3715 mutex_lock(&mvm->mutex); 3716 __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true); 3717 3718 ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx, 3719 true); 3720 if (ret) { 3721 IWL_ERR(mvm, 3722 "failed to assign new_ctx during channel switch\n"); 3723 goto out_reassign; 3724 } 3725 3726 goto out; 3727 3728 out_reassign: 3729 if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, 3730 true)) { 3731 IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n"); 3732 goto out_restart; 3733 } 3734 3735 goto out; 3736 3737 out_restart: 3738 /* things keep failing, better restart the hw */ 3739 iwl_mvm_nic_restart(mvm, false); 3740 3741 out: 3742 mutex_unlock(&mvm->mutex); 3743 3744 return ret; 3745 } 3746 3747 static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw, 3748 struct ieee80211_vif_chanctx_switch *vifs, 3749 int n_vifs, 3750 enum ieee80211_chanctx_switch_mode mode) 3751 { 3752 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3753 int ret; 3754 3755 /* we only support a single-vif right now */ 3756 if (n_vifs > 1) 3757 return -EOPNOTSUPP; 3758 3759 switch (mode) { 3760 case CHANCTX_SWMODE_SWAP_CONTEXTS: 3761 ret = iwl_mvm_switch_vif_chanctx_swap(mvm, vifs); 3762 break; 3763 case CHANCTX_SWMODE_REASSIGN_VIF: 3764 ret = iwl_mvm_switch_vif_chanctx_reassign(mvm, vifs); 3765 break; 3766 default: 3767 ret = -EOPNOTSUPP; 3768 break; 3769 } 3770 3771 return ret; 3772 } 3773 3774 static int iwl_mvm_tx_last_beacon(struct ieee80211_hw *hw) 3775 { 3776 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3777 3778 return mvm->ibss_manager; 3779 } 3780 3781 static int iwl_mvm_set_tim(struct ieee80211_hw *hw, 3782 struct 
static int iwl_mvm_tx_last_beacon(struct ieee80211_hw *hw)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	return mvm->ibss_manager;
}

static int iwl_mvm_set_tim(struct ieee80211_hw *hw,
			   struct ieee80211_sta *sta,
			   bool set)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	if (!mvm_sta || !mvm_sta->vif) {
		IWL_ERR(mvm, "Station is not associated to a vif\n");
		return -EINVAL;
	}

	return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif);
}

#ifdef CONFIG_NL80211_TESTMODE
static const struct nla_policy iwl_mvm_tm_policy[IWL_MVM_TM_ATTR_MAX + 1] = {
	[IWL_MVM_TM_ATTR_CMD] = { .type = NLA_U32 },
	[IWL_MVM_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 },
	[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 },
};

static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
				      struct ieee80211_vif *vif,
				      void *data, int len)
{
	struct nlattr *tb[IWL_MVM_TM_ATTR_MAX + 1];
	int err;
	u32 noa_duration;

	err = nla_parse(tb, IWL_MVM_TM_ATTR_MAX, data, len, iwl_mvm_tm_policy,
			NULL);
	if (err)
		return err;

	if (!tb[IWL_MVM_TM_ATTR_CMD])
		return -EINVAL;

	switch (nla_get_u32(tb[IWL_MVM_TM_ATTR_CMD])) {
	case IWL_MVM_TM_CMD_SET_NOA:
		if (!vif || vif->type != NL80211_IFTYPE_AP || !vif->p2p ||
		    !vif->bss_conf.enable_beacon ||
		    !tb[IWL_MVM_TM_ATTR_NOA_DURATION])
			return -EINVAL;

		noa_duration = nla_get_u32(tb[IWL_MVM_TM_ATTR_NOA_DURATION]);
		if (noa_duration >= vif->bss_conf.beacon_int)
			return -EINVAL;

		mvm->noa_duration = noa_duration;
		mvm->noa_vif = vif;

		return iwl_mvm_update_quotas(mvm, false, NULL);
	case IWL_MVM_TM_CMD_SET_BEACON_FILTER:
		/* must be associated client vif - ignore authorized */
		if (!vif || vif->type != NL80211_IFTYPE_STATION ||
		    !vif->bss_conf.assoc || !vif->bss_conf.dtim_period ||
		    !tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])
			return -EINVAL;

		if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
			return iwl_mvm_enable_beacon_filter(mvm, vif, 0);
		return iwl_mvm_disable_beacon_filter(mvm, vif, 0);
	}

	return -EOPNOTSUPP;
}

static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    void *data, int len)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	int err;

	mutex_lock(&mvm->mutex);
	err = __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len);
	mutex_unlock(&mvm->mutex);

	return err;
}
#endif
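
/* Illustrative numbers for the NoA testmode command above (example values,
 * not taken from this file): with vif->bss_conf.beacon_int = 100 TU, any
 * IWL_MVM_TM_ATTR_NOA_DURATION of 100 or more is rejected with -EINVAL,
 * while a request of, say, 50 is stored in mvm->noa_duration and pushed to
 * the firmware through iwl_mvm_update_quotas().
 */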
static void iwl_mvm_channel_switch(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   struct ieee80211_channel_switch *chsw)
{
	/* By implementing this operation, we prevent mac80211 from
	 * starting its own channel switch timer, so that we can call
	 * ieee80211_chswitch_done() ourselves at the right time
	 * (which is when the absence time event starts).
	 */

	IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw),
			   "dummy channel switch op\n");
}

static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
				      struct ieee80211_vif *vif,
				      struct ieee80211_channel_switch *chsw)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct ieee80211_vif *csa_vif;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	u32 apply_time;
	int ret;

	mutex_lock(&mvm->mutex);

	mvmvif->csa_failed = false;

	IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n",
			   chsw->chandef.center_freq1);

	iwl_fw_dbg_trigger_simple_stop(mvm, vif, FW_DBG_TRIGGER_CHANNEL_SWITCH);

	switch (vif->type) {
	case NL80211_IFTYPE_AP:
		csa_vif =
			rcu_dereference_protected(mvm->csa_vif,
						  lockdep_is_held(&mvm->mutex));
		if (WARN_ONCE(csa_vif && csa_vif->csa_active,
			      "Another CSA is already in progress")) {
			ret = -EBUSY;
			goto out_unlock;
		}

		/* we still haven't unblocked tx; prevent a new CS meanwhile */
		if (rcu_dereference_protected(mvm->csa_tx_blocked_vif,
					      lockdep_is_held(&mvm->mutex))) {
			ret = -EBUSY;
			goto out_unlock;
		}

		rcu_assign_pointer(mvm->csa_vif, vif);

		if (WARN_ONCE(mvmvif->csa_countdown,
			      "Previous CSA countdown didn't complete")) {
			ret = -EBUSY;
			goto out_unlock;
		}

		mvmvif->csa_target_freq = chsw->chandef.chan->center_freq;

		break;
	case NL80211_IFTYPE_STATION:
		if (mvmvif->lqm_active)
			iwl_mvm_send_lqm_cmd(vif,
					     LQM_CMD_OPERATION_STOP_MEASUREMENT,
					     0, 0);

		/* Schedule the time event to a bit before beacon 1,
		 * to make sure we're in the new channel when the
		 * GO/AP arrives.
		 */
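		/* Worked example with illustrative values (beacon_int = 100 TU,
		 * chsw->count = 5, a client switch margin of 10 TU): the offset
		 * below is (100 * 4 - 10) * 1024 = 399360 usec past
		 * chsw->device_timestamp, i.e. shortly before the beacon on
		 * which the switch takes effect. The multiplication by 1024
		 * converts TUs to usec.
		 */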
		apply_time = chsw->device_timestamp +
			((vif->bss_conf.beacon_int * (chsw->count - 1) -
			  IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024);

		if (chsw->block_tx)
			iwl_mvm_csa_client_absent(mvm, vif);

		iwl_mvm_schedule_csa_period(mvm, vif, vif->bss_conf.beacon_int,
					    apply_time);
		if (mvmvif->bf_data.bf_enabled) {
			ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
			if (ret)
				goto out_unlock;
		}

		break;
	default:
		break;
	}

	mvmvif->ps_disabled = true;

	ret = iwl_mvm_power_update_ps(mvm);
	if (ret)
		goto out_unlock;

	/* we won't be on this channel any longer */
	iwl_mvm_teardown_tdls_peers(mvm);

out_unlock:
	mutex_unlock(&mvm->mutex);

	return ret;
}

static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	int ret;

	mutex_lock(&mvm->mutex);

	if (mvmvif->csa_failed) {
		mvmvif->csa_failed = false;
		ret = -EIO;
		goto out_unlock;
	}

	if (vif->type == NL80211_IFTYPE_STATION) {
		struct iwl_mvm_sta *mvmsta;

		mvmsta = iwl_mvm_sta_from_staid_protected(mvm,
							  mvmvif->ap_sta_id);

		if (WARN_ON(!mvmsta)) {
			ret = -EIO;
			goto out_unlock;
		}

		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);

		iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);

		ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
		if (ret)
			goto out_unlock;

		iwl_mvm_stop_session_protection(mvm, vif);
	}

	mvmvif->ps_disabled = false;

	ret = iwl_mvm_power_update_ps(mvm);

out_unlock:
	mutex_unlock(&mvm->mutex);

	return ret;
}

static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif, u32 queues, bool drop)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif;
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_sta *sta;
	int i;
	u32 msk = 0;

	if (!vif || vif->type != NL80211_IFTYPE_STATION)
		return;

	/* Make sure we're done with the deferred traffic before flushing */
	if (iwl_mvm_is_dqa_supported(mvm))
		flush_work(&mvm->add_stream_wk);

	mutex_lock(&mvm->mutex);
	mvmvif = iwl_mvm_vif_from_mac80211(vif);

	/* flush the AP-station and all TDLS peers */
	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);
		if (mvmsta->vif != vif)
			continue;

		/* make sure only TDLS peers or the AP are flushed */
		WARN_ON(i != mvmvif->ap_sta_id && !sta->tdls);

		if (drop) {
			if (iwl_mvm_flush_sta(mvm, mvmsta, false, 0))
				IWL_ERR(mvm, "flush request fail\n");
		} else {
			msk |= mvmsta->tfd_queue_msk;
			if (iwl_mvm_has_new_tx_api(mvm))
				iwl_mvm_wait_sta_queues_empty(mvm, mvmsta);
		}
	}

	mutex_unlock(&mvm->mutex);

	/* this can take a while, and we may need/want other operations
	 * to succeed while doing this, so do it without the mutex held
	 */
	if (!drop && !iwl_mvm_has_new_tx_api(mvm))
		iwl_trans_wait_tx_queues_empty(mvm->trans, msk);
}
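
/* Note on units for the two statistics handlers below: the accumulated
 * (accu_radio_stats) and current (radio_stats) airtime counters are summed,
 * and the do_div(..., USEC_PER_MSEC) calls convert the usec values to the
 * msec granularity that the cfg80211 survey API expects.
 */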
static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
				  struct survey_info *survey)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	int ret;

	memset(survey, 0, sizeof(*survey));

	/* only support global statistics right now */
	if (idx != 0)
		return -ENOENT;

	if (!fw_has_capa(&mvm->fw->ucode_capa,
			 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
		return -ENOENT;

	mutex_lock(&mvm->mutex);

	if (iwl_mvm_firmware_running(mvm)) {
		ret = iwl_mvm_request_statistics(mvm, false);
		if (ret)
			goto out;
	}

	survey->filled = SURVEY_INFO_TIME |
			 SURVEY_INFO_TIME_RX |
			 SURVEY_INFO_TIME_TX |
			 SURVEY_INFO_TIME_SCAN;
	survey->time = mvm->accu_radio_stats.on_time_rf +
		       mvm->radio_stats.on_time_rf;
	do_div(survey->time, USEC_PER_MSEC);

	survey->time_rx = mvm->accu_radio_stats.rx_time +
			  mvm->radio_stats.rx_time;
	do_div(survey->time_rx, USEC_PER_MSEC);

	survey->time_tx = mvm->accu_radio_stats.tx_time +
			  mvm->radio_stats.tx_time;
	do_div(survey->time_tx, USEC_PER_MSEC);

	survey->time_scan = mvm->accu_radio_stats.on_time_scan +
			    mvm->radio_stats.on_time_scan;
	do_div(survey->time_scan, USEC_PER_MSEC);

	ret = 0;
out:
	mutex_unlock(&mvm->mutex);
	return ret;
}

static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta,
				       struct station_info *sinfo)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);

	if (mvmsta->avg_energy) {
		sinfo->signal_avg = mvmsta->avg_energy;
		sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG);
	}

	if (!fw_has_capa(&mvm->fw->ucode_capa,
			 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
		return;

	/* if beacon filtering isn't on, mac80211 does it anyway */
	if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
		return;

	if (!vif->bss_conf.assoc)
		return;

	mutex_lock(&mvm->mutex);

	if (mvmvif->ap_sta_id != mvmsta->sta_id)
		goto unlock;

	if (iwl_mvm_request_statistics(mvm, false))
		goto unlock;

	sinfo->rx_beacon = mvmvif->beacon_stats.num_beacons +
			   mvmvif->beacon_stats.accu_num_beacons;
	sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX);
	if (mvmvif->beacon_stats.avg_signal) {
		/* firmware only reports a value after RXing a few beacons */
		sinfo->rx_beacon_signal_avg = mvmvif->beacon_stats.avg_signal;
		sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
	}
unlock:
	mutex_unlock(&mvm->mutex);
}
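
/* The event callbacks below all follow the same fw-dbg pattern: check that
 * the trigger TLV exists, check the per-vif stop conditions, and only then
 * collect firmware debug data. CHECK_MLME_TRIGGER additionally decrements a
 * per-event countdown, so a dump fires only once that count is exhausted
 * (a zero count means collect on every occurrence).
 */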
static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm,
					struct ieee80211_vif *vif,
					const struct ieee80211_event *event)
{
#define CHECK_MLME_TRIGGER(_cnt, _fmt...)				\
	do {								\
		if ((trig_mlme->_cnt) && --(trig_mlme->_cnt))		\
			break;						\
		iwl_mvm_fw_dbg_collect_trig(mvm, trig, _fmt);		\
	} while (0)

	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_mlme *trig_mlme;

	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
		return;

	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
	trig_mlme = (void *)trig->data;
	if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
		return;

	if (event->u.mlme.data == ASSOC_EVENT) {
		if (event->u.mlme.status == MLME_DENIED)
			CHECK_MLME_TRIGGER(stop_assoc_denied,
					   "DENIED ASSOC: reason %d",
					   event->u.mlme.reason);
		else if (event->u.mlme.status == MLME_TIMEOUT)
			CHECK_MLME_TRIGGER(stop_assoc_timeout,
					   "ASSOC TIMEOUT");
	} else if (event->u.mlme.data == AUTH_EVENT) {
		if (event->u.mlme.status == MLME_DENIED)
			CHECK_MLME_TRIGGER(stop_auth_denied,
					   "DENIED AUTH: reason %d",
					   event->u.mlme.reason);
		else if (event->u.mlme.status == MLME_TIMEOUT)
			CHECK_MLME_TRIGGER(stop_auth_timeout,
					   "AUTH TIMEOUT");
	} else if (event->u.mlme.data == DEAUTH_RX_EVENT) {
		CHECK_MLME_TRIGGER(stop_rx_deauth,
				   "DEAUTH RX %d", event->u.mlme.reason);
	} else if (event->u.mlme.data == DEAUTH_TX_EVENT) {
		CHECK_MLME_TRIGGER(stop_tx_deauth,
				   "DEAUTH TX %d", event->u.mlme.reason);
	}
#undef CHECK_MLME_TRIGGER
}

static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm,
					  struct ieee80211_vif *vif,
					  const struct ieee80211_event *event)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_ba *ba_trig;

	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
		return;

	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
	ba_trig = (void *)trig->data;
	if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
		return;

	if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid)))
		return;

	iwl_mvm_fw_dbg_collect_trig(mvm, trig,
				    "BAR received from %pM, tid %d, ssn %d",
				    event->u.ba.sta->addr, event->u.ba.tid,
				    event->u.ba.ssn);
}

static void
iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
				     struct ieee80211_vif *vif,
				     const struct ieee80211_event *event)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_ba *ba_trig;

	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
		return;

	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
	ba_trig = (void *)trig->data;
	if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
		return;

	if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(event->u.ba.tid)))
		return;

	iwl_mvm_fw_dbg_collect_trig(mvm, trig,
				    "Frame from %pM timed out, tid %d",
				    event->u.ba.sta->addr, event->u.ba.tid);
}

static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       const struct ieee80211_event *event)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	switch (event->type) {
	case MLME_EVENT:
		iwl_mvm_event_mlme_callback(mvm, vif, event);
		break;
	case BAR_RX_EVENT:
		iwl_mvm_event_bar_rx_callback(mvm, vif, event);
		break;
	case BA_FRAME_TIMEOUT:
		iwl_mvm_event_frame_timeout_callback(mvm, vif, event);
		break;
	default:
		break;
	}
}
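
/* Sketch of the RX-queue sync handshake implemented below: a notification
 * tagged with mvm->queue_sync_cookie is broadcast to every RX queue
 * (qmask = BIT(num_rx_queues) - 1). When a synchronous sync is requested,
 * queue_sync_counter is armed with the number of queues, each queue's echo
 * is expected to decrement it, and the caller sleeps on rx_sync_waitq for up
 * to one second (HZ), bailing out early if the radio is killed.
 */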
void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
				     struct iwl_mvm_internal_rxq_notif *notif,
				     u32 size)
{
	u32 qmask = BIT(mvm->trans->num_rx_queues) - 1;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* TODO - remove a000 disablement when we have RXQ config API */
	if (!iwl_mvm_has_new_rx_api(mvm) || iwl_mvm_has_new_tx_api(mvm))
		return;

	notif->cookie = mvm->queue_sync_cookie;

	if (notif->sync)
		atomic_set(&mvm->queue_sync_counter,
			   mvm->trans->num_rx_queues);

	ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif, size);
	if (ret) {
		IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret);
		goto out;
	}

	if (notif->sync) {
		ret = wait_event_timeout(mvm->rx_sync_waitq,
					 atomic_read(&mvm->queue_sync_counter) == 0 ||
					 iwl_mvm_is_radio_killed(mvm),
					 HZ);
		WARN_ON_ONCE(!ret && !iwl_mvm_is_radio_killed(mvm));
	}

out:
	atomic_set(&mvm->queue_sync_counter, 0);
	mvm->queue_sync_cookie++;
}

static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_internal_rxq_notif data = {
		.type = IWL_MVM_RXQ_EMPTY,
		.sync = 1,
	};

	mutex_lock(&mvm->mutex);
	iwl_mvm_sync_rx_queues_internal(mvm, &data, sizeof(data));
	mutex_unlock(&mvm->mutex);
}
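
/* mac80211 callback table. Note that AP and IBSS interfaces share the same
 * start/stop handlers, the testmode entry is compiled in only with
 * CONFIG_NL80211_TESTMODE, and the WoWLAN suspend/resume handlers live in
 * d3.c.
 */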
const struct ieee80211_ops iwl_mvm_hw_ops = {
	.tx = iwl_mvm_mac_tx,
	.ampdu_action = iwl_mvm_mac_ampdu_action,
	.start = iwl_mvm_mac_start,
	.reconfig_complete = iwl_mvm_mac_reconfig_complete,
	.stop = iwl_mvm_mac_stop,
	.add_interface = iwl_mvm_mac_add_interface,
	.remove_interface = iwl_mvm_mac_remove_interface,
	.config = iwl_mvm_mac_config,
	.prepare_multicast = iwl_mvm_prepare_multicast,
	.configure_filter = iwl_mvm_configure_filter,
	.config_iface_filter = iwl_mvm_config_iface_filter,
	.bss_info_changed = iwl_mvm_bss_info_changed,
	.hw_scan = iwl_mvm_mac_hw_scan,
	.cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan,
	.sta_pre_rcu_remove = iwl_mvm_sta_pre_rcu_remove,
	.sta_state = iwl_mvm_mac_sta_state,
	.sta_notify = iwl_mvm_mac_sta_notify,
	.allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames,
	.release_buffered_frames = iwl_mvm_mac_release_buffered_frames,
	.set_rts_threshold = iwl_mvm_mac_set_rts_threshold,
	.sta_rc_update = iwl_mvm_sta_rc_update,
	.conf_tx = iwl_mvm_mac_conf_tx,
	.mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx,
	.mgd_protect_tdls_discover = iwl_mvm_mac_mgd_protect_tdls_discover,
	.flush = iwl_mvm_mac_flush,
	.sched_scan_start = iwl_mvm_mac_sched_scan_start,
	.sched_scan_stop = iwl_mvm_mac_sched_scan_stop,
	.set_key = iwl_mvm_mac_set_key,
	.update_tkip_key = iwl_mvm_mac_update_tkip_key,
	.remain_on_channel = iwl_mvm_roc,
	.cancel_remain_on_channel = iwl_mvm_cancel_roc,
	.add_chanctx = iwl_mvm_add_chanctx,
	.remove_chanctx = iwl_mvm_remove_chanctx,
	.change_chanctx = iwl_mvm_change_chanctx,
	.assign_vif_chanctx = iwl_mvm_assign_vif_chanctx,
	.unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx,
	.switch_vif_chanctx = iwl_mvm_switch_vif_chanctx,

	.start_ap = iwl_mvm_start_ap_ibss,
	.stop_ap = iwl_mvm_stop_ap_ibss,
	.join_ibss = iwl_mvm_start_ap_ibss,
	.leave_ibss = iwl_mvm_stop_ap_ibss,

	.tx_last_beacon = iwl_mvm_tx_last_beacon,

	.set_tim = iwl_mvm_set_tim,

	.channel_switch = iwl_mvm_channel_switch,
	.pre_channel_switch = iwl_mvm_pre_channel_switch,
	.post_channel_switch = iwl_mvm_post_channel_switch,

	.tdls_channel_switch = iwl_mvm_tdls_channel_switch,
	.tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch,
	.tdls_recv_channel_switch = iwl_mvm_tdls_recv_channel_switch,

	.event_callback = iwl_mvm_mac_event_callback,

	.sync_rx_queues = iwl_mvm_sync_rx_queues,

	CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)

#ifdef CONFIG_PM_SLEEP
	/* look at d3.c */
	.suspend = iwl_mvm_suspend,
	.resume = iwl_mvm_resume,
	.set_wakeup = iwl_mvm_set_wakeup,
	.set_rekey_data = iwl_mvm_set_rekey_data,
#if IS_ENABLED(CONFIG_IPV6)
	.ipv6_addr_change = iwl_mvm_ipv6_addr_change,
#endif
	.set_default_unicast_key = iwl_mvm_set_default_unicast_key,
#endif
	.get_survey = iwl_mvm_mac_get_survey,
	.sta_statistics = iwl_mvm_mac_sta_statistics,
};