/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/

#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/fs.h>
#include <net/cfg80211.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <net/addrconf.h>
#include "iwl-modparams.h"
#include "fw-api.h"
#include "mvm.h"

void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw,
			    struct ieee80211_vif *vif,
			    struct cfg80211_gtk_rekey_data *data)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (iwlwifi_mod_params.swcrypto)
		return;

	mutex_lock(&mvm->mutex);

	memcpy(mvmvif->rekey_data.kek, data->kek, NL80211_KEK_LEN);
	memcpy(mvmvif->rekey_data.kck, data->kck, NL80211_KCK_LEN);
	mvmvif->rekey_data.replay_ctr =
		cpu_to_le64(be64_to_cpup((__be64 *)data->replay_ctr));
	mvmvif->rekey_data.valid = true;

	mutex_unlock(&mvm->mutex);
}

#if IS_ENABLED(CONFIG_IPV6)
void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif,
			      struct inet6_dev *idev)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct inet6_ifaddr *ifa;
	int idx = 0;

	memset(mvmvif->tentative_addrs, 0, sizeof(mvmvif->tentative_addrs));

	read_lock_bh(&idev->lock);
	list_for_each_entry(ifa, &idev->addr_list, if_list) {
		mvmvif->target_ipv6_addrs[idx] = ifa->addr;
		if (ifa->flags & IFA_F_TENTATIVE)
			__set_bit(idx, mvmvif->tentative_addrs);
		idx++;
		if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX)
			break;
	}
	read_unlock_bh(&idev->lock);

	mvmvif->num_target_ipv6_addrs = idx;
}
#endif

void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif, int idx)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	mvmvif->tx_key_idx = idx;
}

static void iwl_mvm_convert_p1k(u16 *p1k, __le16 *out)
{
	int i;

	for (i = 0; i < IWL_P1K_SIZE; i++)
		out[i] = cpu_to_le16(p1k[i]);
}

static const u8 *iwl_mvm_find_max_pn(struct ieee80211_key_conf *key,
				     struct iwl_mvm_key_pn *ptk_pn,
				     struct ieee80211_key_seq *seq,
				     int tid, int queues)
{
	const u8 *ret = seq->ccmp.pn;
	int i;

	/* get the PN from mac80211, used on the default queue */
	ieee80211_get_key_rx_seq(key, tid, seq);

	/* and use the internal data for the other queues */
	for (i = 1; i < queues; i++) {
		const u8 *tmp = ptk_pn->q[i].pn[tid];

		if (memcmp(ret, tmp, IEEE80211_CCMP_PN_LEN) <= 0)
			ret = tmp;
	}

	return ret;
}

struct wowlan_key_data {
	struct iwl_wowlan_rsc_tsc_params_cmd *rsc_tsc;
	struct iwl_wowlan_tkip_params_cmd *tkip;
	bool error, use_rsc_tsc, use_tkip, configure_keys;
	int wep_key_idx;
};

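/*
 * Key iterator used on suspend: called once per key installed on the vif
 * (via ieee80211_iter_keys() or iwl_mvm_iter_d0i3_ap_keys() below).  It
 * collects the TX/RX sequence counters and TKIP material the WoWLAN
 * firmware needs into @_data and, when data->configure_keys is set, also
 * programs the keys themselves into the D3 firmware.
 */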
static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					struct ieee80211_sta *sta,
					struct ieee80211_key_conf *key,
					void *_data)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct wowlan_key_data *data = _data;
	struct aes_sc *aes_sc, *aes_tx_sc = NULL;
	struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL;
	struct iwl_p1k_cache *rx_p1ks;
	u8 *rx_mic_key;
	struct ieee80211_key_seq seq;
	u32 cur_rx_iv32 = 0;
	u16 p1k[IWL_P1K_SIZE];
	int ret, i;

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104: { /* hack it for now */
		struct {
			struct iwl_mvm_wep_key_cmd wep_key_cmd;
			struct iwl_mvm_wep_key wep_key;
		} __packed wkc = {
			.wep_key_cmd.mac_id_n_color =
				cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
								mvmvif->color)),
			.wep_key_cmd.num_keys = 1,
			/* firmware sets STA_KEY_FLG_WEP_13BYTES */
			.wep_key_cmd.decryption_type = STA_KEY_FLG_WEP,
			.wep_key.key_index = key->keyidx,
			.wep_key.key_size = key->keylen,
		};

		/*
		 * This will fail -- the key functions don't support setting
		 * pairwise WEP keys. However, that's better than silently
		 * failing WoWLAN. Or maybe not?
		 */
		if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
			break;

		memcpy(&wkc.wep_key.key[3], key->key, key->keylen);
		if (key->keyidx == mvmvif->tx_key_idx) {
			/* TX key must be at offset 0 */
			wkc.wep_key.key_offset = 0;
		} else {
			/* others start at 1 */
			data->wep_key_idx++;
			wkc.wep_key.key_offset = data->wep_key_idx;
		}

		if (data->configure_keys) {
			mutex_lock(&mvm->mutex);
			ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, 0,
						   sizeof(wkc), &wkc);
			data->error = ret != 0;

			mvm->ptk_ivlen = key->iv_len;
			mvm->ptk_icvlen = key->icv_len;
			mvm->gtk_ivlen = key->iv_len;
			mvm->gtk_icvlen = key->icv_len;
			mutex_unlock(&mvm->mutex);
		}

		/* don't upload key again */
		return;
	}
	default:
		data->error = true;
		return;
	case WLAN_CIPHER_SUITE_AES_CMAC:
		/*
		 * Ignore CMAC keys -- the WoWLAN firmware doesn't support them
		 * but we also shouldn't abort suspend due to that. It does have
		 * support for the IGTK key renewal, but doesn't really use the
		 * IGTK for anything. This means we could spuriously wake up or
		 * be deauthenticated, but that was considered acceptable.
		 */
		return;
	case WLAN_CIPHER_SUITE_TKIP:
		if (sta) {
			u64 pn64;

			tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc;
			tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc;

			rx_p1ks = data->tkip->rx_uni;

			pn64 = atomic64_read(&key->tx_pn);
			tkip_tx_sc->iv16 = cpu_to_le16(TKIP_PN_TO_IV16(pn64));
			tkip_tx_sc->iv32 = cpu_to_le32(TKIP_PN_TO_IV32(pn64));

			ieee80211_get_tkip_p1k_iv(key, TKIP_PN_TO_IV32(pn64),
						  p1k);
			iwl_mvm_convert_p1k(p1k, data->tkip->tx.p1k);

			memcpy(data->tkip->mic_keys.tx,
			       &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);

			rx_mic_key = data->tkip->mic_keys.rx_unicast;
		} else {
			tkip_sc =
				data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc;
			rx_p1ks = data->tkip->rx_multi;
			rx_mic_key = data->tkip->mic_keys.rx_mcast;
		}

		/*
		 * For non-QoS this relies on the fact that both the uCode and
		 * mac80211 use TID 0 (as they need to, to avoid replay attacks)
		 * for checking the IV in the frames.
		 */
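		/*
		 * Copy the per-TID RX sequence counters and remember the
		 * highest IV32 seen; it is used below to derive the phase-1
		 * keys for IV32 and IV32 + 1 that are handed to the firmware.
		 */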
		for (i = 0; i < IWL_NUM_RSC; i++) {
			ieee80211_get_key_rx_seq(key, i, &seq);
			tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16);
			tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32);
			/* wrapping isn't allowed, AP must rekey */
			if (seq.tkip.iv32 > cur_rx_iv32)
				cur_rx_iv32 = seq.tkip.iv32;
		}

		ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid,
					  cur_rx_iv32, p1k);
		iwl_mvm_convert_p1k(p1k, rx_p1ks[0].p1k);
		ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid,
					  cur_rx_iv32 + 1, p1k);
		iwl_mvm_convert_p1k(p1k, rx_p1ks[1].p1k);

		memcpy(rx_mic_key,
		       &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
		       IWL_MIC_KEY_SIZE);

		data->use_tkip = true;
		data->use_rsc_tsc = true;
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		if (sta) {
			u64 pn64;

			aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
			aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;

			pn64 = atomic64_read(&key->tx_pn);
			aes_tx_sc->pn = cpu_to_le64(pn64);
		} else {
			aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
		}

		/*
		 * For non-QoS this relies on the fact that both the uCode and
		 * mac80211/our RX code use TID 0 for checking the PN.
		 */
		if (sta && iwl_mvm_has_new_rx_api(mvm)) {
			struct iwl_mvm_sta *mvmsta;
			struct iwl_mvm_key_pn *ptk_pn;
			const u8 *pn;

			mvmsta = iwl_mvm_sta_from_mac80211(sta);
			ptk_pn = rcu_dereference_protected(
					mvmsta->ptk_pn[key->keyidx],
					lockdep_is_held(&mvm->mutex));
			if (WARN_ON(!ptk_pn))
				break;

			for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
				pn = iwl_mvm_find_max_pn(key, ptk_pn, &seq, i,
						mvm->trans->num_rx_queues);
				aes_sc[i].pn = cpu_to_le64((u64)pn[5] |
							   ((u64)pn[4] << 8) |
							   ((u64)pn[3] << 16) |
							   ((u64)pn[2] << 24) |
							   ((u64)pn[1] << 32) |
							   ((u64)pn[0] << 40));
			}
		} else {
			for (i = 0; i < IWL_NUM_RSC; i++) {
				u8 *pn = seq.ccmp.pn;

				ieee80211_get_key_rx_seq(key, i, &seq);
				aes_sc[i].pn = cpu_to_le64((u64)pn[5] |
							   ((u64)pn[4] << 8) |
							   ((u64)pn[3] << 16) |
							   ((u64)pn[2] << 24) |
							   ((u64)pn[1] << 32) |
							   ((u64)pn[0] << 40));
			}
		}
		data->use_rsc_tsc = true;
		break;
	}

	if (data->configure_keys) {
		mutex_lock(&mvm->mutex);
		/*
		 * The D3 firmware hardcodes the key offset 0 as the key it
		 * uses to transmit packets to the AP, i.e. the PTK.
		 */
		if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
			mvm->ptk_ivlen = key->iv_len;
			mvm->ptk_icvlen = key->icv_len;
			ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 0);
		} else {
			/*
			 * firmware only supports TSC/RSC for a single key,
			 * so if there are multiple keep overwriting them
			 * with new ones -- this relies on mac80211 doing
			 * list_add_tail().
			 */
378 */ 379 mvm->gtk_ivlen = key->iv_len; 380 mvm->gtk_icvlen = key->icv_len; 381 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 1); 382 } 383 mutex_unlock(&mvm->mutex); 384 data->error = ret != 0; 385 } 386 } 387 388 static int iwl_mvm_send_patterns(struct iwl_mvm *mvm, 389 struct cfg80211_wowlan *wowlan) 390 { 391 struct iwl_wowlan_patterns_cmd *pattern_cmd; 392 struct iwl_host_cmd cmd = { 393 .id = WOWLAN_PATTERNS, 394 .dataflags[0] = IWL_HCMD_DFL_NOCOPY, 395 }; 396 int i, err; 397 398 if (!wowlan->n_patterns) 399 return 0; 400 401 cmd.len[0] = sizeof(*pattern_cmd) + 402 wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern); 403 404 pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL); 405 if (!pattern_cmd) 406 return -ENOMEM; 407 408 pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns); 409 410 for (i = 0; i < wowlan->n_patterns; i++) { 411 int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8); 412 413 memcpy(&pattern_cmd->patterns[i].mask, 414 wowlan->patterns[i].mask, mask_len); 415 memcpy(&pattern_cmd->patterns[i].pattern, 416 wowlan->patterns[i].pattern, 417 wowlan->patterns[i].pattern_len); 418 pattern_cmd->patterns[i].mask_size = mask_len; 419 pattern_cmd->patterns[i].pattern_size = 420 wowlan->patterns[i].pattern_len; 421 } 422 423 cmd.data[0] = pattern_cmd; 424 err = iwl_mvm_send_cmd(mvm, &cmd); 425 kfree(pattern_cmd); 426 return err; 427 } 428 429 static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 430 struct ieee80211_sta *ap_sta) 431 { 432 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 433 struct ieee80211_chanctx_conf *ctx; 434 u8 chains_static, chains_dynamic; 435 struct cfg80211_chan_def chandef; 436 int ret, i; 437 struct iwl_binding_cmd binding_cmd = {}; 438 struct iwl_time_quota_cmd quota_cmd = {}; 439 struct iwl_time_quota_data *quota; 440 u32 status; 441 int size; 442 443 if (fw_has_capa(&mvm->fw->ucode_capa, 444 IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT)) { 445 size = sizeof(binding_cmd); 446 if (mvmvif->phy_ctxt->channel->band == NL80211_BAND_2GHZ || 447 !iwl_mvm_is_cdb_supported(mvm)) 448 binding_cmd.lmac_id = cpu_to_le32(IWL_LMAC_24G_INDEX); 449 else 450 binding_cmd.lmac_id = cpu_to_le32(IWL_LMAC_5G_INDEX); 451 } else { 452 size = IWL_BINDING_CMD_SIZE_V1; 453 } 454 455 /* add back the PHY */ 456 if (WARN_ON(!mvmvif->phy_ctxt)) 457 return -EINVAL; 458 459 rcu_read_lock(); 460 ctx = rcu_dereference(vif->chanctx_conf); 461 if (WARN_ON(!ctx)) { 462 rcu_read_unlock(); 463 return -EINVAL; 464 } 465 chandef = ctx->def; 466 chains_static = ctx->rx_chains_static; 467 chains_dynamic = ctx->rx_chains_dynamic; 468 rcu_read_unlock(); 469 470 ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->phy_ctxt, &chandef, 471 chains_static, chains_dynamic); 472 if (ret) 473 return ret; 474 475 /* add back the MAC */ 476 mvmvif->uploaded = false; 477 478 if (WARN_ON(!vif->bss_conf.assoc)) 479 return -EINVAL; 480 481 ret = iwl_mvm_mac_ctxt_add(mvm, vif); 482 if (ret) 483 return ret; 484 485 /* add back binding - XXX refactor? 
	binding_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
						mvmvif->phy_ctxt->color));
	binding_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
	binding_cmd.phy =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
						mvmvif->phy_ctxt->color));
	binding_cmd.macs[0] = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
							      mvmvif->color));
	for (i = 1; i < MAX_MACS_IN_BINDING; i++)
		binding_cmd.macs[i] = cpu_to_le32(FW_CTXT_INVALID);

	status = 0;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD,
					  size, &binding_cmd, &status);
	if (ret) {
		IWL_ERR(mvm, "Failed to add binding: %d\n", ret);
		return ret;
	}

	if (status) {
		IWL_ERR(mvm, "Binding command failed: %u\n", status);
		return -EIO;
	}

	ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false, 0);
	if (ret)
		return ret;
	rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta);

	ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
	if (ret)
		return ret;

	/* and some quota */
	quota = iwl_mvm_quota_cmd_get_quota(mvm, &quota_cmd, 0);
	quota->id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
						mvmvif->phy_ctxt->color));
	quota->quota = cpu_to_le32(IWL_MVM_MAX_QUOTA);
	quota->max_duration = cpu_to_le32(IWL_MVM_MAX_QUOTA);

	for (i = 1; i < MAX_BINDINGS; i++) {
		quota = iwl_mvm_quota_cmd_get_quota(mvm, &quota_cmd, i);
		quota->id_and_color = cpu_to_le32(FW_CTXT_INVALID);
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0,
				   iwl_mvm_quota_cmd_size(mvm), &quota_cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send quota: %d\n", ret);

	if (iwl_mvm_is_lar_supported(mvm) && iwl_mvm_init_fw_regd(mvm))
		IWL_ERR(mvm, "Failed to initialize D3 LAR information\n");

	return 0;
}

static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_nonqos_seq_query_cmd query_cmd = {
		.get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_GET),
		.mac_id_n_color =
			cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
							mvmvif->color)),
	};
	struct iwl_host_cmd cmd = {
		.id = NON_QOS_TX_COUNTER_CMD,
		.flags = CMD_WANT_SKB,
	};
	int err;
	u32 size;

	cmd.data[0] = &query_cmd;
	cmd.len[0] = sizeof(query_cmd);

	err = iwl_mvm_send_cmd(mvm, &cmd);
	if (err)
		return err;

	size = iwl_rx_packet_payload_len(cmd.resp_pkt);
	if (size < sizeof(__le16)) {
		err = -EINVAL;
	} else {
		err = le16_to_cpup((__le16 *)cmd.resp_pkt->data);
		/* firmware returns next, not last-used seqno */
		err = (u16) (err - 0x10);
	}

	iwl_free_resp(&cmd);
	return err;
}

void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_nonqos_seq_query_cmd query_cmd = {
		.get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_SET),
		.mac_id_n_color =
			cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
							mvmvif->color)),
		.value = cpu_to_le16(mvmvif->seqno),
	};

	/* return if called during restart, not resume from D3 */
	if (!mvmvif->seqno_valid)
		return;

	mvmvif->seqno_valid = false;

	if (iwl_mvm_send_cmd_pdu(mvm, NON_QOS_TX_COUNTER_CMD, 0,
				 sizeof(query_cmd), &query_cmd))
		IWL_ERR(mvm, "failed to set non-QoS seqno\n");
}

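/*
 * Switch from the runtime firmware image to the D3/WoWLAN one: stop any
 * regular scan, stop the device and load the D3 firmware, keeping the
 * HW-restart bit set since we are about to reload and reprogram the
 * firmware manually.
 */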
static int
iwl_mvm_switch_to_d3(struct iwl_mvm *mvm)
{
	iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);

	iwl_mvm_stop_device(mvm);
	/*
	 * Set the HW restart bit -- this is mostly true as we're
	 * going to load new firmware and reprogram that, though
	 * the reprogramming is going to be manual to avoid adding
	 * all the MACs that aren't supported.
	 * We don't have to clear up everything though because the
	 * reprogramming is manual. When we resume, we'll actually
	 * go through a proper restart sequence again to switch
	 * back to the runtime firmware image.
	 */
	set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);

	/* the fw is reset, so all the keys are cleared */
	memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));

	mvm->ptk_ivlen = 0;
	mvm->ptk_icvlen = 0;
	mvm->gtk_ivlen = 0;
	mvm->gtk_icvlen = 0;

	return iwl_mvm_load_d3_fw(mvm);
}

static int
iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
			  struct cfg80211_wowlan *wowlan,
			  struct iwl_wowlan_config_cmd *wowlan_config_cmd,
			  struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif,
			  struct ieee80211_sta *ap_sta)
{
	int ret;
	struct iwl_mvm_sta *mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);

	/* TODO: wowlan_config_cmd->wowlan_ba_teardown_tids */

	wowlan_config_cmd->is_11n_connection =
					ap_sta->ht_cap.ht_supported;
	wowlan_config_cmd->flags = ENABLE_L3_FILTERING |
		ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING;

	/* Query the last used seqno and set it */
	ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
	if (ret < 0)
		return ret;

	wowlan_config_cmd->non_qos_seq = cpu_to_le16(ret);

	iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, wowlan_config_cmd);

	if (wowlan->disconnect)
		wowlan_config_cmd->wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
				    IWL_WOWLAN_WAKEUP_LINK_CHANGE);
	if (wowlan->magic_pkt)
		wowlan_config_cmd->wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_MAGIC_PACKET);
	if (wowlan->gtk_rekey_failure)
		wowlan_config_cmd->wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
	if (wowlan->eap_identity_req)
		wowlan_config_cmd->wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ);
	if (wowlan->four_way_handshake)
		wowlan_config_cmd->wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
	if (wowlan->n_patterns)
		wowlan_config_cmd->wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_PATTERN_MATCH);

	if (wowlan->rfkill_release)
		wowlan_config_cmd->wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);

	if (wowlan->tcp) {
		/*
		 * Set the "link change" (really "link lost") flag as well
		 * since that implies losing the TCP connection.
		 */
		wowlan_config_cmd->wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS |
				    IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE |
				    IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET |
				    IWL_WOWLAN_WAKEUP_LINK_CHANGE);
	}

	if (wowlan->any) {
		wowlan_config_cmd->wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
				    IWL_WOWLAN_WAKEUP_LINK_CHANGE |
				    IWL_WOWLAN_WAKEUP_RX_FRAME |
				    IWL_WOWLAN_WAKEUP_BCN_FILTERING);
	}

	return 0;
}

static void
iwl_mvm_iter_d0i3_ap_keys(struct iwl_mvm *mvm,
			  struct ieee80211_vif *vif,
			  void (*iter)(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta,
				       struct ieee80211_key_conf *key,
				       void *data),
			  void *data)
{
	struct ieee80211_sta *ap_sta;

	rcu_read_lock();

	ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[mvm->d0i3_ap_sta_id]);
	if (IS_ERR_OR_NULL(ap_sta))
		goto out;

	ieee80211_iter_keys_rcu(mvm->hw, vif, iter, data);
out:
	rcu_read_unlock();
}

int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
				     struct ieee80211_vif *vif,
				     bool d0i3,
				     u32 cmd_flags)
{
	struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {};
	struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
	struct wowlan_key_data key_data = {
		.configure_keys = !d0i3,
		.use_rsc_tsc = false,
		.tkip = &tkip_cmd,
		.use_tkip = false,
	};
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
	if (!key_data.rsc_tsc)
		return -ENOMEM;

	/*
	 * if we have to configure keys, call ieee80211_iter_keys(),
	 * as we need non-atomic context in order to take the
	 * required locks.
	 * for the d0i3 we can't use ieee80211_iter_keys(), as
	 * taking (almost) any mutex might result in deadlock.
	 */
	if (!d0i3) {
		/*
		 * Note that currently we don't propagate cmd_flags
		 * to the iterator. In case of key_data.configure_keys,
		 * all the configured commands are SYNC, and
		 * iwl_mvm_wowlan_program_keys() will take care of
		 * locking/unlocking mvm->mutex.
		 */
		ieee80211_iter_keys(mvm->hw, vif,
				    iwl_mvm_wowlan_program_keys,
				    &key_data);
	} else {
		iwl_mvm_iter_d0i3_ap_keys(mvm, vif,
					  iwl_mvm_wowlan_program_keys,
					  &key_data);
	}

	if (key_data.error) {
		ret = -EIO;
		goto out;
	}

	if (key_data.use_rsc_tsc) {
		ret = iwl_mvm_send_cmd_pdu(mvm,
					   WOWLAN_TSC_RSC_PARAM, cmd_flags,
					   sizeof(*key_data.rsc_tsc),
					   key_data.rsc_tsc);
		if (ret)
			goto out;
	}

	if (key_data.use_tkip &&
	    !fw_has_api(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_API_TKIP_MIC_KEYS)) {
		ret = iwl_mvm_send_cmd_pdu(mvm,
					   WOWLAN_TKIP_PARAM,
					   cmd_flags, sizeof(tkip_cmd),
					   &tkip_cmd);
		if (ret)
			goto out;
	}

	/* configure rekey data only if offloaded rekey is supported (d3) */
	if (mvmvif->rekey_data.valid && !d0i3) {
		memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd));
		memcpy(kek_kck_cmd.kck, mvmvif->rekey_data.kck,
		       NL80211_KCK_LEN);
		kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN);
		memcpy(kek_kck_cmd.kek, mvmvif->rekey_data.kek,
		       NL80211_KEK_LEN);
		kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN);
		kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr;

		ret = iwl_mvm_send_cmd_pdu(mvm,
					   WOWLAN_KEK_KCK_MATERIAL, cmd_flags,
					   sizeof(kek_kck_cmd),
					   &kek_kck_cmd);
		if (ret)
			goto out;
	}
	ret = 0;
out:
	kfree(key_data.rsc_tsc);
	return ret;
}

static int
iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
		      struct cfg80211_wowlan *wowlan,
		      struct iwl_wowlan_config_cmd *wowlan_config_cmd,
		      struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif,
		      struct ieee80211_sta *ap_sta)
{
	int ret;
	bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
					 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);

	if (!unified_image) {
		ret = iwl_mvm_switch_to_d3(mvm);
		if (ret)
			return ret;

		ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta);
		if (ret)
			return ret;
	}

	if (!iwlwifi_mod_params.swcrypto) {
		/*
		 * This needs to be unlocked due to lock ordering
		 * constraints. Since we're in the suspend path
		 * that isn't really a problem though.
		 */
		mutex_unlock(&mvm->mutex);
		ret = iwl_mvm_wowlan_config_key_params(mvm, vif, false,
						       CMD_ASYNC);
		mutex_lock(&mvm->mutex);
		if (ret)
			return ret;
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0,
				   sizeof(*wowlan_config_cmd),
				   wowlan_config_cmd);
	if (ret)
		return ret;

	ret = iwl_mvm_send_patterns(mvm, wowlan);
	if (ret)
		return ret;

	return iwl_mvm_send_proto_offload(mvm, vif, false, true, 0);
}

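/*
 * Net-detect configuration: only the (optional) rfkill wakeup filter is
 * programmed; the actual wake-up comes from the scheduled scan started
 * below with the net-detect parameters.  The match sets and channel list
 * are saved so the wake-up reasons can be reported against them on resume.
 */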
static int
iwl_mvm_netdetect_config(struct iwl_mvm *mvm,
			 struct cfg80211_wowlan *wowlan,
			 struct cfg80211_sched_scan_request *nd_config,
			 struct ieee80211_vif *vif)
{
	struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
	int ret;
	bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
					 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);

	if (!unified_image) {
		ret = iwl_mvm_switch_to_d3(mvm);
		if (ret)
			return ret;
	} else {
		/* In theory, we wouldn't have to stop a running sched
		 * scan in order to start another one (for
		 * net-detect).  But in practice this doesn't seem to
		 * work properly, so stop any running sched_scan now.
		 */
		ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
		if (ret)
			return ret;
	}

	/* rfkill release can be either for wowlan or netdetect */
	if (wowlan->rfkill_release)
		wowlan_config_cmd.wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);

	ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0,
				   sizeof(wowlan_config_cmd),
				   &wowlan_config_cmd);
	if (ret)
		return ret;

	ret = iwl_mvm_sched_scan_start(mvm, vif, nd_config, &mvm->nd_ies,
				       IWL_MVM_SCAN_NETDETECT);
	if (ret)
		return ret;

	if (WARN_ON(mvm->nd_match_sets || mvm->nd_channels))
		return -EBUSY;

	/* save the sched scan matchsets... */
	if (nd_config->n_match_sets) {
		mvm->nd_match_sets = kmemdup(nd_config->match_sets,
					     sizeof(*nd_config->match_sets) *
					     nd_config->n_match_sets,
					     GFP_KERNEL);
		if (mvm->nd_match_sets)
			mvm->n_nd_match_sets = nd_config->n_match_sets;
	}

	/* ...and the sched scan channels for later reporting */
	mvm->nd_channels = kmemdup(nd_config->channels,
				   sizeof(*nd_config->channels) *
				   nd_config->n_channels,
				   GFP_KERNEL);
	if (mvm->nd_channels)
		mvm->n_nd_channels = nd_config->n_channels;

	return 0;
}

static void iwl_mvm_free_nd(struct iwl_mvm *mvm)
{
	kfree(mvm->nd_match_sets);
	mvm->nd_match_sets = NULL;
	mvm->n_nd_match_sets = 0;
	kfree(mvm->nd_channels);
	mvm->nd_channels = NULL;
	mvm->n_nd_channels = 0;
}

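/*
 * Core D3 suspend path: depending on whether we are associated, this either
 * programs the full WoWLAN configuration or sets up net-detect, updates the
 * device/MAC power settings and finally sends D3_CONFIG_CMD, which switches
 * the firmware state.
 */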
static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
			     struct cfg80211_wowlan *wowlan,
			     bool test)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct ieee80211_vif *vif = NULL;
	struct iwl_mvm_vif *mvmvif = NULL;
	struct ieee80211_sta *ap_sta = NULL;
	struct iwl_d3_manager_config d3_cfg_cmd_data = {
		/*
		 * Program the minimum sleep time to 10 seconds, as many
		 * platforms have issues processing a wakeup signal while
		 * still being in the process of suspending.
		 */
		.min_sleep_time = cpu_to_le32(10 * 1000 * 1000),
	};
	struct iwl_host_cmd d3_cfg_cmd = {
		.id = D3_CONFIG_CMD,
		.flags = CMD_WANT_SKB,
		.data[0] = &d3_cfg_cmd_data,
		.len[0] = sizeof(d3_cfg_cmd_data),
	};
	int ret;
	int len __maybe_unused;
	bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
					 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);

	if (!wowlan) {
		/*
		 * mac80211 shouldn't get here, but for D3 test
		 * it doesn't warrant a warning
		 */
		WARN_ON(!test);
		return -EINVAL;
	}

	mutex_lock(&mvm->mutex);

	vif = iwl_mvm_get_bss_vif(mvm);
	if (IS_ERR_OR_NULL(vif)) {
		ret = 1;
		goto out_noreset;
	}

	mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (mvmvif->ap_sta_id == IWL_MVM_INVALID_STA) {
		/* if we're not associated, this must be netdetect */
		if (!wowlan->nd_config) {
			ret = 1;
			goto out_noreset;
		}

		ret = iwl_mvm_netdetect_config(
			mvm, wowlan, wowlan->nd_config, vif);
		if (ret)
			goto out;

		mvm->net_detect = true;
	} else {
		struct iwl_wowlan_config_cmd wowlan_config_cmd = {};

		ap_sta = rcu_dereference_protected(
			mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
			lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(ap_sta)) {
			ret = -EINVAL;
			goto out_noreset;
		}

		ret = iwl_mvm_get_wowlan_config(mvm, wowlan, &wowlan_config_cmd,
						vif, mvmvif, ap_sta);
		if (ret)
			goto out_noreset;
		ret = iwl_mvm_wowlan_config(mvm, wowlan, &wowlan_config_cmd,
					    vif, mvmvif, ap_sta);
		if (ret)
			goto out;

		mvm->net_detect = false;
	}

	ret = iwl_mvm_power_update_device(mvm);
	if (ret)
		goto out;

	ret = iwl_mvm_power_update_mac(mvm);
	if (ret)
		goto out;

#ifdef CONFIG_IWLWIFI_DEBUGFS
	if (mvm->d3_wake_sysassert)
		d3_cfg_cmd_data.wakeup_flags |=
			cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR);
#endif

	/*
	 * TODO: this is needed because the firmware is not stopping
	 * the recording automatically before entering D3. This can
	 * be removed once the FW starts doing that.
	 */
	iwl_fw_dbg_stop_recording(&mvm->fwrt);

	/* must be last -- this switches firmware state */
	ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd);
	if (ret)
		goto out;
#ifdef CONFIG_IWLWIFI_DEBUGFS
	len = iwl_rx_packet_payload_len(d3_cfg_cmd.resp_pkt);
	if (len >= sizeof(u32)) {
		mvm->d3_test_pme_ptr =
			le32_to_cpup((__le32 *)d3_cfg_cmd.resp_pkt->data);
	}
#endif
	iwl_free_resp(&d3_cfg_cmd);

	clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);

	iwl_trans_d3_suspend(mvm->trans, test, !unified_image);
out:
	if (ret < 0) {
		iwl_mvm_free_nd(mvm);

		if (!unified_image) {
			iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
			if (mvm->fw_restart > 0) {
				mvm->fw_restart--;
				ieee80211_restart_hw(mvm->hw);
			}
		}
	}
out_noreset:
	mutex_unlock(&mvm->mutex);

	return ret;
}

static int iwl_mvm_enter_d0i3_sync(struct iwl_mvm *mvm)
{
	struct iwl_notification_wait wait_d3;
	static const u16 d3_notif[] = { D3_CONFIG_CMD };
	int ret;

	iwl_init_notification_wait(&mvm->notif_wait, &wait_d3,
				   d3_notif, ARRAY_SIZE(d3_notif),
				   NULL, NULL);

	ret = iwl_mvm_enter_d0i3(mvm->hw->priv);
	if (ret)
		goto remove_notif;

	ret = iwl_wait_notification(&mvm->notif_wait, &wait_d3, HZ);
	WARN_ON_ONCE(ret);
	return ret;

remove_notif:
	iwl_remove_notification(&mvm->notif_wait, &wait_d3);
	return ret;
}

int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_trans *trans = mvm->trans;
	int ret;

	/* make sure the d0i3 exit work is not pending */
	flush_work(&mvm->d0i3_exit_work);
	iwl_mvm_pause_tcm(mvm, true);

	iwl_fw_runtime_suspend(&mvm->fwrt);

	ret = iwl_trans_suspend(trans);
	if (ret)
		return ret;

	if (wowlan->any) {
		trans->system_pm_mode = IWL_PLAT_PM_MODE_D0I3;

		if (iwl_mvm_enter_d0i3_on_suspend(mvm)) {
			ret = iwl_mvm_enter_d0i3_sync(mvm);

			if (ret)
				return ret;
		}

		mutex_lock(&mvm->d0i3_suspend_mutex);
		__set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
		mutex_unlock(&mvm->d0i3_suspend_mutex);

		iwl_trans_d3_suspend(trans, false, false);

		return 0;
	}

	trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;

	return __iwl_mvm_suspend(hw, wowlan, false);
}

/* converted data from the different status responses */
struct iwl_wowlan_status_data {
	u16 pattern_number;
	u16 qos_seq_ctr[8];
	u32 wakeup_reasons;
	u32 wake_packet_length;
	u32 wake_packet_bufsize;
	const u8 *wake_packet;
};

static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
					  struct ieee80211_vif *vif,
					  struct iwl_wowlan_status_data *status)
{
	struct sk_buff *pkt = NULL;
	struct cfg80211_wowlan_wakeup wakeup = {
		.pattern_idx = -1,
	};
	struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
	u32 reasons = status->wakeup_reasons;

	if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) {
		wakeup_report = NULL;
		goto report;
	}

	pm_wakeup_event(mvm->dev, 0);

	if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET)
		wakeup.magic_pkt = true;

	if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN)
		wakeup.pattern_idx =
			status->pattern_number;
	if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
		       IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH))
		wakeup.disconnect = true;

	if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE)
		wakeup.gtk_rekey_failure = true;

	if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
		wakeup.rfkill_release = true;

	if (reasons & IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST)
		wakeup.eap_identity_req = true;

	if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE)
		wakeup.four_way_handshake = true;

	if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS)
		wakeup.tcp_connlost = true;

	if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE)
		wakeup.tcp_nomoretokens = true;

	if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET)
		wakeup.tcp_match = true;

	if (status->wake_packet_bufsize) {
		int pktsize = status->wake_packet_bufsize;
		int pktlen = status->wake_packet_length;
		const u8 *pktdata = status->wake_packet;
		struct ieee80211_hdr *hdr = (void *)pktdata;
		int truncated = pktlen - pktsize;

		/* this would be a firmware bug */
		if (WARN_ON_ONCE(truncated < 0))
			truncated = 0;

		if (ieee80211_is_data(hdr->frame_control)) {
			int hdrlen = ieee80211_hdrlen(hdr->frame_control);
			int ivlen = 0, icvlen = 4; /* also FCS */

			pkt = alloc_skb(pktsize, GFP_KERNEL);
			if (!pkt)
				goto report;

			skb_put_data(pkt, pktdata, hdrlen);
			pktdata += hdrlen;
			pktsize -= hdrlen;

			if (ieee80211_has_protected(hdr->frame_control)) {
				/*
				 * This is unlocked and using gtk_i(c)vlen,
				 * but since everything is under RTNL still
				 * that's not really a problem - changing
				 * it would be difficult.
				 */
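				/*
				 * Group-addressed frames were protected with
				 * the GTK, everything else with the PTK, so
				 * pick the IV/ICV lengths saved at suspend
				 * time accordingly.
				 */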
				if (is_multicast_ether_addr(hdr->addr1)) {
					ivlen = mvm->gtk_ivlen;
					icvlen += mvm->gtk_icvlen;
				} else {
					ivlen = mvm->ptk_ivlen;
					icvlen += mvm->ptk_icvlen;
				}
			}

			/* if truncated, FCS/ICV is (partially) gone */
			if (truncated >= icvlen) {
				truncated -= icvlen;
				icvlen = 0;
			} else {
				icvlen -= truncated;
				truncated = 0;
			}

			pktsize -= ivlen + icvlen;
			pktdata += ivlen;

			skb_put_data(pkt, pktdata, pktsize);

			if (ieee80211_data_to_8023(pkt, vif->addr, vif->type))
				goto report;
			wakeup.packet = pkt->data;
			wakeup.packet_present_len = pkt->len;
			wakeup.packet_len = pkt->len - truncated;
			wakeup.packet_80211 = false;
		} else {
			int fcslen = 4;

			if (truncated >= 4) {
				truncated -= 4;
				fcslen = 0;
			} else {
				fcslen -= truncated;
				truncated = 0;
			}
			pktsize -= fcslen;
			wakeup.packet = status->wake_packet;
			wakeup.packet_present_len = pktsize;
			wakeup.packet_len = pktlen - truncated;
			wakeup.packet_80211 = true;
		}
	}

report:
	ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
	kfree_skb(pkt);
}

static void iwl_mvm_aes_sc_to_seq(struct aes_sc *sc,
				  struct ieee80211_key_seq *seq)
{
	u64 pn;

	pn = le64_to_cpu(sc->pn);
	seq->ccmp.pn[0] = pn >> 40;
	seq->ccmp.pn[1] = pn >> 32;
	seq->ccmp.pn[2] = pn >> 24;
	seq->ccmp.pn[3] = pn >> 16;
	seq->ccmp.pn[4] = pn >> 8;
	seq->ccmp.pn[5] = pn;
}

static void iwl_mvm_tkip_sc_to_seq(struct tkip_sc *sc,
				   struct ieee80211_key_seq *seq)
{
	seq->tkip.iv32 = le32_to_cpu(sc->iv32);
	seq->tkip.iv16 = le16_to_cpu(sc->iv16);
}

static void iwl_mvm_set_aes_rx_seq(struct iwl_mvm *mvm, struct aes_sc *scs,
				   struct ieee80211_sta *sta,
				   struct ieee80211_key_conf *key)
{
	int tid;

	BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS);

	if (sta && iwl_mvm_has_new_rx_api(mvm)) {
		struct iwl_mvm_sta *mvmsta;
		struct iwl_mvm_key_pn *ptk_pn;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);

		ptk_pn = rcu_dereference_protected(mvmsta->ptk_pn[key->keyidx],
						   lockdep_is_held(&mvm->mutex));
		if (WARN_ON(!ptk_pn))
			return;

		for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
			struct ieee80211_key_seq seq = {};
			int i;

			iwl_mvm_aes_sc_to_seq(&scs[tid], &seq);
			ieee80211_set_key_rx_seq(key, tid, &seq);
			for (i = 1; i < mvm->trans->num_rx_queues; i++)
				memcpy(ptk_pn->q[i].pn[tid],
				       seq.ccmp.pn, IEEE80211_CCMP_PN_LEN);
		}
	} else {
		for (tid = 0; tid < IWL_NUM_RSC; tid++) {
			struct ieee80211_key_seq seq = {};

			iwl_mvm_aes_sc_to_seq(&scs[tid], &seq);
			ieee80211_set_key_rx_seq(key, tid, &seq);
		}
	}
}

static void iwl_mvm_set_tkip_rx_seq(struct tkip_sc *scs,
				    struct ieee80211_key_conf *key)
{
	int tid;

	BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS);

	for (tid = 0; tid < IWL_NUM_RSC; tid++) {
		struct ieee80211_key_seq seq = {};

		iwl_mvm_tkip_sc_to_seq(&scs[tid], &seq);
		ieee80211_set_key_rx_seq(key, tid, &seq);
	}
}

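/*
 * Apply the multicast (GTK) RX sequence counters reported in the firmware's
 * WoWLAN status to a key, dispatching on the cipher.
 */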
static void iwl_mvm_set_key_rx_seq(struct iwl_mvm *mvm,
				   struct ieee80211_key_conf *key,
				   struct iwl_wowlan_status *status)
{
	union iwl_all_tsc_rsc *rsc = &status->gtk.rsc.all_tsc_rsc;

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		iwl_mvm_set_aes_rx_seq(mvm, rsc->aes.multicast_rsc, NULL, key);
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		iwl_mvm_set_tkip_rx_seq(rsc->tkip.multicast_rsc, key);
		break;
	default:
		WARN_ON(1);
	}
}

struct iwl_mvm_d3_gtk_iter_data {
	struct iwl_mvm *mvm;
	struct iwl_wowlan_status *status;
	void *last_gtk;
	u32 cipher;
	bool find_phase, unhandled_cipher;
	int num_keys;
};

static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   struct ieee80211_sta *sta,
				   struct ieee80211_key_conf *key,
				   void *_data)
{
	struct iwl_mvm_d3_gtk_iter_data *data = _data;

	if (data->unhandled_cipher)
		return;

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		/* ignore WEP completely, nothing to do */
		return;
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_TKIP:
		/* we support these */
		break;
	default:
		/* everything else (even CMAC for MFP) - disconnect from AP */
		data->unhandled_cipher = true;
		return;
	}

	data->num_keys++;

	/*
	 * pairwise key - update sequence counters only;
	 * note that this assumes no TDLS sessions are active
	 */
	if (sta) {
		struct ieee80211_key_seq seq = {};
		union iwl_all_tsc_rsc *sc = &data->status->gtk.rsc.all_tsc_rsc;

		if (data->find_phase)
			return;

		switch (key->cipher) {
		case WLAN_CIPHER_SUITE_CCMP:
			iwl_mvm_set_aes_rx_seq(data->mvm, sc->aes.unicast_rsc,
					       sta, key);
			atomic64_set(&key->tx_pn, le64_to_cpu(sc->aes.tsc.pn));
			break;
		case WLAN_CIPHER_SUITE_TKIP:
			iwl_mvm_tkip_sc_to_seq(&sc->tkip.tsc, &seq);
			iwl_mvm_set_tkip_rx_seq(sc->tkip.unicast_rsc, key);
			atomic64_set(&key->tx_pn,
				     (u64)seq.tkip.iv16 |
				     ((u64)seq.tkip.iv32 << 16));
			break;
		}

		/* that's it for this key */
		return;
	}

	if (data->find_phase) {
		data->last_gtk = key;
		data->cipher = key->cipher;
		return;
	}

	if (data->status->num_of_gtk_rekeys)
		ieee80211_remove_key(key);
	else if (data->last_gtk == key)
		iwl_mvm_set_key_rx_seq(data->mvm, key, data->status);
}

static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
					  struct ieee80211_vif *vif,
					  struct iwl_wowlan_status *status)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_d3_gtk_iter_data gtkdata = {
		.mvm = mvm,
		.status = status,
	};
	u32 disconnection_reasons =
		IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
		IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;

	if (!status || !vif->bss_conf.bssid)
		return false;

	if (le32_to_cpu(status->wakeup_reasons) & disconnection_reasons)
		return false;

	/* find last GTK that we used initially, if any */
	gtkdata.find_phase = true;
	ieee80211_iter_keys(mvm->hw, vif,
			    iwl_mvm_d3_update_keys, &gtkdata);
	/* not trying to keep connections with MFP/unhandled ciphers */
	if (gtkdata.unhandled_cipher)
		return false;
	if (!gtkdata.num_keys)
		goto out;
	if (!gtkdata.last_gtk)
		return false;

	/*
	 * invalidate all other GTKs that might still exist and update
	 * the one that we used
	 */
	gtkdata.find_phase = false;
	ieee80211_iter_keys(mvm->hw, vif,
			    iwl_mvm_d3_update_keys, &gtkdata);

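	/*
	 * At this point last_gtk/cipher identify the GTK that was in use.
	 * If the firmware performed GTK rekeys while we were asleep, the new
	 * GTK it derived is installed below; otherwise only the RX counters
	 * of the existing GTK were updated above.
	 */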
	if (status->num_of_gtk_rekeys) {
		struct ieee80211_key_conf *key;
		struct {
			struct ieee80211_key_conf conf;
			u8 key[32];
		} conf = {
			.conf.cipher = gtkdata.cipher,
			.conf.keyidx = status->gtk.key_index,
		};

		switch (gtkdata.cipher) {
		case WLAN_CIPHER_SUITE_CCMP:
			conf.conf.keylen = WLAN_KEY_LEN_CCMP;
			memcpy(conf.conf.key, status->gtk.decrypt_key,
			       WLAN_KEY_LEN_CCMP);
			break;
		case WLAN_CIPHER_SUITE_TKIP:
			conf.conf.keylen = WLAN_KEY_LEN_TKIP;
			memcpy(conf.conf.key, status->gtk.decrypt_key, 16);
			/* leave TX MIC key zeroed, we don't use it anyway */
			memcpy(conf.conf.key +
			       NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY,
			       status->gtk.tkip_mic_key, 8);
			break;
		}

		key = ieee80211_gtk_rekey_add(vif, &conf.conf);
		if (IS_ERR(key))
			return false;
		iwl_mvm_set_key_rx_seq(mvm, key, status);
	}

	if (status->num_of_gtk_rekeys) {
		__be64 replay_ctr =
			cpu_to_be64(le64_to_cpu(status->replay_ctr));
		ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid,
					   (void *)&replay_ctr, GFP_KERNEL);
	}

out:
	mvmvif->seqno_valid = true;
	/* +0x10 because the set API expects next-to-use, not last-used */
	mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10;

	return true;
}

static struct iwl_wowlan_status *
iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	u32 base = mvm->error_event_table[0];
	struct error_table_start {
		/* cf. struct iwl_error_event_table */
		u32 valid;
		u32 error_id;
	} err_info;
	struct iwl_host_cmd cmd = {
		.id = WOWLAN_GET_STATUSES,
		.flags = CMD_WANT_SKB,
	};
	struct iwl_wowlan_status *status, *fw_status;
	int ret, len, status_size;

	iwl_trans_read_mem_bytes(mvm->trans, base,
				 &err_info, sizeof(err_info));

	if (err_info.valid) {
		IWL_INFO(mvm, "error table is valid (%d) with error (%d)\n",
			 err_info.valid, err_info.error_id);
		if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
			struct cfg80211_wowlan_wakeup wakeup = {
				.rfkill_release = true,
			};
			ieee80211_report_wowlan_wakeup(vif, &wakeup,
						       GFP_KERNEL);
		}
		return ERR_PTR(-EIO);
	}

	/* only for tracing for now */
	ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0, 0, NULL);
	if (ret)
		IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret) {
		IWL_ERR(mvm, "failed to query status (%d)\n", ret);
		return ERR_PTR(ret);
	}

	status_size = sizeof(*fw_status);

	len = iwl_rx_packet_payload_len(cmd.resp_pkt);
	if (len < status_size) {
		IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
		fw_status = ERR_PTR(-EIO);
		goto out_free_resp;
	}

	status = (void *)cmd.resp_pkt->data;
	if (len != (status_size +
		    ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4))) {
		IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
		fw_status = ERR_PTR(-EIO);
		goto out_free_resp;
	}

	fw_status = kmemdup(status, len, GFP_KERNEL);

out_free_resp:
	iwl_free_resp(&cmd);
	return fw_status;
}

/* releases the MVM mutex */
static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
					 struct ieee80211_vif *vif)
{
	struct iwl_wowlan_status_data status;
	struct iwl_wowlan_status *fw_status;
	int i;
	bool keep;
	struct iwl_mvm_sta *mvm_ap_sta;

	fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
	if (IS_ERR_OR_NULL(fw_status))
		goto out_unlock;

	status.pattern_number = le16_to_cpu(fw_status->pattern_number);
	for (i = 0; i < 8; i++)
		status.qos_seq_ctr[i] =
			le16_to_cpu(fw_status->qos_seq_ctr[i]);
	status.wakeup_reasons = le32_to_cpu(fw_status->wakeup_reasons);
	status.wake_packet_length =
		le32_to_cpu(fw_status->wake_packet_length);
	status.wake_packet_bufsize =
		le32_to_cpu(fw_status->wake_packet_bufsize);
	status.wake_packet = fw_status->wake_packet;

	/* still at hard-coded place 0 for D3 image */
	mvm_ap_sta = iwl_mvm_sta_from_staid_protected(mvm, 0);
	if (!mvm_ap_sta)
		goto out_free;

	for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
		u16 seq = status.qos_seq_ctr[i];
		/* firmware stores last-used value, we store next value */
		seq += 0x10;
		mvm_ap_sta->tid_data[i].seq_number = seq;
	}

	/* now we have all the data we need, unlock to avoid mac80211 issues */
	mutex_unlock(&mvm->mutex);

	iwl_mvm_report_wakeup_reasons(mvm, vif, &status);

	keep = iwl_mvm_setup_connection_keep(mvm, vif, fw_status);

	kfree(fw_status);
	return keep;

out_free:
	kfree(fw_status);
out_unlock:
	mutex_unlock(&mvm->mutex);
	return false;
}

void iwl_mvm_d0i3_update_keys(struct iwl_mvm *mvm,
			      struct ieee80211_vif *vif,
			      struct iwl_wowlan_status *status)
{
	struct iwl_mvm_d3_gtk_iter_data gtkdata = {
		.mvm = mvm,
		.status = status,
	};

	/*
	 * rekey handling requires taking locks that can't be taken now.
	 * however, d0i3 doesn't offload rekey, so we're fine.
	 */
	if (WARN_ON_ONCE(status->num_of_gtk_rekeys))
		return;

	/* find last GTK that we used initially, if any */
	gtkdata.find_phase = true;
	iwl_mvm_iter_d0i3_ap_keys(mvm, vif, iwl_mvm_d3_update_keys, &gtkdata);

	gtkdata.find_phase = false;
	iwl_mvm_iter_d0i3_ap_keys(mvm, vif, iwl_mvm_d3_update_keys, &gtkdata);
}

struct iwl_mvm_nd_query_results {
	u32 matched_profiles;
	struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES];
};

static int
iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm,
				struct iwl_mvm_nd_query_results *results)
{
	struct iwl_scan_offload_profiles_query *query;
	struct iwl_host_cmd cmd = {
		.id = SCAN_OFFLOAD_PROFILES_QUERY_CMD,
		.flags = CMD_WANT_SKB,
	};
	int ret, len;

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret) {
		IWL_ERR(mvm, "failed to query matched profiles (%d)\n", ret);
		return ret;
	}

	len = iwl_rx_packet_payload_len(cmd.resp_pkt);
	if (len < sizeof(*query)) {
		IWL_ERR(mvm, "Invalid scan offload profiles query response!\n");
		ret = -EIO;
		goto out_free_resp;
	}

	query = (void *)cmd.resp_pkt->data;

	results->matched_profiles = le32_to_cpu(query->matched_profiles);
	memcpy(results->matches, query->matches, sizeof(results->matches));

#ifdef CONFIG_IWLWIFI_DEBUGFS
	mvm->last_netdetect_scans = le32_to_cpu(query->n_scans_done);
#endif

out_free_resp:
	iwl_free_resp(&cmd);
	return ret;
}

static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
					    struct ieee80211_vif *vif)
{
	struct cfg80211_wowlan_nd_info *net_detect = NULL;
	struct cfg80211_wowlan_wakeup wakeup = {
		.pattern_idx = -1,
	};
	struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
	struct iwl_mvm_nd_query_results query;
	struct iwl_wowlan_status *fw_status;
	unsigned long matched_profiles;
	u32 reasons = 0;
	int i, j, n_matches, ret;

	fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
	if (!IS_ERR_OR_NULL(fw_status)) {
		reasons = le32_to_cpu(fw_status->wakeup_reasons);
		kfree(fw_status);
	}

	if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
		wakeup.rfkill_release = true;

	if (reasons != IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS)
		goto out;

	ret = iwl_mvm_netdetect_query_results(mvm, &query);
	if (ret || !query.matched_profiles) {
		wakeup_report = NULL;
		goto out;
	}

	matched_profiles = query.matched_profiles;
	if (mvm->n_nd_match_sets) {
		n_matches = hweight_long(matched_profiles);
	} else {
		IWL_ERR(mvm, "no net detect match information available\n");
		n_matches = 0;
	}

	net_detect = kzalloc(sizeof(*net_detect) +
			     (n_matches * sizeof(net_detect->matches[0])),
			     GFP_KERNEL);
	if (!net_detect || !n_matches)
		goto out_report_nd;

	for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) {
		struct iwl_scan_offload_profile_match *fw_match;
		struct cfg80211_wowlan_nd_match *match;
		int idx, n_channels = 0;

		fw_match = &query.matches[i];

		for (j = 0; j < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; j++)
			n_channels += hweight8(fw_match->matching_channels[j]);

		match = kzalloc(sizeof(*match) +
				(n_channels * sizeof(*match->channels)),
				GFP_KERNEL);
		if (!match)
			goto out_report_nd;

		net_detect->matches[net_detect->n_matches++] = match;

		/* We inverted the order of the SSIDs in the scan
		 * request, so invert the index here.
		 */
		idx = mvm->n_nd_match_sets - i - 1;
		match->ssid.ssid_len = mvm->nd_match_sets[idx].ssid.ssid_len;
		memcpy(match->ssid.ssid, mvm->nd_match_sets[idx].ssid.ssid,
		       match->ssid.ssid_len);

		if (mvm->n_nd_channels < n_channels)
			continue;

		for (j = 0; j < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; j++)
			if (fw_match->matching_channels[j / 8] & (BIT(j % 8)))
				match->channels[match->n_channels++] =
					mvm->nd_channels[j]->center_freq;
	}

out_report_nd:
	wakeup.net_detect = net_detect;
out:
	iwl_mvm_free_nd(mvm);

	mutex_unlock(&mvm->mutex);
	ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);

	if (net_detect) {
		for (i = 0; i < net_detect->n_matches; i++)
			kfree(net_detect->matches[i]);
		kfree(net_detect);
	}
}

static void iwl_mvm_read_d3_sram(struct iwl_mvm *mvm)
{
#ifdef CONFIG_IWLWIFI_DEBUGFS
	const struct fw_img *img = &mvm->fw->img[IWL_UCODE_WOWLAN];
	u32 len = img->sec[IWL_UCODE_SECTION_DATA].len;
	u32 offs = img->sec[IWL_UCODE_SECTION_DATA].offset;

	if (!mvm->store_d3_resume_sram)
		return;

	if (!mvm->d3_resume_sram) {
		mvm->d3_resume_sram = kzalloc(len, GFP_KERNEL);
		if (!mvm->d3_resume_sram)
			return;
	}

	iwl_trans_read_mem_bytes(mvm->trans, offs, mvm->d3_resume_sram, len);
#endif
}

static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
				       struct ieee80211_vif *vif)
{
	/* skip the one we keep connection on */
	if (data == vif)
		return;

	if (vif->type == NL80211_IFTYPE_STATION)
		ieee80211_resume_disconnect(vif);
}

static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
{
	struct ieee80211_vif *vif = NULL;
	int ret = 1;
	enum iwl_d3_status d3_status;
	bool keep = false;
	bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
					 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
	bool d0i3_first = fw_has_capa(&mvm->fw->ucode_capa,
				      IWL_UCODE_TLV_CAPA_D0I3_END_FIRST);

	mutex_lock(&mvm->mutex);

	/* get the BSS vif pointer again */
	vif = iwl_mvm_get_bss_vif(mvm);
	if (IS_ERR_OR_NULL(vif))
		goto err;

	ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !unified_image);
	if (ret)
		goto err;

	if (d3_status != IWL_D3_STATUS_ALIVE) {
		IWL_INFO(mvm, "Device was reset during suspend\n");
		goto err;
	}

	/* query SRAM first in case we want event logging */
	iwl_mvm_read_d3_sram(mvm);

	if (d0i3_first) {
		ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL);
		if (ret < 0) {
			IWL_ERR(mvm, "Failed to send D0I3_END_CMD first (%d)\n",
				ret);
			goto err;
		}
	}

	/*
	 * Query the current location and source from the D3 firmware so we
	 * can play it back when we re-initialize the D0 firmware
	 */
	iwl_mvm_update_changed_regdom(mvm);

	if (!unified_image)
		/* Re-configure default SAR profile */
		iwl_mvm_sar_select_profile(mvm, 1, 1);

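	/*
	 * Both the net-detect and the WoWLAN wake-up reporting paths below
	 * drop mvm->mutex themselves, so the unlock in the error path is
	 * skipped after them.
	 */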
	if (mvm->net_detect) {
		/* If this is a non-unified image, we restart the FW,
		 * so no need to stop the netdetect scan.  If that
		 * fails, continue and try to get the wake-up reasons,
		 * but trigger a HW restart by keeping a failure code
		 * in ret.
		 */
		if (unified_image)
			ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_NETDETECT,
						false);

		iwl_mvm_query_netdetect_reasons(mvm, vif);
		/* has unlocked the mutex, so skip that */
		goto out;
	} else {
		keep = iwl_mvm_query_wakeup_reasons(mvm, vif);
#ifdef CONFIG_IWLWIFI_DEBUGFS
		if (keep)
			mvm->keep_vif = vif;
#endif
		/* has unlocked the mutex, so skip that */
		goto out_iterate;
	}

err:
	iwl_mvm_free_nd(mvm);
	mutex_unlock(&mvm->mutex);

out_iterate:
	if (!test)
		ieee80211_iterate_active_interfaces_rtnl(mvm->hw,
			IEEE80211_IFACE_ITER_NORMAL,
			iwl_mvm_d3_disconnect_iter, keep ? vif : NULL);

out:
	/* no need to reset the device in unified images, if successful */
	if (unified_image && !ret) {
		/* nothing else to do if we already sent D0I3_END_CMD */
		if (d0i3_first)
			return 0;

		ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL);
		if (!ret)
			return 0;
	}

	/*
	 * Reconfigure the device in one of the following cases:
	 * 1. We are not using a unified image
	 * 2. We are using a unified image but had an error while exiting D3
	 */
	set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
	set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status);
	/*
	 * When switching images we return 1, which causes mac80211
	 * to do a reconfig with IEEE80211_RECONFIG_TYPE_RESTART.
	 * This type of reconfig calls iwl_mvm_restart_complete(),
	 * where we unref the IWL_MVM_REF_UCODE_DOWN, so we need
	 * to take the reference here.
	 */
	iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);

	return 1;
}

static int iwl_mvm_resume_d3(struct iwl_mvm *mvm)
{
	iwl_trans_resume(mvm->trans);

	return __iwl_mvm_resume(mvm, false);
}

static int iwl_mvm_resume_d0i3(struct iwl_mvm *mvm)
{
	bool exit_now;
	enum iwl_d3_status d3_status;
	struct iwl_trans *trans = mvm->trans;

	iwl_trans_d3_resume(trans, &d3_status, false, false);

	/*
	 * make sure to clear D0I3_DEFER_WAKEUP before
	 * calling iwl_trans_resume(), which might wait
	 * for d0i3 exit completion.
	 */
	mutex_lock(&mvm->d0i3_suspend_mutex);
	__clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
	exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP,
					&mvm->d0i3_suspend_flags);
	mutex_unlock(&mvm->d0i3_suspend_mutex);
	if (exit_now) {
		IWL_DEBUG_RPM(mvm, "Run deferred d0i3 exit\n");
		_iwl_mvm_exit_d0i3(mvm);
	}

	iwl_trans_resume(trans);

	if (iwl_mvm_enter_d0i3_on_suspend(mvm)) {
		int ret = iwl_mvm_exit_d0i3(mvm->hw->priv);

		if (ret)
			return ret;
		/*
		 * d0i3 exit will be deferred until reconfig_complete.
		 * make sure we get out of d0i3 there.
		 */
	}
	return 0;
}

int iwl_mvm_resume(struct ieee80211_hw *hw)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	int ret;

	if (mvm->trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3)
		ret = iwl_mvm_resume_d0i3(mvm);
	else
		ret = iwl_mvm_resume_d3(mvm);

	mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;

	iwl_mvm_resume_tcm(mvm);

	iwl_fw_runtime_resume(&mvm->fwrt);

	return ret;
}

void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	device_set_wakeup_enable(mvm->trans->dev, enabled);
}

#ifdef CONFIG_IWLWIFI_DEBUGFS
static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
{
	struct iwl_mvm *mvm = inode->i_private;
	int err;

	if (mvm->d3_test_active)
		return -EBUSY;

	file->private_data = inode->i_private;

	ieee80211_stop_queues(mvm->hw);
	synchronize_net();

	mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;

	iwl_mvm_pause_tcm(mvm, true);

	iwl_fw_runtime_suspend(&mvm->fwrt);

	/* start pseudo D3 */
	rtnl_lock();
	err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true);
	rtnl_unlock();
	if (err > 0)
		err = -EINVAL;
	if (err) {
		ieee80211_wake_queues(mvm->hw);
		return err;
	}
	mvm->d3_test_active = true;
	mvm->keep_vif = NULL;
	return 0;
}

static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf,
				    size_t count, loff_t *ppos)
{
	struct iwl_mvm *mvm = file->private_data;
	u32 pme_asserted;

	while (true) {
		/* read pme_ptr if available */
		if (mvm->d3_test_pme_ptr) {
			pme_asserted = iwl_trans_read_mem32(mvm->trans,
						mvm->d3_test_pme_ptr);
			if (pme_asserted)
				break;
		}

		if (msleep_interruptible(100))
			break;
	}

	return 0;
}

static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac,
					      struct ieee80211_vif *vif)
{
	/* skip the one we keep connection on */
	if (_data == vif)
		return;

	if (vif->type == NL80211_IFTYPE_STATION)
		ieee80211_connection_loss(vif);
}

static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
{
	struct iwl_mvm *mvm = inode->i_private;
	bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
					 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);

	mvm->d3_test_active = false;

	rtnl_lock();
	__iwl_mvm_resume(mvm, true);
	rtnl_unlock();

	iwl_mvm_resume_tcm(mvm);

	iwl_fw_runtime_resume(&mvm->fwrt);

	mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;

	iwl_abort_notification_waits(&mvm->notif_wait);
	if (!unified_image) {
		int remaining_time = 10;

		ieee80211_restart_hw(mvm->hw);

		/* wait for restart and disconnect all interfaces */
		while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
		       remaining_time > 0) {
			remaining_time--;
			msleep(1000);
		}

		if (remaining_time == 0)
			IWL_ERR(mvm, "Timed out waiting for HW restart!\n");
	}

	ieee80211_iterate_active_interfaces_atomic(
		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
		iwl_mvm_d3_test_disconn_work_iter, mvm->keep_vif);

	ieee80211_wake_queues(mvm->hw);

	return 0;
}

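/*
 * debugfs "d3_test" file: opening it drives a pseudo-D3 cycle (suspend with
 * test == true), reads poll the firmware's PME pointer until a wakeup is
 * signalled, and releasing the file resumes and restarts as needed.
 */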
const struct file_operations iwl_dbgfs_d3_test_ops = {
	.llseek = no_llseek,
	.open = iwl_mvm_d3_test_open,
	.read = iwl_mvm_d3_test_read,
	.release = iwl_mvm_d3_test_release,
};
#endif