// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/fs.h>
#include <net/cfg80211.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <net/addrconf.h>
#include "iwl-modparams.h"
#include "fw-api.h"
#include "mvm.h"
#include "fw/img.h"

void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw,
			    struct ieee80211_vif *vif,
			    struct cfg80211_gtk_rekey_data *data)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	mutex_lock(&mvm->mutex);

	mvmvif->rekey_data.kek_len = data->kek_len;
	mvmvif->rekey_data.kck_len = data->kck_len;
	memcpy(mvmvif->rekey_data.kek, data->kek, data->kek_len);
	memcpy(mvmvif->rekey_data.kck, data->kck, data->kck_len);
	mvmvif->rekey_data.akm = data->akm & 0xFF;
	mvmvif->rekey_data.replay_ctr =
		cpu_to_le64(be64_to_cpup((__be64 *)data->replay_ctr));
	mvmvif->rekey_data.valid = true;

	mutex_unlock(&mvm->mutex);
}

#if IS_ENABLED(CONFIG_IPV6)
void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif,
			      struct inet6_dev *idev)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct inet6_ifaddr *ifa;
	int idx = 0;

	memset(mvmvif->tentative_addrs, 0, sizeof(mvmvif->tentative_addrs));

	read_lock_bh(&idev->lock);
	list_for_each_entry(ifa, &idev->addr_list, if_list) {
		mvmvif->target_ipv6_addrs[idx] = ifa->addr;
		if (ifa->flags & IFA_F_TENTATIVE)
			__set_bit(idx, mvmvif->tentative_addrs);
		idx++;
		if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX)
			break;
	}
	read_unlock_bh(&idev->lock);

	mvmvif->num_target_ipv6_addrs = idx;
}
#endif

void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif, int idx)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	mvmvif->tx_key_idx = idx;
}

static void iwl_mvm_convert_p1k(u16 *p1k, __le16 *out)
{
	int i;

	for (i = 0; i < IWL_P1K_SIZE; i++)
		out[i] = cpu_to_le16(p1k[i]);
}

static const u8 *iwl_mvm_find_max_pn(struct ieee80211_key_conf *key,
				     struct iwl_mvm_key_pn *ptk_pn,
				     struct ieee80211_key_seq *seq,
				     int tid, int queues)
{
	const u8 *ret = seq->ccmp.pn;
	int i;

	/* get the PN from mac80211, used on the default queue */
	ieee80211_get_key_rx_seq(key, tid, seq);

	/* and use the internal data for the other queues */
	for (i = 1; i < queues; i++) {
		const u8 *tmp = ptk_pn->q[i].pn[tid];

		if (memcmp(ret, tmp, IEEE80211_CCMP_PN_LEN) <= 0)
			ret = tmp;
	}

	return ret;
}

struct wowlan_key_data {
	struct iwl_wowlan_rsc_tsc_params_cmd *rsc_tsc;
	struct iwl_wowlan_tkip_params_cmd *tkip;
	struct iwl_wowlan_kek_kck_material_cmd_v4 *kek_kck_cmd;
	bool error, use_rsc_tsc, use_tkip, configure_keys;
	int wep_key_idx;
};

static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					struct ieee80211_sta *sta,
					struct ieee80211_key_conf *key,
					void *_data)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct wowlan_key_data *data = _data;
	struct aes_sc *aes_sc, *aes_tx_sc = NULL;
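	/*
	 * Depending on whether this iteration handles a pairwise key
	 * (sta != NULL) or a group key, aes_sc/tkip_sc below are pointed at
	 * either the unicast or the multicast RSC array of the
	 * WOWLAN_TSC_RSC_PARAM command, and aes_tx_sc/tkip_tx_sc at its TX
	 * counter.  Note that mac80211 hands over CCMP/GCMP PNs with pn[0]
	 * as the most significant byte, so e.g. a PN of 0x0123456789AB,
	 * i.e. pn[] = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xAB }, is repacked
	 * below into the firmware's little-endian 64-bit counter field.
	 */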
	struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL;
	struct iwl_p1k_cache *rx_p1ks;
	u8 *rx_mic_key;
	struct ieee80211_key_seq seq;
	u32 cur_rx_iv32 = 0;
	u16 p1k[IWL_P1K_SIZE];
	int ret, i;

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104: { /* hack it for now */
		struct {
			struct iwl_mvm_wep_key_cmd wep_key_cmd;
			struct iwl_mvm_wep_key wep_key;
		} __packed wkc = {
			.wep_key_cmd.mac_id_n_color =
				cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
								mvmvif->color)),
			.wep_key_cmd.num_keys = 1,
			/* firmware sets STA_KEY_FLG_WEP_13BYTES */
			.wep_key_cmd.decryption_type = STA_KEY_FLG_WEP,
			.wep_key.key_index = key->keyidx,
			.wep_key.key_size = key->keylen,
		};

		/*
		 * This will fail -- the key functions don't support
		 * pairwise WEP keys. However, that's better than silently
		 * failing WoWLAN. Or maybe not?
		 */
		if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
			break;

		memcpy(&wkc.wep_key.key[3], key->key, key->keylen);
		if (key->keyidx == mvmvif->tx_key_idx) {
			/* TX key must be at offset 0 */
			wkc.wep_key.key_offset = 0;
		} else {
			/* others start at 1 */
			data->wep_key_idx++;
			wkc.wep_key.key_offset = data->wep_key_idx;
		}

		if (data->configure_keys) {
			mutex_lock(&mvm->mutex);
			ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, 0,
						   sizeof(wkc), &wkc);
			data->error = ret != 0;

			mvm->ptk_ivlen = key->iv_len;
			mvm->ptk_icvlen = key->icv_len;
			mvm->gtk_ivlen = key->iv_len;
			mvm->gtk_icvlen = key->icv_len;
			mutex_unlock(&mvm->mutex);
		}

		/* don't upload key again */
		return;
	}
	default:
		data->error = true;
		return;
	case WLAN_CIPHER_SUITE_BIP_GMAC_256:
	case WLAN_CIPHER_SUITE_BIP_GMAC_128:
		data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_GCMP);
		return;
	case WLAN_CIPHER_SUITE_AES_CMAC:
		data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_CCM);
		/*
		 * Ignore CMAC keys -- the WoWLAN firmware doesn't support them
		 * but we also shouldn't abort suspend due to that. It does have
		 * support for the IGTK key renewal, but doesn't really use the
		 * IGTK for anything. This means we could spuriously wake up or
		 * be deauthenticated, but that was considered acceptable.
		 */
		return;
	case WLAN_CIPHER_SUITE_TKIP:
		if (sta) {
			u64 pn64;

			tkip_sc =
				data->rsc_tsc->params.all_tsc_rsc.tkip.unicast_rsc;
			tkip_tx_sc =
				&data->rsc_tsc->params.all_tsc_rsc.tkip.tsc;

			rx_p1ks = data->tkip->rx_uni;

			pn64 = atomic64_read(&key->tx_pn);
			tkip_tx_sc->iv16 = cpu_to_le16(TKIP_PN_TO_IV16(pn64));
			tkip_tx_sc->iv32 = cpu_to_le32(TKIP_PN_TO_IV32(pn64));

			ieee80211_get_tkip_p1k_iv(key, TKIP_PN_TO_IV32(pn64),
						  p1k);
			iwl_mvm_convert_p1k(p1k, data->tkip->tx.p1k);

			memcpy(data->tkip->mic_keys.tx,
			       &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);

			rx_mic_key = data->tkip->mic_keys.rx_unicast;
		} else {
			tkip_sc =
				data->rsc_tsc->params.all_tsc_rsc.tkip.multicast_rsc;
			rx_p1ks = data->tkip->rx_multi;
			rx_mic_key = data->tkip->mic_keys.rx_mcast;
			data->kek_kck_cmd->gtk_cipher =
				cpu_to_le32(STA_KEY_FLG_TKIP);
		}

		/*
		 * For non-QoS this relies on the fact that both the uCode and
		 * mac80211 use TID 0 (as they need to, to avoid replay attacks)
		 * for checking the IV in the frames.
235 */ 236 for (i = 0; i < IWL_NUM_RSC; i++) { 237 ieee80211_get_key_rx_seq(key, i, &seq); 238 tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16); 239 tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32); 240 /* wrapping isn't allowed, AP must rekey */ 241 if (seq.tkip.iv32 > cur_rx_iv32) 242 cur_rx_iv32 = seq.tkip.iv32; 243 } 244 245 ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid, 246 cur_rx_iv32, p1k); 247 iwl_mvm_convert_p1k(p1k, rx_p1ks[0].p1k); 248 ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid, 249 cur_rx_iv32 + 1, p1k); 250 iwl_mvm_convert_p1k(p1k, rx_p1ks[1].p1k); 251 252 memcpy(rx_mic_key, 253 &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY], 254 IWL_MIC_KEY_SIZE); 255 256 data->use_tkip = true; 257 data->use_rsc_tsc = true; 258 break; 259 case WLAN_CIPHER_SUITE_CCMP: 260 case WLAN_CIPHER_SUITE_GCMP: 261 case WLAN_CIPHER_SUITE_GCMP_256: 262 if (sta) { 263 u64 pn64; 264 265 aes_sc = 266 data->rsc_tsc->params.all_tsc_rsc.aes.unicast_rsc; 267 aes_tx_sc = 268 &data->rsc_tsc->params.all_tsc_rsc.aes.tsc; 269 270 pn64 = atomic64_read(&key->tx_pn); 271 aes_tx_sc->pn = cpu_to_le64(pn64); 272 } else { 273 aes_sc = 274 data->rsc_tsc->params.all_tsc_rsc.aes.multicast_rsc; 275 data->kek_kck_cmd->gtk_cipher = 276 key->cipher == WLAN_CIPHER_SUITE_CCMP ? 277 cpu_to_le32(STA_KEY_FLG_CCM) : 278 cpu_to_le32(STA_KEY_FLG_GCMP); 279 } 280 281 /* 282 * For non-QoS this relies on the fact that both the uCode and 283 * mac80211/our RX code use TID 0 for checking the PN. 284 */ 285 if (sta && iwl_mvm_has_new_rx_api(mvm)) { 286 struct iwl_mvm_sta *mvmsta; 287 struct iwl_mvm_key_pn *ptk_pn; 288 const u8 *pn; 289 290 mvmsta = iwl_mvm_sta_from_mac80211(sta); 291 rcu_read_lock(); 292 ptk_pn = rcu_dereference(mvmsta->ptk_pn[key->keyidx]); 293 if (WARN_ON(!ptk_pn)) { 294 rcu_read_unlock(); 295 break; 296 } 297 298 for (i = 0; i < IWL_MAX_TID_COUNT; i++) { 299 pn = iwl_mvm_find_max_pn(key, ptk_pn, &seq, i, 300 mvm->trans->num_rx_queues); 301 aes_sc[i].pn = cpu_to_le64((u64)pn[5] | 302 ((u64)pn[4] << 8) | 303 ((u64)pn[3] << 16) | 304 ((u64)pn[2] << 24) | 305 ((u64)pn[1] << 32) | 306 ((u64)pn[0] << 40)); 307 } 308 309 rcu_read_unlock(); 310 } else { 311 for (i = 0; i < IWL_NUM_RSC; i++) { 312 u8 *pn = seq.ccmp.pn; 313 314 ieee80211_get_key_rx_seq(key, i, &seq); 315 aes_sc[i].pn = cpu_to_le64((u64)pn[5] | 316 ((u64)pn[4] << 8) | 317 ((u64)pn[3] << 16) | 318 ((u64)pn[2] << 24) | 319 ((u64)pn[1] << 32) | 320 ((u64)pn[0] << 40)); 321 } 322 } 323 data->use_rsc_tsc = true; 324 break; 325 } 326 327 IWL_DEBUG_WOWLAN(mvm, "GTK cipher %d\n", data->kek_kck_cmd->gtk_cipher); 328 329 if (data->configure_keys) { 330 mutex_lock(&mvm->mutex); 331 /* 332 * The D3 firmware hardcodes the key offset 0 as the key it 333 * uses to transmit packets to the AP, i.e. the PTK. 334 */ 335 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { 336 mvm->ptk_ivlen = key->iv_len; 337 mvm->ptk_icvlen = key->icv_len; 338 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 0); 339 } else { 340 /* 341 * firmware only supports TSC/RSC for a single key, 342 * so if there are multiple keep overwriting them 343 * with new ones -- this relies on mac80211 doing 344 * list_add_tail(). 
345 */ 346 mvm->gtk_ivlen = key->iv_len; 347 mvm->gtk_icvlen = key->icv_len; 348 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 1); 349 } 350 mutex_unlock(&mvm->mutex); 351 data->error = ret != 0; 352 } 353 } 354 355 static int iwl_mvm_send_patterns_v1(struct iwl_mvm *mvm, 356 struct cfg80211_wowlan *wowlan) 357 { 358 struct iwl_wowlan_patterns_cmd_v1 *pattern_cmd; 359 struct iwl_host_cmd cmd = { 360 .id = WOWLAN_PATTERNS, 361 .dataflags[0] = IWL_HCMD_DFL_NOCOPY, 362 }; 363 int i, err; 364 365 if (!wowlan->n_patterns) 366 return 0; 367 368 cmd.len[0] = struct_size(pattern_cmd, patterns, wowlan->n_patterns); 369 370 pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL); 371 if (!pattern_cmd) 372 return -ENOMEM; 373 374 pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns); 375 376 for (i = 0; i < wowlan->n_patterns; i++) { 377 int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8); 378 379 memcpy(&pattern_cmd->patterns[i].mask, 380 wowlan->patterns[i].mask, mask_len); 381 memcpy(&pattern_cmd->patterns[i].pattern, 382 wowlan->patterns[i].pattern, 383 wowlan->patterns[i].pattern_len); 384 pattern_cmd->patterns[i].mask_size = mask_len; 385 pattern_cmd->patterns[i].pattern_size = 386 wowlan->patterns[i].pattern_len; 387 } 388 389 cmd.data[0] = pattern_cmd; 390 err = iwl_mvm_send_cmd(mvm, &cmd); 391 kfree(pattern_cmd); 392 return err; 393 } 394 395 static int iwl_mvm_send_patterns(struct iwl_mvm *mvm, 396 struct ieee80211_vif *vif, 397 struct cfg80211_wowlan *wowlan) 398 { 399 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 400 struct iwl_wowlan_patterns_cmd *pattern_cmd; 401 struct iwl_host_cmd cmd = { 402 .id = WOWLAN_PATTERNS, 403 .dataflags[0] = IWL_HCMD_DFL_NOCOPY, 404 }; 405 int i, err; 406 int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, 407 WOWLAN_PATTERNS, 408 IWL_FW_CMD_VER_UNKNOWN); 409 410 if (!wowlan->n_patterns) 411 return 0; 412 413 cmd.len[0] = sizeof(*pattern_cmd) + 414 wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern_v2); 415 416 pattern_cmd = kzalloc(cmd.len[0], GFP_KERNEL); 417 if (!pattern_cmd) 418 return -ENOMEM; 419 420 pattern_cmd->n_patterns = wowlan->n_patterns; 421 if (ver >= 3) 422 pattern_cmd->sta_id = mvmvif->ap_sta_id; 423 424 for (i = 0; i < wowlan->n_patterns; i++) { 425 int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8); 426 427 pattern_cmd->patterns[i].pattern_type = 428 WOWLAN_PATTERN_TYPE_BITMASK; 429 430 memcpy(&pattern_cmd->patterns[i].u.bitmask.mask, 431 wowlan->patterns[i].mask, mask_len); 432 memcpy(&pattern_cmd->patterns[i].u.bitmask.pattern, 433 wowlan->patterns[i].pattern, 434 wowlan->patterns[i].pattern_len); 435 pattern_cmd->patterns[i].u.bitmask.mask_size = mask_len; 436 pattern_cmd->patterns[i].u.bitmask.pattern_size = 437 wowlan->patterns[i].pattern_len; 438 } 439 440 cmd.data[0] = pattern_cmd; 441 err = iwl_mvm_send_cmd(mvm, &cmd); 442 kfree(pattern_cmd); 443 return err; 444 } 445 446 static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 447 struct ieee80211_sta *ap_sta) 448 { 449 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 450 struct ieee80211_chanctx_conf *ctx; 451 u8 chains_static, chains_dynamic; 452 struct cfg80211_chan_def chandef; 453 int ret, i; 454 struct iwl_binding_cmd_v1 binding_cmd = {}; 455 struct iwl_time_quota_cmd quota_cmd = {}; 456 struct iwl_time_quota_data *quota; 457 u32 status; 458 459 if (WARN_ON_ONCE(iwl_mvm_is_cdb_supported(mvm))) 460 return -EINVAL; 461 462 /* add back the PHY */ 463 if (WARN_ON(!mvmvif->phy_ctxt)) 464 return -EINVAL; 465 
466 rcu_read_lock(); 467 ctx = rcu_dereference(vif->chanctx_conf); 468 if (WARN_ON(!ctx)) { 469 rcu_read_unlock(); 470 return -EINVAL; 471 } 472 chandef = ctx->def; 473 chains_static = ctx->rx_chains_static; 474 chains_dynamic = ctx->rx_chains_dynamic; 475 rcu_read_unlock(); 476 477 ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->phy_ctxt, &chandef, 478 chains_static, chains_dynamic); 479 if (ret) 480 return ret; 481 482 /* add back the MAC */ 483 mvmvif->uploaded = false; 484 485 if (WARN_ON(!vif->bss_conf.assoc)) 486 return -EINVAL; 487 488 ret = iwl_mvm_mac_ctxt_add(mvm, vif); 489 if (ret) 490 return ret; 491 492 /* add back binding - XXX refactor? */ 493 binding_cmd.id_and_color = 494 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id, 495 mvmvif->phy_ctxt->color)); 496 binding_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD); 497 binding_cmd.phy = 498 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id, 499 mvmvif->phy_ctxt->color)); 500 binding_cmd.macs[0] = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, 501 mvmvif->color)); 502 for (i = 1; i < MAX_MACS_IN_BINDING; i++) 503 binding_cmd.macs[i] = cpu_to_le32(FW_CTXT_INVALID); 504 505 status = 0; 506 ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD, 507 IWL_BINDING_CMD_SIZE_V1, &binding_cmd, 508 &status); 509 if (ret) { 510 IWL_ERR(mvm, "Failed to add binding: %d\n", ret); 511 return ret; 512 } 513 514 if (status) { 515 IWL_ERR(mvm, "Binding command failed: %u\n", status); 516 return -EIO; 517 } 518 519 ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false, 0); 520 if (ret) 521 return ret; 522 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta); 523 524 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 525 if (ret) 526 return ret; 527 528 /* and some quota */ 529 quota = iwl_mvm_quota_cmd_get_quota(mvm, "a_cmd, 0); 530 quota->id_and_color = 531 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id, 532 mvmvif->phy_ctxt->color)); 533 quota->quota = cpu_to_le32(IWL_MVM_MAX_QUOTA); 534 quota->max_duration = cpu_to_le32(IWL_MVM_MAX_QUOTA); 535 536 for (i = 1; i < MAX_BINDINGS; i++) { 537 quota = iwl_mvm_quota_cmd_get_quota(mvm, "a_cmd, i); 538 quota->id_and_color = cpu_to_le32(FW_CTXT_INVALID); 539 } 540 541 ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0, 542 iwl_mvm_quota_cmd_size(mvm), "a_cmd); 543 if (ret) 544 IWL_ERR(mvm, "Failed to send quota: %d\n", ret); 545 546 if (iwl_mvm_is_lar_supported(mvm) && iwl_mvm_init_fw_regd(mvm)) 547 IWL_ERR(mvm, "Failed to initialize D3 LAR information\n"); 548 549 return 0; 550 } 551 552 static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm, 553 struct ieee80211_vif *vif) 554 { 555 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 556 struct iwl_nonqos_seq_query_cmd query_cmd = { 557 .get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_GET), 558 .mac_id_n_color = 559 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, 560 mvmvif->color)), 561 }; 562 struct iwl_host_cmd cmd = { 563 .id = NON_QOS_TX_COUNTER_CMD, 564 .flags = CMD_WANT_SKB, 565 }; 566 int err; 567 u32 size; 568 569 cmd.data[0] = &query_cmd; 570 cmd.len[0] = sizeof(query_cmd); 571 572 err = iwl_mvm_send_cmd(mvm, &cmd); 573 if (err) 574 return err; 575 576 size = iwl_rx_packet_payload_len(cmd.resp_pkt); 577 if (size < sizeof(__le16)) { 578 err = -EINVAL; 579 } else { 580 err = le16_to_cpup((__le16 *)cmd.resp_pkt->data); 581 /* firmware returns next, not last-used seqno */ 582 err = (u16) (err - 0x10); 583 } 584 585 iwl_free_resp(&cmd); 586 return err; 587 } 588 589 void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct 
				 ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_nonqos_seq_query_cmd query_cmd = {
		.get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_SET),
		.mac_id_n_color =
			cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
							mvmvif->color)),
		.value = cpu_to_le16(mvmvif->seqno),
	};

	/* return if called during restart, not resume from D3 */
	if (!mvmvif->seqno_valid)
		return;

	mvmvif->seqno_valid = false;

	if (iwl_mvm_send_cmd_pdu(mvm, NON_QOS_TX_COUNTER_CMD, 0,
				 sizeof(query_cmd), &query_cmd))
		IWL_ERR(mvm, "failed to set non-QoS seqno\n");
}

static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm)
{
	iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);

	iwl_mvm_stop_device(mvm);
	/*
	 * Set the HW restart bit -- this is mostly true as we're
	 * going to load new firmware and reprogram that, though
	 * the reprogramming is going to be manual to avoid adding
	 * all the MACs that aren't supported.
	 * We don't have to clear up everything though because the
	 * reprogramming is manual. When we resume, we'll actually
	 * go through a proper restart sequence again to switch
	 * back to the runtime firmware image.
	 */
	set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);

	/* the fw is reset, so all the keys are cleared */
	memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));

	mvm->ptk_ivlen = 0;
	mvm->ptk_icvlen = 0;
	mvm->gtk_ivlen = 0;
	mvm->gtk_icvlen = 0;

	return iwl_mvm_load_d3_fw(mvm);
}

static int
iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
			  struct cfg80211_wowlan *wowlan,
			  struct iwl_wowlan_config_cmd *wowlan_config_cmd,
			  struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif,
			  struct ieee80211_sta *ap_sta)
{
	struct iwl_mvm_sta *mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);

	/* TODO: wowlan_config_cmd->wowlan_ba_teardown_tids */

	wowlan_config_cmd->is_11n_connection =
					ap_sta->ht_cap.ht_supported;
	wowlan_config_cmd->flags = ENABLE_L3_FILTERING |
		ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING;

	if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
				  WOWLAN_CONFIGURATION, 0) < 6) {
		/* Query the last used seqno and set it */
		int ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);

		if (ret < 0)
			return ret;

		wowlan_config_cmd->non_qos_seq = cpu_to_le16(ret);
	}

	iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, wowlan_config_cmd);

	if (wowlan->disconnect)
		wowlan_config_cmd->wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
				    IWL_WOWLAN_WAKEUP_LINK_CHANGE);
	if (wowlan->magic_pkt)
		wowlan_config_cmd->wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_MAGIC_PACKET);
	if (wowlan->gtk_rekey_failure)
		wowlan_config_cmd->wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
	if (wowlan->eap_identity_req)
		wowlan_config_cmd->wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ);
	if (wowlan->four_way_handshake)
		wowlan_config_cmd->wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
	if (wowlan->n_patterns)
		wowlan_config_cmd->wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_PATTERN_MATCH);

	if (wowlan->rfkill_release)
		wowlan_config_cmd->wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);

	if (wowlan->tcp) {
		/*
		 * Set the "link change" (really "link lost") flag as well
		 * since that implies losing the TCP
connection. 696 */ 697 wowlan_config_cmd->wakeup_filter |= 698 cpu_to_le32(IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS | 699 IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE | 700 IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET | 701 IWL_WOWLAN_WAKEUP_LINK_CHANGE); 702 } 703 704 if (wowlan->any) { 705 wowlan_config_cmd->wakeup_filter |= 706 cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS | 707 IWL_WOWLAN_WAKEUP_LINK_CHANGE | 708 IWL_WOWLAN_WAKEUP_RX_FRAME | 709 IWL_WOWLAN_WAKEUP_BCN_FILTERING); 710 } 711 712 return 0; 713 } 714 715 static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm, 716 struct ieee80211_vif *vif, 717 u32 cmd_flags) 718 { 719 struct iwl_wowlan_kek_kck_material_cmd_v4 kek_kck_cmd = {}; 720 struct iwl_wowlan_kek_kck_material_cmd_v4 *_kek_kck_cmd = &kek_kck_cmd; 721 struct iwl_wowlan_tkip_params_cmd tkip_cmd = {}; 722 bool unified = fw_has_capa(&mvm->fw->ucode_capa, 723 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); 724 struct wowlan_key_data key_data = { 725 .configure_keys = !unified, 726 .use_rsc_tsc = false, 727 .tkip = &tkip_cmd, 728 .use_tkip = false, 729 .kek_kck_cmd = _kek_kck_cmd, 730 }; 731 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 732 int ret; 733 u8 cmd_ver; 734 size_t cmd_size; 735 736 key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL); 737 if (!key_data.rsc_tsc) 738 return -ENOMEM; 739 740 /* 741 * if we have to configure keys, call ieee80211_iter_keys(), 742 * as we need non-atomic context in order to take the 743 * required locks. 744 */ 745 /* 746 * Note that currently we don't propagate cmd_flags 747 * to the iterator. In case of key_data.configure_keys, 748 * all the configured commands are SYNC, and 749 * iwl_mvm_wowlan_program_keys() will take care of 750 * locking/unlocking mvm->mutex. 751 */ 752 ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_wowlan_program_keys, 753 &key_data); 754 755 if (key_data.error) { 756 ret = -EIO; 757 goto out; 758 } 759 760 if (key_data.use_rsc_tsc) { 761 int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, 762 WOWLAN_TSC_RSC_PARAM, 763 IWL_FW_CMD_VER_UNKNOWN); 764 int size; 765 766 if (ver == 4) { 767 size = sizeof(*key_data.rsc_tsc); 768 key_data.rsc_tsc->sta_id = 769 cpu_to_le32(mvmvif->ap_sta_id); 770 771 } else if (ver == 2 || ver == IWL_FW_CMD_VER_UNKNOWN) { 772 size = sizeof(key_data.rsc_tsc->params); 773 } else { 774 ret = 0; 775 WARN_ON_ONCE(1); 776 goto out; 777 } 778 779 ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_TSC_RSC_PARAM, 780 cmd_flags, 781 size, 782 key_data.rsc_tsc); 783 784 if (ret) 785 goto out; 786 } 787 788 if (key_data.use_tkip && 789 !fw_has_api(&mvm->fw->ucode_capa, 790 IWL_UCODE_TLV_API_TKIP_MIC_KEYS)) { 791 int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, 792 WOWLAN_TKIP_PARAM, 793 IWL_FW_CMD_VER_UNKNOWN); 794 int size; 795 796 if (ver == 2) { 797 size = sizeof(tkip_cmd); 798 key_data.tkip->sta_id = 799 cpu_to_le32(mvmvif->ap_sta_id); 800 } else if (ver == 1 || ver == IWL_FW_CMD_VER_UNKNOWN) { 801 size = sizeof(struct iwl_wowlan_tkip_params_cmd_ver_1); 802 } else { 803 ret = -EINVAL; 804 WARN_ON_ONCE(1); 805 goto out; 806 } 807 808 /* send relevant data according to CMD version */ 809 ret = iwl_mvm_send_cmd_pdu(mvm, 810 WOWLAN_TKIP_PARAM, 811 cmd_flags, size, 812 &tkip_cmd); 813 if (ret) 814 goto out; 815 } 816 817 /* configure rekey data only if offloaded rekey is supported (d3) */ 818 if (mvmvif->rekey_data.valid) { 819 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, 820 IWL_ALWAYS_LONG_GROUP, 821 WOWLAN_KEK_KCK_MATERIAL, 822 IWL_FW_CMD_VER_UNKNOWN); 823 if (WARN_ON(cmd_ver != 2 && cmd_ver != 3 && 
cmd_ver != 4 && 824 cmd_ver != IWL_FW_CMD_VER_UNKNOWN)) 825 return -EINVAL; 826 827 memcpy(kek_kck_cmd.kck, mvmvif->rekey_data.kck, 828 mvmvif->rekey_data.kck_len); 829 kek_kck_cmd.kck_len = cpu_to_le16(mvmvif->rekey_data.kck_len); 830 memcpy(kek_kck_cmd.kek, mvmvif->rekey_data.kek, 831 mvmvif->rekey_data.kek_len); 832 kek_kck_cmd.kek_len = cpu_to_le16(mvmvif->rekey_data.kek_len); 833 kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr; 834 kek_kck_cmd.akm = cpu_to_le32(mvmvif->rekey_data.akm); 835 kek_kck_cmd.sta_id = cpu_to_le32(mvmvif->ap_sta_id); 836 837 if (cmd_ver == 4) { 838 cmd_size = sizeof(struct iwl_wowlan_kek_kck_material_cmd_v4); 839 } else { 840 if (cmd_ver == 3) 841 cmd_size = 842 sizeof(struct iwl_wowlan_kek_kck_material_cmd_v3); 843 else 844 cmd_size = 845 sizeof(struct iwl_wowlan_kek_kck_material_cmd_v2); 846 /* skip the sta_id at the beginning */ 847 _kek_kck_cmd = (void *) 848 ((u8 *)_kek_kck_cmd) + sizeof(kek_kck_cmd.sta_id); 849 } 850 851 IWL_DEBUG_WOWLAN(mvm, "setting akm %d\n", 852 mvmvif->rekey_data.akm); 853 854 ret = iwl_mvm_send_cmd_pdu(mvm, 855 WOWLAN_KEK_KCK_MATERIAL, cmd_flags, 856 cmd_size, 857 _kek_kck_cmd); 858 if (ret) 859 goto out; 860 } 861 ret = 0; 862 out: 863 kfree(key_data.rsc_tsc); 864 return ret; 865 } 866 867 static int 868 iwl_mvm_wowlan_config(struct iwl_mvm *mvm, 869 struct cfg80211_wowlan *wowlan, 870 struct iwl_wowlan_config_cmd *wowlan_config_cmd, 871 struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif, 872 struct ieee80211_sta *ap_sta) 873 { 874 int ret; 875 bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, 876 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); 877 878 mvm->offload_tid = wowlan_config_cmd->offloading_tid; 879 880 if (!unified_image) { 881 ret = iwl_mvm_switch_to_d3(mvm); 882 if (ret) 883 return ret; 884 885 ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta); 886 if (ret) 887 return ret; 888 } 889 890 /* 891 * This needs to be unlocked due to lock ordering 892 * constraints. Since we're in the suspend path 893 * that isn't really a problem though. 894 */ 895 mutex_unlock(&mvm->mutex); 896 ret = iwl_mvm_wowlan_config_key_params(mvm, vif, CMD_ASYNC); 897 mutex_lock(&mvm->mutex); 898 if (ret) 899 return ret; 900 901 ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0, 902 sizeof(*wowlan_config_cmd), 903 wowlan_config_cmd); 904 if (ret) 905 return ret; 906 907 if (fw_has_api(&mvm->fw->ucode_capa, 908 IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE)) 909 ret = iwl_mvm_send_patterns(mvm, vif, wowlan); 910 else 911 ret = iwl_mvm_send_patterns_v1(mvm, wowlan); 912 if (ret) 913 return ret; 914 915 return iwl_mvm_send_proto_offload(mvm, vif, false, true, 0); 916 } 917 918 static int 919 iwl_mvm_netdetect_config(struct iwl_mvm *mvm, 920 struct cfg80211_wowlan *wowlan, 921 struct cfg80211_sched_scan_request *nd_config, 922 struct ieee80211_vif *vif) 923 { 924 int ret; 925 bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, 926 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); 927 928 if (!unified_image) { 929 ret = iwl_mvm_switch_to_d3(mvm); 930 if (ret) 931 return ret; 932 } else { 933 /* In theory, we wouldn't have to stop a running sched 934 * scan in order to start another one (for 935 * net-detect). But in practice this doesn't seem to 936 * work properly, so stop any running sched_scan now. 
937 */ 938 ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true); 939 if (ret) 940 return ret; 941 } 942 943 ret = iwl_mvm_sched_scan_start(mvm, vif, nd_config, &mvm->nd_ies, 944 IWL_MVM_SCAN_NETDETECT); 945 if (ret) 946 return ret; 947 948 if (WARN_ON(mvm->nd_match_sets || mvm->nd_channels)) 949 return -EBUSY; 950 951 /* save the sched scan matchsets... */ 952 if (nd_config->n_match_sets) { 953 mvm->nd_match_sets = kmemdup(nd_config->match_sets, 954 sizeof(*nd_config->match_sets) * 955 nd_config->n_match_sets, 956 GFP_KERNEL); 957 if (mvm->nd_match_sets) 958 mvm->n_nd_match_sets = nd_config->n_match_sets; 959 } 960 961 /* ...and the sched scan channels for later reporting */ 962 mvm->nd_channels = kmemdup(nd_config->channels, 963 sizeof(*nd_config->channels) * 964 nd_config->n_channels, 965 GFP_KERNEL); 966 if (mvm->nd_channels) 967 mvm->n_nd_channels = nd_config->n_channels; 968 969 return 0; 970 } 971 972 static void iwl_mvm_free_nd(struct iwl_mvm *mvm) 973 { 974 kfree(mvm->nd_match_sets); 975 mvm->nd_match_sets = NULL; 976 mvm->n_nd_match_sets = 0; 977 kfree(mvm->nd_channels); 978 mvm->nd_channels = NULL; 979 mvm->n_nd_channels = 0; 980 } 981 982 static int __iwl_mvm_suspend(struct ieee80211_hw *hw, 983 struct cfg80211_wowlan *wowlan, 984 bool test) 985 { 986 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 987 struct ieee80211_vif *vif = NULL; 988 struct iwl_mvm_vif *mvmvif = NULL; 989 struct ieee80211_sta *ap_sta = NULL; 990 struct iwl_d3_manager_config d3_cfg_cmd_data = { 991 /* 992 * Program the minimum sleep time to 10 seconds, as many 993 * platforms have issues processing a wakeup signal while 994 * still being in the process of suspending. 995 */ 996 .min_sleep_time = cpu_to_le32(10 * 1000 * 1000), 997 }; 998 struct iwl_host_cmd d3_cfg_cmd = { 999 .id = D3_CONFIG_CMD, 1000 .flags = CMD_WANT_SKB | CMD_SEND_IN_D3, 1001 .data[0] = &d3_cfg_cmd_data, 1002 .len[0] = sizeof(d3_cfg_cmd_data), 1003 }; 1004 int ret; 1005 int len __maybe_unused; 1006 bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, 1007 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); 1008 1009 if (!wowlan) { 1010 /* 1011 * mac80211 shouldn't get here, but for D3 test 1012 * it doesn't warrant a warning 1013 */ 1014 WARN_ON(!test); 1015 return -EINVAL; 1016 } 1017 1018 mutex_lock(&mvm->mutex); 1019 1020 set_bit(IWL_MVM_STATUS_IN_D3, &mvm->status); 1021 1022 synchronize_net(); 1023 1024 vif = iwl_mvm_get_bss_vif(mvm); 1025 if (IS_ERR_OR_NULL(vif)) { 1026 ret = 1; 1027 goto out_noreset; 1028 } 1029 1030 mvmvif = iwl_mvm_vif_from_mac80211(vif); 1031 1032 if (mvmvif->ap_sta_id == IWL_MVM_INVALID_STA) { 1033 /* if we're not associated, this must be netdetect */ 1034 if (!wowlan->nd_config) { 1035 ret = 1; 1036 goto out_noreset; 1037 } 1038 1039 ret = iwl_mvm_netdetect_config( 1040 mvm, wowlan, wowlan->nd_config, vif); 1041 if (ret) 1042 goto out; 1043 1044 mvm->net_detect = true; 1045 } else { 1046 struct iwl_wowlan_config_cmd wowlan_config_cmd = {}; 1047 1048 wowlan_config_cmd.sta_id = mvmvif->ap_sta_id; 1049 1050 ap_sta = rcu_dereference_protected( 1051 mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], 1052 lockdep_is_held(&mvm->mutex)); 1053 if (IS_ERR_OR_NULL(ap_sta)) { 1054 ret = -EINVAL; 1055 goto out_noreset; 1056 } 1057 1058 ret = iwl_mvm_get_wowlan_config(mvm, wowlan, &wowlan_config_cmd, 1059 vif, mvmvif, ap_sta); 1060 if (ret) 1061 goto out_noreset; 1062 ret = iwl_mvm_wowlan_config(mvm, wowlan, &wowlan_config_cmd, 1063 vif, mvmvif, ap_sta); 1064 if (ret) 1065 goto out; 1066 1067 mvm->net_detect = false; 1068 } 1069 1070 ret = 
iwl_mvm_power_update_device(mvm); 1071 if (ret) 1072 goto out; 1073 1074 ret = iwl_mvm_power_update_mac(mvm); 1075 if (ret) 1076 goto out; 1077 1078 #ifdef CONFIG_IWLWIFI_DEBUGFS 1079 if (mvm->d3_wake_sysassert) 1080 d3_cfg_cmd_data.wakeup_flags |= 1081 cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR); 1082 #endif 1083 1084 /* 1085 * Prior to 9000 device family the driver needs to stop the dbg 1086 * recording before entering D3. In later devices the FW stops the 1087 * recording automatically. 1088 */ 1089 if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000) 1090 iwl_fw_dbg_stop_restart_recording(&mvm->fwrt, NULL, true); 1091 1092 mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3; 1093 1094 /* must be last -- this switches firmware state */ 1095 ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd); 1096 if (ret) 1097 goto out; 1098 #ifdef CONFIG_IWLWIFI_DEBUGFS 1099 len = iwl_rx_packet_payload_len(d3_cfg_cmd.resp_pkt); 1100 if (len >= sizeof(u32)) { 1101 mvm->d3_test_pme_ptr = 1102 le32_to_cpup((__le32 *)d3_cfg_cmd.resp_pkt->data); 1103 } 1104 #endif 1105 iwl_free_resp(&d3_cfg_cmd); 1106 1107 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); 1108 1109 ret = iwl_trans_d3_suspend(mvm->trans, test, !unified_image); 1110 out: 1111 if (ret < 0) { 1112 iwl_mvm_free_nd(mvm); 1113 1114 if (!unified_image) { 1115 if (mvm->fw_restart > 0) { 1116 mvm->fw_restart--; 1117 ieee80211_restart_hw(mvm->hw); 1118 } 1119 } 1120 1121 clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status); 1122 } 1123 out_noreset: 1124 mutex_unlock(&mvm->mutex); 1125 1126 return ret; 1127 } 1128 1129 int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) 1130 { 1131 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1132 1133 iwl_mvm_pause_tcm(mvm, true); 1134 1135 iwl_fw_runtime_suspend(&mvm->fwrt); 1136 1137 return __iwl_mvm_suspend(hw, wowlan, false); 1138 } 1139 1140 /* converted data from the different status responses */ 1141 struct iwl_wowlan_status_data { 1142 u16 pattern_number; 1143 u16 qos_seq_ctr[8]; 1144 u32 wakeup_reasons; 1145 u32 wake_packet_length; 1146 u32 wake_packet_bufsize; 1147 const u8 *wake_packet; 1148 }; 1149 1150 static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm, 1151 struct ieee80211_vif *vif, 1152 struct iwl_wowlan_status_data *status) 1153 { 1154 struct sk_buff *pkt = NULL; 1155 struct cfg80211_wowlan_wakeup wakeup = { 1156 .pattern_idx = -1, 1157 }; 1158 struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup; 1159 u32 reasons = status->wakeup_reasons; 1160 1161 if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) { 1162 wakeup_report = NULL; 1163 goto report; 1164 } 1165 1166 pm_wakeup_event(mvm->dev, 0); 1167 1168 if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET) 1169 wakeup.magic_pkt = true; 1170 1171 if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN) 1172 wakeup.pattern_idx = 1173 status->pattern_number; 1174 1175 if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON | 1176 IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH)) 1177 wakeup.disconnect = true; 1178 1179 if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE) 1180 wakeup.gtk_rekey_failure = true; 1181 1182 if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED) 1183 wakeup.rfkill_release = true; 1184 1185 if (reasons & IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST) 1186 wakeup.eap_identity_req = true; 1187 1188 if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE) 1189 wakeup.four_way_handshake = true; 1190 1191 if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS) 1192 wakeup.tcp_connlost = true; 1193 1194 if (reasons & 
	    IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE)
		wakeup.tcp_nomoretokens = true;

	if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET)
		wakeup.tcp_match = true;

	if (status->wake_packet_bufsize) {
		int pktsize = status->wake_packet_bufsize;
		int pktlen = status->wake_packet_length;
		const u8 *pktdata = status->wake_packet;
		struct ieee80211_hdr *hdr = (void *)pktdata;
		int truncated = pktlen - pktsize;

		/* this would be a firmware bug */
		if (WARN_ON_ONCE(truncated < 0))
			truncated = 0;

		if (ieee80211_is_data(hdr->frame_control)) {
			int hdrlen = ieee80211_hdrlen(hdr->frame_control);
			int ivlen = 0, icvlen = 4; /* also FCS */

			pkt = alloc_skb(pktsize, GFP_KERNEL);
			if (!pkt)
				goto report;

			skb_put_data(pkt, pktdata, hdrlen);
			pktdata += hdrlen;
			pktsize -= hdrlen;

			if (ieee80211_has_protected(hdr->frame_control)) {
				/*
				 * This is unlocked and using gtk_i(c)vlen,
				 * but since everything is under RTNL still
				 * that's not really a problem - changing
				 * it would be difficult.
				 */
				if (is_multicast_ether_addr(hdr->addr1)) {
					ivlen = mvm->gtk_ivlen;
					icvlen += mvm->gtk_icvlen;
				} else {
					ivlen = mvm->ptk_ivlen;
					icvlen += mvm->ptk_icvlen;
				}
			}

			/* if truncated, FCS/ICV is (partially) gone */
			if (truncated >= icvlen) {
				truncated -= icvlen;
				icvlen = 0;
			} else {
				icvlen -= truncated;
				truncated = 0;
			}

			pktsize -= ivlen + icvlen;
			pktdata += ivlen;

			skb_put_data(pkt, pktdata, pktsize);

			if (ieee80211_data_to_8023(pkt, vif->addr, vif->type))
				goto report;
			wakeup.packet = pkt->data;
			wakeup.packet_present_len = pkt->len;
			wakeup.packet_len = pkt->len - truncated;
			wakeup.packet_80211 = false;
		} else {
			int fcslen = 4;

			if (truncated >= 4) {
				truncated -= 4;
				fcslen = 0;
			} else {
				fcslen -= truncated;
				truncated = 0;
			}
			pktsize -= fcslen;
			wakeup.packet = status->wake_packet;
			wakeup.packet_present_len = pktsize;
			wakeup.packet_len = pktlen - truncated;
			wakeup.packet_80211 = true;
		}
	}

 report:
	ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
	kfree_skb(pkt);
}

static void iwl_mvm_aes_sc_to_seq(struct aes_sc *sc,
				  struct ieee80211_key_seq *seq)
{
	u64 pn;

	pn = le64_to_cpu(sc->pn);
	seq->ccmp.pn[0] = pn >> 40;
	seq->ccmp.pn[1] = pn >> 32;
	seq->ccmp.pn[2] = pn >> 24;
	seq->ccmp.pn[3] = pn >> 16;
	seq->ccmp.pn[4] = pn >> 8;
	seq->ccmp.pn[5] = pn;
}

static void iwl_mvm_tkip_sc_to_seq(struct tkip_sc *sc,
				   struct ieee80211_key_seq *seq)
{
	seq->tkip.iv32 = le32_to_cpu(sc->iv32);
	seq->tkip.iv16 = le16_to_cpu(sc->iv16);
}

static void iwl_mvm_set_aes_rx_seq(struct iwl_mvm *mvm, struct aes_sc *scs,
				   struct ieee80211_sta *sta,
				   struct ieee80211_key_conf *key)
{
	int tid;

	BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS);

	if (sta && iwl_mvm_has_new_rx_api(mvm)) {
		struct iwl_mvm_sta *mvmsta;
		struct iwl_mvm_key_pn *ptk_pn;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);

		rcu_read_lock();
		ptk_pn = rcu_dereference(mvmsta->ptk_pn[key->keyidx]);
		if (WARN_ON(!ptk_pn)) {
			rcu_read_unlock();
			return;
		}

		for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
			struct ieee80211_key_seq seq = {};
			int i;

			iwl_mvm_aes_sc_to_seq(&scs[tid], &seq);
			ieee80211_set_key_rx_seq(key, tid, &seq);
			for (i = 1; i < mvm->trans->num_rx_queues; i++)
				memcpy(ptk_pn->q[i].pn[tid],
				       seq.ccmp.pn, IEEE80211_CCMP_PN_LEN);
		}
		rcu_read_unlock();
	} else {
		for (tid = 0; tid < IWL_NUM_RSC; tid++) {
			struct ieee80211_key_seq seq = {};

			iwl_mvm_aes_sc_to_seq(&scs[tid], &seq);
			ieee80211_set_key_rx_seq(key, tid, &seq);
		}
	}
}

static void iwl_mvm_set_tkip_rx_seq(struct tkip_sc *scs,
				    struct ieee80211_key_conf *key)
{
	int tid;

	BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS);

	for (tid = 0; tid < IWL_NUM_RSC; tid++) {
		struct ieee80211_key_seq seq = {};

		iwl_mvm_tkip_sc_to_seq(&scs[tid], &seq);
		ieee80211_set_key_rx_seq(key, tid, &seq);
	}
}

static void iwl_mvm_set_key_rx_seq(struct iwl_mvm *mvm,
				   struct ieee80211_key_conf *key,
				   struct iwl_wowlan_status *status)
{
	union iwl_all_tsc_rsc *rsc = &status->gtk[0].rsc.all_tsc_rsc;

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		iwl_mvm_set_aes_rx_seq(mvm, rsc->aes.multicast_rsc, NULL, key);
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		iwl_mvm_set_tkip_rx_seq(rsc->tkip.multicast_rsc, key);
		break;
	default:
		WARN_ON(1);
	}
}

struct iwl_mvm_d3_gtk_iter_data {
	struct iwl_mvm *mvm;
	struct iwl_wowlan_status *status;
	void *last_gtk;
	u32 cipher;
	bool find_phase, unhandled_cipher;
	int num_keys;
};

static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   struct ieee80211_sta *sta,
				   struct ieee80211_key_conf *key,
				   void *_data)
{
	struct iwl_mvm_d3_gtk_iter_data *data = _data;

	if (data->unhandled_cipher)
		return;

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		/* ignore WEP completely, nothing to do */
		return;
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
	case WLAN_CIPHER_SUITE_TKIP:
		/* we support these */
		break;
	default:
		/* everything else (even CMAC for MFP) - disconnect from AP */
		data->unhandled_cipher = true;
		return;
	}

	data->num_keys++;

	/*
	 * pairwise key - update sequence counters only;
	 * note that this assumes no TDLS sessions are active
	 */
	if (sta) {
		struct ieee80211_key_seq seq = {};
		union iwl_all_tsc_rsc *sc =
			&data->status->gtk[0].rsc.all_tsc_rsc;

		if (data->find_phase)
			return;

		switch (key->cipher) {
		case WLAN_CIPHER_SUITE_CCMP:
		case WLAN_CIPHER_SUITE_GCMP:
		case WLAN_CIPHER_SUITE_GCMP_256:
			iwl_mvm_set_aes_rx_seq(data->mvm, sc->aes.unicast_rsc,
					       sta, key);
			atomic64_set(&key->tx_pn, le64_to_cpu(sc->aes.tsc.pn));
			break;
		case WLAN_CIPHER_SUITE_TKIP:
			iwl_mvm_tkip_sc_to_seq(&sc->tkip.tsc, &seq);
			iwl_mvm_set_tkip_rx_seq(sc->tkip.unicast_rsc, key);
			atomic64_set(&key->tx_pn,
				     (u64)seq.tkip.iv16 |
				     ((u64)seq.tkip.iv32 << 16));
			break;
		}

		/* that's it for this key */
		return;
	}

	if (data->find_phase) {
		data->last_gtk = key;
		data->cipher = key->cipher;
		return;
	}
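
	/*
	 * Group keys are handled in two passes (see
	 * iwl_mvm_setup_connection_keep()): the first pass, with find_phase
	 * set, only records the last GTK installed by mac80211 and its
	 * cipher; the second pass then either removes GTKs that became stale
	 * because the firmware rekeyed while we were asleep, or updates the
	 * RX sequence counters of the GTK that is still in use.
	 */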
1457 1458 if (data->status->num_of_gtk_rekeys) 1459 ieee80211_remove_key(key); 1460 else if (data->last_gtk == key) 1461 iwl_mvm_set_key_rx_seq(data->mvm, key, data->status); 1462 } 1463 1464 static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, 1465 struct ieee80211_vif *vif, 1466 struct iwl_wowlan_status *status) 1467 { 1468 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1469 struct iwl_mvm_d3_gtk_iter_data gtkdata = { 1470 .mvm = mvm, 1471 .status = status, 1472 }; 1473 u32 disconnection_reasons = 1474 IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON | 1475 IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH; 1476 1477 if (!status || !vif->bss_conf.bssid) 1478 return false; 1479 1480 if (le32_to_cpu(status->wakeup_reasons) & disconnection_reasons) 1481 return false; 1482 1483 /* find last GTK that we used initially, if any */ 1484 gtkdata.find_phase = true; 1485 ieee80211_iter_keys(mvm->hw, vif, 1486 iwl_mvm_d3_update_keys, >kdata); 1487 /* not trying to keep connections with MFP/unhandled ciphers */ 1488 if (gtkdata.unhandled_cipher) 1489 return false; 1490 if (!gtkdata.num_keys) 1491 goto out; 1492 if (!gtkdata.last_gtk) 1493 return false; 1494 1495 /* 1496 * invalidate all other GTKs that might still exist and update 1497 * the one that we used 1498 */ 1499 gtkdata.find_phase = false; 1500 ieee80211_iter_keys(mvm->hw, vif, 1501 iwl_mvm_d3_update_keys, >kdata); 1502 1503 IWL_DEBUG_WOWLAN(mvm, "num of GTK rekeying %d\n", 1504 le32_to_cpu(status->num_of_gtk_rekeys)); 1505 if (status->num_of_gtk_rekeys) { 1506 struct ieee80211_key_conf *key; 1507 struct { 1508 struct ieee80211_key_conf conf; 1509 u8 key[32]; 1510 } conf = { 1511 .conf.cipher = gtkdata.cipher, 1512 .conf.keyidx = 1513 iwlmvm_wowlan_gtk_idx(&status->gtk[0]), 1514 }; 1515 __be64 replay_ctr; 1516 1517 IWL_DEBUG_WOWLAN(mvm, 1518 "Received from FW GTK cipher %d, key index %d\n", 1519 conf.conf.cipher, conf.conf.keyidx); 1520 switch (gtkdata.cipher) { 1521 case WLAN_CIPHER_SUITE_CCMP: 1522 case WLAN_CIPHER_SUITE_GCMP: 1523 BUILD_BUG_ON(WLAN_KEY_LEN_CCMP != WLAN_KEY_LEN_GCMP); 1524 BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_CCMP); 1525 conf.conf.keylen = WLAN_KEY_LEN_CCMP; 1526 memcpy(conf.conf.key, status->gtk[0].key, 1527 WLAN_KEY_LEN_CCMP); 1528 break; 1529 case WLAN_CIPHER_SUITE_GCMP_256: 1530 BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_GCMP_256); 1531 conf.conf.keylen = WLAN_KEY_LEN_GCMP_256; 1532 memcpy(conf.conf.key, status->gtk[0].key, 1533 WLAN_KEY_LEN_GCMP_256); 1534 break; 1535 case WLAN_CIPHER_SUITE_TKIP: 1536 BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_TKIP); 1537 conf.conf.keylen = WLAN_KEY_LEN_TKIP; 1538 memcpy(conf.conf.key, status->gtk[0].key, 16); 1539 /* leave TX MIC key zeroed, we don't use it anyway */ 1540 memcpy(conf.conf.key + 1541 NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY, 1542 status->gtk[0].tkip_mic_key, 8); 1543 break; 1544 } 1545 1546 key = ieee80211_gtk_rekey_add(vif, &conf.conf); 1547 if (IS_ERR(key)) 1548 return false; 1549 iwl_mvm_set_key_rx_seq(mvm, key, status); 1550 1551 replay_ctr = 1552 cpu_to_be64(le64_to_cpu(status->replay_ctr)); 1553 1554 ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid, 1555 (void *)&replay_ctr, GFP_KERNEL); 1556 } 1557 1558 out: 1559 if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, 1560 WOWLAN_GET_STATUSES, 0) < 10) { 1561 mvmvif->seqno_valid = true; 1562 /* +0x10 because the set API expects next-to-use, not last-used */ 1563 mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10; 1564 } 1565 1566 return true; 1567 } 1568 1569 /* Occasionally, 
templates would be nice. This is one of those times ... */ 1570 #define iwl_mvm_parse_wowlan_status_common(_ver) \ 1571 static struct iwl_wowlan_status * \ 1572 iwl_mvm_parse_wowlan_status_common_ ## _ver(struct iwl_mvm *mvm, \ 1573 void *_data, int len) \ 1574 { \ 1575 struct iwl_wowlan_status *status; \ 1576 struct iwl_wowlan_status_ ##_ver *data = _data; \ 1577 int data_size; \ 1578 \ 1579 if (len < sizeof(*data)) { \ 1580 IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); \ 1581 return ERR_PTR(-EIO); \ 1582 } \ 1583 \ 1584 data_size = ALIGN(le32_to_cpu(data->wake_packet_bufsize), 4); \ 1585 if (len != sizeof(*data) + data_size) { \ 1586 IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); \ 1587 return ERR_PTR(-EIO); \ 1588 } \ 1589 \ 1590 status = kzalloc(sizeof(*status) + data_size, GFP_KERNEL); \ 1591 if (!status) \ 1592 return ERR_PTR(-ENOMEM); \ 1593 \ 1594 /* copy all the common fields */ \ 1595 status->replay_ctr = data->replay_ctr; \ 1596 status->pattern_number = data->pattern_number; \ 1597 status->non_qos_seq_ctr = data->non_qos_seq_ctr; \ 1598 memcpy(status->qos_seq_ctr, data->qos_seq_ctr, \ 1599 sizeof(status->qos_seq_ctr)); \ 1600 status->wakeup_reasons = data->wakeup_reasons; \ 1601 status->num_of_gtk_rekeys = data->num_of_gtk_rekeys; \ 1602 status->received_beacons = data->received_beacons; \ 1603 status->wake_packet_length = data->wake_packet_length; \ 1604 status->wake_packet_bufsize = data->wake_packet_bufsize; \ 1605 memcpy(status->wake_packet, data->wake_packet, \ 1606 le32_to_cpu(status->wake_packet_bufsize)); \ 1607 \ 1608 return status; \ 1609 } 1610 1611 iwl_mvm_parse_wowlan_status_common(v6) 1612 iwl_mvm_parse_wowlan_status_common(v7) 1613 iwl_mvm_parse_wowlan_status_common(v9) 1614 1615 static struct iwl_wowlan_status * 1616 iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id) 1617 { 1618 struct iwl_wowlan_status *status; 1619 struct iwl_wowlan_get_status_cmd get_status_cmd = { 1620 .sta_id = cpu_to_le32(sta_id), 1621 }; 1622 struct iwl_host_cmd cmd = { 1623 .id = WOWLAN_GET_STATUSES, 1624 .flags = CMD_WANT_SKB, 1625 .data = { &get_status_cmd, }, 1626 .len = { sizeof(get_status_cmd), }, 1627 }; 1628 int ret, len; 1629 u8 notif_ver; 1630 u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, 1631 WOWLAN_GET_STATUSES, 1632 IWL_FW_CMD_VER_UNKNOWN); 1633 1634 if (cmd_ver == IWL_FW_CMD_VER_UNKNOWN) 1635 cmd.len[0] = 0; 1636 1637 lockdep_assert_held(&mvm->mutex); 1638 1639 ret = iwl_mvm_send_cmd(mvm, &cmd); 1640 if (ret) { 1641 IWL_ERR(mvm, "failed to query wakeup status (%d)\n", ret); 1642 return ERR_PTR(ret); 1643 } 1644 1645 len = iwl_rx_packet_payload_len(cmd.resp_pkt); 1646 1647 /* default to 7 (when we have IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL) */ 1648 notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, 1649 WOWLAN_GET_STATUSES, 0); 1650 if (!notif_ver) 1651 notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, 1652 WOWLAN_GET_STATUSES, 7); 1653 1654 if (!fw_has_api(&mvm->fw->ucode_capa, 1655 IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL)) { 1656 struct iwl_wowlan_status_v6 *v6 = (void *)cmd.resp_pkt->data; 1657 1658 status = iwl_mvm_parse_wowlan_status_common_v6(mvm, 1659 cmd.resp_pkt->data, 1660 len); 1661 if (IS_ERR(status)) 1662 goto out_free_resp; 1663 1664 BUILD_BUG_ON(sizeof(v6->gtk.decrypt_key) > 1665 sizeof(status->gtk[0].key)); 1666 BUILD_BUG_ON(sizeof(v6->gtk.tkip_mic_key) > 1667 sizeof(status->gtk[0].tkip_mic_key)); 1668 1669 /* copy GTK info to the right place */ 1670 memcpy(status->gtk[0].key, v6->gtk.decrypt_key, 1671 
sizeof(v6->gtk.decrypt_key)); 1672 memcpy(status->gtk[0].tkip_mic_key, v6->gtk.tkip_mic_key, 1673 sizeof(v6->gtk.tkip_mic_key)); 1674 memcpy(&status->gtk[0].rsc, &v6->gtk.rsc, 1675 sizeof(status->gtk[0].rsc)); 1676 1677 /* hardcode the key length to 16 since v6 only supports 16 */ 1678 status->gtk[0].key_len = 16; 1679 1680 /* 1681 * The key index only uses 2 bits (values 0 to 3) and 1682 * we always set bit 7 which means this is the 1683 * currently used key. 1684 */ 1685 status->gtk[0].key_flags = v6->gtk.key_index | BIT(7); 1686 } else if (notif_ver == 7) { 1687 struct iwl_wowlan_status_v7 *v7 = (void *)cmd.resp_pkt->data; 1688 1689 status = iwl_mvm_parse_wowlan_status_common_v7(mvm, 1690 cmd.resp_pkt->data, 1691 len); 1692 if (IS_ERR(status)) 1693 goto out_free_resp; 1694 1695 status->gtk[0] = v7->gtk[0]; 1696 status->igtk[0] = v7->igtk[0]; 1697 } else if (notif_ver == 9 || notif_ver == 10) { 1698 struct iwl_wowlan_status_v9 *v9 = (void *)cmd.resp_pkt->data; 1699 1700 status = iwl_mvm_parse_wowlan_status_common_v9(mvm, 1701 cmd.resp_pkt->data, 1702 len); 1703 if (IS_ERR(status)) 1704 goto out_free_resp; 1705 1706 status->gtk[0] = v9->gtk[0]; 1707 status->igtk[0] = v9->igtk[0]; 1708 1709 status->tid_tear_down = v9->tid_tear_down; 1710 } else { 1711 IWL_ERR(mvm, 1712 "Firmware advertises unknown WoWLAN status response %d!\n", 1713 notif_ver); 1714 status = ERR_PTR(-EIO); 1715 } 1716 1717 out_free_resp: 1718 iwl_free_resp(&cmd); 1719 return status; 1720 } 1721 1722 static struct iwl_wowlan_status * 1723 iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, u8 sta_id) 1724 { 1725 u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, 1726 OFFLOADS_QUERY_CMD, 1727 IWL_FW_CMD_VER_UNKNOWN); 1728 __le32 station_id = cpu_to_le32(sta_id); 1729 u32 cmd_size = cmd_ver != IWL_FW_CMD_VER_UNKNOWN ? 
sizeof(station_id) : 0; 1730 1731 if (!mvm->net_detect) { 1732 /* only for tracing for now */ 1733 int ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0, 1734 cmd_size, &station_id); 1735 if (ret) 1736 IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret); 1737 } 1738 1739 return iwl_mvm_send_wowlan_get_status(mvm, sta_id); 1740 } 1741 1742 /* releases the MVM mutex */ 1743 static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, 1744 struct ieee80211_vif *vif) 1745 { 1746 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1747 struct iwl_wowlan_status_data status; 1748 struct iwl_wowlan_status *fw_status; 1749 int i; 1750 bool keep; 1751 struct iwl_mvm_sta *mvm_ap_sta; 1752 1753 fw_status = iwl_mvm_get_wakeup_status(mvm, mvmvif->ap_sta_id); 1754 if (IS_ERR_OR_NULL(fw_status)) 1755 goto out_unlock; 1756 1757 IWL_DEBUG_WOWLAN(mvm, "wakeup reason 0x%x\n", 1758 le32_to_cpu(fw_status->wakeup_reasons)); 1759 1760 status.pattern_number = le16_to_cpu(fw_status->pattern_number); 1761 for (i = 0; i < 8; i++) 1762 status.qos_seq_ctr[i] = 1763 le16_to_cpu(fw_status->qos_seq_ctr[i]); 1764 status.wakeup_reasons = le32_to_cpu(fw_status->wakeup_reasons); 1765 status.wake_packet_length = 1766 le32_to_cpu(fw_status->wake_packet_length); 1767 status.wake_packet_bufsize = 1768 le32_to_cpu(fw_status->wake_packet_bufsize); 1769 status.wake_packet = fw_status->wake_packet; 1770 1771 /* still at hard-coded place 0 for D3 image */ 1772 mvm_ap_sta = iwl_mvm_sta_from_staid_protected(mvm, 0); 1773 if (!mvm_ap_sta) 1774 goto out_free; 1775 1776 for (i = 0; i < IWL_MAX_TID_COUNT; i++) { 1777 u16 seq = status.qos_seq_ctr[i]; 1778 /* firmware stores last-used value, we store next value */ 1779 seq += 0x10; 1780 mvm_ap_sta->tid_data[i].seq_number = seq; 1781 } 1782 1783 if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) { 1784 i = mvm->offload_tid; 1785 iwl_trans_set_q_ptrs(mvm->trans, 1786 mvm_ap_sta->tid_data[i].txq_id, 1787 mvm_ap_sta->tid_data[i].seq_number >> 4); 1788 } 1789 1790 /* now we have all the data we need, unlock to avoid mac80211 issues */ 1791 mutex_unlock(&mvm->mutex); 1792 1793 iwl_mvm_report_wakeup_reasons(mvm, vif, &status); 1794 1795 keep = iwl_mvm_setup_connection_keep(mvm, vif, fw_status); 1796 1797 kfree(fw_status); 1798 return keep; 1799 1800 out_free: 1801 kfree(fw_status); 1802 out_unlock: 1803 mutex_unlock(&mvm->mutex); 1804 return false; 1805 } 1806 1807 #define ND_QUERY_BUF_LEN (sizeof(struct iwl_scan_offload_profile_match) * \ 1808 IWL_SCAN_MAX_PROFILES) 1809 1810 struct iwl_mvm_nd_query_results { 1811 u32 matched_profiles; 1812 u8 matches[ND_QUERY_BUF_LEN]; 1813 }; 1814 1815 static int 1816 iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm, 1817 struct iwl_mvm_nd_query_results *results) 1818 { 1819 struct iwl_scan_offload_profiles_query *query; 1820 struct iwl_host_cmd cmd = { 1821 .id = SCAN_OFFLOAD_PROFILES_QUERY_CMD, 1822 .flags = CMD_WANT_SKB, 1823 }; 1824 int ret, len; 1825 size_t query_len, matches_len; 1826 int max_profiles = iwl_umac_scan_get_max_profiles(mvm->fw); 1827 1828 ret = iwl_mvm_send_cmd(mvm, &cmd); 1829 if (ret) { 1830 IWL_ERR(mvm, "failed to query matched profiles (%d)\n", ret); 1831 return ret; 1832 } 1833 1834 if (fw_has_api(&mvm->fw->ucode_capa, 1835 IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) { 1836 query_len = sizeof(struct iwl_scan_offload_profiles_query); 1837 matches_len = sizeof(struct iwl_scan_offload_profile_match) * 1838 max_profiles; 1839 } else { 1840 query_len = sizeof(struct iwl_scan_offload_profiles_query_v1); 
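		/*
		 * Firmware without IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS
		 * reports matches in the v1 layout, whose per-profile
		 * matching_channels bitmap is only
		 * SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1 bytes long; apart
		 * from that, the v1 and current match entries are parsed
		 * the same way (see iwl_mvm_query_set_freqs()).
		 */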
1841 matches_len = sizeof(struct iwl_scan_offload_profile_match_v1) * 1842 max_profiles; 1843 } 1844 1845 len = iwl_rx_packet_payload_len(cmd.resp_pkt); 1846 if (len < query_len) { 1847 IWL_ERR(mvm, "Invalid scan offload profiles query response!\n"); 1848 ret = -EIO; 1849 goto out_free_resp; 1850 } 1851 1852 query = (void *)cmd.resp_pkt->data; 1853 1854 results->matched_profiles = le32_to_cpu(query->matched_profiles); 1855 memcpy(results->matches, query->matches, matches_len); 1856 1857 #ifdef CONFIG_IWLWIFI_DEBUGFS 1858 mvm->last_netdetect_scans = le32_to_cpu(query->n_scans_done); 1859 #endif 1860 1861 out_free_resp: 1862 iwl_free_resp(&cmd); 1863 return ret; 1864 } 1865 1866 static int iwl_mvm_query_num_match_chans(struct iwl_mvm *mvm, 1867 struct iwl_mvm_nd_query_results *query, 1868 int idx) 1869 { 1870 int n_chans = 0, i; 1871 1872 if (fw_has_api(&mvm->fw->ucode_capa, 1873 IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) { 1874 struct iwl_scan_offload_profile_match *matches = 1875 (struct iwl_scan_offload_profile_match *)query->matches; 1876 1877 for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; i++) 1878 n_chans += hweight8(matches[idx].matching_channels[i]); 1879 } else { 1880 struct iwl_scan_offload_profile_match_v1 *matches = 1881 (struct iwl_scan_offload_profile_match_v1 *)query->matches; 1882 1883 for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1; i++) 1884 n_chans += hweight8(matches[idx].matching_channels[i]); 1885 } 1886 1887 return n_chans; 1888 } 1889 1890 static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm, 1891 struct iwl_mvm_nd_query_results *query, 1892 struct cfg80211_wowlan_nd_match *match, 1893 int idx) 1894 { 1895 int i; 1896 1897 if (fw_has_api(&mvm->fw->ucode_capa, 1898 IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) { 1899 struct iwl_scan_offload_profile_match *matches = 1900 (struct iwl_scan_offload_profile_match *)query->matches; 1901 1902 for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; i++) 1903 if (matches[idx].matching_channels[i / 8] & (BIT(i % 8))) 1904 match->channels[match->n_channels++] = 1905 mvm->nd_channels[i]->center_freq; 1906 } else { 1907 struct iwl_scan_offload_profile_match_v1 *matches = 1908 (struct iwl_scan_offload_profile_match_v1 *)query->matches; 1909 1910 for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1 * 8; i++) 1911 if (matches[idx].matching_channels[i / 8] & (BIT(i % 8))) 1912 match->channels[match->n_channels++] = 1913 mvm->nd_channels[i]->center_freq; 1914 } 1915 } 1916 1917 static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm, 1918 struct ieee80211_vif *vif) 1919 { 1920 struct cfg80211_wowlan_nd_info *net_detect = NULL; 1921 struct cfg80211_wowlan_wakeup wakeup = { 1922 .pattern_idx = -1, 1923 }; 1924 struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup; 1925 struct iwl_mvm_nd_query_results query; 1926 struct iwl_wowlan_status *fw_status; 1927 unsigned long matched_profiles; 1928 u32 reasons = 0; 1929 int i, n_matches, ret; 1930 1931 fw_status = iwl_mvm_get_wakeup_status(mvm, IWL_MVM_INVALID_STA); 1932 if (!IS_ERR_OR_NULL(fw_status)) { 1933 reasons = le32_to_cpu(fw_status->wakeup_reasons); 1934 kfree(fw_status); 1935 } 1936 1937 if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED) 1938 wakeup.rfkill_release = true; 1939 1940 if (reasons != IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) 1941 goto out; 1942 1943 ret = iwl_mvm_netdetect_query_results(mvm, &query); 1944 if (ret || !query.matched_profiles) { 1945 wakeup_report = NULL; 1946 goto out; 1947 } 1948 1949 matched_profiles = query.matched_profiles; 1950 if 
	if (mvm->n_nd_match_sets) {
		n_matches = hweight_long(matched_profiles);
	} else {
		IWL_ERR(mvm, "no net detect match information available\n");
		n_matches = 0;
	}

	net_detect = kzalloc(struct_size(net_detect, matches, n_matches),
			     GFP_KERNEL);
	if (!net_detect || !n_matches)
		goto out_report_nd;

	for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) {
		struct cfg80211_wowlan_nd_match *match;
		int idx, n_channels = 0;

		n_channels = iwl_mvm_query_num_match_chans(mvm, &query, i);

		match = kzalloc(struct_size(match, channels, n_channels),
				GFP_KERNEL);
		if (!match)
			goto out_report_nd;

		net_detect->matches[net_detect->n_matches++] = match;

		/* We inverted the order of the SSIDs in the scan
		 * request, so invert the index here.
		 */
		idx = mvm->n_nd_match_sets - i - 1;
		match->ssid.ssid_len = mvm->nd_match_sets[idx].ssid.ssid_len;
		memcpy(match->ssid.ssid, mvm->nd_match_sets[idx].ssid.ssid,
		       match->ssid.ssid_len);

		if (mvm->n_nd_channels < n_channels)
			continue;

		iwl_mvm_query_set_freqs(mvm, &query, match, i);
	}

out_report_nd:
	wakeup.net_detect = net_detect;
out:
	iwl_mvm_free_nd(mvm);

	mutex_unlock(&mvm->mutex);
	ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);

	if (net_detect) {
		for (i = 0; i < net_detect->n_matches; i++)
			kfree(net_detect->matches[i]);
		kfree(net_detect);
	}
}

static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
				       struct ieee80211_vif *vif)
{
	/* skip the one we keep connection on */
	if (data == vif)
		return;

	if (vif->type == NL80211_IFTYPE_STATION)
		ieee80211_resume_disconnect(vif);
}

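/*
 * Read the beginning of a firmware error event table and report whether it
 * contains a valid error; if so, optionally return the error ID so that the
 * caller can tell an RF-kill wakeup apart from a real firmware assert.
 */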
static bool iwl_mvm_rt_status(struct iwl_trans *trans, u32 base, u32 *err_id)
{
	struct error_table_start {
		/* cf. struct iwl_error_event_table */
		u32 valid;
		__le32 err_id;
	} err_info;

	if (!base)
		return false;

	iwl_trans_read_mem_bytes(trans, base,
				 &err_info, sizeof(err_info));
	if (err_info.valid && err_id)
		*err_id = le32_to_cpu(err_info.err_id);

	return !!err_info.valid;
}

static bool iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
				    struct ieee80211_vif *vif)
{
	u32 err_id;

	/* check for lmac1 error */
	if (iwl_mvm_rt_status(mvm->trans,
			      mvm->trans->dbg.lmac_error_event_table[0],
			      &err_id)) {
		if (err_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
			struct cfg80211_wowlan_wakeup wakeup = {
				.rfkill_release = true,
			};
			ieee80211_report_wowlan_wakeup(vif, &wakeup,
						       GFP_KERNEL);
		}
		return true;
	}

	/* check if we have lmac2 set and check for error */
	if (iwl_mvm_rt_status(mvm->trans,
			      mvm->trans->dbg.lmac_error_event_table[1], NULL))
		return true;

	/* check for umac error */
	if (iwl_mvm_rt_status(mvm->trans,
			      mvm->trans->dbg.umac_error_event_table, NULL))
		return true;

	return false;
}

static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
{
	struct ieee80211_vif *vif = NULL;
	int ret = 1;
	enum iwl_d3_status d3_status;
	bool keep = false;
	bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
					 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
	bool d0i3_first = fw_has_capa(&mvm->fw->ucode_capa,
				      IWL_UCODE_TLV_CAPA_D0I3_END_FIRST);

	mutex_lock(&mvm->mutex);

	mvm->last_reset_or_resume_time_jiffies = jiffies;

	/* get the BSS vif pointer again */
	vif = iwl_mvm_get_bss_vif(mvm);
	if (IS_ERR_OR_NULL(vif))
		goto err;

	iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);

	if (iwl_mvm_check_rt_status(mvm, vif)) {
		set_bit(STATUS_FW_ERROR, &mvm->trans->status);
		iwl_mvm_dump_nic_error_log(mvm);
		iwl_dbg_tlv_time_point(&mvm->fwrt,
				       IWL_FW_INI_TIME_POINT_FW_ASSERT, NULL);
		iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
					false, 0);
		ret = 1;
		mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
		goto err;
	}

	ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !unified_image);
	if (ret)
		goto err;

	if (d3_status != IWL_D3_STATUS_ALIVE) {
		IWL_INFO(mvm, "Device was reset during suspend\n");
		goto err;
	}

	if (d0i3_first) {
		struct iwl_host_cmd cmd = {
			.id = D0I3_END_CMD,
			.flags = CMD_WANT_SKB | CMD_SEND_IN_D3,
		};
		int len;

		ret = iwl_mvm_send_cmd(mvm, &cmd);
		if (ret < 0) {
			IWL_ERR(mvm, "Failed to send D0I3_END_CMD first (%d)\n",
				ret);
			goto err;
		}
		switch (mvm->cmd_ver.d0i3_resp) {
		case 0:
			break;
		case 1:
			len = iwl_rx_packet_payload_len(cmd.resp_pkt);
			if (len != sizeof(u32)) {
				IWL_ERR(mvm,
					"Error with D0I3_END_CMD response size (%d)\n",
					len);
				goto err;
			}
			if (IWL_D0I3_RESET_REQUIRE &
			    le32_to_cpu(*(__le32 *)cmd.resp_pkt->data)) {
				iwl_write32(mvm->trans, CSR_RESET,
					    CSR_RESET_REG_FLAG_FORCE_NMI);
				iwl_free_resp(&cmd);
			}
			break;
		default:
			WARN_ON(1);
		}
	}

	mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;

	/*
	 * Query the current location and source from the D3 firmware so we
	 * can play it back when we re-initialize the D0 firmware
	 */
	iwl_mvm_update_changed_regdom(mvm);

	/* Re-configure PPAG settings */
	iwl_mvm_ppag_send_cmd(mvm);

	if (!unified_image)
		/* Re-configure default SAR profile */
		iwl_mvm_sar_select_profile(mvm, 1, 1);

	if (mvm->net_detect) {
		/* If this is a non-unified image, we restart the FW,
		 * so no need to stop the netdetect scan. If that
		 * fails, continue and try to get the wake-up reasons,
		 * but trigger a HW restart by keeping a failure code
		 * in ret.
		 */
		if (unified_image)
			ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_NETDETECT,
						false);

		iwl_mvm_query_netdetect_reasons(mvm, vif);
		/* has unlocked the mutex, so skip that */
		goto out;
	} else {
		keep = iwl_mvm_query_wakeup_reasons(mvm, vif);
#ifdef CONFIG_IWLWIFI_DEBUGFS
		if (keep)
			mvm->keep_vif = vif;
#endif
		/* has unlocked the mutex, so skip that */
		goto out_iterate;
	}

err:
	iwl_mvm_free_nd(mvm);
	mutex_unlock(&mvm->mutex);

out_iterate:
	if (!test)
		ieee80211_iterate_active_interfaces_mtx(mvm->hw,
							IEEE80211_IFACE_ITER_NORMAL,
							iwl_mvm_d3_disconnect_iter,
							keep ? vif : NULL);

out:
	clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);

	/* no need to reset the device in unified images, if successful */
	if (unified_image && !ret) {
		/* nothing else to do if we already sent D0I3_END_CMD */
		if (d0i3_first)
			return 0;

		ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL);
		if (!ret)
			return 0;
	}

	/*
	 * Reconfigure the device in one of the following cases:
	 * 1. We are not using a unified image
	 * 2. We are using a unified image but had an error while exiting D3
	 */
	set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);

	return 1;
}

int iwl_mvm_resume(struct ieee80211_hw *hw)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	int ret;

	ret = __iwl_mvm_resume(mvm, false);

	iwl_mvm_resume_tcm(mvm);

	iwl_fw_runtime_resume(&mvm->fwrt);

	return ret;
}

void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	device_set_wakeup_enable(mvm->trans->dev, enabled);
}

#ifdef CONFIG_IWLWIFI_DEBUGFS
static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
{
	struct iwl_mvm *mvm = inode->i_private;
	int err;

	if (mvm->d3_test_active)
		return -EBUSY;

	file->private_data = inode->i_private;

	iwl_mvm_pause_tcm(mvm, true);

	iwl_fw_runtime_suspend(&mvm->fwrt);

	/* start pseudo D3 */
	rtnl_lock();
	err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true);
	rtnl_unlock();
	if (err > 0)
		err = -EINVAL;
	if (err)
		return err;

	mvm->d3_test_active = true;
	mvm->keep_vif = NULL;
	return 0;
}

static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf,
				    size_t count, loff_t *ppos)
{
	struct iwl_mvm *mvm = file->private_data;
	u32 pme_asserted;

	while (true) {
		/* read pme_ptr if available */
		if (mvm->d3_test_pme_ptr) {
			pme_asserted = iwl_trans_read_mem32(mvm->trans,
							    mvm->d3_test_pme_ptr);
			if (pme_asserted)
				break;
		}

		if (msleep_interruptible(100))
			break;
	}

	return 0;
}

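/*
 * Interface iterator used when the d3_test debugfs file is released: report
 * connection loss on every station interface except the one (if any) whose
 * connection was kept across the pseudo-D3 test.
 */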
static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac,
					      struct ieee80211_vif *vif)
{
	/* skip the one we keep connection on */
	if (_data == vif)
		return;

	if (vif->type == NL80211_IFTYPE_STATION)
		ieee80211_connection_loss(vif);
}

static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
{
	struct iwl_mvm *mvm = inode->i_private;
	bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
					 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);

	mvm->d3_test_active = false;

	iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);

	rtnl_lock();
	__iwl_mvm_resume(mvm, true);
	rtnl_unlock();

	iwl_mvm_resume_tcm(mvm);

	iwl_fw_runtime_resume(&mvm->fwrt);

	iwl_abort_notification_waits(&mvm->notif_wait);
	if (!unified_image) {
		int remaining_time = 10;

		ieee80211_restart_hw(mvm->hw);

		/* wait for restart and disconnect all interfaces */
		while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
		       remaining_time > 0) {
			remaining_time--;
			msleep(1000);
		}

		if (remaining_time == 0)
			IWL_ERR(mvm, "Timed out waiting for HW restart!\n");
	}

	ieee80211_iterate_active_interfaces_atomic(
		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
		iwl_mvm_d3_test_disconn_work_iter, mvm->keep_vif);

	return 0;
}

const struct file_operations iwl_dbgfs_d3_test_ops = {
	.llseek = no_llseek,
	.open = iwl_mvm_d3_test_open,
	.read = iwl_mvm_d3_test_read,
	.release = iwl_mvm_d3_test_release,
};
#endif