1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* 3 * Copyright (C) 2012-2014, 2018-2020 Intel Corporation 4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH 5 * Copyright (C) 2016-2017 Intel Deutschland GmbH 6 */ 7 #include <linux/etherdevice.h> 8 #include <linux/ip.h> 9 #include <linux/fs.h> 10 #include <net/cfg80211.h> 11 #include <net/ipv6.h> 12 #include <net/tcp.h> 13 #include <net/addrconf.h> 14 #include "iwl-modparams.h" 15 #include "fw-api.h" 16 #include "mvm.h" 17 #include "fw/img.h" 18 19 void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw, 20 struct ieee80211_vif *vif, 21 struct cfg80211_gtk_rekey_data *data) 22 { 23 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 24 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 25 26 mutex_lock(&mvm->mutex); 27 28 mvmvif->rekey_data.kek_len = data->kek_len; 29 mvmvif->rekey_data.kck_len = data->kck_len; 30 memcpy(mvmvif->rekey_data.kek, data->kek, data->kek_len); 31 memcpy(mvmvif->rekey_data.kck, data->kck, data->kck_len); 32 mvmvif->rekey_data.akm = data->akm & 0xFF; 33 mvmvif->rekey_data.replay_ctr = 34 cpu_to_le64(be64_to_cpup((__be64 *)data->replay_ctr)); 35 mvmvif->rekey_data.valid = true; 36 37 mutex_unlock(&mvm->mutex); 38 } 39 40 #if IS_ENABLED(CONFIG_IPV6) 41 void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw, 42 struct ieee80211_vif *vif, 43 struct inet6_dev *idev) 44 { 45 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 46 struct inet6_ifaddr *ifa; 47 int idx = 0; 48 49 memset(mvmvif->tentative_addrs, 0, sizeof(mvmvif->tentative_addrs)); 50 51 read_lock_bh(&idev->lock); 52 list_for_each_entry(ifa, &idev->addr_list, if_list) { 53 mvmvif->target_ipv6_addrs[idx] = ifa->addr; 54 if (ifa->flags & IFA_F_TENTATIVE) 55 __set_bit(idx, mvmvif->tentative_addrs); 56 idx++; 57 if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX) 58 break; 59 } 60 read_unlock_bh(&idev->lock); 61 62 mvmvif->num_target_ipv6_addrs = idx; 63 } 64 #endif 65 66 void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw, 67 struct ieee80211_vif *vif, int idx) 68 { 69 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 70 71 mvmvif->tx_key_idx = idx; 72 } 73 74 static void iwl_mvm_convert_p1k(u16 *p1k, __le16 *out) 75 { 76 int i; 77 78 for (i = 0; i < IWL_P1K_SIZE; i++) 79 out[i] = cpu_to_le16(p1k[i]); 80 } 81 82 static const u8 *iwl_mvm_find_max_pn(struct ieee80211_key_conf *key, 83 struct iwl_mvm_key_pn *ptk_pn, 84 struct ieee80211_key_seq *seq, 85 int tid, int queues) 86 { 87 const u8 *ret = seq->ccmp.pn; 88 int i; 89 90 /* get the PN from mac80211, used on the default queue */ 91 ieee80211_get_key_rx_seq(key, tid, seq); 92 93 /* and use the internal data for the other queues */ 94 for (i = 1; i < queues; i++) { 95 const u8 *tmp = ptk_pn->q[i].pn[tid]; 96 97 if (memcmp(ret, tmp, IEEE80211_CCMP_PN_LEN) <= 0) 98 ret = tmp; 99 } 100 101 return ret; 102 } 103 104 struct wowlan_key_data { 105 struct iwl_wowlan_rsc_tsc_params_cmd *rsc_tsc; 106 struct iwl_wowlan_tkip_params_cmd *tkip; 107 struct iwl_wowlan_kek_kck_material_cmd_v3 *kek_kck_cmd; 108 bool error, use_rsc_tsc, use_tkip, configure_keys; 109 int wep_key_idx; 110 }; 111 112 static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, 113 struct ieee80211_vif *vif, 114 struct ieee80211_sta *sta, 115 struct ieee80211_key_conf *key, 116 void *_data) 117 { 118 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 119 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 120 struct wowlan_key_data *data = _data; 121 struct aes_sc *aes_sc, *aes_tx_sc = NULL; 122 
struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL; 123 struct iwl_p1k_cache *rx_p1ks; 124 u8 *rx_mic_key; 125 struct ieee80211_key_seq seq; 126 u32 cur_rx_iv32 = 0; 127 u16 p1k[IWL_P1K_SIZE]; 128 int ret, i; 129 130 switch (key->cipher) { 131 case WLAN_CIPHER_SUITE_WEP40: 132 case WLAN_CIPHER_SUITE_WEP104: { /* hack it for now */ 133 struct { 134 struct iwl_mvm_wep_key_cmd wep_key_cmd; 135 struct iwl_mvm_wep_key wep_key; 136 } __packed wkc = { 137 .wep_key_cmd.mac_id_n_color = 138 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, 139 mvmvif->color)), 140 .wep_key_cmd.num_keys = 1, 141 /* firmware sets STA_KEY_FLG_WEP_13BYTES */ 142 .wep_key_cmd.decryption_type = STA_KEY_FLG_WEP, 143 .wep_key.key_index = key->keyidx, 144 .wep_key.key_size = key->keylen, 145 }; 146 147 /* 148 * This will fail -- the key functions don't set support 149 * pairwise WEP keys. However, that's better than silently 150 * failing WoWLAN. Or maybe not? 151 */ 152 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) 153 break; 154 155 memcpy(&wkc.wep_key.key[3], key->key, key->keylen); 156 if (key->keyidx == mvmvif->tx_key_idx) { 157 /* TX key must be at offset 0 */ 158 wkc.wep_key.key_offset = 0; 159 } else { 160 /* others start at 1 */ 161 data->wep_key_idx++; 162 wkc.wep_key.key_offset = data->wep_key_idx; 163 } 164 165 if (data->configure_keys) { 166 mutex_lock(&mvm->mutex); 167 ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, 0, 168 sizeof(wkc), &wkc); 169 data->error = ret != 0; 170 171 mvm->ptk_ivlen = key->iv_len; 172 mvm->ptk_icvlen = key->icv_len; 173 mvm->gtk_ivlen = key->iv_len; 174 mvm->gtk_icvlen = key->icv_len; 175 mutex_unlock(&mvm->mutex); 176 } 177 178 /* don't upload key again */ 179 return; 180 } 181 default: 182 data->error = true; 183 return; 184 case WLAN_CIPHER_SUITE_BIP_GMAC_256: 185 case WLAN_CIPHER_SUITE_BIP_GMAC_128: 186 data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_GCMP); 187 return; 188 case WLAN_CIPHER_SUITE_AES_CMAC: 189 data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_CCM); 190 /* 191 * Ignore CMAC keys -- the WoWLAN firmware doesn't support them 192 * but we also shouldn't abort suspend due to that. It does have 193 * support for the IGTK key renewal, but doesn't really use the 194 * IGTK for anything. This means we could spuriously wake up or 195 * be deauthenticated, but that was considered acceptable. 196 */ 197 return; 198 case WLAN_CIPHER_SUITE_TKIP: 199 if (sta) { 200 u64 pn64; 201 202 tkip_sc = 203 data->rsc_tsc->params.all_tsc_rsc.tkip.unicast_rsc; 204 tkip_tx_sc = 205 &data->rsc_tsc->params.all_tsc_rsc.tkip.tsc; 206 207 rx_p1ks = data->tkip->rx_uni; 208 209 pn64 = atomic64_read(&key->tx_pn); 210 tkip_tx_sc->iv16 = cpu_to_le16(TKIP_PN_TO_IV16(pn64)); 211 tkip_tx_sc->iv32 = cpu_to_le32(TKIP_PN_TO_IV32(pn64)); 212 213 ieee80211_get_tkip_p1k_iv(key, TKIP_PN_TO_IV32(pn64), 214 p1k); 215 iwl_mvm_convert_p1k(p1k, data->tkip->tx.p1k); 216 217 memcpy(data->tkip->mic_keys.tx, 218 &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY], 219 IWL_MIC_KEY_SIZE); 220 221 rx_mic_key = data->tkip->mic_keys.rx_unicast; 222 } else { 223 tkip_sc = 224 data->rsc_tsc->params.all_tsc_rsc.tkip.multicast_rsc; 225 rx_p1ks = data->tkip->rx_multi; 226 rx_mic_key = data->tkip->mic_keys.rx_mcast; 227 data->kek_kck_cmd->gtk_cipher = 228 cpu_to_le32(STA_KEY_FLG_TKIP); 229 } 230 231 /* 232 * For non-QoS this relies on the fact that both the uCode and 233 * mac80211 use TID 0 (as they need to to avoid replay attacks) 234 * for checking the IV in the frames. 
235 */ 236 for (i = 0; i < IWL_NUM_RSC; i++) { 237 ieee80211_get_key_rx_seq(key, i, &seq); 238 tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16); 239 tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32); 240 /* wrapping isn't allowed, AP must rekey */ 241 if (seq.tkip.iv32 > cur_rx_iv32) 242 cur_rx_iv32 = seq.tkip.iv32; 243 } 244 245 ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid, 246 cur_rx_iv32, p1k); 247 iwl_mvm_convert_p1k(p1k, rx_p1ks[0].p1k); 248 ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid, 249 cur_rx_iv32 + 1, p1k); 250 iwl_mvm_convert_p1k(p1k, rx_p1ks[1].p1k); 251 252 memcpy(rx_mic_key, 253 &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY], 254 IWL_MIC_KEY_SIZE); 255 256 data->use_tkip = true; 257 data->use_rsc_tsc = true; 258 break; 259 case WLAN_CIPHER_SUITE_CCMP: 260 case WLAN_CIPHER_SUITE_GCMP: 261 case WLAN_CIPHER_SUITE_GCMP_256: 262 if (sta) { 263 u64 pn64; 264 265 aes_sc = 266 data->rsc_tsc->params.all_tsc_rsc.aes.unicast_rsc; 267 aes_tx_sc = 268 &data->rsc_tsc->params.all_tsc_rsc.aes.tsc; 269 270 pn64 = atomic64_read(&key->tx_pn); 271 aes_tx_sc->pn = cpu_to_le64(pn64); 272 } else { 273 aes_sc = 274 data->rsc_tsc->params.all_tsc_rsc.aes.multicast_rsc; 275 data->kek_kck_cmd->gtk_cipher = 276 key->cipher == WLAN_CIPHER_SUITE_CCMP ? 277 cpu_to_le32(STA_KEY_FLG_CCM) : 278 cpu_to_le32(STA_KEY_FLG_GCMP); 279 } 280 281 /* 282 * For non-QoS this relies on the fact that both the uCode and 283 * mac80211/our RX code use TID 0 for checking the PN. 284 */ 285 if (sta && iwl_mvm_has_new_rx_api(mvm)) { 286 struct iwl_mvm_sta *mvmsta; 287 struct iwl_mvm_key_pn *ptk_pn; 288 const u8 *pn; 289 290 mvmsta = iwl_mvm_sta_from_mac80211(sta); 291 rcu_read_lock(); 292 ptk_pn = rcu_dereference(mvmsta->ptk_pn[key->keyidx]); 293 if (WARN_ON(!ptk_pn)) { 294 rcu_read_unlock(); 295 break; 296 } 297 298 for (i = 0; i < IWL_MAX_TID_COUNT; i++) { 299 pn = iwl_mvm_find_max_pn(key, ptk_pn, &seq, i, 300 mvm->trans->num_rx_queues); 301 aes_sc[i].pn = cpu_to_le64((u64)pn[5] | 302 ((u64)pn[4] << 8) | 303 ((u64)pn[3] << 16) | 304 ((u64)pn[2] << 24) | 305 ((u64)pn[1] << 32) | 306 ((u64)pn[0] << 40)); 307 } 308 309 rcu_read_unlock(); 310 } else { 311 for (i = 0; i < IWL_NUM_RSC; i++) { 312 u8 *pn = seq.ccmp.pn; 313 314 ieee80211_get_key_rx_seq(key, i, &seq); 315 aes_sc[i].pn = cpu_to_le64((u64)pn[5] | 316 ((u64)pn[4] << 8) | 317 ((u64)pn[3] << 16) | 318 ((u64)pn[2] << 24) | 319 ((u64)pn[1] << 32) | 320 ((u64)pn[0] << 40)); 321 } 322 } 323 data->use_rsc_tsc = true; 324 break; 325 } 326 327 IWL_DEBUG_WOWLAN(mvm, "GTK cipher %d\n", data->kek_kck_cmd->gtk_cipher); 328 329 if (data->configure_keys) { 330 mutex_lock(&mvm->mutex); 331 /* 332 * The D3 firmware hardcodes the key offset 0 as the key it 333 * uses to transmit packets to the AP, i.e. the PTK. 334 */ 335 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { 336 mvm->ptk_ivlen = key->iv_len; 337 mvm->ptk_icvlen = key->icv_len; 338 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 0); 339 } else { 340 /* 341 * firmware only supports TSC/RSC for a single key, 342 * so if there are multiple keep overwriting them 343 * with new ones -- this relies on mac80211 doing 344 * list_add_tail(). 
345 */ 346 mvm->gtk_ivlen = key->iv_len; 347 mvm->gtk_icvlen = key->icv_len; 348 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 1); 349 } 350 mutex_unlock(&mvm->mutex); 351 data->error = ret != 0; 352 } 353 } 354 355 static int iwl_mvm_send_patterns_v1(struct iwl_mvm *mvm, 356 struct cfg80211_wowlan *wowlan) 357 { 358 struct iwl_wowlan_patterns_cmd_v1 *pattern_cmd; 359 struct iwl_host_cmd cmd = { 360 .id = WOWLAN_PATTERNS, 361 .dataflags[0] = IWL_HCMD_DFL_NOCOPY, 362 }; 363 int i, err; 364 365 if (!wowlan->n_patterns) 366 return 0; 367 368 cmd.len[0] = struct_size(pattern_cmd, patterns, wowlan->n_patterns); 369 370 pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL); 371 if (!pattern_cmd) 372 return -ENOMEM; 373 374 pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns); 375 376 for (i = 0; i < wowlan->n_patterns; i++) { 377 int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8); 378 379 memcpy(&pattern_cmd->patterns[i].mask, 380 wowlan->patterns[i].mask, mask_len); 381 memcpy(&pattern_cmd->patterns[i].pattern, 382 wowlan->patterns[i].pattern, 383 wowlan->patterns[i].pattern_len); 384 pattern_cmd->patterns[i].mask_size = mask_len; 385 pattern_cmd->patterns[i].pattern_size = 386 wowlan->patterns[i].pattern_len; 387 } 388 389 cmd.data[0] = pattern_cmd; 390 err = iwl_mvm_send_cmd(mvm, &cmd); 391 kfree(pattern_cmd); 392 return err; 393 } 394 395 static int iwl_mvm_send_patterns(struct iwl_mvm *mvm, 396 struct cfg80211_wowlan *wowlan) 397 { 398 struct iwl_wowlan_patterns_cmd *pattern_cmd; 399 struct iwl_host_cmd cmd = { 400 .id = WOWLAN_PATTERNS, 401 .dataflags[0] = IWL_HCMD_DFL_NOCOPY, 402 }; 403 int i, err; 404 405 if (!wowlan->n_patterns) 406 return 0; 407 408 cmd.len[0] = sizeof(*pattern_cmd) + 409 wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern_v2); 410 411 pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL); 412 if (!pattern_cmd) 413 return -ENOMEM; 414 415 pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns); 416 417 for (i = 0; i < wowlan->n_patterns; i++) { 418 int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8); 419 420 pattern_cmd->patterns[i].pattern_type = 421 WOWLAN_PATTERN_TYPE_BITMASK; 422 423 memcpy(&pattern_cmd->patterns[i].u.bitmask.mask, 424 wowlan->patterns[i].mask, mask_len); 425 memcpy(&pattern_cmd->patterns[i].u.bitmask.pattern, 426 wowlan->patterns[i].pattern, 427 wowlan->patterns[i].pattern_len); 428 pattern_cmd->patterns[i].u.bitmask.mask_size = mask_len; 429 pattern_cmd->patterns[i].u.bitmask.pattern_size = 430 wowlan->patterns[i].pattern_len; 431 } 432 433 cmd.data[0] = pattern_cmd; 434 err = iwl_mvm_send_cmd(mvm, &cmd); 435 kfree(pattern_cmd); 436 return err; 437 } 438 439 static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 440 struct ieee80211_sta *ap_sta) 441 { 442 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 443 struct ieee80211_chanctx_conf *ctx; 444 u8 chains_static, chains_dynamic; 445 struct cfg80211_chan_def chandef; 446 int ret, i; 447 struct iwl_binding_cmd_v1 binding_cmd = {}; 448 struct iwl_time_quota_cmd quota_cmd = {}; 449 struct iwl_time_quota_data *quota; 450 u32 status; 451 452 if (WARN_ON_ONCE(iwl_mvm_is_cdb_supported(mvm))) 453 return -EINVAL; 454 455 /* add back the PHY */ 456 if (WARN_ON(!mvmvif->phy_ctxt)) 457 return -EINVAL; 458 459 rcu_read_lock(); 460 ctx = rcu_dereference(vif->chanctx_conf); 461 if (WARN_ON(!ctx)) { 462 rcu_read_unlock(); 463 return -EINVAL; 464 } 465 chandef = ctx->def; 466 chains_static = ctx->rx_chains_static; 467 chains_dynamic = ctx->rx_chains_dynamic; 
468 rcu_read_unlock(); 469 470 ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->phy_ctxt, &chandef, 471 chains_static, chains_dynamic); 472 if (ret) 473 return ret; 474 475 /* add back the MAC */ 476 mvmvif->uploaded = false; 477 478 if (WARN_ON(!vif->bss_conf.assoc)) 479 return -EINVAL; 480 481 ret = iwl_mvm_mac_ctxt_add(mvm, vif); 482 if (ret) 483 return ret; 484 485 /* add back binding - XXX refactor? */ 486 binding_cmd.id_and_color = 487 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id, 488 mvmvif->phy_ctxt->color)); 489 binding_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD); 490 binding_cmd.phy = 491 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id, 492 mvmvif->phy_ctxt->color)); 493 binding_cmd.macs[0] = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, 494 mvmvif->color)); 495 for (i = 1; i < MAX_MACS_IN_BINDING; i++) 496 binding_cmd.macs[i] = cpu_to_le32(FW_CTXT_INVALID); 497 498 status = 0; 499 ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD, 500 IWL_BINDING_CMD_SIZE_V1, &binding_cmd, 501 &status); 502 if (ret) { 503 IWL_ERR(mvm, "Failed to add binding: %d\n", ret); 504 return ret; 505 } 506 507 if (status) { 508 IWL_ERR(mvm, "Binding command failed: %u\n", status); 509 return -EIO; 510 } 511 512 ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false, 0); 513 if (ret) 514 return ret; 515 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta); 516 517 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 518 if (ret) 519 return ret; 520 521 /* and some quota */ 522 quota = iwl_mvm_quota_cmd_get_quota(mvm, "a_cmd, 0); 523 quota->id_and_color = 524 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id, 525 mvmvif->phy_ctxt->color)); 526 quota->quota = cpu_to_le32(IWL_MVM_MAX_QUOTA); 527 quota->max_duration = cpu_to_le32(IWL_MVM_MAX_QUOTA); 528 529 for (i = 1; i < MAX_BINDINGS; i++) { 530 quota = iwl_mvm_quota_cmd_get_quota(mvm, "a_cmd, i); 531 quota->id_and_color = cpu_to_le32(FW_CTXT_INVALID); 532 } 533 534 ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0, 535 iwl_mvm_quota_cmd_size(mvm), "a_cmd); 536 if (ret) 537 IWL_ERR(mvm, "Failed to send quota: %d\n", ret); 538 539 if (iwl_mvm_is_lar_supported(mvm) && iwl_mvm_init_fw_regd(mvm)) 540 IWL_ERR(mvm, "Failed to initialize D3 LAR information\n"); 541 542 return 0; 543 } 544 545 static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm, 546 struct ieee80211_vif *vif) 547 { 548 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 549 struct iwl_nonqos_seq_query_cmd query_cmd = { 550 .get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_GET), 551 .mac_id_n_color = 552 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, 553 mvmvif->color)), 554 }; 555 struct iwl_host_cmd cmd = { 556 .id = NON_QOS_TX_COUNTER_CMD, 557 .flags = CMD_WANT_SKB, 558 }; 559 int err; 560 u32 size; 561 562 cmd.data[0] = &query_cmd; 563 cmd.len[0] = sizeof(query_cmd); 564 565 err = iwl_mvm_send_cmd(mvm, &cmd); 566 if (err) 567 return err; 568 569 size = iwl_rx_packet_payload_len(cmd.resp_pkt); 570 if (size < sizeof(__le16)) { 571 err = -EINVAL; 572 } else { 573 err = le16_to_cpup((__le16 *)cmd.resp_pkt->data); 574 /* firmware returns next, not last-used seqno */ 575 err = (u16) (err - 0x10); 576 } 577 578 iwl_free_resp(&cmd); 579 return err; 580 } 581 582 void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 583 { 584 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 585 struct iwl_nonqos_seq_query_cmd query_cmd = { 586 .get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_SET), 587 .mac_id_n_color = 588 
cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, 589 mvmvif->color)), 590 .value = cpu_to_le16(mvmvif->seqno), 591 }; 592 593 /* return if called during restart, not resume from D3 */ 594 if (!mvmvif->seqno_valid) 595 return; 596 597 mvmvif->seqno_valid = false; 598 599 if (iwl_mvm_send_cmd_pdu(mvm, NON_QOS_TX_COUNTER_CMD, 0, 600 sizeof(query_cmd), &query_cmd)) 601 IWL_ERR(mvm, "failed to set non-QoS seqno\n"); 602 } 603 604 static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm) 605 { 606 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); 607 608 iwl_mvm_stop_device(mvm); 609 /* 610 * Set the HW restart bit -- this is mostly true as we're 611 * going to load new firmware and reprogram that, though 612 * the reprogramming is going to be manual to avoid adding 613 * all the MACs that aren't support. 614 * We don't have to clear up everything though because the 615 * reprogramming is manual. When we resume, we'll actually 616 * go through a proper restart sequence again to switch 617 * back to the runtime firmware image. 618 */ 619 set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); 620 621 /* the fw is reset, so all the keys are cleared */ 622 memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table)); 623 624 mvm->ptk_ivlen = 0; 625 mvm->ptk_icvlen = 0; 626 mvm->ptk_ivlen = 0; 627 mvm->ptk_icvlen = 0; 628 629 return iwl_mvm_load_d3_fw(mvm); 630 } 631 632 static int 633 iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm, 634 struct cfg80211_wowlan *wowlan, 635 struct iwl_wowlan_config_cmd *wowlan_config_cmd, 636 struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif, 637 struct ieee80211_sta *ap_sta) 638 { 639 int ret; 640 struct iwl_mvm_sta *mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta); 641 642 /* TODO: wowlan_config_cmd->wowlan_ba_teardown_tids */ 643 644 wowlan_config_cmd->is_11n_connection = 645 ap_sta->ht_cap.ht_supported; 646 wowlan_config_cmd->flags = ENABLE_L3_FILTERING | 647 ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING; 648 649 /* Query the last used seqno and set it */ 650 ret = iwl_mvm_get_last_nonqos_seq(mvm, vif); 651 if (ret < 0) 652 return ret; 653 654 wowlan_config_cmd->non_qos_seq = cpu_to_le16(ret); 655 656 iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, wowlan_config_cmd); 657 658 if (wowlan->disconnect) 659 wowlan_config_cmd->wakeup_filter |= 660 cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS | 661 IWL_WOWLAN_WAKEUP_LINK_CHANGE); 662 if (wowlan->magic_pkt) 663 wowlan_config_cmd->wakeup_filter |= 664 cpu_to_le32(IWL_WOWLAN_WAKEUP_MAGIC_PACKET); 665 if (wowlan->gtk_rekey_failure) 666 wowlan_config_cmd->wakeup_filter |= 667 cpu_to_le32(IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL); 668 if (wowlan->eap_identity_req) 669 wowlan_config_cmd->wakeup_filter |= 670 cpu_to_le32(IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ); 671 if (wowlan->four_way_handshake) 672 wowlan_config_cmd->wakeup_filter |= 673 cpu_to_le32(IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE); 674 if (wowlan->n_patterns) 675 wowlan_config_cmd->wakeup_filter |= 676 cpu_to_le32(IWL_WOWLAN_WAKEUP_PATTERN_MATCH); 677 678 if (wowlan->rfkill_release) 679 wowlan_config_cmd->wakeup_filter |= 680 cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT); 681 682 if (wowlan->tcp) { 683 /* 684 * Set the "link change" (really "link lost") flag as well 685 * since that implies losing the TCP connection. 
686 */ 687 wowlan_config_cmd->wakeup_filter |= 688 cpu_to_le32(IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS | 689 IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE | 690 IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET | 691 IWL_WOWLAN_WAKEUP_LINK_CHANGE); 692 } 693 694 if (wowlan->any) { 695 wowlan_config_cmd->wakeup_filter |= 696 cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS | 697 IWL_WOWLAN_WAKEUP_LINK_CHANGE | 698 IWL_WOWLAN_WAKEUP_RX_FRAME | 699 IWL_WOWLAN_WAKEUP_BCN_FILTERING); 700 } 701 702 return 0; 703 } 704 705 static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm, 706 struct ieee80211_vif *vif, 707 u32 cmd_flags) 708 { 709 struct iwl_wowlan_kek_kck_material_cmd_v3 kek_kck_cmd = {}; 710 struct iwl_wowlan_tkip_params_cmd tkip_cmd = {}; 711 bool unified = fw_has_capa(&mvm->fw->ucode_capa, 712 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); 713 struct wowlan_key_data key_data = { 714 .configure_keys = !unified, 715 .use_rsc_tsc = false, 716 .tkip = &tkip_cmd, 717 .use_tkip = false, 718 .kek_kck_cmd = &kek_kck_cmd, 719 }; 720 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 721 int ret; 722 u8 cmd_ver; 723 size_t cmd_size; 724 725 key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL); 726 if (!key_data.rsc_tsc) 727 return -ENOMEM; 728 729 /* 730 * if we have to configure keys, call ieee80211_iter_keys(), 731 * as we need non-atomic context in order to take the 732 * required locks. 733 */ 734 /* 735 * Note that currently we don't propagate cmd_flags 736 * to the iterator. In case of key_data.configure_keys, 737 * all the configured commands are SYNC, and 738 * iwl_mvm_wowlan_program_keys() will take care of 739 * locking/unlocking mvm->mutex. 740 */ 741 ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_wowlan_program_keys, 742 &key_data); 743 744 if (key_data.error) { 745 ret = -EIO; 746 goto out; 747 } 748 749 if (key_data.use_rsc_tsc) { 750 int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, 751 WOWLAN_TSC_RSC_PARAM, 752 IWL_FW_CMD_VER_UNKNOWN); 753 int size; 754 755 if (ver == 4) { 756 size = sizeof(*key_data.rsc_tsc); 757 key_data.rsc_tsc->sta_id = 758 cpu_to_le32(mvmvif->ap_sta_id); 759 760 } else if (ver == 2 || ver == IWL_FW_CMD_VER_UNKNOWN) { 761 size = sizeof(key_data.rsc_tsc->params); 762 } else { 763 ret = 0; 764 WARN_ON_ONCE(1); 765 goto out; 766 } 767 768 ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_TSC_RSC_PARAM, 769 cmd_flags, 770 size, 771 key_data.rsc_tsc); 772 773 if (ret) 774 goto out; 775 } 776 777 if (key_data.use_tkip && 778 !fw_has_api(&mvm->fw->ucode_capa, 779 IWL_UCODE_TLV_API_TKIP_MIC_KEYS)) { 780 int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, 781 WOWLAN_TKIP_PARAM, 782 IWL_FW_CMD_VER_UNKNOWN); 783 int size; 784 785 if (ver == 2) { 786 size = sizeof(tkip_cmd); 787 key_data.tkip->sta_id = 788 cpu_to_le32(mvmvif->ap_sta_id); 789 } else if (ver == 1 || ver == IWL_FW_CMD_VER_UNKNOWN) { 790 size = sizeof(struct iwl_wowlan_tkip_params_cmd_ver_1); 791 } else { 792 ret = -EINVAL; 793 WARN_ON_ONCE(1); 794 goto out; 795 } 796 797 /* send relevant data according to CMD version */ 798 ret = iwl_mvm_send_cmd_pdu(mvm, 799 WOWLAN_TKIP_PARAM, 800 cmd_flags, size, 801 &tkip_cmd); 802 if (ret) 803 goto out; 804 } 805 806 /* configure rekey data only if offloaded rekey is supported (d3) */ 807 if (mvmvif->rekey_data.valid) { 808 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, 809 IWL_ALWAYS_LONG_GROUP, 810 WOWLAN_KEK_KCK_MATERIAL, 811 IWL_FW_CMD_VER_UNKNOWN); 812 if (WARN_ON(cmd_ver != 2 && cmd_ver != 3 && 813 cmd_ver != IWL_FW_CMD_VER_UNKNOWN)) 814 return -EINVAL; 815 if (cmd_ver == 3) 816 
cmd_size = sizeof(struct iwl_wowlan_kek_kck_material_cmd_v3); 817 else 818 cmd_size = sizeof(struct iwl_wowlan_kek_kck_material_cmd_v2); 819 820 memcpy(kek_kck_cmd.kck, mvmvif->rekey_data.kck, 821 mvmvif->rekey_data.kck_len); 822 kek_kck_cmd.kck_len = cpu_to_le16(mvmvif->rekey_data.kck_len); 823 memcpy(kek_kck_cmd.kek, mvmvif->rekey_data.kek, 824 mvmvif->rekey_data.kek_len); 825 kek_kck_cmd.kek_len = cpu_to_le16(mvmvif->rekey_data.kek_len); 826 kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr; 827 kek_kck_cmd.akm = cpu_to_le32(mvmvif->rekey_data.akm); 828 829 IWL_DEBUG_WOWLAN(mvm, "setting akm %d\n", 830 mvmvif->rekey_data.akm); 831 832 ret = iwl_mvm_send_cmd_pdu(mvm, 833 WOWLAN_KEK_KCK_MATERIAL, cmd_flags, 834 cmd_size, 835 &kek_kck_cmd); 836 if (ret) 837 goto out; 838 } 839 ret = 0; 840 out: 841 kfree(key_data.rsc_tsc); 842 return ret; 843 } 844 845 static int 846 iwl_mvm_wowlan_config(struct iwl_mvm *mvm, 847 struct cfg80211_wowlan *wowlan, 848 struct iwl_wowlan_config_cmd *wowlan_config_cmd, 849 struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif, 850 struct ieee80211_sta *ap_sta) 851 { 852 int ret; 853 bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, 854 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); 855 856 mvm->offload_tid = wowlan_config_cmd->offloading_tid; 857 858 if (!unified_image) { 859 ret = iwl_mvm_switch_to_d3(mvm); 860 if (ret) 861 return ret; 862 863 ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta); 864 if (ret) 865 return ret; 866 } 867 868 /* 869 * This needs to be unlocked due to lock ordering 870 * constraints. Since we're in the suspend path 871 * that isn't really a problem though. 872 */ 873 mutex_unlock(&mvm->mutex); 874 ret = iwl_mvm_wowlan_config_key_params(mvm, vif, CMD_ASYNC); 875 mutex_lock(&mvm->mutex); 876 if (ret) 877 return ret; 878 879 ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0, 880 sizeof(*wowlan_config_cmd), 881 wowlan_config_cmd); 882 if (ret) 883 return ret; 884 885 if (fw_has_api(&mvm->fw->ucode_capa, 886 IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE)) 887 ret = iwl_mvm_send_patterns(mvm, wowlan); 888 else 889 ret = iwl_mvm_send_patterns_v1(mvm, wowlan); 890 if (ret) 891 return ret; 892 893 return iwl_mvm_send_proto_offload(mvm, vif, false, true, 0); 894 } 895 896 static int 897 iwl_mvm_netdetect_config(struct iwl_mvm *mvm, 898 struct cfg80211_wowlan *wowlan, 899 struct cfg80211_sched_scan_request *nd_config, 900 struct ieee80211_vif *vif) 901 { 902 int ret; 903 bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, 904 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); 905 906 if (!unified_image) { 907 ret = iwl_mvm_switch_to_d3(mvm); 908 if (ret) 909 return ret; 910 } else { 911 /* In theory, we wouldn't have to stop a running sched 912 * scan in order to start another one (for 913 * net-detect). But in practice this doesn't seem to 914 * work properly, so stop any running sched_scan now. 915 */ 916 ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true); 917 if (ret) 918 return ret; 919 } 920 921 ret = iwl_mvm_sched_scan_start(mvm, vif, nd_config, &mvm->nd_ies, 922 IWL_MVM_SCAN_NETDETECT); 923 if (ret) 924 return ret; 925 926 if (WARN_ON(mvm->nd_match_sets || mvm->nd_channels)) 927 return -EBUSY; 928 929 /* save the sched scan matchsets... 
*/ 930 if (nd_config->n_match_sets) { 931 mvm->nd_match_sets = kmemdup(nd_config->match_sets, 932 sizeof(*nd_config->match_sets) * 933 nd_config->n_match_sets, 934 GFP_KERNEL); 935 if (mvm->nd_match_sets) 936 mvm->n_nd_match_sets = nd_config->n_match_sets; 937 } 938 939 /* ...and the sched scan channels for later reporting */ 940 mvm->nd_channels = kmemdup(nd_config->channels, 941 sizeof(*nd_config->channels) * 942 nd_config->n_channels, 943 GFP_KERNEL); 944 if (mvm->nd_channels) 945 mvm->n_nd_channels = nd_config->n_channels; 946 947 return 0; 948 } 949 950 static void iwl_mvm_free_nd(struct iwl_mvm *mvm) 951 { 952 kfree(mvm->nd_match_sets); 953 mvm->nd_match_sets = NULL; 954 mvm->n_nd_match_sets = 0; 955 kfree(mvm->nd_channels); 956 mvm->nd_channels = NULL; 957 mvm->n_nd_channels = 0; 958 } 959 960 static int __iwl_mvm_suspend(struct ieee80211_hw *hw, 961 struct cfg80211_wowlan *wowlan, 962 bool test) 963 { 964 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 965 struct ieee80211_vif *vif = NULL; 966 struct iwl_mvm_vif *mvmvif = NULL; 967 struct ieee80211_sta *ap_sta = NULL; 968 struct iwl_d3_manager_config d3_cfg_cmd_data = { 969 /* 970 * Program the minimum sleep time to 10 seconds, as many 971 * platforms have issues processing a wakeup signal while 972 * still being in the process of suspending. 973 */ 974 .min_sleep_time = cpu_to_le32(10 * 1000 * 1000), 975 }; 976 struct iwl_host_cmd d3_cfg_cmd = { 977 .id = D3_CONFIG_CMD, 978 .flags = CMD_WANT_SKB, 979 .data[0] = &d3_cfg_cmd_data, 980 .len[0] = sizeof(d3_cfg_cmd_data), 981 }; 982 int ret; 983 int len __maybe_unused; 984 bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, 985 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); 986 987 if (!wowlan) { 988 /* 989 * mac80211 shouldn't get here, but for D3 test 990 * it doesn't warrant a warning 991 */ 992 WARN_ON(!test); 993 return -EINVAL; 994 } 995 996 mutex_lock(&mvm->mutex); 997 998 set_bit(IWL_MVM_STATUS_IN_D3, &mvm->status); 999 1000 vif = iwl_mvm_get_bss_vif(mvm); 1001 if (IS_ERR_OR_NULL(vif)) { 1002 ret = 1; 1003 goto out_noreset; 1004 } 1005 1006 mvmvif = iwl_mvm_vif_from_mac80211(vif); 1007 1008 if (mvmvif->ap_sta_id == IWL_MVM_INVALID_STA) { 1009 /* if we're not associated, this must be netdetect */ 1010 if (!wowlan->nd_config) { 1011 ret = 1; 1012 goto out_noreset; 1013 } 1014 1015 ret = iwl_mvm_netdetect_config( 1016 mvm, wowlan, wowlan->nd_config, vif); 1017 if (ret) 1018 goto out; 1019 1020 mvm->net_detect = true; 1021 } else { 1022 struct iwl_wowlan_config_cmd wowlan_config_cmd = {}; 1023 1024 wowlan_config_cmd.sta_id = mvmvif->ap_sta_id; 1025 1026 ap_sta = rcu_dereference_protected( 1027 mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], 1028 lockdep_is_held(&mvm->mutex)); 1029 if (IS_ERR_OR_NULL(ap_sta)) { 1030 ret = -EINVAL; 1031 goto out_noreset; 1032 } 1033 1034 ret = iwl_mvm_get_wowlan_config(mvm, wowlan, &wowlan_config_cmd, 1035 vif, mvmvif, ap_sta); 1036 if (ret) 1037 goto out_noreset; 1038 ret = iwl_mvm_wowlan_config(mvm, wowlan, &wowlan_config_cmd, 1039 vif, mvmvif, ap_sta); 1040 if (ret) 1041 goto out; 1042 1043 mvm->net_detect = false; 1044 } 1045 1046 ret = iwl_mvm_power_update_device(mvm); 1047 if (ret) 1048 goto out; 1049 1050 ret = iwl_mvm_power_update_mac(mvm); 1051 if (ret) 1052 goto out; 1053 1054 #ifdef CONFIG_IWLWIFI_DEBUGFS 1055 if (mvm->d3_wake_sysassert) 1056 d3_cfg_cmd_data.wakeup_flags |= 1057 cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR); 1058 #endif 1059 1060 /* 1061 * Prior to 9000 device family the driver needs to stop the dbg 1062 * recording before entering D3. 
In later devices the FW stops the 1063 * recording automatically. 1064 */ 1065 if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000) 1066 iwl_fw_dbg_stop_restart_recording(&mvm->fwrt, NULL, true); 1067 1068 /* must be last -- this switches firmware state */ 1069 ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd); 1070 if (ret) 1071 goto out; 1072 #ifdef CONFIG_IWLWIFI_DEBUGFS 1073 len = iwl_rx_packet_payload_len(d3_cfg_cmd.resp_pkt); 1074 if (len >= sizeof(u32)) { 1075 mvm->d3_test_pme_ptr = 1076 le32_to_cpup((__le32 *)d3_cfg_cmd.resp_pkt->data); 1077 } 1078 #endif 1079 iwl_free_resp(&d3_cfg_cmd); 1080 1081 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); 1082 1083 ret = iwl_trans_d3_suspend(mvm->trans, test, !unified_image); 1084 out: 1085 if (ret < 0) { 1086 iwl_mvm_free_nd(mvm); 1087 1088 if (!unified_image) { 1089 if (mvm->fw_restart > 0) { 1090 mvm->fw_restart--; 1091 ieee80211_restart_hw(mvm->hw); 1092 } 1093 } 1094 1095 clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status); 1096 } 1097 out_noreset: 1098 mutex_unlock(&mvm->mutex); 1099 1100 return ret; 1101 } 1102 1103 int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) 1104 { 1105 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1106 struct iwl_trans *trans = mvm->trans; 1107 int ret; 1108 1109 iwl_mvm_pause_tcm(mvm, true); 1110 1111 iwl_fw_runtime_suspend(&mvm->fwrt); 1112 1113 ret = iwl_trans_suspend(trans); 1114 if (ret) 1115 return ret; 1116 1117 trans->system_pm_mode = IWL_PLAT_PM_MODE_D3; 1118 1119 return __iwl_mvm_suspend(hw, wowlan, false); 1120 } 1121 1122 /* converted data from the different status responses */ 1123 struct iwl_wowlan_status_data { 1124 u16 pattern_number; 1125 u16 qos_seq_ctr[8]; 1126 u32 wakeup_reasons; 1127 u32 wake_packet_length; 1128 u32 wake_packet_bufsize; 1129 const u8 *wake_packet; 1130 }; 1131 1132 static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm, 1133 struct ieee80211_vif *vif, 1134 struct iwl_wowlan_status_data *status) 1135 { 1136 struct sk_buff *pkt = NULL; 1137 struct cfg80211_wowlan_wakeup wakeup = { 1138 .pattern_idx = -1, 1139 }; 1140 struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup; 1141 u32 reasons = status->wakeup_reasons; 1142 1143 if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) { 1144 wakeup_report = NULL; 1145 goto report; 1146 } 1147 1148 pm_wakeup_event(mvm->dev, 0); 1149 1150 if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET) 1151 wakeup.magic_pkt = true; 1152 1153 if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN) 1154 wakeup.pattern_idx = 1155 status->pattern_number; 1156 1157 if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON | 1158 IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH)) 1159 wakeup.disconnect = true; 1160 1161 if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE) 1162 wakeup.gtk_rekey_failure = true; 1163 1164 if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED) 1165 wakeup.rfkill_release = true; 1166 1167 if (reasons & IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST) 1168 wakeup.eap_identity_req = true; 1169 1170 if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE) 1171 wakeup.four_way_handshake = true; 1172 1173 if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS) 1174 wakeup.tcp_connlost = true; 1175 1176 if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE) 1177 wakeup.tcp_nomoretokens = true; 1178 1179 if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET) 1180 wakeup.tcp_match = true; 1181 1182 if (status->wake_packet_bufsize) { 1183 int pktsize = status->wake_packet_bufsize; 1184 int pktlen = 
status->wake_packet_length; 1185 const u8 *pktdata = status->wake_packet; 1186 struct ieee80211_hdr *hdr = (void *)pktdata; 1187 int truncated = pktlen - pktsize; 1188 1189 /* this would be a firmware bug */ 1190 if (WARN_ON_ONCE(truncated < 0)) 1191 truncated = 0; 1192 1193 if (ieee80211_is_data(hdr->frame_control)) { 1194 int hdrlen = ieee80211_hdrlen(hdr->frame_control); 1195 int ivlen = 0, icvlen = 4; /* also FCS */ 1196 1197 pkt = alloc_skb(pktsize, GFP_KERNEL); 1198 if (!pkt) 1199 goto report; 1200 1201 skb_put_data(pkt, pktdata, hdrlen); 1202 pktdata += hdrlen; 1203 pktsize -= hdrlen; 1204 1205 if (ieee80211_has_protected(hdr->frame_control)) { 1206 /* 1207 * This is unlocked and using gtk_i(c)vlen, 1208 * but since everything is under RTNL still 1209 * that's not really a problem - changing 1210 * it would be difficult. 1211 */ 1212 if (is_multicast_ether_addr(hdr->addr1)) { 1213 ivlen = mvm->gtk_ivlen; 1214 icvlen += mvm->gtk_icvlen; 1215 } else { 1216 ivlen = mvm->ptk_ivlen; 1217 icvlen += mvm->ptk_icvlen; 1218 } 1219 } 1220 1221 /* if truncated, FCS/ICV is (partially) gone */ 1222 if (truncated >= icvlen) { 1223 icvlen = 0; 1224 truncated -= icvlen; 1225 } else { 1226 icvlen -= truncated; 1227 truncated = 0; 1228 } 1229 1230 pktsize -= ivlen + icvlen; 1231 pktdata += ivlen; 1232 1233 skb_put_data(pkt, pktdata, pktsize); 1234 1235 if (ieee80211_data_to_8023(pkt, vif->addr, vif->type)) 1236 goto report; 1237 wakeup.packet = pkt->data; 1238 wakeup.packet_present_len = pkt->len; 1239 wakeup.packet_len = pkt->len - truncated; 1240 wakeup.packet_80211 = false; 1241 } else { 1242 int fcslen = 4; 1243 1244 if (truncated >= 4) { 1245 truncated -= 4; 1246 fcslen = 0; 1247 } else { 1248 fcslen -= truncated; 1249 truncated = 0; 1250 } 1251 pktsize -= fcslen; 1252 wakeup.packet = status->wake_packet; 1253 wakeup.packet_present_len = pktsize; 1254 wakeup.packet_len = pktlen - truncated; 1255 wakeup.packet_80211 = true; 1256 } 1257 } 1258 1259 report: 1260 ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL); 1261 kfree_skb(pkt); 1262 } 1263 1264 static void iwl_mvm_aes_sc_to_seq(struct aes_sc *sc, 1265 struct ieee80211_key_seq *seq) 1266 { 1267 u64 pn; 1268 1269 pn = le64_to_cpu(sc->pn); 1270 seq->ccmp.pn[0] = pn >> 40; 1271 seq->ccmp.pn[1] = pn >> 32; 1272 seq->ccmp.pn[2] = pn >> 24; 1273 seq->ccmp.pn[3] = pn >> 16; 1274 seq->ccmp.pn[4] = pn >> 8; 1275 seq->ccmp.pn[5] = pn; 1276 } 1277 1278 static void iwl_mvm_tkip_sc_to_seq(struct tkip_sc *sc, 1279 struct ieee80211_key_seq *seq) 1280 { 1281 seq->tkip.iv32 = le32_to_cpu(sc->iv32); 1282 seq->tkip.iv16 = le16_to_cpu(sc->iv16); 1283 } 1284 1285 static void iwl_mvm_set_aes_rx_seq(struct iwl_mvm *mvm, struct aes_sc *scs, 1286 struct ieee80211_sta *sta, 1287 struct ieee80211_key_conf *key) 1288 { 1289 int tid; 1290 1291 BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS); 1292 1293 if (sta && iwl_mvm_has_new_rx_api(mvm)) { 1294 struct iwl_mvm_sta *mvmsta; 1295 struct iwl_mvm_key_pn *ptk_pn; 1296 1297 mvmsta = iwl_mvm_sta_from_mac80211(sta); 1298 1299 rcu_read_lock(); 1300 ptk_pn = rcu_dereference(mvmsta->ptk_pn[key->keyidx]); 1301 if (WARN_ON(!ptk_pn)) { 1302 rcu_read_unlock(); 1303 return; 1304 } 1305 1306 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { 1307 struct ieee80211_key_seq seq = {}; 1308 int i; 1309 1310 iwl_mvm_aes_sc_to_seq(&scs[tid], &seq); 1311 ieee80211_set_key_rx_seq(key, tid, &seq); 1312 for (i = 1; i < mvm->trans->num_rx_queues; i++) 1313 memcpy(ptk_pn->q[i].pn[tid], 1314 seq.ccmp.pn, IEEE80211_CCMP_PN_LEN); 1315 } 1316 
rcu_read_unlock(); 1317 } else { 1318 for (tid = 0; tid < IWL_NUM_RSC; tid++) { 1319 struct ieee80211_key_seq seq = {}; 1320 1321 iwl_mvm_aes_sc_to_seq(&scs[tid], &seq); 1322 ieee80211_set_key_rx_seq(key, tid, &seq); 1323 } 1324 } 1325 } 1326 1327 static void iwl_mvm_set_tkip_rx_seq(struct tkip_sc *scs, 1328 struct ieee80211_key_conf *key) 1329 { 1330 int tid; 1331 1332 BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS); 1333 1334 for (tid = 0; tid < IWL_NUM_RSC; tid++) { 1335 struct ieee80211_key_seq seq = {}; 1336 1337 iwl_mvm_tkip_sc_to_seq(&scs[tid], &seq); 1338 ieee80211_set_key_rx_seq(key, tid, &seq); 1339 } 1340 } 1341 1342 static void iwl_mvm_set_key_rx_seq(struct iwl_mvm *mvm, 1343 struct ieee80211_key_conf *key, 1344 struct iwl_wowlan_status *status) 1345 { 1346 union iwl_all_tsc_rsc *rsc = &status->gtk[0].rsc.all_tsc_rsc; 1347 1348 switch (key->cipher) { 1349 case WLAN_CIPHER_SUITE_CCMP: 1350 case WLAN_CIPHER_SUITE_GCMP: 1351 case WLAN_CIPHER_SUITE_GCMP_256: 1352 iwl_mvm_set_aes_rx_seq(mvm, rsc->aes.multicast_rsc, NULL, key); 1353 break; 1354 case WLAN_CIPHER_SUITE_TKIP: 1355 iwl_mvm_set_tkip_rx_seq(rsc->tkip.multicast_rsc, key); 1356 break; 1357 default: 1358 WARN_ON(1); 1359 } 1360 } 1361 1362 struct iwl_mvm_d3_gtk_iter_data { 1363 struct iwl_mvm *mvm; 1364 struct iwl_wowlan_status *status; 1365 void *last_gtk; 1366 u32 cipher; 1367 bool find_phase, unhandled_cipher; 1368 int num_keys; 1369 }; 1370 1371 static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw, 1372 struct ieee80211_vif *vif, 1373 struct ieee80211_sta *sta, 1374 struct ieee80211_key_conf *key, 1375 void *_data) 1376 { 1377 struct iwl_mvm_d3_gtk_iter_data *data = _data; 1378 1379 if (data->unhandled_cipher) 1380 return; 1381 1382 switch (key->cipher) { 1383 case WLAN_CIPHER_SUITE_WEP40: 1384 case WLAN_CIPHER_SUITE_WEP104: 1385 /* ignore WEP completely, nothing to do */ 1386 return; 1387 case WLAN_CIPHER_SUITE_CCMP: 1388 case WLAN_CIPHER_SUITE_GCMP: 1389 case WLAN_CIPHER_SUITE_GCMP_256: 1390 case WLAN_CIPHER_SUITE_TKIP: 1391 /* we support these */ 1392 break; 1393 default: 1394 /* everything else (even CMAC for MFP) - disconnect from AP */ 1395 data->unhandled_cipher = true; 1396 return; 1397 } 1398 1399 data->num_keys++; 1400 1401 /* 1402 * pairwise key - update sequence counters only; 1403 * note that this assumes no TDLS sessions are active 1404 */ 1405 if (sta) { 1406 struct ieee80211_key_seq seq = {}; 1407 union iwl_all_tsc_rsc *sc = 1408 &data->status->gtk[0].rsc.all_tsc_rsc; 1409 1410 if (data->find_phase) 1411 return; 1412 1413 switch (key->cipher) { 1414 case WLAN_CIPHER_SUITE_CCMP: 1415 case WLAN_CIPHER_SUITE_GCMP: 1416 case WLAN_CIPHER_SUITE_GCMP_256: 1417 iwl_mvm_set_aes_rx_seq(data->mvm, sc->aes.unicast_rsc, 1418 sta, key); 1419 atomic64_set(&key->tx_pn, le64_to_cpu(sc->aes.tsc.pn)); 1420 break; 1421 case WLAN_CIPHER_SUITE_TKIP: 1422 iwl_mvm_tkip_sc_to_seq(&sc->tkip.tsc, &seq); 1423 iwl_mvm_set_tkip_rx_seq(sc->tkip.unicast_rsc, key); 1424 atomic64_set(&key->tx_pn, 1425 (u64)seq.tkip.iv16 | 1426 ((u64)seq.tkip.iv32 << 16)); 1427 break; 1428 } 1429 1430 /* that's it for this key */ 1431 return; 1432 } 1433 1434 if (data->find_phase) { 1435 data->last_gtk = key; 1436 data->cipher = key->cipher; 1437 return; 1438 } 1439 1440 if (data->status->num_of_gtk_rekeys) 1441 ieee80211_remove_key(key); 1442 else if (data->last_gtk == key) 1443 iwl_mvm_set_key_rx_seq(data->mvm, key, data->status); 1444 } 1445 1446 static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, 1447 struct ieee80211_vif *vif, 1448 
struct iwl_wowlan_status *status) 1449 { 1450 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1451 struct iwl_mvm_d3_gtk_iter_data gtkdata = { 1452 .mvm = mvm, 1453 .status = status, 1454 }; 1455 u32 disconnection_reasons = 1456 IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON | 1457 IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH; 1458 1459 if (!status || !vif->bss_conf.bssid) 1460 return false; 1461 1462 if (le32_to_cpu(status->wakeup_reasons) & disconnection_reasons) 1463 return false; 1464 1465 /* find last GTK that we used initially, if any */ 1466 gtkdata.find_phase = true; 1467 ieee80211_iter_keys(mvm->hw, vif, 1468 iwl_mvm_d3_update_keys, >kdata); 1469 /* not trying to keep connections with MFP/unhandled ciphers */ 1470 if (gtkdata.unhandled_cipher) 1471 return false; 1472 if (!gtkdata.num_keys) 1473 goto out; 1474 if (!gtkdata.last_gtk) 1475 return false; 1476 1477 /* 1478 * invalidate all other GTKs that might still exist and update 1479 * the one that we used 1480 */ 1481 gtkdata.find_phase = false; 1482 ieee80211_iter_keys(mvm->hw, vif, 1483 iwl_mvm_d3_update_keys, >kdata); 1484 1485 IWL_DEBUG_WOWLAN(mvm, "num of GTK rekeying %d\n", 1486 le32_to_cpu(status->num_of_gtk_rekeys)); 1487 if (status->num_of_gtk_rekeys) { 1488 struct ieee80211_key_conf *key; 1489 struct { 1490 struct ieee80211_key_conf conf; 1491 u8 key[32]; 1492 } conf = { 1493 .conf.cipher = gtkdata.cipher, 1494 .conf.keyidx = 1495 iwlmvm_wowlan_gtk_idx(&status->gtk[0]), 1496 }; 1497 __be64 replay_ctr; 1498 1499 IWL_DEBUG_WOWLAN(mvm, 1500 "Received from FW GTK cipher %d, key index %d\n", 1501 conf.conf.cipher, conf.conf.keyidx); 1502 switch (gtkdata.cipher) { 1503 case WLAN_CIPHER_SUITE_CCMP: 1504 case WLAN_CIPHER_SUITE_GCMP: 1505 BUILD_BUG_ON(WLAN_KEY_LEN_CCMP != WLAN_KEY_LEN_GCMP); 1506 BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_CCMP); 1507 conf.conf.keylen = WLAN_KEY_LEN_CCMP; 1508 memcpy(conf.conf.key, status->gtk[0].key, 1509 WLAN_KEY_LEN_CCMP); 1510 break; 1511 case WLAN_CIPHER_SUITE_GCMP_256: 1512 BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_GCMP_256); 1513 conf.conf.keylen = WLAN_KEY_LEN_GCMP_256; 1514 memcpy(conf.conf.key, status->gtk[0].key, 1515 WLAN_KEY_LEN_GCMP_256); 1516 break; 1517 case WLAN_CIPHER_SUITE_TKIP: 1518 BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_TKIP); 1519 conf.conf.keylen = WLAN_KEY_LEN_TKIP; 1520 memcpy(conf.conf.key, status->gtk[0].key, 16); 1521 /* leave TX MIC key zeroed, we don't use it anyway */ 1522 memcpy(conf.conf.key + 1523 NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY, 1524 status->gtk[0].tkip_mic_key, 8); 1525 break; 1526 } 1527 1528 key = ieee80211_gtk_rekey_add(vif, &conf.conf); 1529 if (IS_ERR(key)) 1530 return false; 1531 iwl_mvm_set_key_rx_seq(mvm, key, status); 1532 1533 replay_ctr = 1534 cpu_to_be64(le64_to_cpu(status->replay_ctr)); 1535 1536 ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid, 1537 (void *)&replay_ctr, GFP_KERNEL); 1538 } 1539 1540 out: 1541 mvmvif->seqno_valid = true; 1542 /* +0x10 because the set API expects next-to-use, not last-used */ 1543 mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10; 1544 1545 return true; 1546 } 1547 1548 /* Occasionally, templates would be nice. This is one of those times ... 
*/ 1549 #define iwl_mvm_parse_wowlan_status_common(_ver) \ 1550 static struct iwl_wowlan_status * \ 1551 iwl_mvm_parse_wowlan_status_common_ ## _ver(struct iwl_mvm *mvm, \ 1552 void *_data, int len) \ 1553 { \ 1554 struct iwl_wowlan_status *status; \ 1555 struct iwl_wowlan_status_ ##_ver *data = _data; \ 1556 int data_size; \ 1557 \ 1558 if (len < sizeof(*data)) { \ 1559 IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); \ 1560 return ERR_PTR(-EIO); \ 1561 } \ 1562 \ 1563 data_size = ALIGN(le32_to_cpu(data->wake_packet_bufsize), 4); \ 1564 if (len != sizeof(*data) + data_size) { \ 1565 IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); \ 1566 return ERR_PTR(-EIO); \ 1567 } \ 1568 \ 1569 status = kzalloc(sizeof(*status) + data_size, GFP_KERNEL); \ 1570 if (!status) \ 1571 return ERR_PTR(-ENOMEM); \ 1572 \ 1573 /* copy all the common fields */ \ 1574 status->replay_ctr = data->replay_ctr; \ 1575 status->pattern_number = data->pattern_number; \ 1576 status->non_qos_seq_ctr = data->non_qos_seq_ctr; \ 1577 memcpy(status->qos_seq_ctr, data->qos_seq_ctr, \ 1578 sizeof(status->qos_seq_ctr)); \ 1579 status->wakeup_reasons = data->wakeup_reasons; \ 1580 status->num_of_gtk_rekeys = data->num_of_gtk_rekeys; \ 1581 status->received_beacons = data->received_beacons; \ 1582 status->wake_packet_length = data->wake_packet_length; \ 1583 status->wake_packet_bufsize = data->wake_packet_bufsize; \ 1584 memcpy(status->wake_packet, data->wake_packet, \ 1585 le32_to_cpu(status->wake_packet_bufsize)); \ 1586 \ 1587 return status; \ 1588 } 1589 1590 iwl_mvm_parse_wowlan_status_common(v6) 1591 iwl_mvm_parse_wowlan_status_common(v7) 1592 iwl_mvm_parse_wowlan_status_common(v9) 1593 1594 struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm) 1595 { 1596 struct iwl_wowlan_status *status; 1597 struct iwl_host_cmd cmd = { 1598 .id = WOWLAN_GET_STATUSES, 1599 .flags = CMD_WANT_SKB, 1600 }; 1601 int ret, len; 1602 u8 notif_ver; 1603 1604 lockdep_assert_held(&mvm->mutex); 1605 1606 ret = iwl_mvm_send_cmd(mvm, &cmd); 1607 if (ret) { 1608 IWL_ERR(mvm, "failed to query wakeup status (%d)\n", ret); 1609 return ERR_PTR(ret); 1610 } 1611 1612 len = iwl_rx_packet_payload_len(cmd.resp_pkt); 1613 1614 /* default to 7 (when we have IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL) */ 1615 notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, 1616 WOWLAN_GET_STATUSES, 7); 1617 1618 if (!fw_has_api(&mvm->fw->ucode_capa, 1619 IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL)) { 1620 struct iwl_wowlan_status_v6 *v6 = (void *)cmd.resp_pkt->data; 1621 1622 status = iwl_mvm_parse_wowlan_status_common_v6(mvm, 1623 cmd.resp_pkt->data, 1624 len); 1625 if (IS_ERR(status)) 1626 goto out_free_resp; 1627 1628 BUILD_BUG_ON(sizeof(v6->gtk.decrypt_key) > 1629 sizeof(status->gtk[0].key)); 1630 BUILD_BUG_ON(sizeof(v6->gtk.tkip_mic_key) > 1631 sizeof(status->gtk[0].tkip_mic_key)); 1632 1633 /* copy GTK info to the right place */ 1634 memcpy(status->gtk[0].key, v6->gtk.decrypt_key, 1635 sizeof(v6->gtk.decrypt_key)); 1636 memcpy(status->gtk[0].tkip_mic_key, v6->gtk.tkip_mic_key, 1637 sizeof(v6->gtk.tkip_mic_key)); 1638 memcpy(&status->gtk[0].rsc, &v6->gtk.rsc, 1639 sizeof(status->gtk[0].rsc)); 1640 1641 /* hardcode the key length to 16 since v6 only supports 16 */ 1642 status->gtk[0].key_len = 16; 1643 1644 /* 1645 * The key index only uses 2 bits (values 0 to 3) and 1646 * we always set bit 7 which means this is the 1647 * currently used key. 
1648 */ 1649 status->gtk[0].key_flags = v6->gtk.key_index | BIT(7); 1650 } else if (notif_ver == 7) { 1651 struct iwl_wowlan_status_v7 *v7 = (void *)cmd.resp_pkt->data; 1652 1653 status = iwl_mvm_parse_wowlan_status_common_v7(mvm, 1654 cmd.resp_pkt->data, 1655 len); 1656 if (IS_ERR(status)) 1657 goto out_free_resp; 1658 1659 status->gtk[0] = v7->gtk[0]; 1660 status->igtk[0] = v7->igtk[0]; 1661 } else if (notif_ver == 9) { 1662 struct iwl_wowlan_status_v9 *v9 = (void *)cmd.resp_pkt->data; 1663 1664 status = iwl_mvm_parse_wowlan_status_common_v9(mvm, 1665 cmd.resp_pkt->data, 1666 len); 1667 if (IS_ERR(status)) 1668 goto out_free_resp; 1669 1670 status->gtk[0] = v9->gtk[0]; 1671 status->igtk[0] = v9->igtk[0]; 1672 1673 status->tid_tear_down = v9->tid_tear_down; 1674 } else { 1675 IWL_ERR(mvm, 1676 "Firmware advertises unknown WoWLAN status response %d!\n", 1677 notif_ver); 1678 status = ERR_PTR(-EIO); 1679 } 1680 1681 out_free_resp: 1682 iwl_free_resp(&cmd); 1683 return status; 1684 } 1685 1686 static struct iwl_wowlan_status * 1687 iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm) 1688 { 1689 int ret; 1690 1691 /* only for tracing for now */ 1692 ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0, 0, NULL); 1693 if (ret) 1694 IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret); 1695 1696 return iwl_mvm_send_wowlan_get_status(mvm); 1697 } 1698 1699 /* releases the MVM mutex */ 1700 static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, 1701 struct ieee80211_vif *vif) 1702 { 1703 struct iwl_wowlan_status_data status; 1704 struct iwl_wowlan_status *fw_status; 1705 int i; 1706 bool keep; 1707 struct iwl_mvm_sta *mvm_ap_sta; 1708 1709 fw_status = iwl_mvm_get_wakeup_status(mvm); 1710 if (IS_ERR_OR_NULL(fw_status)) 1711 goto out_unlock; 1712 1713 IWL_DEBUG_WOWLAN(mvm, "wakeup reason 0x%x\n", 1714 le32_to_cpu(fw_status->wakeup_reasons)); 1715 1716 status.pattern_number = le16_to_cpu(fw_status->pattern_number); 1717 for (i = 0; i < 8; i++) 1718 status.qos_seq_ctr[i] = 1719 le16_to_cpu(fw_status->qos_seq_ctr[i]); 1720 status.wakeup_reasons = le32_to_cpu(fw_status->wakeup_reasons); 1721 status.wake_packet_length = 1722 le32_to_cpu(fw_status->wake_packet_length); 1723 status.wake_packet_bufsize = 1724 le32_to_cpu(fw_status->wake_packet_bufsize); 1725 status.wake_packet = fw_status->wake_packet; 1726 1727 /* still at hard-coded place 0 for D3 image */ 1728 mvm_ap_sta = iwl_mvm_sta_from_staid_protected(mvm, 0); 1729 if (!mvm_ap_sta) 1730 goto out_free; 1731 1732 for (i = 0; i < IWL_MAX_TID_COUNT; i++) { 1733 u16 seq = status.qos_seq_ctr[i]; 1734 /* firmware stores last-used value, we store next value */ 1735 seq += 0x10; 1736 mvm_ap_sta->tid_data[i].seq_number = seq; 1737 } 1738 1739 if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) { 1740 i = mvm->offload_tid; 1741 iwl_trans_set_q_ptrs(mvm->trans, 1742 mvm_ap_sta->tid_data[i].txq_id, 1743 mvm_ap_sta->tid_data[i].seq_number >> 4); 1744 } 1745 1746 /* now we have all the data we need, unlock to avoid mac80211 issues */ 1747 mutex_unlock(&mvm->mutex); 1748 1749 iwl_mvm_report_wakeup_reasons(mvm, vif, &status); 1750 1751 keep = iwl_mvm_setup_connection_keep(mvm, vif, fw_status); 1752 1753 kfree(fw_status); 1754 return keep; 1755 1756 out_free: 1757 kfree(fw_status); 1758 out_unlock: 1759 mutex_unlock(&mvm->mutex); 1760 return false; 1761 } 1762 1763 #define ND_QUERY_BUF_LEN (sizeof(struct iwl_scan_offload_profile_match) * \ 1764 IWL_SCAN_MAX_PROFILES) 1765 1766 struct iwl_mvm_nd_query_results { 1767 u32 matched_profiles; 
1768 u8 matches[ND_QUERY_BUF_LEN]; 1769 }; 1770 1771 static int 1772 iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm, 1773 struct iwl_mvm_nd_query_results *results) 1774 { 1775 struct iwl_scan_offload_profiles_query *query; 1776 struct iwl_host_cmd cmd = { 1777 .id = SCAN_OFFLOAD_PROFILES_QUERY_CMD, 1778 .flags = CMD_WANT_SKB, 1779 }; 1780 int ret, len; 1781 size_t query_len, matches_len; 1782 int max_profiles = iwl_umac_scan_get_max_profiles(mvm->fw); 1783 1784 ret = iwl_mvm_send_cmd(mvm, &cmd); 1785 if (ret) { 1786 IWL_ERR(mvm, "failed to query matched profiles (%d)\n", ret); 1787 return ret; 1788 } 1789 1790 if (fw_has_api(&mvm->fw->ucode_capa, 1791 IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) { 1792 query_len = sizeof(struct iwl_scan_offload_profiles_query); 1793 matches_len = sizeof(struct iwl_scan_offload_profile_match) * 1794 max_profiles; 1795 } else { 1796 query_len = sizeof(struct iwl_scan_offload_profiles_query_v1); 1797 matches_len = sizeof(struct iwl_scan_offload_profile_match_v1) * 1798 max_profiles; 1799 } 1800 1801 len = iwl_rx_packet_payload_len(cmd.resp_pkt); 1802 if (len < query_len) { 1803 IWL_ERR(mvm, "Invalid scan offload profiles query response!\n"); 1804 ret = -EIO; 1805 goto out_free_resp; 1806 } 1807 1808 query = (void *)cmd.resp_pkt->data; 1809 1810 results->matched_profiles = le32_to_cpu(query->matched_profiles); 1811 memcpy(results->matches, query->matches, matches_len); 1812 1813 #ifdef CONFIG_IWLWIFI_DEBUGFS 1814 mvm->last_netdetect_scans = le32_to_cpu(query->n_scans_done); 1815 #endif 1816 1817 out_free_resp: 1818 iwl_free_resp(&cmd); 1819 return ret; 1820 } 1821 1822 static int iwl_mvm_query_num_match_chans(struct iwl_mvm *mvm, 1823 struct iwl_mvm_nd_query_results *query, 1824 int idx) 1825 { 1826 int n_chans = 0, i; 1827 1828 if (fw_has_api(&mvm->fw->ucode_capa, 1829 IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) { 1830 struct iwl_scan_offload_profile_match *matches = 1831 (struct iwl_scan_offload_profile_match *)query->matches; 1832 1833 for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; i++) 1834 n_chans += hweight8(matches[idx].matching_channels[i]); 1835 } else { 1836 struct iwl_scan_offload_profile_match_v1 *matches = 1837 (struct iwl_scan_offload_profile_match_v1 *)query->matches; 1838 1839 for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1; i++) 1840 n_chans += hweight8(matches[idx].matching_channels[i]); 1841 } 1842 1843 return n_chans; 1844 } 1845 1846 static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm, 1847 struct iwl_mvm_nd_query_results *query, 1848 struct cfg80211_wowlan_nd_match *match, 1849 int idx) 1850 { 1851 int i; 1852 1853 if (fw_has_api(&mvm->fw->ucode_capa, 1854 IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) { 1855 struct iwl_scan_offload_profile_match *matches = 1856 (struct iwl_scan_offload_profile_match *)query->matches; 1857 1858 for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; i++) 1859 if (matches[idx].matching_channels[i / 8] & (BIT(i % 8))) 1860 match->channels[match->n_channels++] = 1861 mvm->nd_channels[i]->center_freq; 1862 } else { 1863 struct iwl_scan_offload_profile_match_v1 *matches = 1864 (struct iwl_scan_offload_profile_match_v1 *)query->matches; 1865 1866 for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1 * 8; i++) 1867 if (matches[idx].matching_channels[i / 8] & (BIT(i % 8))) 1868 match->channels[match->n_channels++] = 1869 mvm->nd_channels[i]->center_freq; 1870 } 1871 } 1872 1873 static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm, 1874 struct ieee80211_vif *vif) 1875 { 1876 struct 
cfg80211_wowlan_nd_info *net_detect = NULL; 1877 struct cfg80211_wowlan_wakeup wakeup = { 1878 .pattern_idx = -1, 1879 }; 1880 struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup; 1881 struct iwl_mvm_nd_query_results query; 1882 struct iwl_wowlan_status *fw_status; 1883 unsigned long matched_profiles; 1884 u32 reasons = 0; 1885 int i, n_matches, ret; 1886 1887 fw_status = iwl_mvm_get_wakeup_status(mvm); 1888 if (!IS_ERR_OR_NULL(fw_status)) { 1889 reasons = le32_to_cpu(fw_status->wakeup_reasons); 1890 kfree(fw_status); 1891 } 1892 1893 if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED) 1894 wakeup.rfkill_release = true; 1895 1896 if (reasons != IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) 1897 goto out; 1898 1899 ret = iwl_mvm_netdetect_query_results(mvm, &query); 1900 if (ret || !query.matched_profiles) { 1901 wakeup_report = NULL; 1902 goto out; 1903 } 1904 1905 matched_profiles = query.matched_profiles; 1906 if (mvm->n_nd_match_sets) { 1907 n_matches = hweight_long(matched_profiles); 1908 } else { 1909 IWL_ERR(mvm, "no net detect match information available\n"); 1910 n_matches = 0; 1911 } 1912 1913 net_detect = kzalloc(struct_size(net_detect, matches, n_matches), 1914 GFP_KERNEL); 1915 if (!net_detect || !n_matches) 1916 goto out_report_nd; 1917 1918 for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) { 1919 struct cfg80211_wowlan_nd_match *match; 1920 int idx, n_channels = 0; 1921 1922 n_channels = iwl_mvm_query_num_match_chans(mvm, &query, i); 1923 1924 match = kzalloc(struct_size(match, channels, n_channels), 1925 GFP_KERNEL); 1926 if (!match) 1927 goto out_report_nd; 1928 1929 net_detect->matches[net_detect->n_matches++] = match; 1930 1931 /* We inverted the order of the SSIDs in the scan 1932 * request, so invert the index here. 1933 */ 1934 idx = mvm->n_nd_match_sets - i - 1; 1935 match->ssid.ssid_len = mvm->nd_match_sets[idx].ssid.ssid_len; 1936 memcpy(match->ssid.ssid, mvm->nd_match_sets[idx].ssid.ssid, 1937 match->ssid.ssid_len); 1938 1939 if (mvm->n_nd_channels < n_channels) 1940 continue; 1941 1942 iwl_mvm_query_set_freqs(mvm, &query, match, i); 1943 } 1944 1945 out_report_nd: 1946 wakeup.net_detect = net_detect; 1947 out: 1948 iwl_mvm_free_nd(mvm); 1949 1950 mutex_unlock(&mvm->mutex); 1951 ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL); 1952 1953 if (net_detect) { 1954 for (i = 0; i < net_detect->n_matches; i++) 1955 kfree(net_detect->matches[i]); 1956 kfree(net_detect); 1957 } 1958 } 1959 1960 static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac, 1961 struct ieee80211_vif *vif) 1962 { 1963 /* skip the one we keep connection on */ 1964 if (data == vif) 1965 return; 1966 1967 if (vif->type == NL80211_IFTYPE_STATION) 1968 ieee80211_resume_disconnect(vif); 1969 } 1970 1971 static bool iwl_mvm_rt_status(struct iwl_trans *trans, u32 base, u32 *err_id) 1972 { 1973 struct error_table_start { 1974 /* cf. 
static bool iwl_mvm_rt_status(struct iwl_trans *trans, u32 base, u32 *err_id)
{
	struct error_table_start {
		/* cf. struct iwl_error_event_table */
		u32 valid;
		__le32 err_id;
	} err_info;

	if (!base)
		return false;

	iwl_trans_read_mem_bytes(trans, base,
				 &err_info, sizeof(err_info));
	if (err_info.valid && err_id)
		*err_id = le32_to_cpu(err_info.err_id);

	return !!err_info.valid;
}

static bool iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
				    struct ieee80211_vif *vif)
{
	u32 err_id;

	/* check for lmac1 error */
	if (iwl_mvm_rt_status(mvm->trans,
			      mvm->trans->dbg.lmac_error_event_table[0],
			      &err_id)) {
		if (err_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
			struct cfg80211_wowlan_wakeup wakeup = {
				.rfkill_release = true,
			};
			ieee80211_report_wowlan_wakeup(vif, &wakeup,
						       GFP_KERNEL);
		}
		return true;
	}

	/* check if we have lmac2 set and check for error */
	if (iwl_mvm_rt_status(mvm->trans,
			      mvm->trans->dbg.lmac_error_event_table[1], NULL))
		return true;

	/* check for umac error */
	if (iwl_mvm_rt_status(mvm->trans,
			      mvm->trans->dbg.umac_error_event_table, NULL))
		return true;

	return false;
}

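/*
 * Core of the resume flow.  Roughly: check the firmware error tables,
 * bring the transport back from D3, send D0I3_END_CMD first when the
 * firmware advertises that ordering, replay state the D3 firmware may
 * have changed (regulatory domain, PPAG, SAR profile), and finally
 * query the wake-up reasons, either via the net-detect path or the
 * regular one.  Returning a non-zero value asks mac80211 to also go
 * through its normal (re)configuration of the device.
 */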
static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
{
	struct ieee80211_vif *vif = NULL;
	int ret = 1;
	enum iwl_d3_status d3_status;
	bool keep = false;
	bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
					 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
	bool d0i3_first = fw_has_capa(&mvm->fw->ucode_capa,
				      IWL_UCODE_TLV_CAPA_D0I3_END_FIRST);

	mutex_lock(&mvm->mutex);

	clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);

	/* get the BSS vif pointer again */
	vif = iwl_mvm_get_bss_vif(mvm);
	if (IS_ERR_OR_NULL(vif))
		goto err;

	iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);

	if (iwl_mvm_check_rt_status(mvm, vif)) {
		set_bit(STATUS_FW_ERROR, &mvm->trans->status);
		iwl_mvm_dump_nic_error_log(mvm);
		iwl_dbg_tlv_time_point(&mvm->fwrt,
				       IWL_FW_INI_TIME_POINT_FW_ASSERT, NULL);
		iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
					false, 0);
		ret = 1;
		goto err;
	}

	iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_HOST_D3_END,
			       NULL);

	ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !unified_image);
	if (ret)
		goto err;

	if (d3_status != IWL_D3_STATUS_ALIVE) {
		IWL_INFO(mvm, "Device was reset during suspend\n");
		goto err;
	}

	if (d0i3_first) {
		struct iwl_host_cmd cmd = {
			.id = D0I3_END_CMD,
			.flags = CMD_WANT_SKB,
		};
		int len;

		ret = iwl_mvm_send_cmd(mvm, &cmd);
		if (ret < 0) {
			IWL_ERR(mvm, "Failed to send D0I3_END_CMD first (%d)\n",
				ret);
			goto err;
		}
		switch (mvm->cmd_ver.d0i3_resp) {
		case 0:
			break;
		case 1:
			len = iwl_rx_packet_payload_len(cmd.resp_pkt);
			if (len != sizeof(u32)) {
				IWL_ERR(mvm,
					"Error with D0I3_END_CMD response size (%d)\n",
					len);
				goto err;
			}
			if (IWL_D0I3_RESET_REQUIRE &
			    le32_to_cpu(*(__le32 *)cmd.resp_pkt->data)) {
				iwl_write32(mvm->trans, CSR_RESET,
					    CSR_RESET_REG_FLAG_FORCE_NMI);
				iwl_free_resp(&cmd);
			}
			break;
		default:
			WARN_ON(1);
		}
	}

	/*
	 * Query the current location and source from the D3 firmware so we
	 * can play it back when we re-initialize the D0 firmware
	 */
	iwl_mvm_update_changed_regdom(mvm);

	/* Re-configure PPAG settings */
	iwl_mvm_ppag_send_cmd(mvm);

	if (!unified_image)
		/* Re-configure default SAR profile */
		iwl_mvm_sar_select_profile(mvm, 1, 1);

	if (mvm->net_detect) {
		/* If this is a non-unified image, we restart the FW,
		 * so no need to stop the netdetect scan.  If that
		 * fails, continue and try to get the wake-up reasons,
		 * but trigger a HW restart by keeping a failure code
		 * in ret.
		 */
		if (unified_image)
			ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_NETDETECT,
						false);

		iwl_mvm_query_netdetect_reasons(mvm, vif);
		/* has unlocked the mutex, so skip that */
		goto out;
	} else {
		keep = iwl_mvm_query_wakeup_reasons(mvm, vif);
#ifdef CONFIG_IWLWIFI_DEBUGFS
		if (keep)
			mvm->keep_vif = vif;
#endif
		/* has unlocked the mutex, so skip that */
		goto out_iterate;
	}

err:
	iwl_mvm_free_nd(mvm);
	mutex_unlock(&mvm->mutex);

out_iterate:
	if (!test)
		ieee80211_iterate_active_interfaces_rtnl(mvm->hw,
			IEEE80211_IFACE_ITER_NORMAL,
			iwl_mvm_d3_disconnect_iter, keep ? vif : NULL);

out:
	/* no need to reset the device in unified images, if successful */
	if (unified_image && !ret) {
		/* nothing else to do if we already sent D0I3_END_CMD */
		if (d0i3_first)
			return 0;

		ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL);
		if (!ret)
			return 0;
	}

	/*
	 * Reconfigure the device in one of the following cases:
	 * 1. We are not using a unified image
	 * 2. We are using a unified image but had an error while exiting D3
	 */
	set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);

	return 1;
}

static int iwl_mvm_resume_d3(struct iwl_mvm *mvm)
{
	iwl_trans_resume(mvm->trans);

	return __iwl_mvm_resume(mvm, false);
}

int iwl_mvm_resume(struct ieee80211_hw *hw)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	int ret;

	ret = iwl_mvm_resume_d3(mvm);

	mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;

	iwl_mvm_resume_tcm(mvm);

	iwl_fw_runtime_resume(&mvm->fwrt);

	return ret;
}

void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	device_set_wakeup_enable(mvm->trans->dev, enabled);
}

#ifdef CONFIG_IWLWIFI_DEBUGFS
static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
{
	struct iwl_mvm *mvm = inode->i_private;
	int err;

	if (mvm->d3_test_active)
		return -EBUSY;

	file->private_data = inode->i_private;

	synchronize_net();

	mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;

	iwl_mvm_pause_tcm(mvm, true);

	iwl_fw_runtime_suspend(&mvm->fwrt);

	/* start pseudo D3 */
	rtnl_lock();
	err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true);
	rtnl_unlock();
	if (err > 0)
		err = -EINVAL;
	if (err)
		return err;

	mvm->d3_test_active = true;
	mvm->keep_vif = NULL;
	return 0;
}

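/*
 * debugfs "d3_test" read handler: after open() has put the device into
 * pseudo D3, poll the firmware's PME pointer (when one was advertised)
 * and return once a wakeup is asserted, or when the sleeping task is
 * interrupted by a signal.  No data is returned; userspace only cares
 * about the read completing.
 */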
static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf,
				    size_t count, loff_t *ppos)
{
	struct iwl_mvm *mvm = file->private_data;
	u32 pme_asserted;

	while (true) {
		/* read pme_ptr if available */
		if (mvm->d3_test_pme_ptr) {
			pme_asserted = iwl_trans_read_mem32(mvm->trans,
						mvm->d3_test_pme_ptr);
			if (pme_asserted)
				break;
		}

		if (msleep_interruptible(100))
			break;
	}

	return 0;
}

static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac,
					      struct ieee80211_vif *vif)
{
	/* skip the one we keep connection on */
	if (_data == vif)
		return;

	if (vif->type == NL80211_IFTYPE_STATION)
		ieee80211_connection_loss(vif);
}

static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
{
	struct iwl_mvm *mvm = inode->i_private;
	bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
					 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);

	mvm->d3_test_active = false;

	iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);

	rtnl_lock();
	__iwl_mvm_resume(mvm, true);
	rtnl_unlock();

	iwl_mvm_resume_tcm(mvm);

	iwl_fw_runtime_resume(&mvm->fwrt);

	mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;

	iwl_abort_notification_waits(&mvm->notif_wait);
	if (!unified_image) {
		int remaining_time = 10;

		ieee80211_restart_hw(mvm->hw);

		/* wait for restart and disconnect all interfaces */
		while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
		       remaining_time > 0) {
			remaining_time--;
			msleep(1000);
		}

		if (remaining_time == 0)
			IWL_ERR(mvm, "Timed out waiting for HW restart!\n");
	}

	ieee80211_iterate_active_interfaces_atomic(
		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
		iwl_mvm_d3_test_disconn_work_iter, mvm->keep_vif);

	return 0;
}

const struct file_operations iwl_dbgfs_d3_test_ops = {
	.llseek = no_llseek,
	.open = iwl_mvm_d3_test_open,
	.read = iwl_mvm_d3_test_read,
	.release = iwl_mvm_d3_test_release,
};
#endif
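
/*
 * Typical use of the d3_test hook: keep the debugfs file open, e.g.
 * with "cat .../iwlmvm/d3_test" (the exact location depends on where
 * the mvm debugfs directory is registered).  open() enters pseudo-D3,
 * the blocking read returns once the firmware signals a wakeup, and
 * closing the file resumes the device and, for non-unified images,
 * waits for the HW restart before disconnecting the remaining station
 * interfaces.
 */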