/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/

#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/fs.h>
#include <net/cfg80211.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <net/addrconf.h>
#include "iwl-modparams.h"
#include "fw-api.h"
#include "mvm.h"

void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw,
			    struct ieee80211_vif *vif,
			    struct cfg80211_gtk_rekey_data *data)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (iwlwifi_mod_params.sw_crypto)
		return;

	mutex_lock(&mvm->mutex);

	memcpy(mvmvif->rekey_data.kek, data->kek, NL80211_KEK_LEN);
	memcpy(mvmvif->rekey_data.kck, data->kck, NL80211_KCK_LEN);
	mvmvif->rekey_data.replay_ctr =
		cpu_to_le64(be64_to_cpup((__be64 *)data->replay_ctr));
	mvmvif->rekey_data.valid = true;

	mutex_unlock(&mvm->mutex);
}

#if IS_ENABLED(CONFIG_IPV6)
void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif,
			      struct inet6_dev *idev)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct inet6_ifaddr *ifa;
	int idx = 0;

	memset(mvmvif->tentative_addrs, 0, sizeof(mvmvif->tentative_addrs));

	read_lock_bh(&idev->lock);
	list_for_each_entry(ifa, &idev->addr_list, if_list) {
		mvmvif->target_ipv6_addrs[idx] = ifa->addr;
		if (ifa->flags & IFA_F_TENTATIVE)
			__set_bit(idx, mvmvif->tentative_addrs);
		idx++;
		if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX)
			break;
	}
	read_unlock_bh(&idev->lock);

	mvmvif->num_target_ipv6_addrs = idx;
}
#endif

void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif, int idx)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	mvmvif->tx_key_idx = idx;
}

static void iwl_mvm_convert_p1k(u16 *p1k, __le16 *out)
{
	int i;

	for (i = 0; i < IWL_P1K_SIZE; i++)
		out[i] = cpu_to_le16(p1k[i]);
}

static const u8 *iwl_mvm_find_max_pn(struct ieee80211_key_conf *key,
				     struct iwl_mvm_key_pn *ptk_pn,
				     struct ieee80211_key_seq *seq,
				     int tid, int queues)
{
	const u8 *ret = seq->ccmp.pn;
	int i;

	/* get the PN from mac80211, used on the default queue */
	ieee80211_get_key_rx_seq(key, tid, seq);

	/* and use the internal data for the other queues */
	for (i = 1; i < queues; i++) {
		const u8 *tmp = ptk_pn->q[i].pn[tid];

		if (memcmp(ret, tmp, IEEE80211_CCMP_PN_LEN) <= 0)
			ret = tmp;
	}

	return ret;
}

struct wowlan_key_data {
	struct iwl_wowlan_rsc_tsc_params_cmd *rsc_tsc;
	struct iwl_wowlan_tkip_params_cmd *tkip;
	bool error, use_rsc_tsc, use_tkip, configure_keys;
	int wep_key_idx;
};

static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					struct ieee80211_sta *sta,
					struct ieee80211_key_conf *key,
					void *_data)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct wowlan_key_data *data = _data;
	struct aes_sc *aes_sc, *aes_tx_sc = NULL;
	struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL;
	struct iwl_p1k_cache *rx_p1ks;
	u8 *rx_mic_key;
	struct ieee80211_key_seq seq;
	u32 cur_rx_iv32 = 0;
	u16 p1k[IWL_P1K_SIZE];
	int ret, i;

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104: { /* hack it for now */
		struct {
			struct iwl_mvm_wep_key_cmd wep_key_cmd;
			struct iwl_mvm_wep_key wep_key;
		} __packed wkc = {
			.wep_key_cmd.mac_id_n_color =
				cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
								mvmvif->color)),
			.wep_key_cmd.num_keys = 1,
			/* firmware sets STA_KEY_FLG_WEP_13BYTES */
			.wep_key_cmd.decryption_type = STA_KEY_FLG_WEP,
			.wep_key.key_index = key->keyidx,
			.wep_key.key_size = key->keylen,
		};

		/*
		 * This will fail -- the key functions don't support
		 * pairwise WEP keys. However, that's better than silently
		 * failing WoWLAN. Or maybe not?
		 */
		if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
			break;

		memcpy(&wkc.wep_key.key[3], key->key, key->keylen);
		if (key->keyidx == mvmvif->tx_key_idx) {
			/* TX key must be at offset 0 */
			wkc.wep_key.key_offset = 0;
		} else {
			/* others start at 1 */
			data->wep_key_idx++;
			wkc.wep_key.key_offset = data->wep_key_idx;
		}

		if (data->configure_keys) {
			mutex_lock(&mvm->mutex);
			ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, 0,
						   sizeof(wkc), &wkc);
			data->error = ret != 0;

			mvm->ptk_ivlen = key->iv_len;
			mvm->ptk_icvlen = key->icv_len;
			mvm->gtk_ivlen = key->iv_len;
			mvm->gtk_icvlen = key->icv_len;
			mutex_unlock(&mvm->mutex);
		}

		/* don't upload key again */
		return;
	}
	default:
		data->error = true;
		return;
	case WLAN_CIPHER_SUITE_AES_CMAC:
		/*
		 * Ignore CMAC keys -- the WoWLAN firmware doesn't support them
		 * but we also shouldn't abort suspend due to that. It does have
		 * support for the IGTK key renewal, but doesn't really use the
		 * IGTK for anything. This means we could spuriously wake up or
		 * be deauthenticated, but that was considered acceptable.
		 */
		return;
	case WLAN_CIPHER_SUITE_TKIP:
		if (sta) {
			u64 pn64;

			tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc;
			tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc;

			rx_p1ks = data->tkip->rx_uni;

			pn64 = atomic64_read(&key->tx_pn);
			tkip_tx_sc->iv16 = cpu_to_le16(TKIP_PN_TO_IV16(pn64));
			tkip_tx_sc->iv32 = cpu_to_le32(TKIP_PN_TO_IV32(pn64));

			ieee80211_get_tkip_p1k_iv(key, TKIP_PN_TO_IV32(pn64),
						  p1k);
			iwl_mvm_convert_p1k(p1k, data->tkip->tx.p1k);

			memcpy(data->tkip->mic_keys.tx,
			       &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);

			rx_mic_key = data->tkip->mic_keys.rx_unicast;
		} else {
			tkip_sc =
				data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc;
			rx_p1ks = data->tkip->rx_multi;
			rx_mic_key = data->tkip->mic_keys.rx_mcast;
		}

		/*
		 * For non-QoS this relies on the fact that both the uCode and
		 * mac80211 use TID 0 (as they need to, to avoid replay attacks)
		 * for checking the IV in the frames.
		 */
		for (i = 0; i < IWL_NUM_RSC; i++) {
			ieee80211_get_key_rx_seq(key, i, &seq);
			tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16);
			tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32);
			/* wrapping isn't allowed, AP must rekey */
			if (seq.tkip.iv32 > cur_rx_iv32)
				cur_rx_iv32 = seq.tkip.iv32;
		}

		ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid,
					  cur_rx_iv32, p1k);
		iwl_mvm_convert_p1k(p1k, rx_p1ks[0].p1k);
		ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid,
					  cur_rx_iv32 + 1, p1k);
		iwl_mvm_convert_p1k(p1k, rx_p1ks[1].p1k);

		memcpy(rx_mic_key,
		       &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
		       IWL_MIC_KEY_SIZE);

		data->use_tkip = true;
		data->use_rsc_tsc = true;
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		if (sta) {
			u64 pn64;

			aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
			aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;

			pn64 = atomic64_read(&key->tx_pn);
			aes_tx_sc->pn = cpu_to_le64(pn64);
		} else {
			aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
		}

		/*
		 * For non-QoS this relies on the fact that both the uCode and
		 * mac80211/our RX code use TID 0 for checking the PN.
		 */
		if (sta && iwl_mvm_has_new_rx_api(mvm)) {
			struct iwl_mvm_sta *mvmsta;
			struct iwl_mvm_key_pn *ptk_pn;
			const u8 *pn;

			mvmsta = iwl_mvm_sta_from_mac80211(sta);
			ptk_pn = rcu_dereference_protected(
					mvmsta->ptk_pn[key->keyidx],
					lockdep_is_held(&mvm->mutex));
			if (WARN_ON(!ptk_pn))
				break;

			for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
				pn = iwl_mvm_find_max_pn(key, ptk_pn, &seq, i,
						mvm->trans->num_rx_queues);
				aes_sc[i].pn = cpu_to_le64((u64)pn[5] |
							   ((u64)pn[4] << 8) |
							   ((u64)pn[3] << 16) |
							   ((u64)pn[2] << 24) |
							   ((u64)pn[1] << 32) |
							   ((u64)pn[0] << 40));
			}
		} else {
			for (i = 0; i < IWL_NUM_RSC; i++) {
				u8 *pn = seq.ccmp.pn;

				ieee80211_get_key_rx_seq(key, i, &seq);
				aes_sc[i].pn = cpu_to_le64((u64)pn[5] |
							   ((u64)pn[4] << 8) |
							   ((u64)pn[3] << 16) |
							   ((u64)pn[2] << 24) |
							   ((u64)pn[1] << 32) |
							   ((u64)pn[0] << 40));
			}
		}
		data->use_rsc_tsc = true;
		break;
	}

	if (data->configure_keys) {
		mutex_lock(&mvm->mutex);
		/*
		 * The D3 firmware hardcodes the key offset 0 as the key it
		 * uses to transmit packets to the AP, i.e. the PTK.
		 */
		if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
			mvm->ptk_ivlen = key->iv_len;
			mvm->ptk_icvlen = key->icv_len;
			ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 0);
		} else {
			/*
			 * firmware only supports TSC/RSC for a single key,
			 * so if there are multiple keep overwriting them
			 * with new ones -- this relies on mac80211 doing
			 * list_add_tail().
			 */
			mvm->gtk_ivlen = key->iv_len;
			mvm->gtk_icvlen = key->icv_len;
			ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 1);
		}
		mutex_unlock(&mvm->mutex);
		data->error = ret != 0;
	}
}

static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
				 struct cfg80211_wowlan *wowlan)
{
	struct iwl_wowlan_patterns_cmd *pattern_cmd;
	struct iwl_host_cmd cmd = {
		.id = WOWLAN_PATTERNS,
		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
	};
	int i, err;

	if (!wowlan->n_patterns)
		return 0;

	cmd.len[0] = sizeof(*pattern_cmd) +
		wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern);

	pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
	if (!pattern_cmd)
		return -ENOMEM;

	pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);

	for (i = 0; i < wowlan->n_patterns; i++) {
		int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);

		memcpy(&pattern_cmd->patterns[i].mask,
		       wowlan->patterns[i].mask, mask_len);
		memcpy(&pattern_cmd->patterns[i].pattern,
		       wowlan->patterns[i].pattern,
		       wowlan->patterns[i].pattern_len);
		pattern_cmd->patterns[i].mask_size = mask_len;
		pattern_cmd->patterns[i].pattern_size =
			wowlan->patterns[i].pattern_len;
	}

	cmd.data[0] = pattern_cmd;
	err = iwl_mvm_send_cmd(mvm, &cmd);
	kfree(pattern_cmd);
	return err;
}

enum iwl_mvm_tcp_packet_type {
	MVM_TCP_TX_SYN,
	MVM_TCP_RX_SYNACK,
	MVM_TCP_TX_DATA,
	MVM_TCP_RX_ACK,
	MVM_TCP_RX_WAKE,
	MVM_TCP_TX_FIN,
};

static __le16 pseudo_hdr_check(int len, __be32 saddr, __be32 daddr)
{
	__sum16 check = tcp_v4_check(len, saddr, daddr, 0);

	return cpu_to_le16(be16_to_cpu((__force __be16)check));
}

static void iwl_mvm_build_tcp_packet(struct ieee80211_vif *vif,
				     struct cfg80211_wowlan_tcp *tcp,
				     void *_pkt, u8 *mask,
				     __le16 *pseudo_hdr_csum,
				     enum iwl_mvm_tcp_packet_type ptype)
{
	struct {
		struct ethhdr eth;
		struct iphdr ip;
		struct tcphdr tcp;
		u8 data[];
	} __packed *pkt = _pkt;
	u16 ip_tot_len = sizeof(struct iphdr) + sizeof(struct tcphdr);
	int i;

	pkt->eth.h_proto = cpu_to_be16(ETH_P_IP);
	pkt->ip.version = 4;
	pkt->ip.ihl = 5;
	pkt->ip.protocol = IPPROTO_TCP;

	switch (ptype) {
	case MVM_TCP_TX_SYN:
	case MVM_TCP_TX_DATA:
	case MVM_TCP_TX_FIN:
		memcpy(pkt->eth.h_dest, tcp->dst_mac, ETH_ALEN);
		memcpy(pkt->eth.h_source, vif->addr, ETH_ALEN);
		pkt->ip.ttl = 128;
		pkt->ip.saddr = tcp->src;
		pkt->ip.daddr = tcp->dst;
		pkt->tcp.source = cpu_to_be16(tcp->src_port);
		pkt->tcp.dest = cpu_to_be16(tcp->dst_port);
		/* overwritten for TX SYN later */
		pkt->tcp.doff = sizeof(struct tcphdr) / 4;
		pkt->tcp.window = cpu_to_be16(65000);
		break;
	case MVM_TCP_RX_SYNACK:
	case MVM_TCP_RX_ACK:
	case MVM_TCP_RX_WAKE:
		memcpy(pkt->eth.h_dest, vif->addr, ETH_ALEN);
		memcpy(pkt->eth.h_source, tcp->dst_mac, ETH_ALEN);
		pkt->ip.saddr = tcp->dst;
		pkt->ip.daddr = tcp->src;
		pkt->tcp.source = cpu_to_be16(tcp->dst_port);
		pkt->tcp.dest = cpu_to_be16(tcp->src_port);
		break;
	default:
		WARN_ON(1);
		return;
	}

	switch (ptype) {
	case MVM_TCP_TX_SYN:
		/* firmware assumes 8 option bytes - 8 NOPs for now */
		memset(pkt->data, 0x01, 8);
		ip_tot_len += 8;
		pkt->tcp.doff = (sizeof(struct tcphdr) + 8) / 4;
		pkt->tcp.syn = 1;
		break;
	case MVM_TCP_TX_DATA:
		ip_tot_len += tcp->payload_len;
		memcpy(pkt->data, tcp->payload, tcp->payload_len);
		pkt->tcp.psh = 1;
		pkt->tcp.ack = 1;
		break;
	case MVM_TCP_TX_FIN:
		pkt->tcp.fin = 1;
		pkt->tcp.ack = 1;
		break;
	case MVM_TCP_RX_SYNACK:
		pkt->tcp.syn = 1;
		pkt->tcp.ack = 1;
		break;
	case MVM_TCP_RX_ACK:
		pkt->tcp.ack = 1;
		break;
	case MVM_TCP_RX_WAKE:
		ip_tot_len += tcp->wake_len;
		pkt->tcp.psh = 1;
		pkt->tcp.ack = 1;
		memcpy(pkt->data, tcp->wake_data, tcp->wake_len);
		break;
	}

	switch (ptype) {
	case MVM_TCP_TX_SYN:
	case MVM_TCP_TX_DATA:
	case MVM_TCP_TX_FIN:
		pkt->ip.tot_len = cpu_to_be16(ip_tot_len);
		pkt->ip.check = ip_fast_csum(&pkt->ip, pkt->ip.ihl);
		break;
	case MVM_TCP_RX_WAKE:
		for (i = 0; i < DIV_ROUND_UP(tcp->wake_len, 8); i++) {
			u8 tmp = tcp->wake_mask[i];

			mask[i + 6] |= tmp << 6;
			if (i + 1 < DIV_ROUND_UP(tcp->wake_len, 8))
				mask[i + 7] = tmp >> 2;
		}
		/* fall through for ethernet/IP/TCP headers mask */
	case MVM_TCP_RX_SYNACK:
	case MVM_TCP_RX_ACK:
		mask[0] = 0xff; /* match ethernet */
		/*
		 * match ethernet, ip.version, ip.ihl
		 * the ip.ihl half byte is really masked out by firmware
		 */
		mask[1] = 0x7f;
		mask[2] = 0x80; /* match ip.protocol */
		mask[3] = 0xfc; /* match ip.saddr, ip.daddr */
		mask[4] = 0x3f; /* match ip.daddr, tcp.source, tcp.dest */
		mask[5] = 0x80; /* match tcp flags */
		/* leave rest (0 or set for MVM_TCP_RX_WAKE) */
		break;
	}

	*pseudo_hdr_csum = pseudo_hdr_check(ip_tot_len - sizeof(struct iphdr),
					    pkt->ip.saddr, pkt->ip.daddr);
}

static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm,
					struct ieee80211_vif *vif,
					struct cfg80211_wowlan_tcp *tcp)
{
	struct iwl_wowlan_remote_wake_config *cfg;
	struct iwl_host_cmd cmd = {
		.id = REMOTE_WAKE_CONFIG_CMD,
		.len = { sizeof(*cfg), },
		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
	};
	int ret;

	if (!tcp)
		return 0;

	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
	if (!cfg)
		return -ENOMEM;
	cmd.data[0] = cfg;

	cfg->max_syn_retries = 10;
	cfg->max_data_retries = 10;
	cfg->tcp_syn_ack_timeout = 1; /* seconds */
	cfg->tcp_ack_timeout = 1; /* seconds */

	/* SYN (TX) */
	iwl_mvm_build_tcp_packet(
		vif, tcp, cfg->syn_tx.data, NULL,
		&cfg->syn_tx.info.tcp_pseudo_header_checksum,
		MVM_TCP_TX_SYN);
	cfg->syn_tx.info.tcp_payload_length = 0;

	/* SYN/ACK (RX) */
	iwl_mvm_build_tcp_packet(
		vif, tcp, cfg->synack_rx.data, cfg->synack_rx.rx_mask,
		&cfg->synack_rx.info.tcp_pseudo_header_checksum,
		MVM_TCP_RX_SYNACK);
	cfg->synack_rx.info.tcp_payload_length = 0;

	/* KEEPALIVE/ACK (TX) */
	iwl_mvm_build_tcp_packet(
		vif, tcp, cfg->keepalive_tx.data, NULL,
		&cfg->keepalive_tx.info.tcp_pseudo_header_checksum,
		MVM_TCP_TX_DATA);
	cfg->keepalive_tx.info.tcp_payload_length =
		cpu_to_le16(tcp->payload_len);
	cfg->sequence_number_offset = tcp->payload_seq.offset;
	/* length must be 0..4, the field is little endian */
	cfg->sequence_number_length = tcp->payload_seq.len;
	cfg->initial_sequence_number = cpu_to_le32(tcp->payload_seq.start);
	cfg->keepalive_interval = cpu_to_le16(tcp->data_interval);
	if (tcp->payload_tok.len) {
		cfg->token_offset = tcp->payload_tok.offset;
		cfg->token_length = tcp->payload_tok.len;
		cfg->num_tokens =
			cpu_to_le16(tcp->tokens_size / tcp->payload_tok.len);
		memcpy(cfg->tokens, tcp->payload_tok.token_stream,
		       tcp->tokens_size);
	} else {
		/* set tokens to max value to almost never run out */
		cfg->num_tokens = cpu_to_le16(65535);
	}

	/* ACK (RX) */
	iwl_mvm_build_tcp_packet(
		vif, tcp, cfg->keepalive_ack_rx.data,
		cfg->keepalive_ack_rx.rx_mask,
		&cfg->keepalive_ack_rx.info.tcp_pseudo_header_checksum,
		MVM_TCP_RX_ACK);
	cfg->keepalive_ack_rx.info.tcp_payload_length = 0;

	/* WAKEUP (RX) */
	iwl_mvm_build_tcp_packet(
		vif, tcp, cfg->wake_rx.data, cfg->wake_rx.rx_mask,
		&cfg->wake_rx.info.tcp_pseudo_header_checksum,
		MVM_TCP_RX_WAKE);
	cfg->wake_rx.info.tcp_payload_length =
		cpu_to_le16(tcp->wake_len);

	/* FIN */
	iwl_mvm_build_tcp_packet(
		vif, tcp, cfg->fin_tx.data, NULL,
		&cfg->fin_tx.info.tcp_pseudo_header_checksum,
		MVM_TCP_TX_FIN);
	cfg->fin_tx.info.tcp_payload_length = 0;

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	kfree(cfg);

	return ret;
}

static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				struct ieee80211_sta *ap_sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct ieee80211_chanctx_conf *ctx;
	u8 chains_static, chains_dynamic;
	struct cfg80211_chan_def chandef;
	int ret, i;
	struct iwl_binding_cmd binding_cmd = {};
	struct iwl_time_quota_cmd quota_cmd = {};
	u32 status;

	/* add back the PHY */
	if (WARN_ON(!mvmvif->phy_ctxt))
		return -EINVAL;

	rcu_read_lock();
	ctx = rcu_dereference(vif->chanctx_conf);
	if (WARN_ON(!ctx)) {
		rcu_read_unlock();
		return -EINVAL;
	}
	chandef = ctx->def;
	chains_static = ctx->rx_chains_static;
	chains_dynamic = ctx->rx_chains_dynamic;
	rcu_read_unlock();

	ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->phy_ctxt, &chandef,
				   chains_static, chains_dynamic);
	if (ret)
		return ret;

	/* add back the MAC */
	mvmvif->uploaded = false;

	if (WARN_ON(!vif->bss_conf.assoc))
		return -EINVAL;

	ret = iwl_mvm_mac_ctxt_add(mvm, vif);
	if (ret)
		return ret;

	/* add back binding - XXX refactor? */
	binding_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
						mvmvif->phy_ctxt->color));
	binding_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
	binding_cmd.phy =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
						mvmvif->phy_ctxt->color));
	binding_cmd.macs[0] = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
							      mvmvif->color));
	for (i = 1; i < MAX_MACS_IN_BINDING; i++)
		binding_cmd.macs[i] = cpu_to_le32(FW_CTXT_INVALID);

	status = 0;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD,
					  sizeof(binding_cmd), &binding_cmd,
					  &status);
	if (ret) {
		IWL_ERR(mvm, "Failed to add binding: %d\n", ret);
		return ret;
	}

	if (status) {
		IWL_ERR(mvm, "Binding command failed: %u\n", status);
		return -EIO;
	}

	ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false, 0);
	if (ret)
		return ret;
	rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta);

	ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
	if (ret)
		return ret;

	/* and some quota */
	quota_cmd.quotas[0].id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
						mvmvif->phy_ctxt->color));
	quota_cmd.quotas[0].quota = cpu_to_le32(IWL_MVM_MAX_QUOTA);
	quota_cmd.quotas[0].max_duration = cpu_to_le32(IWL_MVM_MAX_QUOTA);

	for (i = 1; i < MAX_BINDINGS; i++)
		quota_cmd.quotas[i].id_and_color = cpu_to_le32(FW_CTXT_INVALID);

	ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0,
				   sizeof(quota_cmd), &quota_cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send quota: %d\n", ret);

	if (iwl_mvm_is_lar_supported(mvm) && iwl_mvm_init_fw_regd(mvm))
		IWL_ERR(mvm, "Failed to initialize D3 LAR information\n");

	return 0;
}

static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_nonqos_seq_query_cmd query_cmd = {
		.get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_GET),
		.mac_id_n_color =
			cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
							mvmvif->color)),
	};
	struct iwl_host_cmd cmd = {
		.id = NON_QOS_TX_COUNTER_CMD,
		.flags = CMD_WANT_SKB,
	};
	int err;
	u32 size;

	cmd.data[0] = &query_cmd;
	cmd.len[0] = sizeof(query_cmd);

	err = iwl_mvm_send_cmd(mvm, &cmd);
	if (err)
		return err;

	size = iwl_rx_packet_payload_len(cmd.resp_pkt);
	if (size < sizeof(__le16)) {
		err = -EINVAL;
	} else {
		err = le16_to_cpup((__le16 *)cmd.resp_pkt->data);
		/* firmware returns next, not last-used seqno */
		err = (u16) (err - 0x10);
	}

	iwl_free_resp(&cmd);
	return err;
}

void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_nonqos_seq_query_cmd query_cmd = {
		.get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_SET),
		.mac_id_n_color =
			cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
							mvmvif->color)),
		.value = cpu_to_le16(mvmvif->seqno),
	};

	/* return if called during restart, not resume from D3 */
	if (!mvmvif->seqno_valid)
		return;

	mvmvif->seqno_valid = false;

	if (iwl_mvm_send_cmd_pdu(mvm, NON_QOS_TX_COUNTER_CMD, 0,
				 sizeof(query_cmd), &query_cmd))
		IWL_ERR(mvm, "failed to set non-QoS seqno\n");
}
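/*
 * Switch from the runtime firmware to the separate D3 (WoWLAN) image:
 * stop any regular scan, stop the device and load the D3 firmware.
 * The callers then reprogram contexts and keys manually as needed.
 */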
static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm)
{
	iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);

	iwl_mvm_stop_device(mvm);
	/*
	 * Set the HW restart bit -- this is mostly true as we're
	 * going to load new firmware and reprogram that, though
	 * the reprogramming is going to be manual to avoid adding
	 * all the MACs that aren't supported.
	 * We don't have to clear up everything though because the
	 * reprogramming is manual. When we resume, we'll actually
	 * go through a proper restart sequence again to switch
	 * back to the runtime firmware image.
	 */
	set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);

	/* the fw is reset, so all the keys are cleared */
	memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));

	mvm->ptk_ivlen = 0;
	mvm->ptk_icvlen = 0;
	mvm->gtk_ivlen = 0;
	mvm->gtk_icvlen = 0;

	return iwl_mvm_load_d3_fw(mvm);
}

static int
iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
			  struct cfg80211_wowlan *wowlan,
			  struct iwl_wowlan_config_cmd *wowlan_config_cmd,
			  struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif,
			  struct ieee80211_sta *ap_sta)
{
	int ret;
	struct iwl_mvm_sta *mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);

	/* TODO: wowlan_config_cmd->wowlan_ba_teardown_tids */

	wowlan_config_cmd->is_11n_connection =
					ap_sta->ht_cap.ht_supported;
	wowlan_config_cmd->flags = ENABLE_L3_FILTERING |
		ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING;

	/* Query the last used seqno and set it */
	ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
	if (ret < 0)
		return ret;

	wowlan_config_cmd->non_qos_seq = cpu_to_le16(ret);

	iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, wowlan_config_cmd);

	if (wowlan->disconnect)
		wowlan_config_cmd->wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
				    IWL_WOWLAN_WAKEUP_LINK_CHANGE);
	if (wowlan->magic_pkt)
		wowlan_config_cmd->wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_MAGIC_PACKET);
	if (wowlan->gtk_rekey_failure)
		wowlan_config_cmd->wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
	if (wowlan->eap_identity_req)
		wowlan_config_cmd->wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ);
	if (wowlan->four_way_handshake)
		wowlan_config_cmd->wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
	if (wowlan->n_patterns)
		wowlan_config_cmd->wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_PATTERN_MATCH);

	if (wowlan->rfkill_release)
		wowlan_config_cmd->wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);

	if (wowlan->tcp) {
		/*
		 * Set the "link change" (really "link lost") flag as well
		 * since that implies losing the TCP connection.
		 */
		wowlan_config_cmd->wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS |
				    IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE |
				    IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET |
				    IWL_WOWLAN_WAKEUP_LINK_CHANGE);
	}

	return 0;
}

static void
iwl_mvm_iter_d0i3_ap_keys(struct iwl_mvm *mvm,
			  struct ieee80211_vif *vif,
			  void (*iter)(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta,
				       struct ieee80211_key_conf *key,
				       void *data),
			  void *data)
{
	struct ieee80211_sta *ap_sta;

	rcu_read_lock();

	ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[mvm->d0i3_ap_sta_id]);
	if (IS_ERR_OR_NULL(ap_sta))
		goto out;

	ieee80211_iter_keys_rcu(mvm->hw, vif, iter, data);
 out:
	rcu_read_unlock();
}

int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
				     struct ieee80211_vif *vif,
				     bool d0i3,
				     u32 cmd_flags)
{
	struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {};
	struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
	struct wowlan_key_data key_data = {
		.configure_keys = !d0i3,
		.use_rsc_tsc = false,
		.tkip = &tkip_cmd,
		.use_tkip = false,
	};
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
	if (!key_data.rsc_tsc)
		return -ENOMEM;

	/*
	 * if we have to configure keys, call ieee80211_iter_keys(),
	 * as we need non-atomic context in order to take the
	 * required locks.
	 * for the d0i3 we can't use ieee80211_iter_keys(), as
	 * taking (almost) any mutex might result in deadlock.
	 */
	if (!d0i3) {
		/*
		 * Note that currently we don't propagate cmd_flags
		 * to the iterator. In case of key_data.configure_keys,
		 * all the configured commands are SYNC, and
		 * iwl_mvm_wowlan_program_keys() will take care of
		 * locking/unlocking mvm->mutex.
		 */
		ieee80211_iter_keys(mvm->hw, vif,
				    iwl_mvm_wowlan_program_keys,
				    &key_data);
	} else {
		iwl_mvm_iter_d0i3_ap_keys(mvm, vif,
					  iwl_mvm_wowlan_program_keys,
					  &key_data);
	}

	if (key_data.error) {
		ret = -EIO;
		goto out;
	}

	if (key_data.use_rsc_tsc) {
		ret = iwl_mvm_send_cmd_pdu(mvm,
					   WOWLAN_TSC_RSC_PARAM, cmd_flags,
					   sizeof(*key_data.rsc_tsc),
					   key_data.rsc_tsc);
		if (ret)
			goto out;
	}

	if (key_data.use_tkip) {
		ret = iwl_mvm_send_cmd_pdu(mvm,
					   WOWLAN_TKIP_PARAM,
					   cmd_flags, sizeof(tkip_cmd),
					   &tkip_cmd);
		if (ret)
			goto out;
	}

	/* configure rekey data only if offloaded rekey is supported (d3) */
	if (mvmvif->rekey_data.valid && !d0i3) {
		memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd));
		memcpy(kek_kck_cmd.kck, mvmvif->rekey_data.kck,
		       NL80211_KCK_LEN);
		kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN);
		memcpy(kek_kck_cmd.kek, mvmvif->rekey_data.kek,
		       NL80211_KEK_LEN);
		kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN);
		kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr;

		ret = iwl_mvm_send_cmd_pdu(mvm,
					   WOWLAN_KEK_KCK_MATERIAL, cmd_flags,
					   sizeof(kek_kck_cmd),
					   &kek_kck_cmd);
		if (ret)
			goto out;
	}
	ret = 0;
out:
	kfree(key_data.rsc_tsc);
	return ret;
}

static int
iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
		      struct cfg80211_wowlan *wowlan,
		      struct iwl_wowlan_config_cmd *wowlan_config_cmd,
		      struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif,
		      struct ieee80211_sta *ap_sta)
{
	int ret;
	bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
					 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);

	if (!unified_image) {
		ret = iwl_mvm_switch_to_d3(mvm);
		if (ret)
			return ret;

		ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta);
		if (ret)
			return ret;
	}

	if (!iwlwifi_mod_params.sw_crypto) {
		/*
		 * This needs to be unlocked due to lock ordering
		 * constraints. Since we're in the suspend path
		 * that isn't really a problem though.
		 */
		mutex_unlock(&mvm->mutex);
		ret = iwl_mvm_wowlan_config_key_params(mvm, vif, false,
						       CMD_ASYNC);
		mutex_lock(&mvm->mutex);
		if (ret)
			return ret;
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0,
				   sizeof(*wowlan_config_cmd),
				   wowlan_config_cmd);
	if (ret)
		return ret;

	ret = iwl_mvm_send_patterns(mvm, wowlan);
	if (ret)
		return ret;

	ret = iwl_mvm_send_proto_offload(mvm, vif, false, true, 0);
	if (ret)
		return ret;

	ret = iwl_mvm_send_remote_wake_cfg(mvm, vif, wowlan->tcp);
	return ret;
}

static int
iwl_mvm_netdetect_config(struct iwl_mvm *mvm,
			 struct cfg80211_wowlan *wowlan,
			 struct cfg80211_sched_scan_request *nd_config,
			 struct ieee80211_vif *vif)
{
	struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
	int ret;
	bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
					 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);

	if (!unified_image) {
		ret = iwl_mvm_switch_to_d3(mvm);
		if (ret)
			return ret;
	} else {
		/* In theory, we wouldn't have to stop a running sched
		 * scan in order to start another one (for
		 * net-detect). But in practice this doesn't seem to
		 * work properly, so stop any running sched_scan now.
		 */
		ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
		if (ret)
			return ret;
	}

	/* rfkill release can be either for wowlan or netdetect */
	if (wowlan->rfkill_release)
		wowlan_config_cmd.wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);

	ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0,
				   sizeof(wowlan_config_cmd),
				   &wowlan_config_cmd);
	if (ret)
		return ret;

	ret = iwl_mvm_sched_scan_start(mvm, vif, nd_config, &mvm->nd_ies,
				       IWL_MVM_SCAN_NETDETECT);
	if (ret)
		return ret;

	if (WARN_ON(mvm->nd_match_sets || mvm->nd_channels))
		return -EBUSY;

	/* save the sched scan matchsets... */
	if (nd_config->n_match_sets) {
		mvm->nd_match_sets = kmemdup(nd_config->match_sets,
					     sizeof(*nd_config->match_sets) *
					     nd_config->n_match_sets,
					     GFP_KERNEL);
		if (mvm->nd_match_sets)
			mvm->n_nd_match_sets = nd_config->n_match_sets;
	}

	/* ...and the sched scan channels for later reporting */
	mvm->nd_channels = kmemdup(nd_config->channels,
				   sizeof(*nd_config->channels) *
				   nd_config->n_channels,
				   GFP_KERNEL);
	if (mvm->nd_channels)
		mvm->n_nd_channels = nd_config->n_channels;

	return 0;
}

static void iwl_mvm_free_nd(struct iwl_mvm *mvm)
{
	kfree(mvm->nd_match_sets);
	mvm->nd_match_sets = NULL;
	mvm->n_nd_match_sets = 0;
	kfree(mvm->nd_channels);
	mvm->nd_channels = NULL;
	mvm->n_nd_channels = 0;
}

static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
			     struct cfg80211_wowlan *wowlan,
			     bool test)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct ieee80211_vif *vif = NULL;
	struct iwl_mvm_vif *mvmvif = NULL;
	struct ieee80211_sta *ap_sta = NULL;
	struct iwl_d3_manager_config d3_cfg_cmd_data = {
		/*
		 * Program the minimum sleep time to 10 seconds, as many
		 * platforms have issues processing a wakeup signal while
		 * still being in the process of suspending.
		 */
		.min_sleep_time = cpu_to_le32(10 * 1000 * 1000),
	};
	struct iwl_host_cmd d3_cfg_cmd = {
		.id = D3_CONFIG_CMD,
		.flags = CMD_WANT_SKB,
		.data[0] = &d3_cfg_cmd_data,
		.len[0] = sizeof(d3_cfg_cmd_data),
	};
	int ret;
	int len __maybe_unused;
	bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
					 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);

	if (!wowlan) {
		/*
		 * mac80211 shouldn't get here, but for D3 test
		 * it doesn't warrant a warning
		 */
		WARN_ON(!test);
		return -EINVAL;
	}

	mutex_lock(&mvm->mutex);

	vif = iwl_mvm_get_bss_vif(mvm);
	if (IS_ERR_OR_NULL(vif)) {
		ret = 1;
		goto out_noreset;
	}

	mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT) {
		/* if we're not associated, this must be netdetect */
		if (!wowlan->nd_config) {
			ret = 1;
			goto out_noreset;
		}

		ret = iwl_mvm_netdetect_config(
			mvm, wowlan, wowlan->nd_config, vif);
		if (ret)
			goto out;

		mvm->net_detect = true;
	} else {
		struct iwl_wowlan_config_cmd wowlan_config_cmd = {};

		ap_sta = rcu_dereference_protected(
			mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
			lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(ap_sta)) {
			ret = -EINVAL;
			goto out_noreset;
		}

		ret = iwl_mvm_get_wowlan_config(mvm, wowlan, &wowlan_config_cmd,
						vif, mvmvif, ap_sta);
		if (ret)
			goto out_noreset;
		ret = iwl_mvm_wowlan_config(mvm, wowlan, &wowlan_config_cmd,
					    vif, mvmvif, ap_sta);
		if (ret)
			goto out;

		mvm->net_detect = false;
	}

	ret = iwl_mvm_power_update_device(mvm);
	if (ret)
		goto out;

	ret = iwl_mvm_power_update_mac(mvm);
	if (ret)
		goto out;

#ifdef CONFIG_IWLWIFI_DEBUGFS
	if (mvm->d3_wake_sysassert)
		d3_cfg_cmd_data.wakeup_flags |=
			cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR);
#endif

	/* must be last -- this switches firmware state */
	ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd);
	if (ret)
		goto out;
#ifdef CONFIG_IWLWIFI_DEBUGFS
	len = iwl_rx_packet_payload_len(d3_cfg_cmd.resp_pkt);
	if (len >= sizeof(u32)) {
		mvm->d3_test_pme_ptr =
			le32_to_cpup((__le32 *)d3_cfg_cmd.resp_pkt->data);
	}
#endif
	iwl_free_resp(&d3_cfg_cmd);

	clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);

	iwl_trans_d3_suspend(mvm->trans, test, !unified_image);
 out:
	if (ret < 0) {
		iwl_mvm_free_nd(mvm);

		if (!unified_image) {
			iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
			if (mvm->restart_fw > 0) {
				mvm->restart_fw--;
				ieee80211_restart_hw(mvm->hw);
			}
		}
	}
 out_noreset:
	mutex_unlock(&mvm->mutex);

	return ret;
}

static int iwl_mvm_enter_d0i3_sync(struct iwl_mvm *mvm)
{
	struct iwl_notification_wait wait_d3;
	static const u16 d3_notif[] = { D3_CONFIG_CMD };
	int ret;

	iwl_init_notification_wait(&mvm->notif_wait, &wait_d3,
				   d3_notif, ARRAY_SIZE(d3_notif),
				   NULL, NULL);

	ret = iwl_mvm_enter_d0i3(mvm->hw->priv);
	if (ret)
		goto remove_notif;

	ret = iwl_wait_notification(&mvm->notif_wait, &wait_d3, HZ);
	WARN_ON_ONCE(ret);
	return ret;

remove_notif:
	iwl_remove_notification(&mvm->notif_wait, &wait_d3);
	return ret;
}

/*
 * mac80211 suspend handler: for "any" wakeup triggers the device is kept
 * in D0i3, otherwise the full WoWLAN/D3 configuration is programmed via
 * __iwl_mvm_suspend().
 */
int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_trans *trans = mvm->trans;
	int ret;

	/* make sure the d0i3 exit work is not pending */
	flush_work(&mvm->d0i3_exit_work);

	ret = iwl_trans_suspend(trans);
	if (ret)
		return ret;

	if (wowlan->any) {
		trans->system_pm_mode = IWL_PLAT_PM_MODE_D0I3;

		if (iwl_mvm_enter_d0i3_on_suspend(mvm)) {
			ret = iwl_mvm_enter_d0i3_sync(mvm);

			if (ret)
				return ret;
		}

		mutex_lock(&mvm->d0i3_suspend_mutex);
		__set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
		mutex_unlock(&mvm->d0i3_suspend_mutex);

		iwl_trans_d3_suspend(trans, false, false);

		return 0;
	}

	trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;

	return __iwl_mvm_suspend(hw, wowlan, false);
}

/* converted data from the different status responses */
struct iwl_wowlan_status_data {
	u16 pattern_number;
	u16 qos_seq_ctr[8];
	u32 wakeup_reasons;
	u32 wake_packet_length;
	u32 wake_packet_bufsize;
	const u8 *wake_packet;
};

static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
					  struct ieee80211_vif *vif,
					  struct iwl_wowlan_status_data *status)
{
	struct sk_buff *pkt = NULL;
	struct cfg80211_wowlan_wakeup wakeup = {
		.pattern_idx = -1,
	};
	struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
	u32 reasons = status->wakeup_reasons;

	if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) {
		wakeup_report = NULL;
		goto report;
	}

	pm_wakeup_event(mvm->dev, 0);

	if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET)
		wakeup.magic_pkt = true;

	if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN)
		wakeup.pattern_idx = status->pattern_number;

	if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
		       IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH))
		wakeup.disconnect = true;

	if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE)
		wakeup.gtk_rekey_failure = true;

	if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
		wakeup.rfkill_release = true;

	if (reasons & IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST)
		wakeup.eap_identity_req = true;

	if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE)
		wakeup.four_way_handshake = true;

	if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS)
		wakeup.tcp_connlost = true;

	if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE)
		wakeup.tcp_nomoretokens = true;

	if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET)
		wakeup.tcp_match = true;

	if (status->wake_packet_bufsize) {
		int pktsize = status->wake_packet_bufsize;
		int pktlen = status->wake_packet_length;
		const u8 *pktdata = status->wake_packet;
		struct ieee80211_hdr *hdr = (void *)pktdata;
		int truncated = pktlen - pktsize;

		/* this would be a firmware bug */
		if (WARN_ON_ONCE(truncated < 0))
			truncated = 0;

		if (ieee80211_is_data(hdr->frame_control)) {
			int hdrlen = ieee80211_hdrlen(hdr->frame_control);
			int ivlen = 0, icvlen = 4; /* also FCS */

			pkt = alloc_skb(pktsize, GFP_KERNEL);
			if (!pkt)
				goto report;

			memcpy(skb_put(pkt, hdrlen), pktdata, hdrlen);
			pktdata += hdrlen;
			pktsize -= hdrlen;

			if (ieee80211_has_protected(hdr->frame_control)) {
				/*
				 * This is unlocked and using gtk_i(c)vlen,
				 * but since everything is under RTNL still
				 * that's not really a problem - changing
				 * it would be difficult.
				 */
				if (is_multicast_ether_addr(hdr->addr1)) {
					ivlen = mvm->gtk_ivlen;
					icvlen += mvm->gtk_icvlen;
				} else {
					ivlen = mvm->ptk_ivlen;
					icvlen += mvm->ptk_icvlen;
				}
			}

			/* if truncated, FCS/ICV is (partially) gone */
			if (truncated >= icvlen) {
				truncated -= icvlen;
				icvlen = 0;
			} else {
				icvlen -= truncated;
				truncated = 0;
			}

			pktsize -= ivlen + icvlen;
			pktdata += ivlen;

			memcpy(skb_put(pkt, pktsize), pktdata, pktsize);

			if (ieee80211_data_to_8023(pkt, vif->addr, vif->type))
				goto report;
			wakeup.packet = pkt->data;
			wakeup.packet_present_len = pkt->len;
			wakeup.packet_len = pkt->len - truncated;
			wakeup.packet_80211 = false;
		} else {
			int fcslen = 4;

			if (truncated >= 4) {
				truncated -= 4;
				fcslen = 0;
			} else {
				fcslen -= truncated;
				truncated = 0;
			}
			pktsize -= fcslen;
			wakeup.packet = status->wake_packet;
			wakeup.packet_present_len = pktsize;
			wakeup.packet_len = pktlen - truncated;
			wakeup.packet_80211 = true;
		}
	}

 report:
	ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
	kfree_skb(pkt);
}

static void iwl_mvm_aes_sc_to_seq(struct aes_sc *sc,
				  struct ieee80211_key_seq *seq)
{
	u64 pn;

	pn = le64_to_cpu(sc->pn);
	seq->ccmp.pn[0] = pn >> 40;
	seq->ccmp.pn[1] = pn >> 32;
	seq->ccmp.pn[2] = pn >> 24;
	seq->ccmp.pn[3] = pn >> 16;
	seq->ccmp.pn[4] = pn >> 8;
	seq->ccmp.pn[5] = pn;
}

static void iwl_mvm_tkip_sc_to_seq(struct tkip_sc *sc,
				   struct ieee80211_key_seq *seq)
{
	seq->tkip.iv32 = le32_to_cpu(sc->iv32);
	seq->tkip.iv16 = le16_to_cpu(sc->iv16);
}

static void iwl_mvm_set_aes_rx_seq(struct iwl_mvm *mvm, struct aes_sc *scs,
				   struct ieee80211_sta *sta,
				   struct ieee80211_key_conf *key)
{
	int tid;

	BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS);

	if (sta && iwl_mvm_has_new_rx_api(mvm)) {
		struct iwl_mvm_sta *mvmsta;
		struct iwl_mvm_key_pn *ptk_pn;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);

		ptk_pn = rcu_dereference_protected(mvmsta->ptk_pn[key->keyidx],
						   lockdep_is_held(&mvm->mutex));
		if (WARN_ON(!ptk_pn))
			return;

		for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
			struct ieee80211_key_seq seq = {};
			int i;

			iwl_mvm_aes_sc_to_seq(&scs[tid], &seq);
			ieee80211_set_key_rx_seq(key, tid, &seq);
			for (i = 1; i < mvm->trans->num_rx_queues; i++)
				memcpy(ptk_pn->q[i].pn[tid],
				       seq.ccmp.pn, IEEE80211_CCMP_PN_LEN);
		}
	} else {
		for (tid = 0; tid < IWL_NUM_RSC; tid++) {
			struct ieee80211_key_seq seq = {};

			iwl_mvm_aes_sc_to_seq(&scs[tid], &seq);
			ieee80211_set_key_rx_seq(key, tid, &seq);
		}
	}
}

static void iwl_mvm_set_tkip_rx_seq(struct tkip_sc *scs,
				    struct ieee80211_key_conf *key)
{
	int tid;

	BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS);

	for (tid = 0; tid < IWL_NUM_RSC; tid++) {
		struct ieee80211_key_seq seq = {};

		iwl_mvm_tkip_sc_to_seq(&scs[tid], &seq);
		ieee80211_set_key_rx_seq(key, tid, &seq);
	}
}
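/*
 * Apply the multicast (GTK) RX counters from the firmware's WoWLAN
 * status to the given mac80211 key.
 */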
static void iwl_mvm_set_key_rx_seq(struct iwl_mvm *mvm,
				   struct ieee80211_key_conf *key,
				   struct iwl_wowlan_status *status)
{
	union iwl_all_tsc_rsc *rsc = &status->gtk.rsc.all_tsc_rsc;

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		iwl_mvm_set_aes_rx_seq(mvm, rsc->aes.multicast_rsc, NULL, key);
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		iwl_mvm_set_tkip_rx_seq(rsc->tkip.multicast_rsc, key);
		break;
	default:
		WARN_ON(1);
	}
}

struct iwl_mvm_d3_gtk_iter_data {
	struct iwl_mvm *mvm;
	struct iwl_wowlan_status *status;
	void *last_gtk;
	u32 cipher;
	bool find_phase, unhandled_cipher;
	int num_keys;
};

static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   struct ieee80211_sta *sta,
				   struct ieee80211_key_conf *key,
				   void *_data)
{
	struct iwl_mvm_d3_gtk_iter_data *data = _data;

	if (data->unhandled_cipher)
		return;

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		/* ignore WEP completely, nothing to do */
		return;
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_TKIP:
		/* we support these */
		break;
	default:
		/* everything else (even CMAC for MFP) - disconnect from AP */
		data->unhandled_cipher = true;
		return;
	}

	data->num_keys++;

	/*
	 * pairwise key - update sequence counters only;
	 * note that this assumes no TDLS sessions are active
	 */
	if (sta) {
		struct ieee80211_key_seq seq = {};
		union iwl_all_tsc_rsc *sc = &data->status->gtk.rsc.all_tsc_rsc;

		if (data->find_phase)
			return;

		switch (key->cipher) {
		case WLAN_CIPHER_SUITE_CCMP:
			iwl_mvm_set_aes_rx_seq(data->mvm, sc->aes.unicast_rsc,
					       sta, key);
			atomic64_set(&key->tx_pn, le64_to_cpu(sc->aes.tsc.pn));
			break;
		case WLAN_CIPHER_SUITE_TKIP:
			iwl_mvm_tkip_sc_to_seq(&sc->tkip.tsc, &seq);
			iwl_mvm_set_tkip_rx_seq(sc->tkip.unicast_rsc, key);
			atomic64_set(&key->tx_pn,
				     (u64)seq.tkip.iv16 |
				     ((u64)seq.tkip.iv32 << 16));
			break;
		}

		/* that's it for this key */
		return;
	}

	if (data->find_phase) {
		data->last_gtk = key;
		data->cipher = key->cipher;
		return;
	}

	if (data->status->num_of_gtk_rekeys)
		ieee80211_remove_key(key);
	else if (data->last_gtk == key)
		iwl_mvm_set_key_rx_seq(data->mvm, key, data->status);
}

static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
					  struct ieee80211_vif *vif,
					  struct iwl_wowlan_status *status)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_d3_gtk_iter_data gtkdata = {
		.mvm = mvm,
		.status = status,
	};
	u32 disconnection_reasons =
		IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
		IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;

	if (!status || !vif->bss_conf.bssid)
		return false;

	if (le32_to_cpu(status->wakeup_reasons) & disconnection_reasons)
		return false;

	/* find last GTK that we used initially, if any */
	gtkdata.find_phase = true;
	ieee80211_iter_keys(mvm->hw, vif,
			    iwl_mvm_d3_update_keys, &gtkdata);
	/* not trying to keep connections with MFP/unhandled ciphers */
	if (gtkdata.unhandled_cipher)
		return false;
	if (!gtkdata.num_keys)
		goto out;
	if (!gtkdata.last_gtk)
		return false;

	/*
	 * invalidate all other GTKs that might still exist and update
	 * the one that we used
	 */
	gtkdata.find_phase = false;
	ieee80211_iter_keys(mvm->hw, vif,
			    iwl_mvm_d3_update_keys, &gtkdata);

	if (status->num_of_gtk_rekeys) {
		struct ieee80211_key_conf *key;
		struct {
			struct ieee80211_key_conf conf;
			u8 key[32];
		} conf = {
			.conf.cipher = gtkdata.cipher,
			.conf.keyidx = status->gtk.key_index,
		};

		switch (gtkdata.cipher) {
		case WLAN_CIPHER_SUITE_CCMP:
			conf.conf.keylen = WLAN_KEY_LEN_CCMP;
			memcpy(conf.conf.key, status->gtk.decrypt_key,
			       WLAN_KEY_LEN_CCMP);
			break;
		case WLAN_CIPHER_SUITE_TKIP:
			conf.conf.keylen = WLAN_KEY_LEN_TKIP;
			memcpy(conf.conf.key, status->gtk.decrypt_key, 16);
			/* leave TX MIC key zeroed, we don't use it anyway */
			memcpy(conf.conf.key +
			       NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY,
			       status->gtk.tkip_mic_key, 8);
			break;
		}

		key = ieee80211_gtk_rekey_add(vif, &conf.conf);
		if (IS_ERR(key))
			return false;
		iwl_mvm_set_key_rx_seq(mvm, key, status);
	}

	if (status->num_of_gtk_rekeys) {
		__be64 replay_ctr =
			cpu_to_be64(le64_to_cpu(status->replay_ctr));
		ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid,
					   (void *)&replay_ctr, GFP_KERNEL);
	}

out:
	mvmvif->seqno_valid = true;
	/* +0x10 because the set API expects next-to-use, not last-used */
	mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10;

	return true;
}

static struct iwl_wowlan_status *
iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	u32 base = mvm->error_event_table[0];
	struct error_table_start {
		/* cf. struct iwl_error_event_table */
		u32 valid;
		u32 error_id;
	} err_info;
	struct iwl_host_cmd cmd = {
		.id = WOWLAN_GET_STATUSES,
		.flags = CMD_WANT_SKB,
	};
	struct iwl_wowlan_status *status, *fw_status;
	int ret, len, status_size;

	iwl_trans_read_mem_bytes(mvm->trans, base,
				 &err_info, sizeof(err_info));

	if (err_info.valid) {
		IWL_INFO(mvm, "error table is valid (%d) with error (%d)\n",
			 err_info.valid, err_info.error_id);
		if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
			struct cfg80211_wowlan_wakeup wakeup = {
				.rfkill_release = true,
			};
			ieee80211_report_wowlan_wakeup(vif, &wakeup,
						       GFP_KERNEL);
		}
		return ERR_PTR(-EIO);
	}

	/* only for tracing for now */
	ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0, 0, NULL);
	if (ret)
		IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret) {
		IWL_ERR(mvm, "failed to query status (%d)\n", ret);
		return ERR_PTR(ret);
	}

	/* RF-kill already asserted again... */
	if (!cmd.resp_pkt) {
		fw_status = ERR_PTR(-ERFKILL);
		goto out_free_resp;
	}

	status_size = sizeof(*fw_status);

	len = iwl_rx_packet_payload_len(cmd.resp_pkt);
	if (len < status_size) {
		IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
		fw_status = ERR_PTR(-EIO);
		goto out_free_resp;
	}

	status = (void *)cmd.resp_pkt->data;
	if (len != (status_size +
		    ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4))) {
		IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
		fw_status = ERR_PTR(-EIO);
		goto out_free_resp;
	}

	fw_status = kmemdup(status, len, GFP_KERNEL);

out_free_resp:
	iwl_free_resp(&cmd);
	return fw_status;
}

/* releases the MVM mutex */
static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
					 struct ieee80211_vif *vif)
{
	struct iwl_wowlan_status_data status;
	struct iwl_wowlan_status *fw_status;
	int i;
	bool keep;
	struct iwl_mvm_sta *mvm_ap_sta;

	fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
	if (IS_ERR_OR_NULL(fw_status))
		goto out_unlock;

	status.pattern_number = le16_to_cpu(fw_status->pattern_number);
	for (i = 0; i < 8; i++)
		status.qos_seq_ctr[i] =
			le16_to_cpu(fw_status->qos_seq_ctr[i]);
	status.wakeup_reasons = le32_to_cpu(fw_status->wakeup_reasons);
	status.wake_packet_length =
		le32_to_cpu(fw_status->wake_packet_length);
	status.wake_packet_bufsize =
		le32_to_cpu(fw_status->wake_packet_bufsize);
	status.wake_packet = fw_status->wake_packet;

	/* still at hard-coded place 0 for D3 image */
	mvm_ap_sta = iwl_mvm_sta_from_staid_protected(mvm, 0);
	if (!mvm_ap_sta)
		goto out_free;

	for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
		u16 seq = status.qos_seq_ctr[i];
		/* firmware stores last-used value, we store next value */
		seq += 0x10;
		mvm_ap_sta->tid_data[i].seq_number = seq;
	}

	/* now we have all the data we need, unlock to avoid mac80211 issues */
	mutex_unlock(&mvm->mutex);

	iwl_mvm_report_wakeup_reasons(mvm, vif, &status);

	keep = iwl_mvm_setup_connection_keep(mvm, vif, fw_status);

	kfree(fw_status);
	return keep;

out_free:
	kfree(fw_status);
out_unlock:
	mutex_unlock(&mvm->mutex);
	return false;
}

void iwl_mvm_d0i3_update_keys(struct iwl_mvm *mvm,
			      struct ieee80211_vif *vif,
			      struct iwl_wowlan_status *status)
{
	struct iwl_mvm_d3_gtk_iter_data gtkdata = {
		.mvm = mvm,
		.status = status,
	};

	/*
	 * rekey handling requires taking locks that can't be taken now.
	 * however, d0i3 doesn't offload rekey, so we're fine.
	 */
	if (WARN_ON_ONCE(status->num_of_gtk_rekeys))
		return;

	/* find last GTK that we used initially, if any */
	gtkdata.find_phase = true;
	iwl_mvm_iter_d0i3_ap_keys(mvm, vif, iwl_mvm_d3_update_keys, &gtkdata);

	gtkdata.find_phase = false;
	iwl_mvm_iter_d0i3_ap_keys(mvm, vif, iwl_mvm_d3_update_keys, &gtkdata);
}

struct iwl_mvm_nd_query_results {
	u32 matched_profiles;
	struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES];
};

static int
iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm,
				struct iwl_mvm_nd_query_results *results)
{
	struct iwl_scan_offload_profiles_query *query;
	struct iwl_host_cmd cmd = {
		.id = SCAN_OFFLOAD_PROFILES_QUERY_CMD,
		.flags = CMD_WANT_SKB,
	};
	int ret, len;

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret) {
		IWL_ERR(mvm, "failed to query matched profiles (%d)\n", ret);
		return ret;
	}

	/* RF-kill already asserted again... */
	if (!cmd.resp_pkt) {
		ret = -ERFKILL;
		goto out_free_resp;
	}

	len = iwl_rx_packet_payload_len(cmd.resp_pkt);
	if (len < sizeof(*query)) {
		IWL_ERR(mvm, "Invalid scan offload profiles query response!\n");
		ret = -EIO;
		goto out_free_resp;
	}

	query = (void *)cmd.resp_pkt->data;

	results->matched_profiles = le32_to_cpu(query->matched_profiles);
	memcpy(results->matches, query->matches, sizeof(results->matches));

#ifdef CONFIG_IWLWIFI_DEBUGFS
	mvm->last_netdetect_scans = le32_to_cpu(query->n_scans_done);
#endif

out_free_resp:
	iwl_free_resp(&cmd);
	return ret;
}

static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
					    struct ieee80211_vif *vif)
{
	struct cfg80211_wowlan_nd_info *net_detect = NULL;
	struct cfg80211_wowlan_wakeup wakeup = {
		.pattern_idx = -1,
	};
	struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
	struct iwl_mvm_nd_query_results query;
	struct iwl_wowlan_status *fw_status;
	unsigned long matched_profiles;
	u32 reasons = 0;
	int i, j, n_matches, ret;

	fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
	if (!IS_ERR_OR_NULL(fw_status)) {
		reasons = le32_to_cpu(fw_status->wakeup_reasons);
		kfree(fw_status);
	}

	if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
		wakeup.rfkill_release = true;

	if (reasons != IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS)
		goto out;

	ret = iwl_mvm_netdetect_query_results(mvm, &query);
	if (ret || !query.matched_profiles) {
		wakeup_report = NULL;
		goto out;
	}

	matched_profiles = query.matched_profiles;
	if (mvm->n_nd_match_sets) {
		n_matches = hweight_long(matched_profiles);
	} else {
		IWL_ERR(mvm, "no net detect match information available\n");
		n_matches = 0;
	}

	net_detect = kzalloc(sizeof(*net_detect) +
			     (n_matches * sizeof(net_detect->matches[0])),
			     GFP_KERNEL);
	if (!net_detect || !n_matches)
		goto out_report_nd;

	for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) {
		struct iwl_scan_offload_profile_match *fw_match;
		struct cfg80211_wowlan_nd_match *match;
		int idx, n_channels = 0;

		fw_match = &query.matches[i];

		for (j = 0; j < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; j++)
			n_channels += hweight8(fw_match->matching_channels[j]);

		match = kzalloc(sizeof(*match) +
				(n_channels * sizeof(*match->channels)),
				GFP_KERNEL);
                                (n_channels * sizeof(*match->channels)),
                                GFP_KERNEL);
                if (!match)
                        goto out_report_nd;

                net_detect->matches[net_detect->n_matches++] = match;

                /* We inverted the order of the SSIDs in the scan
                 * request, so invert the index here.
                 */
                idx = mvm->n_nd_match_sets - i - 1;
                match->ssid.ssid_len = mvm->nd_match_sets[idx].ssid.ssid_len;
                memcpy(match->ssid.ssid, mvm->nd_match_sets[idx].ssid.ssid,
                       match->ssid.ssid_len);

                if (mvm->n_nd_channels < n_channels)
                        continue;

                for (j = 0; j < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; j++)
                        if (fw_match->matching_channels[j / 8] & (BIT(j % 8)))
                                match->channels[match->n_channels++] =
                                        mvm->nd_channels[j]->center_freq;
        }

out_report_nd:
        wakeup.net_detect = net_detect;
out:
        iwl_mvm_free_nd(mvm);

        mutex_unlock(&mvm->mutex);
        ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);

        if (net_detect) {
                for (i = 0; i < net_detect->n_matches; i++)
                        kfree(net_detect->matches[i]);
                kfree(net_detect);
        }
}

static void iwl_mvm_read_d3_sram(struct iwl_mvm *mvm)
{
#ifdef CONFIG_IWLWIFI_DEBUGFS
        const struct fw_img *img = &mvm->fw->img[IWL_UCODE_WOWLAN];
        u32 len = img->sec[IWL_UCODE_SECTION_DATA].len;
        u32 offs = img->sec[IWL_UCODE_SECTION_DATA].offset;

        if (!mvm->store_d3_resume_sram)
                return;

        if (!mvm->d3_resume_sram) {
                mvm->d3_resume_sram = kzalloc(len, GFP_KERNEL);
                if (!mvm->d3_resume_sram)
                        return;
        }

        iwl_trans_read_mem_bytes(mvm->trans, offs, mvm->d3_resume_sram, len);
#endif
}

static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
                                       struct ieee80211_vif *vif)
{
        /* skip the one we keep connection on */
        if (data == vif)
                return;

        if (vif->type == NL80211_IFTYPE_STATION)
                ieee80211_resume_disconnect(vif);
}

static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
{
        struct ieee80211_vif *vif = NULL;
        int ret = 1;
        enum iwl_d3_status d3_status;
        bool keep = false;
        bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
                                         IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);

        u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE |
                    CMD_WAKE_UP_TRANS;

        mutex_lock(&mvm->mutex);

        /* get the BSS vif pointer again */
        vif = iwl_mvm_get_bss_vif(mvm);
        if (IS_ERR_OR_NULL(vif))
                goto err;

        ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !unified_image);
        if (ret)
                goto err;

        if (d3_status != IWL_D3_STATUS_ALIVE) {
                IWL_INFO(mvm, "Device was reset during suspend\n");
                goto err;
        }

        /* query SRAM first in case we want event logging */
        iwl_mvm_read_d3_sram(mvm);

        /*
         * Query the current location and source from the D3 firmware so we
         * can play it back when we re-initialize the D0 firmware
         */
        iwl_mvm_update_changed_regdom(mvm);

        if (mvm->net_detect) {
                /* If this is a non-unified image, we restart the FW,
                 * so no need to stop the netdetect scan. If that
                 * fails, continue and try to get the wake-up reasons,
                 * but trigger a HW restart by keeping a failure code
                 * in ret.
                 */
                if (unified_image)
                        ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_NETDETECT,
                                                false);

                iwl_mvm_query_netdetect_reasons(mvm, vif);
                /* has unlocked the mutex, so skip that */
                goto out;
        } else {
                keep = iwl_mvm_query_wakeup_reasons(mvm, vif);
#ifdef CONFIG_IWLWIFI_DEBUGFS
                if (keep)
                        mvm->keep_vif = vif;
#endif
                /* has unlocked the mutex, so skip that */
                goto out_iterate;
        }

err:
        iwl_mvm_free_nd(mvm);
        mutex_unlock(&mvm->mutex);

out_iterate:
        if (!test)
                ieee80211_iterate_active_interfaces_rtnl(mvm->hw,
                        IEEE80211_IFACE_ITER_NORMAL,
                        iwl_mvm_d3_disconnect_iter, keep ? vif : NULL);

out:
        if (unified_image && !ret) {
                ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL);
                if (!ret) /* D3 ended successfully - no need to reset device */
                        return 0;
        }

        /*
         * Reconfigure the device in one of the following cases:
         * 1. We are not using a unified image
         * 2. We are using a unified image but had an error while exiting D3
         */
        set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
        set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status);
        /*
         * When switching images we return 1, which causes mac80211
         * to do a reconfig with IEEE80211_RECONFIG_TYPE_RESTART.
         * This type of reconfig calls iwl_mvm_restart_complete(),
         * where we unref the IWL_MVM_REF_UCODE_DOWN, so we need
         * to take the reference here.
         */
        iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);

        return 1;
}

static int iwl_mvm_resume_d3(struct iwl_mvm *mvm)
{
        iwl_trans_resume(mvm->trans);

        return __iwl_mvm_resume(mvm, false);
}

static int iwl_mvm_resume_d0i3(struct iwl_mvm *mvm)
{
        bool exit_now;
        enum iwl_d3_status d3_status;
        struct iwl_trans *trans = mvm->trans;

        iwl_trans_d3_resume(trans, &d3_status, false, false);

        /*
         * make sure to clear D0I3_DEFER_WAKEUP before
         * calling iwl_trans_resume(), which might wait
         * for d0i3 exit completion.
         */
        mutex_lock(&mvm->d0i3_suspend_mutex);
        __clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
        exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP,
                                        &mvm->d0i3_suspend_flags);
        mutex_unlock(&mvm->d0i3_suspend_mutex);
        if (exit_now) {
                IWL_DEBUG_RPM(mvm, "Run deferred d0i3 exit\n");
                _iwl_mvm_exit_d0i3(mvm);
        }

        iwl_trans_resume(trans);

        if (iwl_mvm_enter_d0i3_on_suspend(mvm)) {
                int ret = iwl_mvm_exit_d0i3(mvm->hw->priv);

                if (ret)
                        return ret;
                /*
                 * d0i3 exit will be deferred until reconfig_complete.
                 * make sure we are out of d0i3 there.
                 */
        }
        return 0;
}

int iwl_mvm_resume(struct ieee80211_hw *hw)
{
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
        int ret;

        if (mvm->trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3)
                ret = iwl_mvm_resume_d0i3(mvm);
        else
                ret = iwl_mvm_resume_d3(mvm);

        mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;

        return ret;
}

void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled)
{
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

        device_set_wakeup_enable(mvm->trans->dev, enabled);
}

#ifdef CONFIG_IWLWIFI_DEBUGFS
static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
{
        struct iwl_mvm *mvm = inode->i_private;
        int err;

        if (mvm->d3_test_active)
                return -EBUSY;

        file->private_data = inode->i_private;

        ieee80211_stop_queues(mvm->hw);
        synchronize_net();

        mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;

        /* start pseudo D3 */
        rtnl_lock();
        err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true);
        rtnl_unlock();
        if (err > 0)
                err = -EINVAL;
        if (err) {
                ieee80211_wake_queues(mvm->hw);
                return err;
        }
        mvm->d3_test_active = true;
        mvm->keep_vif = NULL;
        return 0;
}

static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf,
                                    size_t count, loff_t *ppos)
{
        struct iwl_mvm *mvm = file->private_data;
        u32 pme_asserted;

        while (true) {
                /* read pme_ptr if available */
                if (mvm->d3_test_pme_ptr) {
                        pme_asserted = iwl_trans_read_mem32(mvm->trans,
                                                mvm->d3_test_pme_ptr);
                        if (pme_asserted)
                                break;
                }

                if (msleep_interruptible(100))
                        break;
        }

        return 0;
}

static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac,
                                              struct ieee80211_vif *vif)
{
        /* skip the one we keep connection on */
        if (_data == vif)
                return;

        if (vif->type == NL80211_IFTYPE_STATION)
                ieee80211_connection_loss(vif);
}

static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
{
        struct iwl_mvm *mvm = inode->i_private;
        bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
                                         IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);

        mvm->d3_test_active = false;

        rtnl_lock();
        __iwl_mvm_resume(mvm, true);
        rtnl_unlock();

        mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;

        iwl_abort_notification_waits(&mvm->notif_wait);
        if (!unified_image) {
                int remaining_time = 10;

                ieee80211_restart_hw(mvm->hw);

                /* wait for restart and disconnect all interfaces */
                while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
                       remaining_time > 0) {
                        remaining_time--;
                        msleep(1000);
                }

                if (remaining_time == 0)
                        IWL_ERR(mvm, "Timed out waiting for HW restart!\n");
        }

        ieee80211_iterate_active_interfaces_atomic(
                mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
                iwl_mvm_d3_test_disconn_work_iter, mvm->keep_vif);

        ieee80211_wake_queues(mvm->hw);

        return 0;
}

const struct file_operations iwl_dbgfs_d3_test_ops = {
        .llseek = no_llseek,
        .open = iwl_mvm_d3_test_open,
        .read = iwl_mvm_d3_test_read,
        .release = iwl_mvm_d3_test_release,
};
#endif
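
/*
 * Usage sketch for the d3_test debugfs hook above (a rough example; the
 * exact debugfs path depends on the platform and kernel configuration,
 * so the path below is only an assumption):
 *
 *   cat /sys/kernel/debug/ieee80211/phy0/iwlwifi/iwlmvm/d3_test
 *
 * open() enters pseudo-D3 via __iwl_mvm_suspend(), read() polls
 * d3_test_pme_ptr until the firmware signals a wakeup (or the read is
 * interrupted), and release() resumes via __iwl_mvm_resume(), restarts
 * the HW for non-unified images and disconnects every interface except
 * mvm->keep_vif.
 */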