1 /******************************************************************************
2  *
3  * This file is provided under a dual BSD/GPLv2 license.  When using or
4  * redistributing this file, you may do so under either license.
5  *
6  * GPL LICENSE SUMMARY
7  *
8  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of version 2 of the GNU General Public License as
14  * published by the Free Software Foundation.
15  *
16  * This program is distributed in the hope that it will be useful, but
17  * WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19  * General Public License for more details.
20  *
21  * You should have received a copy of the GNU General Public License
22  * along with this program; if not, write to the Free Software
23  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
24  * USA
25  *
26  * The full GNU General Public License is included in this distribution
27  * in the file called COPYING.
28  *
29  * Contact Information:
30  *  Intel Linux Wireless <linuxwifi@intel.com>
31  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
32  *
33  * BSD LICENSE
34  *
35  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
36  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
37  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
38  * All rights reserved.
39  *
40  * Redistribution and use in source and binary forms, with or without
41  * modification, are permitted provided that the following conditions
42  * are met:
43  *
44  *  * Redistributions of source code must retain the above copyright
45  *    notice, this list of conditions and the following disclaimer.
46  *  * Redistributions in binary form must reproduce the above copyright
47  *    notice, this list of conditions and the following disclaimer in
48  *    the documentation and/or other materials provided with the
49  *    distribution.
50  *  * Neither the name Intel Corporation nor the names of its
51  *    contributors may be used to endorse or promote products derived
52  *    from this software without specific prior written permission.
53  *
54  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
55  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
56  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
57  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
58  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
59  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
60  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
64  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65  *
66  *****************************************************************************/
67 
68 #include <linux/etherdevice.h>
69 #include <linux/ip.h>
70 #include <linux/fs.h>
71 #include <net/cfg80211.h>
72 #include <net/ipv6.h>
73 #include <net/tcp.h>
74 #include <net/addrconf.h>
75 #include "iwl-modparams.h"
76 #include "fw-api.h"
77 #include "mvm.h"
78 
79 void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw,
80 			    struct ieee80211_vif *vif,
81 			    struct cfg80211_gtk_rekey_data *data)
82 {
83 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
84 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
85 
86 	if (iwlwifi_mod_params.swcrypto)
87 		return;
88 
89 	mutex_lock(&mvm->mutex);
90 
91 	memcpy(mvmvif->rekey_data.kek, data->kek, NL80211_KEK_LEN);
92 	memcpy(mvmvif->rekey_data.kck, data->kck, NL80211_KCK_LEN);
93 	mvmvif->rekey_data.replay_ctr =
94 		cpu_to_le64(be64_to_cpup((__be64 *)data->replay_ctr));
95 	mvmvif->rekey_data.valid = true;
96 
97 	mutex_unlock(&mvm->mutex);
98 }
99 
100 #if IS_ENABLED(CONFIG_IPV6)
101 void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
102 			      struct ieee80211_vif *vif,
103 			      struct inet6_dev *idev)
104 {
105 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
106 	struct inet6_ifaddr *ifa;
107 	int idx = 0;
108 
109 	memset(mvmvif->tentative_addrs, 0, sizeof(mvmvif->tentative_addrs));
110 
111 	read_lock_bh(&idev->lock);
112 	list_for_each_entry(ifa, &idev->addr_list, if_list) {
113 		mvmvif->target_ipv6_addrs[idx] = ifa->addr;
114 		if (ifa->flags & IFA_F_TENTATIVE)
115 			__set_bit(idx, mvmvif->tentative_addrs);
116 		idx++;
117 		if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX)
118 			break;
119 	}
120 	read_unlock_bh(&idev->lock);
121 
122 	mvmvif->num_target_ipv6_addrs = idx;
123 }
124 #endif
125 
126 void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
127 				     struct ieee80211_vif *vif, int idx)
128 {
129 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
130 
131 	mvmvif->tx_key_idx = idx;
132 }
133 
134 static void iwl_mvm_convert_p1k(u16 *p1k, __le16 *out)
135 {
136 	int i;
137 
138 	for (i = 0; i < IWL_P1K_SIZE; i++)
139 		out[i] = cpu_to_le16(p1k[i]);
140 }
141 
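/*
 * Return the highest CCMP PN in use for the given TID: compare the value
 * mac80211 tracks for the default RX queue with the per-queue PNs our RX
 * code keeps, so the D3 firmware starts replay checking from the right
 * counter.
 */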
142 static const u8 *iwl_mvm_find_max_pn(struct ieee80211_key_conf *key,
143 				     struct iwl_mvm_key_pn *ptk_pn,
144 				     struct ieee80211_key_seq *seq,
145 				     int tid, int queues)
146 {
147 	const u8 *ret = seq->ccmp.pn;
148 	int i;
149 
150 	/* get the PN from mac80211, used on the default queue */
151 	ieee80211_get_key_rx_seq(key, tid, seq);
152 
153 	/* and use the internal data for the other queues */
154 	for (i = 1; i < queues; i++) {
155 		const u8 *tmp = ptk_pn->q[i].pn[tid];
156 
157 		if (memcmp(ret, tmp, IEEE80211_CCMP_PN_LEN) <= 0)
158 			ret = tmp;
159 	}
160 
161 	return ret;
162 }
163 
164 struct wowlan_key_data {
165 	struct iwl_wowlan_rsc_tsc_params_cmd *rsc_tsc;
166 	struct iwl_wowlan_tkip_params_cmd *tkip;
167 	bool error, use_rsc_tsc, use_tkip, configure_keys;
168 	int wep_key_idx;
169 };
170 
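/*
 * Key iterator callback (used with ieee80211_iter_keys()): convert each
 * key's TX/RX counters into the firmware's RSC/TSC layout and, when
 * data->configure_keys is set, also upload the key material at the key
 * offsets the D3 firmware expects (PTK at offset 0, GTK at offset 1).
 */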
171 static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
172 					struct ieee80211_vif *vif,
173 					struct ieee80211_sta *sta,
174 					struct ieee80211_key_conf *key,
175 					void *_data)
176 {
177 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
178 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
179 	struct wowlan_key_data *data = _data;
180 	struct aes_sc *aes_sc, *aes_tx_sc = NULL;
181 	struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL;
182 	struct iwl_p1k_cache *rx_p1ks;
183 	u8 *rx_mic_key;
184 	struct ieee80211_key_seq seq;
185 	u32 cur_rx_iv32 = 0;
186 	u16 p1k[IWL_P1K_SIZE];
187 	int ret, i;
188 
189 	switch (key->cipher) {
190 	case WLAN_CIPHER_SUITE_WEP40:
191 	case WLAN_CIPHER_SUITE_WEP104: { /* hack it for now */
192 		struct {
193 			struct iwl_mvm_wep_key_cmd wep_key_cmd;
194 			struct iwl_mvm_wep_key wep_key;
195 		} __packed wkc = {
196 			.wep_key_cmd.mac_id_n_color =
197 				cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
198 								mvmvif->color)),
199 			.wep_key_cmd.num_keys = 1,
200 			/* firmware sets STA_KEY_FLG_WEP_13BYTES */
201 			.wep_key_cmd.decryption_type = STA_KEY_FLG_WEP,
202 			.wep_key.key_index = key->keyidx,
203 			.wep_key.key_size = key->keylen,
204 		};
205 
206 		/*
		 * This will fail -- the key functions don't support
208 		 * pairwise WEP keys. However, that's better than silently
209 		 * failing WoWLAN. Or maybe not?
210 		 */
211 		if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
212 			break;
213 
214 		memcpy(&wkc.wep_key.key[3], key->key, key->keylen);
215 		if (key->keyidx == mvmvif->tx_key_idx) {
216 			/* TX key must be at offset 0 */
217 			wkc.wep_key.key_offset = 0;
218 		} else {
219 			/* others start at 1 */
220 			data->wep_key_idx++;
221 			wkc.wep_key.key_offset = data->wep_key_idx;
222 		}
223 
224 		if (data->configure_keys) {
225 			mutex_lock(&mvm->mutex);
226 			ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, 0,
227 						   sizeof(wkc), &wkc);
228 			data->error = ret != 0;
229 
230 			mvm->ptk_ivlen = key->iv_len;
231 			mvm->ptk_icvlen = key->icv_len;
232 			mvm->gtk_ivlen = key->iv_len;
233 			mvm->gtk_icvlen = key->icv_len;
234 			mutex_unlock(&mvm->mutex);
235 		}
236 
237 		/* don't upload key again */
238 		return;
239 	}
240 	default:
241 		data->error = true;
242 		return;
243 	case WLAN_CIPHER_SUITE_AES_CMAC:
244 		/*
245 		 * Ignore CMAC keys -- the WoWLAN firmware doesn't support them
246 		 * but we also shouldn't abort suspend due to that. It does have
247 		 * support for the IGTK key renewal, but doesn't really use the
248 		 * IGTK for anything. This means we could spuriously wake up or
249 		 * be deauthenticated, but that was considered acceptable.
250 		 */
251 		return;
252 	case WLAN_CIPHER_SUITE_TKIP:
253 		if (sta) {
254 			u64 pn64;
255 
256 			tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc;
257 			tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc;
258 
259 			rx_p1ks = data->tkip->rx_uni;
260 
261 			pn64 = atomic64_read(&key->tx_pn);
262 			tkip_tx_sc->iv16 = cpu_to_le16(TKIP_PN_TO_IV16(pn64));
263 			tkip_tx_sc->iv32 = cpu_to_le32(TKIP_PN_TO_IV32(pn64));
264 
265 			ieee80211_get_tkip_p1k_iv(key, TKIP_PN_TO_IV32(pn64),
266 						  p1k);
267 			iwl_mvm_convert_p1k(p1k, data->tkip->tx.p1k);
268 
269 			memcpy(data->tkip->mic_keys.tx,
270 			       &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
271 			       IWL_MIC_KEY_SIZE);
272 
273 			rx_mic_key = data->tkip->mic_keys.rx_unicast;
274 		} else {
275 			tkip_sc =
276 				data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc;
277 			rx_p1ks = data->tkip->rx_multi;
278 			rx_mic_key = data->tkip->mic_keys.rx_mcast;
279 		}
280 
281 		/*
282 		 * For non-QoS this relies on the fact that both the uCode and
		 * mac80211 use TID 0 (as they must, to avoid replay attacks)
284 		 * for checking the IV in the frames.
285 		 */
286 		for (i = 0; i < IWL_NUM_RSC; i++) {
287 			ieee80211_get_key_rx_seq(key, i, &seq);
288 			tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16);
289 			tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32);
290 			/* wrapping isn't allowed, AP must rekey */
291 			if (seq.tkip.iv32 > cur_rx_iv32)
292 				cur_rx_iv32 = seq.tkip.iv32;
293 		}
294 
295 		ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid,
296 					  cur_rx_iv32, p1k);
297 		iwl_mvm_convert_p1k(p1k, rx_p1ks[0].p1k);
298 		ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid,
299 					  cur_rx_iv32 + 1, p1k);
300 		iwl_mvm_convert_p1k(p1k, rx_p1ks[1].p1k);
301 
302 		memcpy(rx_mic_key,
303 		       &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
304 		       IWL_MIC_KEY_SIZE);
305 
306 		data->use_tkip = true;
307 		data->use_rsc_tsc = true;
308 		break;
309 	case WLAN_CIPHER_SUITE_CCMP:
310 		if (sta) {
311 			u64 pn64;
312 
313 			aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
314 			aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;
315 
316 			pn64 = atomic64_read(&key->tx_pn);
317 			aes_tx_sc->pn = cpu_to_le64(pn64);
318 		} else {
319 			aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
320 		}
321 
322 		/*
323 		 * For non-QoS this relies on the fact that both the uCode and
324 		 * mac80211/our RX code use TID 0 for checking the PN.
325 		 */
326 		if (sta && iwl_mvm_has_new_rx_api(mvm)) {
327 			struct iwl_mvm_sta *mvmsta;
328 			struct iwl_mvm_key_pn *ptk_pn;
329 			const u8 *pn;
330 
331 			mvmsta = iwl_mvm_sta_from_mac80211(sta);
332 			ptk_pn = rcu_dereference_protected(
333 						mvmsta->ptk_pn[key->keyidx],
334 						lockdep_is_held(&mvm->mutex));
335 			if (WARN_ON(!ptk_pn))
336 				break;
337 
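			/*
			 * mac80211 stores the PN big-endian (pn[0] is the
			 * most significant byte); pack it into the 48-bit
			 * little-endian counter the firmware uses.
			 */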
338 			for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
339 				pn = iwl_mvm_find_max_pn(key, ptk_pn, &seq, i,
340 						mvm->trans->num_rx_queues);
341 				aes_sc[i].pn = cpu_to_le64((u64)pn[5] |
342 							   ((u64)pn[4] << 8) |
343 							   ((u64)pn[3] << 16) |
344 							   ((u64)pn[2] << 24) |
345 							   ((u64)pn[1] << 32) |
346 							   ((u64)pn[0] << 40));
347 			}
348 		} else {
349 			for (i = 0; i < IWL_NUM_RSC; i++) {
350 				u8 *pn = seq.ccmp.pn;
351 
352 				ieee80211_get_key_rx_seq(key, i, &seq);
353 				aes_sc[i].pn = cpu_to_le64((u64)pn[5] |
354 							   ((u64)pn[4] << 8) |
355 							   ((u64)pn[3] << 16) |
356 							   ((u64)pn[2] << 24) |
357 							   ((u64)pn[1] << 32) |
358 							   ((u64)pn[0] << 40));
359 			}
360 		}
361 		data->use_rsc_tsc = true;
362 		break;
363 	}
364 
365 	if (data->configure_keys) {
366 		mutex_lock(&mvm->mutex);
367 		/*
368 		 * The D3 firmware hardcodes the key offset 0 as the key it
369 		 * uses to transmit packets to the AP, i.e. the PTK.
370 		 */
371 		if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
372 			mvm->ptk_ivlen = key->iv_len;
373 			mvm->ptk_icvlen = key->icv_len;
374 			ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 0);
375 		} else {
376 			/*
377 			 * firmware only supports TSC/RSC for a single key,
378 			 * so if there are multiple keep overwriting them
379 			 * with new ones -- this relies on mac80211 doing
380 			 * list_add_tail().
381 			 */
382 			mvm->gtk_ivlen = key->iv_len;
383 			mvm->gtk_icvlen = key->icv_len;
384 			ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 1);
385 		}
386 		mutex_unlock(&mvm->mutex);
387 		data->error = ret != 0;
388 	}
389 }
390 
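/*
 * Upload the user-configured wakeup patterns in a single variable-length
 * WOWLAN_PATTERNS command; each pattern carries its data plus a bitmask
 * (one bit per pattern byte) telling the firmware which bytes must match.
 */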
391 static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
392 				 struct cfg80211_wowlan *wowlan)
393 {
394 	struct iwl_wowlan_patterns_cmd *pattern_cmd;
395 	struct iwl_host_cmd cmd = {
396 		.id = WOWLAN_PATTERNS,
397 		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
398 	};
399 	int i, err;
400 
401 	if (!wowlan->n_patterns)
402 		return 0;
403 
404 	cmd.len[0] = sizeof(*pattern_cmd) +
405 		wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern);
406 
407 	pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
408 	if (!pattern_cmd)
409 		return -ENOMEM;
410 
411 	pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);
412 
413 	for (i = 0; i < wowlan->n_patterns; i++) {
414 		int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
415 
416 		memcpy(&pattern_cmd->patterns[i].mask,
417 		       wowlan->patterns[i].mask, mask_len);
418 		memcpy(&pattern_cmd->patterns[i].pattern,
419 		       wowlan->patterns[i].pattern,
420 		       wowlan->patterns[i].pattern_len);
421 		pattern_cmd->patterns[i].mask_size = mask_len;
422 		pattern_cmd->patterns[i].pattern_size =
423 			wowlan->patterns[i].pattern_len;
424 	}
425 
426 	cmd.data[0] = pattern_cmd;
427 	err = iwl_mvm_send_cmd(mvm, &cmd);
428 	kfree(pattern_cmd);
429 	return err;
430 }
431 
432 enum iwl_mvm_tcp_packet_type {
433 	MVM_TCP_TX_SYN,
434 	MVM_TCP_RX_SYNACK,
435 	MVM_TCP_TX_DATA,
436 	MVM_TCP_RX_ACK,
437 	MVM_TCP_RX_WAKE,
438 	MVM_TCP_TX_FIN,
439 };
440 
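/*
 * Compute the TCP pseudo-header checksum for the given payload length and
 * addresses; it is stored in the tcp_pseudo_header_checksum field of each
 * packet template so the firmware can complete the TCP checksum itself.
 */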
441 static __le16 pseudo_hdr_check(int len, __be32 saddr, __be32 daddr)
442 {
443 	__sum16 check = tcp_v4_check(len, saddr, daddr, 0);
444 	return cpu_to_le16(be16_to_cpu((__force __be16)check));
445 }
446 
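/*
 * Build one of the Ethernet/IP/TCP template packets for the TCP wakeup
 * offload.  For TX templates the IP header (total length, checksum) is
 * filled in; for RX templates a match mask is generated instead, covering
 * the relevant header fields and, for the wake packet, the user-supplied
 * wake mask.
 */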
447 static void iwl_mvm_build_tcp_packet(struct ieee80211_vif *vif,
448 				     struct cfg80211_wowlan_tcp *tcp,
449 				     void *_pkt, u8 *mask,
450 				     __le16 *pseudo_hdr_csum,
451 				     enum iwl_mvm_tcp_packet_type ptype)
452 {
453 	struct {
454 		struct ethhdr eth;
455 		struct iphdr ip;
456 		struct tcphdr tcp;
457 		u8 data[];
458 	} __packed *pkt = _pkt;
459 	u16 ip_tot_len = sizeof(struct iphdr) + sizeof(struct tcphdr);
460 	int i;
461 
	pkt->eth.h_proto = cpu_to_be16(ETH_P_IP);
463 	pkt->ip.version = 4;
464 	pkt->ip.ihl = 5;
465 	pkt->ip.protocol = IPPROTO_TCP;
466 
467 	switch (ptype) {
468 	case MVM_TCP_TX_SYN:
469 	case MVM_TCP_TX_DATA:
470 	case MVM_TCP_TX_FIN:
471 		memcpy(pkt->eth.h_dest, tcp->dst_mac, ETH_ALEN);
472 		memcpy(pkt->eth.h_source, vif->addr, ETH_ALEN);
473 		pkt->ip.ttl = 128;
474 		pkt->ip.saddr = tcp->src;
475 		pkt->ip.daddr = tcp->dst;
476 		pkt->tcp.source = cpu_to_be16(tcp->src_port);
477 		pkt->tcp.dest = cpu_to_be16(tcp->dst_port);
478 		/* overwritten for TX SYN later */
479 		pkt->tcp.doff = sizeof(struct tcphdr) / 4;
480 		pkt->tcp.window = cpu_to_be16(65000);
481 		break;
482 	case MVM_TCP_RX_SYNACK:
483 	case MVM_TCP_RX_ACK:
484 	case MVM_TCP_RX_WAKE:
485 		memcpy(pkt->eth.h_dest, vif->addr, ETH_ALEN);
486 		memcpy(pkt->eth.h_source, tcp->dst_mac, ETH_ALEN);
487 		pkt->ip.saddr = tcp->dst;
488 		pkt->ip.daddr = tcp->src;
489 		pkt->tcp.source = cpu_to_be16(tcp->dst_port);
490 		pkt->tcp.dest = cpu_to_be16(tcp->src_port);
491 		break;
492 	default:
493 		WARN_ON(1);
494 		return;
495 	}
496 
497 	switch (ptype) {
498 	case MVM_TCP_TX_SYN:
		/* firmware assumes 8 option bytes - use 8 TCP NOPs (kind 0x01) for now */
500 		memset(pkt->data, 0x01, 8);
501 		ip_tot_len += 8;
502 		pkt->tcp.doff = (sizeof(struct tcphdr) + 8) / 4;
503 		pkt->tcp.syn = 1;
504 		break;
505 	case MVM_TCP_TX_DATA:
506 		ip_tot_len += tcp->payload_len;
507 		memcpy(pkt->data, tcp->payload, tcp->payload_len);
508 		pkt->tcp.psh = 1;
509 		pkt->tcp.ack = 1;
510 		break;
511 	case MVM_TCP_TX_FIN:
512 		pkt->tcp.fin = 1;
513 		pkt->tcp.ack = 1;
514 		break;
515 	case MVM_TCP_RX_SYNACK:
516 		pkt->tcp.syn = 1;
517 		pkt->tcp.ack = 1;
518 		break;
519 	case MVM_TCP_RX_ACK:
520 		pkt->tcp.ack = 1;
521 		break;
522 	case MVM_TCP_RX_WAKE:
523 		ip_tot_len += tcp->wake_len;
524 		pkt->tcp.psh = 1;
525 		pkt->tcp.ack = 1;
526 		memcpy(pkt->data, tcp->wake_data, tcp->wake_len);
527 		break;
528 	}
529 
530 	switch (ptype) {
531 	case MVM_TCP_TX_SYN:
532 	case MVM_TCP_TX_DATA:
533 	case MVM_TCP_TX_FIN:
534 		pkt->ip.tot_len = cpu_to_be16(ip_tot_len);
535 		pkt->ip.check = ip_fast_csum(&pkt->ip, pkt->ip.ihl);
536 		break;
537 	case MVM_TCP_RX_WAKE:
538 		for (i = 0; i < DIV_ROUND_UP(tcp->wake_len, 8); i++) {
539 			u8 tmp = tcp->wake_mask[i];
540 			mask[i + 6] |= tmp << 6;
541 			if (i + 1 < DIV_ROUND_UP(tcp->wake_len, 8))
542 				mask[i + 7] = tmp >> 2;
543 		}
544 		/* fall through for ethernet/IP/TCP headers mask */
545 	case MVM_TCP_RX_SYNACK:
546 	case MVM_TCP_RX_ACK:
547 		mask[0] = 0xff; /* match ethernet */
548 		/*
549 		 * match ethernet, ip.version, ip.ihl
550 		 * the ip.ihl half byte is really masked out by firmware
551 		 */
552 		mask[1] = 0x7f;
553 		mask[2] = 0x80; /* match ip.protocol */
554 		mask[3] = 0xfc; /* match ip.saddr, ip.daddr */
555 		mask[4] = 0x3f; /* match ip.daddr, tcp.source, tcp.dest */
556 		mask[5] = 0x80; /* match tcp flags */
557 		/* leave rest (0 or set for MVM_TCP_RX_WAKE) */
558 		break;
	}
560 
561 	*pseudo_hdr_csum = pseudo_hdr_check(ip_tot_len - sizeof(struct iphdr),
562 					    pkt->ip.saddr, pkt->ip.daddr);
563 }
564 
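/*
 * Send REMOTE_WAKE_CONFIG_CMD with the packet templates, retry/timeout
 * parameters and wake-payload matching data the firmware needs to keep
 * the TCP connection alive and detect the remote wakeup packet.
 */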
565 static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm,
566 					struct ieee80211_vif *vif,
567 					struct cfg80211_wowlan_tcp *tcp)
568 {
569 	struct iwl_wowlan_remote_wake_config *cfg;
570 	struct iwl_host_cmd cmd = {
571 		.id = REMOTE_WAKE_CONFIG_CMD,
572 		.len = { sizeof(*cfg), },
573 		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
574 	};
575 	int ret;
576 
577 	if (!tcp)
578 		return 0;
579 
580 	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
581 	if (!cfg)
582 		return -ENOMEM;
583 	cmd.data[0] = cfg;
584 
585 	cfg->max_syn_retries = 10;
586 	cfg->max_data_retries = 10;
587 	cfg->tcp_syn_ack_timeout = 1; /* seconds */
588 	cfg->tcp_ack_timeout = 1; /* seconds */
589 
590 	/* SYN (TX) */
591 	iwl_mvm_build_tcp_packet(
592 		vif, tcp, cfg->syn_tx.data, NULL,
593 		&cfg->syn_tx.info.tcp_pseudo_header_checksum,
594 		MVM_TCP_TX_SYN);
595 	cfg->syn_tx.info.tcp_payload_length = 0;
596 
597 	/* SYN/ACK (RX) */
598 	iwl_mvm_build_tcp_packet(
599 		vif, tcp, cfg->synack_rx.data, cfg->synack_rx.rx_mask,
600 		&cfg->synack_rx.info.tcp_pseudo_header_checksum,
601 		MVM_TCP_RX_SYNACK);
602 	cfg->synack_rx.info.tcp_payload_length = 0;
603 
604 	/* KEEPALIVE/ACK (TX) */
605 	iwl_mvm_build_tcp_packet(
606 		vif, tcp, cfg->keepalive_tx.data, NULL,
607 		&cfg->keepalive_tx.info.tcp_pseudo_header_checksum,
608 		MVM_TCP_TX_DATA);
609 	cfg->keepalive_tx.info.tcp_payload_length =
610 		cpu_to_le16(tcp->payload_len);
611 	cfg->sequence_number_offset = tcp->payload_seq.offset;
612 	/* length must be 0..4, the field is little endian */
613 	cfg->sequence_number_length = tcp->payload_seq.len;
614 	cfg->initial_sequence_number = cpu_to_le32(tcp->payload_seq.start);
615 	cfg->keepalive_interval = cpu_to_le16(tcp->data_interval);
616 	if (tcp->payload_tok.len) {
617 		cfg->token_offset = tcp->payload_tok.offset;
618 		cfg->token_length = tcp->payload_tok.len;
619 		cfg->num_tokens =
			cpu_to_le16(tcp->tokens_size / tcp->payload_tok.len);
621 		memcpy(cfg->tokens, tcp->payload_tok.token_stream,
622 		       tcp->tokens_size);
623 	} else {
624 		/* set tokens to max value to almost never run out */
625 		cfg->num_tokens = cpu_to_le16(65535);
626 	}
627 
628 	/* ACK (RX) */
629 	iwl_mvm_build_tcp_packet(
630 		vif, tcp, cfg->keepalive_ack_rx.data,
631 		cfg->keepalive_ack_rx.rx_mask,
632 		&cfg->keepalive_ack_rx.info.tcp_pseudo_header_checksum,
633 		MVM_TCP_RX_ACK);
634 	cfg->keepalive_ack_rx.info.tcp_payload_length = 0;
635 
636 	/* WAKEUP (RX) */
637 	iwl_mvm_build_tcp_packet(
638 		vif, tcp, cfg->wake_rx.data, cfg->wake_rx.rx_mask,
639 		&cfg->wake_rx.info.tcp_pseudo_header_checksum,
640 		MVM_TCP_RX_WAKE);
641 	cfg->wake_rx.info.tcp_payload_length =
642 		cpu_to_le16(tcp->wake_len);
643 
644 	/* FIN */
645 	iwl_mvm_build_tcp_packet(
646 		vif, tcp, cfg->fin_tx.data, NULL,
647 		&cfg->fin_tx.info.tcp_pseudo_header_checksum,
648 		MVM_TCP_TX_FIN);
649 	cfg->fin_tx.info.tcp_payload_length = 0;
650 
651 	ret = iwl_mvm_send_cmd(mvm, &cmd);
652 	kfree(cfg);
653 
654 	return ret;
655 }
656 
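/*
 * The D3 firmware image starts out empty, so manually re-add the minimal
 * set of contexts needed for the association: PHY context, MAC context,
 * binding, the AP station and a time quota.
 */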
657 static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
658 				struct ieee80211_sta *ap_sta)
659 {
660 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
661 	struct ieee80211_chanctx_conf *ctx;
662 	u8 chains_static, chains_dynamic;
663 	struct cfg80211_chan_def chandef;
664 	int ret, i;
665 	struct iwl_binding_cmd binding_cmd = {};
666 	struct iwl_time_quota_cmd quota_cmd = {};
667 	struct iwl_time_quota_data *quota;
668 	u32 status;
669 	int size;
670 
671 	if (fw_has_capa(&mvm->fw->ucode_capa,
672 			IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT)) {
673 		size = sizeof(binding_cmd);
674 		if (mvmvif->phy_ctxt->channel->band == NL80211_BAND_2GHZ ||
675 		    !iwl_mvm_is_cdb_supported(mvm))
676 			binding_cmd.lmac_id = cpu_to_le32(IWL_LMAC_24G_INDEX);
677 		else
678 			binding_cmd.lmac_id = cpu_to_le32(IWL_LMAC_5G_INDEX);
679 	} else {
680 		size = IWL_BINDING_CMD_SIZE_V1;
681 	}
682 
683 	/* add back the PHY */
684 	if (WARN_ON(!mvmvif->phy_ctxt))
685 		return -EINVAL;
686 
687 	rcu_read_lock();
688 	ctx = rcu_dereference(vif->chanctx_conf);
689 	if (WARN_ON(!ctx)) {
690 		rcu_read_unlock();
691 		return -EINVAL;
692 	}
693 	chandef = ctx->def;
694 	chains_static = ctx->rx_chains_static;
695 	chains_dynamic = ctx->rx_chains_dynamic;
696 	rcu_read_unlock();
697 
698 	ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->phy_ctxt, &chandef,
699 				   chains_static, chains_dynamic);
700 	if (ret)
701 		return ret;
702 
703 	/* add back the MAC */
704 	mvmvif->uploaded = false;
705 
706 	if (WARN_ON(!vif->bss_conf.assoc))
707 		return -EINVAL;
708 
709 	ret = iwl_mvm_mac_ctxt_add(mvm, vif);
710 	if (ret)
711 		return ret;
712 
713 	/* add back binding - XXX refactor? */
714 	binding_cmd.id_and_color =
715 		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
716 						mvmvif->phy_ctxt->color));
717 	binding_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
718 	binding_cmd.phy =
719 		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
720 						mvmvif->phy_ctxt->color));
721 	binding_cmd.macs[0] = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
722 							      mvmvif->color));
723 	for (i = 1; i < MAX_MACS_IN_BINDING; i++)
724 		binding_cmd.macs[i] = cpu_to_le32(FW_CTXT_INVALID);
725 
726 	status = 0;
727 	ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD,
728 					  size, &binding_cmd, &status);
729 	if (ret) {
730 		IWL_ERR(mvm, "Failed to add binding: %d\n", ret);
731 		return ret;
732 	}
733 
734 	if (status) {
735 		IWL_ERR(mvm, "Binding command failed: %u\n", status);
736 		return -EIO;
737 	}
738 
739 	ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false, 0);
740 	if (ret)
741 		return ret;
742 	rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta);
743 
744 	ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
745 	if (ret)
746 		return ret;
747 
748 	/* and some quota */
749 	quota = iwl_mvm_quota_cmd_get_quota(mvm, &quota_cmd, 0);
750 	quota->id_and_color =
751 		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
752 						mvmvif->phy_ctxt->color));
753 	quota->quota = cpu_to_le32(IWL_MVM_MAX_QUOTA);
754 	quota->max_duration = cpu_to_le32(IWL_MVM_MAX_QUOTA);
755 
756 	for (i = 1; i < MAX_BINDINGS; i++) {
757 		quota = iwl_mvm_quota_cmd_get_quota(mvm, &quota_cmd, i);
758 		quota->id_and_color = cpu_to_le32(FW_CTXT_INVALID);
759 	}
760 
761 	ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0,
762 				   iwl_mvm_quota_cmd_size(mvm), &quota_cmd);
763 	if (ret)
764 		IWL_ERR(mvm, "Failed to send quota: %d\n", ret);
765 
766 	if (iwl_mvm_is_lar_supported(mvm) && iwl_mvm_init_fw_regd(mvm))
767 		IWL_ERR(mvm, "Failed to initialize D3 LAR information\n");
768 
769 	return 0;
770 }
771 
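/*
 * Query the firmware for this MAC's non-QoS TX sequence counter.  The
 * firmware reports the next sequence number to use, so 0x10 is subtracted
 * to get the last-used value; a negative value is returned on error.
 */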
772 static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm,
773 				       struct ieee80211_vif *vif)
774 {
775 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
776 	struct iwl_nonqos_seq_query_cmd query_cmd = {
777 		.get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_GET),
778 		.mac_id_n_color =
779 			cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
780 							mvmvif->color)),
781 	};
782 	struct iwl_host_cmd cmd = {
783 		.id = NON_QOS_TX_COUNTER_CMD,
784 		.flags = CMD_WANT_SKB,
785 	};
786 	int err;
787 	u32 size;
788 
789 	cmd.data[0] = &query_cmd;
790 	cmd.len[0] = sizeof(query_cmd);
791 
792 	err = iwl_mvm_send_cmd(mvm, &cmd);
793 	if (err)
794 		return err;
795 
796 	size = iwl_rx_packet_payload_len(cmd.resp_pkt);
797 	if (size < sizeof(__le16)) {
798 		err = -EINVAL;
799 	} else {
800 		err = le16_to_cpup((__le16 *)cmd.resp_pkt->data);
801 		/* firmware returns next, not last-used seqno */
802 		err = (u16) (err - 0x10);
803 	}
804 
805 	iwl_free_resp(&cmd);
806 	return err;
807 }
808 
809 void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
810 {
811 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
812 	struct iwl_nonqos_seq_query_cmd query_cmd = {
813 		.get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_SET),
814 		.mac_id_n_color =
815 			cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
816 							mvmvif->color)),
817 		.value = cpu_to_le16(mvmvif->seqno),
818 	};
819 
820 	/* return if called during restart, not resume from D3 */
821 	if (!mvmvif->seqno_valid)
822 		return;
823 
824 	mvmvif->seqno_valid = false;
825 
826 	if (iwl_mvm_send_cmd_pdu(mvm, NON_QOS_TX_COUNTER_CMD, 0,
827 				 sizeof(query_cmd), &query_cmd))
828 		IWL_ERR(mvm, "failed to set non-QoS seqno\n");
829 }
830 
831 static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm)
832 {
833 	iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
834 
835 	iwl_mvm_stop_device(mvm);
836 	/*
837 	 * Set the HW restart bit -- this is mostly true as we're
838 	 * going to load new firmware and reprogram that, though
839 	 * the reprogramming is going to be manual to avoid adding
	 * all the MACs that aren't supported.
841 	 * We don't have to clear up everything though because the
842 	 * reprogramming is manual. When we resume, we'll actually
843 	 * go through a proper restart sequence again to switch
844 	 * back to the runtime firmware image.
845 	 */
846 	set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
847 
848 	/* the fw is reset, so all the keys are cleared */
849 	memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
850 
	mvm->ptk_ivlen = 0;
	mvm->ptk_icvlen = 0;
	mvm->gtk_ivlen = 0;
	mvm->gtk_icvlen = 0;
855 
856 	return iwl_mvm_load_d3_fw(mvm);
857 }
858 
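/*
 * Fill the WOWLAN_CONFIGURATION command data: the non-QoS/QoS sequence
 * counters to continue from, plus the wakeup filters derived from the
 * user's cfg80211 WoWLAN triggers.
 */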
859 static int
860 iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
861 			  struct cfg80211_wowlan *wowlan,
862 			  struct iwl_wowlan_config_cmd *wowlan_config_cmd,
863 			  struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif,
864 			  struct ieee80211_sta *ap_sta)
865 {
866 	int ret;
867 	struct iwl_mvm_sta *mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
868 
869 	/* TODO: wowlan_config_cmd->wowlan_ba_teardown_tids */
870 
871 	wowlan_config_cmd->is_11n_connection =
872 					ap_sta->ht_cap.ht_supported;
873 	wowlan_config_cmd->flags = ENABLE_L3_FILTERING |
874 		ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING;
875 
876 	/* Query the last used seqno and set it */
877 	ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
878 	if (ret < 0)
879 		return ret;
880 
881 	wowlan_config_cmd->non_qos_seq = cpu_to_le16(ret);
882 
883 	iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, wowlan_config_cmd);
884 
885 	if (wowlan->disconnect)
886 		wowlan_config_cmd->wakeup_filter |=
887 			cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
888 				    IWL_WOWLAN_WAKEUP_LINK_CHANGE);
889 	if (wowlan->magic_pkt)
890 		wowlan_config_cmd->wakeup_filter |=
891 			cpu_to_le32(IWL_WOWLAN_WAKEUP_MAGIC_PACKET);
892 	if (wowlan->gtk_rekey_failure)
893 		wowlan_config_cmd->wakeup_filter |=
894 			cpu_to_le32(IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
895 	if (wowlan->eap_identity_req)
896 		wowlan_config_cmd->wakeup_filter |=
897 			cpu_to_le32(IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ);
898 	if (wowlan->four_way_handshake)
899 		wowlan_config_cmd->wakeup_filter |=
900 			cpu_to_le32(IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
901 	if (wowlan->n_patterns)
902 		wowlan_config_cmd->wakeup_filter |=
903 			cpu_to_le32(IWL_WOWLAN_WAKEUP_PATTERN_MATCH);
904 
905 	if (wowlan->rfkill_release)
906 		wowlan_config_cmd->wakeup_filter |=
907 			cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);
908 
909 	if (wowlan->tcp) {
910 		/*
911 		 * Set the "link change" (really "link lost") flag as well
912 		 * since that implies losing the TCP connection.
913 		 */
914 		wowlan_config_cmd->wakeup_filter |=
915 			cpu_to_le32(IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS |
916 				    IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE |
917 				    IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET |
918 				    IWL_WOWLAN_WAKEUP_LINK_CHANGE);
919 	}
920 
921 	return 0;
922 }
923 
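/*
 * Iterate the vif's keys under RCU, after making sure the d0i3 AP station
 * still exists.  Unlike ieee80211_iter_keys(), this is safe on the d0i3
 * path where taking mutexes could deadlock.
 */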
924 static void
925 iwl_mvm_iter_d0i3_ap_keys(struct iwl_mvm *mvm,
926 			  struct ieee80211_vif *vif,
927 			  void (*iter)(struct ieee80211_hw *hw,
928 				       struct ieee80211_vif *vif,
929 				       struct ieee80211_sta *sta,
930 				       struct ieee80211_key_conf *key,
931 				       void *data),
932 			  void *data)
933 {
934 	struct ieee80211_sta *ap_sta;
935 
936 	rcu_read_lock();
937 
938 	ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[mvm->d0i3_ap_sta_id]);
939 	if (IS_ERR_OR_NULL(ap_sta))
940 		goto out;
941 
942 	ieee80211_iter_keys_rcu(mvm->hw, vif, iter, data);
943 out:
944 	rcu_read_unlock();
945 }
946 
947 int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
948 				     struct ieee80211_vif *vif,
949 				     bool d0i3,
950 				     u32 cmd_flags)
951 {
952 	struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {};
953 	struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
954 	struct wowlan_key_data key_data = {
955 		.configure_keys = !d0i3,
956 		.use_rsc_tsc = false,
957 		.tkip = &tkip_cmd,
958 		.use_tkip = false,
959 	};
960 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
961 	int ret;
962 
963 	key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
964 	if (!key_data.rsc_tsc)
965 		return -ENOMEM;
966 
967 	/*
968 	 * if we have to configure keys, call ieee80211_iter_keys(),
969 	 * as we need non-atomic context in order to take the
970 	 * required locks.
971 	 * for the d0i3 we can't use ieee80211_iter_keys(), as
972 	 * taking (almost) any mutex might result in deadlock.
973 	 */
974 	if (!d0i3) {
975 		/*
976 		 * Note that currently we don't propagate cmd_flags
977 		 * to the iterator. In case of key_data.configure_keys,
978 		 * all the configured commands are SYNC, and
979 		 * iwl_mvm_wowlan_program_keys() will take care of
980 		 * locking/unlocking mvm->mutex.
981 		 */
982 		ieee80211_iter_keys(mvm->hw, vif,
983 				    iwl_mvm_wowlan_program_keys,
984 				    &key_data);
985 	} else {
986 		iwl_mvm_iter_d0i3_ap_keys(mvm, vif,
987 					  iwl_mvm_wowlan_program_keys,
988 					  &key_data);
989 	}
990 
991 	if (key_data.error) {
992 		ret = -EIO;
993 		goto out;
994 	}
995 
996 	if (key_data.use_rsc_tsc) {
997 		ret = iwl_mvm_send_cmd_pdu(mvm,
998 					   WOWLAN_TSC_RSC_PARAM, cmd_flags,
999 					   sizeof(*key_data.rsc_tsc),
1000 					   key_data.rsc_tsc);
1001 		if (ret)
1002 			goto out;
1003 	}
1004 
1005 	if (key_data.use_tkip &&
1006 	    !fw_has_api(&mvm->fw->ucode_capa,
1007 			IWL_UCODE_TLV_API_TKIP_MIC_KEYS)) {
1008 		ret = iwl_mvm_send_cmd_pdu(mvm,
1009 					   WOWLAN_TKIP_PARAM,
1010 					   cmd_flags, sizeof(tkip_cmd),
1011 					   &tkip_cmd);
1012 		if (ret)
1013 			goto out;
1014 	}
1015 
1016 	/* configure rekey data only if offloaded rekey is supported (d3) */
1017 	if (mvmvif->rekey_data.valid && !d0i3) {
1018 		memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd));
1019 		memcpy(kek_kck_cmd.kck, mvmvif->rekey_data.kck,
1020 		       NL80211_KCK_LEN);
1021 		kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN);
1022 		memcpy(kek_kck_cmd.kek, mvmvif->rekey_data.kek,
1023 		       NL80211_KEK_LEN);
1024 		kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN);
1025 		kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr;
1026 
1027 		ret = iwl_mvm_send_cmd_pdu(mvm,
1028 					   WOWLAN_KEK_KCK_MATERIAL, cmd_flags,
1029 					   sizeof(kek_kck_cmd),
1030 					   &kek_kck_cmd);
1031 		if (ret)
1032 			goto out;
1033 	}
1034 	ret = 0;
1035 out:
1036 	kfree(key_data.rsc_tsc);
1037 	return ret;
1038 }
1039 
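/*
 * Program the firmware for "regular" WoWLAN: switch to the D3 image if
 * needed, configure keys and rekey material, wakeup filters, patterns,
 * protocol offloads and the TCP wakeup connection.
 */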
1040 static int
1041 iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
1042 		      struct cfg80211_wowlan *wowlan,
1043 		      struct iwl_wowlan_config_cmd *wowlan_config_cmd,
1044 		      struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif,
1045 		      struct ieee80211_sta *ap_sta)
1046 {
1047 	int ret;
1048 	bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
1049 					 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
1050 
1051 	if (!unified_image) {
1052 		ret = iwl_mvm_switch_to_d3(mvm);
1053 		if (ret)
1054 			return ret;
1055 
1056 		ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta);
1057 		if (ret)
1058 			return ret;
1059 	}
1060 
1061 	if (!iwlwifi_mod_params.swcrypto) {
1062 		/*
1063 		 * This needs to be unlocked due to lock ordering
1064 		 * constraints. Since we're in the suspend path
1065 		 * that isn't really a problem though.
1066 		 */
1067 		mutex_unlock(&mvm->mutex);
1068 		ret = iwl_mvm_wowlan_config_key_params(mvm, vif, false,
1069 						       CMD_ASYNC);
1070 		mutex_lock(&mvm->mutex);
1071 		if (ret)
1072 			return ret;
1073 	}
1074 
1075 	ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0,
1076 				   sizeof(*wowlan_config_cmd),
1077 				   wowlan_config_cmd);
1078 	if (ret)
1079 		return ret;
1080 
1081 	ret = iwl_mvm_send_patterns(mvm, wowlan);
1082 	if (ret)
1083 		return ret;
1084 
1085 	ret = iwl_mvm_send_proto_offload(mvm, vif, false, true, 0);
1086 	if (ret)
1087 		return ret;
1088 
1089 	ret = iwl_mvm_send_remote_wake_cfg(mvm, vif, wowlan->tcp);
1090 	return ret;
1091 }
1092 
1093 static int
1094 iwl_mvm_netdetect_config(struct iwl_mvm *mvm,
1095 			 struct cfg80211_wowlan *wowlan,
1096 			 struct cfg80211_sched_scan_request *nd_config,
1097 			 struct ieee80211_vif *vif)
1098 {
1099 	struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
1100 	int ret;
1101 	bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
1102 					 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
1103 
1104 	if (!unified_image) {
1105 		ret = iwl_mvm_switch_to_d3(mvm);
1106 		if (ret)
1107 			return ret;
1108 	} else {
1109 		/* In theory, we wouldn't have to stop a running sched
1110 		 * scan in order to start another one (for
1111 		 * net-detect).  But in practice this doesn't seem to
1112 		 * work properly, so stop any running sched_scan now.
1113 		 */
1114 		ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
1115 		if (ret)
1116 			return ret;
1117 	}
1118 
1119 	/* rfkill release can be either for wowlan or netdetect */
1120 	if (wowlan->rfkill_release)
1121 		wowlan_config_cmd.wakeup_filter |=
1122 			cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);
1123 
1124 	ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0,
1125 				   sizeof(wowlan_config_cmd),
1126 				   &wowlan_config_cmd);
1127 	if (ret)
1128 		return ret;
1129 
1130 	ret = iwl_mvm_sched_scan_start(mvm, vif, nd_config, &mvm->nd_ies,
1131 				       IWL_MVM_SCAN_NETDETECT);
1132 	if (ret)
1133 		return ret;
1134 
1135 	if (WARN_ON(mvm->nd_match_sets || mvm->nd_channels))
1136 		return -EBUSY;
1137 
1138 	/* save the sched scan matchsets... */
1139 	if (nd_config->n_match_sets) {
1140 		mvm->nd_match_sets = kmemdup(nd_config->match_sets,
1141 					     sizeof(*nd_config->match_sets) *
1142 					     nd_config->n_match_sets,
1143 					     GFP_KERNEL);
1144 		if (mvm->nd_match_sets)
1145 			mvm->n_nd_match_sets = nd_config->n_match_sets;
1146 	}
1147 
1148 	/* ...and the sched scan channels for later reporting */
1149 	mvm->nd_channels = kmemdup(nd_config->channels,
1150 				   sizeof(*nd_config->channels) *
1151 				   nd_config->n_channels,
1152 				   GFP_KERNEL);
1153 	if (mvm->nd_channels)
1154 		mvm->n_nd_channels = nd_config->n_channels;
1155 
1156 	return 0;
1157 }
1158 
1159 static void iwl_mvm_free_nd(struct iwl_mvm *mvm)
1160 {
1161 	kfree(mvm->nd_match_sets);
1162 	mvm->nd_match_sets = NULL;
1163 	mvm->n_nd_match_sets = 0;
1164 	kfree(mvm->nd_channels);
1165 	mvm->nd_channels = NULL;
1166 	mvm->n_nd_channels = 0;
1167 }
1168 
1169 static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
1170 			     struct cfg80211_wowlan *wowlan,
1171 			     bool test)
1172 {
1173 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1174 	struct ieee80211_vif *vif = NULL;
1175 	struct iwl_mvm_vif *mvmvif = NULL;
1176 	struct ieee80211_sta *ap_sta = NULL;
1177 	struct iwl_d3_manager_config d3_cfg_cmd_data = {
1178 		/*
1179 		 * Program the minimum sleep time to 10 seconds, as many
1180 		 * platforms have issues processing a wakeup signal while
1181 		 * still being in the process of suspending.
1182 		 */
1183 		.min_sleep_time = cpu_to_le32(10 * 1000 * 1000),
1184 	};
1185 	struct iwl_host_cmd d3_cfg_cmd = {
1186 		.id = D3_CONFIG_CMD,
1187 		.flags = CMD_WANT_SKB,
1188 		.data[0] = &d3_cfg_cmd_data,
1189 		.len[0] = sizeof(d3_cfg_cmd_data),
1190 	};
1191 	int ret;
1192 	int len __maybe_unused;
1193 	bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
1194 					 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
1195 
1196 	if (!wowlan) {
1197 		/*
1198 		 * mac80211 shouldn't get here, but for D3 test
1199 		 * it doesn't warrant a warning
1200 		 */
1201 		WARN_ON(!test);
1202 		return -EINVAL;
1203 	}
1204 
1205 	mutex_lock(&mvm->mutex);
1206 
1207 	vif = iwl_mvm_get_bss_vif(mvm);
1208 	if (IS_ERR_OR_NULL(vif)) {
1209 		ret = 1;
1210 		goto out_noreset;
1211 	}
1212 
1213 	mvmvif = iwl_mvm_vif_from_mac80211(vif);
1214 
1215 	if (mvmvif->ap_sta_id == IWL_MVM_INVALID_STA) {
1216 		/* if we're not associated, this must be netdetect */
1217 		if (!wowlan->nd_config) {
1218 			ret = 1;
1219 			goto out_noreset;
1220 		}
1221 
1222 		ret = iwl_mvm_netdetect_config(
1223 			mvm, wowlan, wowlan->nd_config, vif);
1224 		if (ret)
1225 			goto out;
1226 
1227 		mvm->net_detect = true;
1228 	} else {
1229 		struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
1230 
1231 		ap_sta = rcu_dereference_protected(
1232 			mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
1233 			lockdep_is_held(&mvm->mutex));
1234 		if (IS_ERR_OR_NULL(ap_sta)) {
1235 			ret = -EINVAL;
1236 			goto out_noreset;
1237 		}
1238 
1239 		ret = iwl_mvm_get_wowlan_config(mvm, wowlan, &wowlan_config_cmd,
1240 						vif, mvmvif, ap_sta);
1241 		if (ret)
1242 			goto out_noreset;
1243 		ret = iwl_mvm_wowlan_config(mvm, wowlan, &wowlan_config_cmd,
1244 					    vif, mvmvif, ap_sta);
1245 		if (ret)
1246 			goto out;
1247 
1248 		mvm->net_detect = false;
1249 	}
1250 
1251 	ret = iwl_mvm_power_update_device(mvm);
1252 	if (ret)
1253 		goto out;
1254 
1255 	ret = iwl_mvm_power_update_mac(mvm);
1256 	if (ret)
1257 		goto out;
1258 
1259 #ifdef CONFIG_IWLWIFI_DEBUGFS
1260 	if (mvm->d3_wake_sysassert)
1261 		d3_cfg_cmd_data.wakeup_flags |=
1262 			cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR);
1263 #endif
1264 
1265 	/* must be last -- this switches firmware state */
1266 	ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd);
1267 	if (ret)
1268 		goto out;
1269 #ifdef CONFIG_IWLWIFI_DEBUGFS
1270 	len = iwl_rx_packet_payload_len(d3_cfg_cmd.resp_pkt);
1271 	if (len >= sizeof(u32)) {
1272 		mvm->d3_test_pme_ptr =
1273 			le32_to_cpup((__le32 *)d3_cfg_cmd.resp_pkt->data);
1274 	}
1275 #endif
1276 	iwl_free_resp(&d3_cfg_cmd);
1277 
1278 	clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1279 
1280 	iwl_trans_d3_suspend(mvm->trans, test, !unified_image);
1281  out:
1282 	if (ret < 0) {
1283 		iwl_mvm_free_nd(mvm);
1284 
1285 		if (!unified_image) {
1286 			iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
1287 			if (mvm->fw_restart > 0) {
1288 				mvm->fw_restart--;
1289 				ieee80211_restart_hw(mvm->hw);
1290 			}
1291 		}
1292 	}
1293  out_noreset:
1294 	mutex_unlock(&mvm->mutex);
1295 
1296 	return ret;
1297 }
1298 
1299 static int iwl_mvm_enter_d0i3_sync(struct iwl_mvm *mvm)
1300 {
1301 	struct iwl_notification_wait wait_d3;
1302 	static const u16 d3_notif[] = { D3_CONFIG_CMD };
1303 	int ret;
1304 
1305 	iwl_init_notification_wait(&mvm->notif_wait, &wait_d3,
1306 				   d3_notif, ARRAY_SIZE(d3_notif),
1307 				   NULL, NULL);
1308 
1309 	ret = iwl_mvm_enter_d0i3(mvm->hw->priv);
1310 	if (ret)
1311 		goto remove_notif;
1312 
1313 	ret = iwl_wait_notification(&mvm->notif_wait, &wait_d3, HZ);
1314 	WARN_ON_ONCE(ret);
1315 	return ret;
1316 
1317 remove_notif:
1318 	iwl_remove_notification(&mvm->notif_wait, &wait_d3);
1319 	return ret;
1320 }
1321 
1322 int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
1323 {
1324 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1325 	struct iwl_trans *trans = mvm->trans;
1326 	int ret;
1327 
1328 	/* make sure the d0i3 exit work is not pending */
1329 	flush_work(&mvm->d0i3_exit_work);
1330 
1331 	ret = iwl_trans_suspend(trans);
1332 	if (ret)
1333 		return ret;
1334 
1335 	if (wowlan->any) {
1336 		trans->system_pm_mode = IWL_PLAT_PM_MODE_D0I3;
1337 
1338 		if (iwl_mvm_enter_d0i3_on_suspend(mvm)) {
1339 			ret = iwl_mvm_enter_d0i3_sync(mvm);
1340 
1341 			if (ret)
1342 				return ret;
1343 		}
1344 
1345 		mutex_lock(&mvm->d0i3_suspend_mutex);
1346 		__set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
1347 		mutex_unlock(&mvm->d0i3_suspend_mutex);
1348 
1349 		iwl_trans_d3_suspend(trans, false, false);
1350 
1351 		return 0;
1352 	}
1353 
1354 	trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
1355 
1356 	return __iwl_mvm_suspend(hw, wowlan, false);
1357 }
1358 
1359 /* converted data from the different status responses */
1360 struct iwl_wowlan_status_data {
1361 	u16 pattern_number;
1362 	u16 qos_seq_ctr[8];
1363 	u32 wakeup_reasons;
1364 	u32 wake_packet_length;
1365 	u32 wake_packet_bufsize;
1366 	const u8 *wake_packet;
1367 };
1368 
1369 static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
1370 					  struct ieee80211_vif *vif,
1371 					  struct iwl_wowlan_status_data *status)
1372 {
1373 	struct sk_buff *pkt = NULL;
1374 	struct cfg80211_wowlan_wakeup wakeup = {
1375 		.pattern_idx = -1,
1376 	};
1377 	struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
1378 	u32 reasons = status->wakeup_reasons;
1379 
1380 	if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) {
1381 		wakeup_report = NULL;
1382 		goto report;
1383 	}
1384 
1385 	pm_wakeup_event(mvm->dev, 0);
1386 
1387 	if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET)
1388 		wakeup.magic_pkt = true;
1389 
1390 	if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN)
1391 		wakeup.pattern_idx =
1392 			status->pattern_number;
1393 
1394 	if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
1395 		       IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH))
1396 		wakeup.disconnect = true;
1397 
1398 	if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE)
1399 		wakeup.gtk_rekey_failure = true;
1400 
1401 	if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
1402 		wakeup.rfkill_release = true;
1403 
1404 	if (reasons & IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST)
1405 		wakeup.eap_identity_req = true;
1406 
1407 	if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE)
1408 		wakeup.four_way_handshake = true;
1409 
1410 	if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS)
1411 		wakeup.tcp_connlost = true;
1412 
1413 	if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE)
1414 		wakeup.tcp_nomoretokens = true;
1415 
1416 	if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET)
1417 		wakeup.tcp_match = true;
1418 
1419 	if (status->wake_packet_bufsize) {
1420 		int pktsize = status->wake_packet_bufsize;
1421 		int pktlen = status->wake_packet_length;
1422 		const u8 *pktdata = status->wake_packet;
1423 		struct ieee80211_hdr *hdr = (void *)pktdata;
1424 		int truncated = pktlen - pktsize;
1425 
1426 		/* this would be a firmware bug */
1427 		if (WARN_ON_ONCE(truncated < 0))
1428 			truncated = 0;
1429 
1430 		if (ieee80211_is_data(hdr->frame_control)) {
1431 			int hdrlen = ieee80211_hdrlen(hdr->frame_control);
1432 			int ivlen = 0, icvlen = 4; /* also FCS */
1433 
1434 			pkt = alloc_skb(pktsize, GFP_KERNEL);
1435 			if (!pkt)
1436 				goto report;
1437 
1438 			skb_put_data(pkt, pktdata, hdrlen);
1439 			pktdata += hdrlen;
1440 			pktsize -= hdrlen;
1441 
1442 			if (ieee80211_has_protected(hdr->frame_control)) {
1443 				/*
1444 				 * This is unlocked and using gtk_i(c)vlen,
1445 				 * but since everything is under RTNL still
1446 				 * that's not really a problem - changing
1447 				 * it would be difficult.
1448 				 */
1449 				if (is_multicast_ether_addr(hdr->addr1)) {
1450 					ivlen = mvm->gtk_ivlen;
1451 					icvlen += mvm->gtk_icvlen;
1452 				} else {
1453 					ivlen = mvm->ptk_ivlen;
1454 					icvlen += mvm->ptk_icvlen;
1455 				}
1456 			}
1457 
1458 			/* if truncated, FCS/ICV is (partially) gone */
1459 			if (truncated >= icvlen) {
				truncated -= icvlen;
				icvlen = 0;
1462 			} else {
1463 				icvlen -= truncated;
1464 				truncated = 0;
1465 			}
1466 
1467 			pktsize -= ivlen + icvlen;
1468 			pktdata += ivlen;
1469 
1470 			skb_put_data(pkt, pktdata, pktsize);
1471 
1472 			if (ieee80211_data_to_8023(pkt, vif->addr, vif->type))
1473 				goto report;
1474 			wakeup.packet = pkt->data;
1475 			wakeup.packet_present_len = pkt->len;
1476 			wakeup.packet_len = pkt->len - truncated;
1477 			wakeup.packet_80211 = false;
1478 		} else {
1479 			int fcslen = 4;
1480 
1481 			if (truncated >= 4) {
1482 				truncated -= 4;
1483 				fcslen = 0;
1484 			} else {
1485 				fcslen -= truncated;
1486 				truncated = 0;
1487 			}
1488 			pktsize -= fcslen;
1489 			wakeup.packet = status->wake_packet;
1490 			wakeup.packet_present_len = pktsize;
1491 			wakeup.packet_len = pktlen - truncated;
1492 			wakeup.packet_80211 = true;
1493 		}
1494 	}
1495 
1496  report:
1497 	ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
1498 	kfree_skb(pkt);
1499 }
1500 
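/*
 * Unpack the firmware's little-endian AES counter into the big-endian
 * 48-bit PN layout that mac80211 expects.
 */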
1501 static void iwl_mvm_aes_sc_to_seq(struct aes_sc *sc,
1502 				  struct ieee80211_key_seq *seq)
1503 {
1504 	u64 pn;
1505 
1506 	pn = le64_to_cpu(sc->pn);
1507 	seq->ccmp.pn[0] = pn >> 40;
1508 	seq->ccmp.pn[1] = pn >> 32;
1509 	seq->ccmp.pn[2] = pn >> 24;
1510 	seq->ccmp.pn[3] = pn >> 16;
1511 	seq->ccmp.pn[4] = pn >> 8;
1512 	seq->ccmp.pn[5] = pn;
1513 }
1514 
1515 static void iwl_mvm_tkip_sc_to_seq(struct tkip_sc *sc,
1516 				   struct ieee80211_key_seq *seq)
1517 {
1518 	seq->tkip.iv32 = le32_to_cpu(sc->iv32);
1519 	seq->tkip.iv16 = le16_to_cpu(sc->iv16);
1520 }
1521 
1522 static void iwl_mvm_set_aes_rx_seq(struct iwl_mvm *mvm, struct aes_sc *scs,
1523 				   struct ieee80211_sta *sta,
1524 				   struct ieee80211_key_conf *key)
1525 {
1526 	int tid;
1527 
1528 	BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS);
1529 
1530 	if (sta && iwl_mvm_has_new_rx_api(mvm)) {
1531 		struct iwl_mvm_sta *mvmsta;
1532 		struct iwl_mvm_key_pn *ptk_pn;
1533 
1534 		mvmsta = iwl_mvm_sta_from_mac80211(sta);
1535 
1536 		ptk_pn = rcu_dereference_protected(mvmsta->ptk_pn[key->keyidx],
1537 						   lockdep_is_held(&mvm->mutex));
1538 		if (WARN_ON(!ptk_pn))
1539 			return;
1540 
1541 		for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
1542 			struct ieee80211_key_seq seq = {};
1543 			int i;
1544 
1545 			iwl_mvm_aes_sc_to_seq(&scs[tid], &seq);
1546 			ieee80211_set_key_rx_seq(key, tid, &seq);
1547 			for (i = 1; i < mvm->trans->num_rx_queues; i++)
1548 				memcpy(ptk_pn->q[i].pn[tid],
1549 				       seq.ccmp.pn, IEEE80211_CCMP_PN_LEN);
1550 		}
1551 	} else {
1552 		for (tid = 0; tid < IWL_NUM_RSC; tid++) {
1553 			struct ieee80211_key_seq seq = {};
1554 
1555 			iwl_mvm_aes_sc_to_seq(&scs[tid], &seq);
1556 			ieee80211_set_key_rx_seq(key, tid, &seq);
1557 		}
1558 	}
1559 }
1560 
1561 static void iwl_mvm_set_tkip_rx_seq(struct tkip_sc *scs,
1562 				    struct ieee80211_key_conf *key)
1563 {
1564 	int tid;
1565 
1566 	BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS);
1567 
1568 	for (tid = 0; tid < IWL_NUM_RSC; tid++) {
1569 		struct ieee80211_key_seq seq = {};
1570 
1571 		iwl_mvm_tkip_sc_to_seq(&scs[tid], &seq);
1572 		ieee80211_set_key_rx_seq(key, tid, &seq);
1573 	}
1574 }
1575 
1576 static void iwl_mvm_set_key_rx_seq(struct iwl_mvm *mvm,
1577 				   struct ieee80211_key_conf *key,
1578 				   struct iwl_wowlan_status *status)
1579 {
1580 	union iwl_all_tsc_rsc *rsc = &status->gtk.rsc.all_tsc_rsc;
1581 
1582 	switch (key->cipher) {
1583 	case WLAN_CIPHER_SUITE_CCMP:
1584 		iwl_mvm_set_aes_rx_seq(mvm, rsc->aes.multicast_rsc, NULL, key);
1585 		break;
1586 	case WLAN_CIPHER_SUITE_TKIP:
1587 		iwl_mvm_set_tkip_rx_seq(rsc->tkip.multicast_rsc, key);
1588 		break;
1589 	default:
1590 		WARN_ON(1);
1591 	}
1592 }
1593 
1594 struct iwl_mvm_d3_gtk_iter_data {
1595 	struct iwl_mvm *mvm;
1596 	struct iwl_wowlan_status *status;
1597 	void *last_gtk;
1598 	u32 cipher;
1599 	bool find_phase, unhandled_cipher;
1600 	int num_keys;
1601 };
1602 
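/*
 * Key iterator used after resume.  It runs in two passes: with
 * data->find_phase set it only records the last GTK that was installed
 * before suspend; in the second pass it updates the RX/TX counters of the
 * keys we keep and removes GTKs that the firmware has since rekeyed.
 */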
1603 static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw,
1604 				   struct ieee80211_vif *vif,
1605 				   struct ieee80211_sta *sta,
1606 				   struct ieee80211_key_conf *key,
1607 				   void *_data)
1608 {
1609 	struct iwl_mvm_d3_gtk_iter_data *data = _data;
1610 
1611 	if (data->unhandled_cipher)
1612 		return;
1613 
1614 	switch (key->cipher) {
1615 	case WLAN_CIPHER_SUITE_WEP40:
1616 	case WLAN_CIPHER_SUITE_WEP104:
1617 		/* ignore WEP completely, nothing to do */
1618 		return;
1619 	case WLAN_CIPHER_SUITE_CCMP:
1620 	case WLAN_CIPHER_SUITE_TKIP:
1621 		/* we support these */
1622 		break;
1623 	default:
1624 		/* everything else (even CMAC for MFP) - disconnect from AP */
1625 		data->unhandled_cipher = true;
1626 		return;
1627 	}
1628 
1629 	data->num_keys++;
1630 
1631 	/*
1632 	 * pairwise key - update sequence counters only;
1633 	 * note that this assumes no TDLS sessions are active
1634 	 */
1635 	if (sta) {
1636 		struct ieee80211_key_seq seq = {};
1637 		union iwl_all_tsc_rsc *sc = &data->status->gtk.rsc.all_tsc_rsc;
1638 
1639 		if (data->find_phase)
1640 			return;
1641 
1642 		switch (key->cipher) {
1643 		case WLAN_CIPHER_SUITE_CCMP:
1644 			iwl_mvm_set_aes_rx_seq(data->mvm, sc->aes.unicast_rsc,
1645 					       sta, key);
1646 			atomic64_set(&key->tx_pn, le64_to_cpu(sc->aes.tsc.pn));
1647 			break;
1648 		case WLAN_CIPHER_SUITE_TKIP:
1649 			iwl_mvm_tkip_sc_to_seq(&sc->tkip.tsc, &seq);
1650 			iwl_mvm_set_tkip_rx_seq(sc->tkip.unicast_rsc, key);
1651 			atomic64_set(&key->tx_pn,
1652 				     (u64)seq.tkip.iv16 |
1653 				     ((u64)seq.tkip.iv32 << 16));
1654 			break;
1655 		}
1656 
1657 		/* that's it for this key */
1658 		return;
1659 	}
1660 
1661 	if (data->find_phase) {
1662 		data->last_gtk = key;
1663 		data->cipher = key->cipher;
1664 		return;
1665 	}
1666 
1667 	if (data->status->num_of_gtk_rekeys)
1668 		ieee80211_remove_key(key);
1669 	else if (data->last_gtk == key)
1670 		iwl_mvm_set_key_rx_seq(data->mvm, key, data->status);
1671 }
1672 
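/*
 * Decide whether the connection can be kept after resume: refresh all key
 * sequence counters from the firmware status, install the new GTK if the
 * firmware rekeyed while we were asleep, and return false (disconnect) if
 * we woke up due to a disconnection or an unsupported cipher is in use.
 */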
1673 static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
1674 					  struct ieee80211_vif *vif,
1675 					  struct iwl_wowlan_status *status)
1676 {
1677 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1678 	struct iwl_mvm_d3_gtk_iter_data gtkdata = {
1679 		.mvm = mvm,
1680 		.status = status,
1681 	};
1682 	u32 disconnection_reasons =
1683 		IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
1684 		IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;
1685 
1686 	if (!status || !vif->bss_conf.bssid)
1687 		return false;
1688 
1689 	if (le32_to_cpu(status->wakeup_reasons) & disconnection_reasons)
1690 		return false;
1691 
1692 	/* find last GTK that we used initially, if any */
1693 	gtkdata.find_phase = true;
1694 	ieee80211_iter_keys(mvm->hw, vif,
1695 			    iwl_mvm_d3_update_keys, &gtkdata);
1696 	/* not trying to keep connections with MFP/unhandled ciphers */
1697 	if (gtkdata.unhandled_cipher)
1698 		return false;
1699 	if (!gtkdata.num_keys)
1700 		goto out;
1701 	if (!gtkdata.last_gtk)
1702 		return false;
1703 
1704 	/*
1705 	 * invalidate all other GTKs that might still exist and update
1706 	 * the one that we used
1707 	 */
1708 	gtkdata.find_phase = false;
1709 	ieee80211_iter_keys(mvm->hw, vif,
1710 			    iwl_mvm_d3_update_keys, &gtkdata);
1711 
1712 	if (status->num_of_gtk_rekeys) {
1713 		struct ieee80211_key_conf *key;
1714 		struct {
1715 			struct ieee80211_key_conf conf;
1716 			u8 key[32];
1717 		} conf = {
1718 			.conf.cipher = gtkdata.cipher,
1719 			.conf.keyidx = status->gtk.key_index,
1720 		};
1721 
1722 		switch (gtkdata.cipher) {
1723 		case WLAN_CIPHER_SUITE_CCMP:
1724 			conf.conf.keylen = WLAN_KEY_LEN_CCMP;
1725 			memcpy(conf.conf.key, status->gtk.decrypt_key,
1726 			       WLAN_KEY_LEN_CCMP);
1727 			break;
1728 		case WLAN_CIPHER_SUITE_TKIP:
1729 			conf.conf.keylen = WLAN_KEY_LEN_TKIP;
1730 			memcpy(conf.conf.key, status->gtk.decrypt_key, 16);
1731 			/* leave TX MIC key zeroed, we don't use it anyway */
1732 			memcpy(conf.conf.key +
1733 			       NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY,
1734 			       status->gtk.tkip_mic_key, 8);
1735 			break;
1736 		}
1737 
1738 		key = ieee80211_gtk_rekey_add(vif, &conf.conf);
1739 		if (IS_ERR(key))
1740 			return false;
1741 		iwl_mvm_set_key_rx_seq(mvm, key, status);
1742 	}
1743 
1744 	if (status->num_of_gtk_rekeys) {
1745 		__be64 replay_ctr =
1746 			cpu_to_be64(le64_to_cpu(status->replay_ctr));
1747 		ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid,
1748 					   (void *)&replay_ctr, GFP_KERNEL);
1749 	}
1750 
1751 out:
1752 	mvmvif->seqno_valid = true;
1753 	/* +0x10 because the set API expects next-to-use, not last-used */
1754 	mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10;
1755 
1756 	return true;
1757 }
1758 
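/*
 * Fetch the WoWLAN status from the firmware.  The error event table is
 * checked first, since a valid entry means the D3 firmware asserted (e.g.
 * on rfkill); otherwise WOWLAN_GET_STATUSES is sent and the variable-length
 * response is duplicated for the caller to free.
 */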
1759 static struct iwl_wowlan_status *
1760 iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1761 {
1762 	u32 base = mvm->error_event_table[0];
1763 	struct error_table_start {
1764 		/* cf. struct iwl_error_event_table */
1765 		u32 valid;
1766 		u32 error_id;
1767 	} err_info;
1768 	struct iwl_host_cmd cmd = {
1769 		.id = WOWLAN_GET_STATUSES,
1770 		.flags = CMD_WANT_SKB,
1771 	};
1772 	struct iwl_wowlan_status *status, *fw_status;
1773 	int ret, len, status_size;
1774 
1775 	iwl_trans_read_mem_bytes(mvm->trans, base,
1776 				 &err_info, sizeof(err_info));
1777 
1778 	if (err_info.valid) {
1779 		IWL_INFO(mvm, "error table is valid (%d) with error (%d)\n",
1780 			 err_info.valid, err_info.error_id);
1781 		if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
1782 			struct cfg80211_wowlan_wakeup wakeup = {
1783 				.rfkill_release = true,
1784 			};
1785 			ieee80211_report_wowlan_wakeup(vif, &wakeup,
1786 						       GFP_KERNEL);
1787 		}
1788 		return ERR_PTR(-EIO);
1789 	}
1790 
1791 	/* only for tracing for now */
1792 	ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0, 0, NULL);
1793 	if (ret)
1794 		IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
1795 
1796 	ret = iwl_mvm_send_cmd(mvm, &cmd);
1797 	if (ret) {
1798 		IWL_ERR(mvm, "failed to query status (%d)\n", ret);
1799 		return ERR_PTR(ret);
1800 	}
1801 
1802 	status_size = sizeof(*fw_status);
1803 
1804 	len = iwl_rx_packet_payload_len(cmd.resp_pkt);
1805 	if (len < status_size) {
1806 		IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
1807 		fw_status = ERR_PTR(-EIO);
1808 		goto out_free_resp;
1809 	}
1810 
1811 	status = (void *)cmd.resp_pkt->data;
1812 	if (len != (status_size +
1813 		    ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4))) {
1814 		IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
1815 		fw_status = ERR_PTR(-EIO);
1816 		goto out_free_resp;
1817 	}
1818 
1819 	fw_status = kmemdup(status, len, GFP_KERNEL);
1820 
1821 out_free_resp:
1822 	iwl_free_resp(&cmd);
1823 	return fw_status;
1824 }
1825 
1826 /* decides whether the connection can be kept; releases the MVM mutex */
1827 static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
1828 					 struct ieee80211_vif *vif)
1829 {
1830 	struct iwl_wowlan_status_data status;
1831 	struct iwl_wowlan_status *fw_status;
1832 	int i;
1833 	bool keep;
1834 	struct iwl_mvm_sta *mvm_ap_sta;
1835 
1836 	fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
1837 	if (IS_ERR_OR_NULL(fw_status))
1838 		goto out_unlock;
1839 
1840 	status.pattern_number = le16_to_cpu(fw_status->pattern_number);
1841 	for (i = 0; i < 8; i++)
1842 		status.qos_seq_ctr[i] =
1843 			le16_to_cpu(fw_status->qos_seq_ctr[i]);
1844 	status.wakeup_reasons = le32_to_cpu(fw_status->wakeup_reasons);
1845 	status.wake_packet_length =
1846 		le32_to_cpu(fw_status->wake_packet_length);
1847 	status.wake_packet_bufsize =
1848 		le32_to_cpu(fw_status->wake_packet_bufsize);
1849 	status.wake_packet = fw_status->wake_packet;
1850 
1851 	/* still at hard-coded place 0 for D3 image */
1852 	mvm_ap_sta = iwl_mvm_sta_from_staid_protected(mvm, 0);
1853 	if (!mvm_ap_sta)
1854 		goto out_free;
1855 
1856 	for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
1857 		u16 seq = status.qos_seq_ctr[i];
1858 		/* firmware stores last-used value, we store next value */
1859 		seq += 0x10;
1860 		mvm_ap_sta->tid_data[i].seq_number = seq;
1861 	}
1862 
1863 	/* now we have all the data we need, unlock to avoid mac80211 issues */
1864 	mutex_unlock(&mvm->mutex);
1865 
1866 	iwl_mvm_report_wakeup_reasons(mvm, vif, &status);
1867 
1868 	keep = iwl_mvm_setup_connection_keep(mvm, vif, fw_status);
1869 
1870 	kfree(fw_status);
1871 	return keep;
1872 
1873 out_free:
1874 	kfree(fw_status);
1875 out_unlock:
1876 	mutex_unlock(&mvm->mutex);
1877 	return false;
1878 }
1879 
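/*
 * Bring the GTK state back in sync after a d0i3 exit, using the same
 * two-phase key iteration as the D3 resume path: first find the last
 * GTK we were using, then update the driver's key data accordingly.
 */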
1880 void iwl_mvm_d0i3_update_keys(struct iwl_mvm *mvm,
1881 			      struct ieee80211_vif *vif,
1882 			      struct iwl_wowlan_status *status)
1883 {
1884 	struct iwl_mvm_d3_gtk_iter_data gtkdata = {
1885 		.mvm = mvm,
1886 		.status = status,
1887 	};
1888 
1889 	/*
1890 	 * rekey handling requires taking locks that can't be taken now.
1891 	 * however, d0i3 doesn't offload rekey, so we're fine.
1892 	 */
1893 	if (WARN_ON_ONCE(status->num_of_gtk_rekeys))
1894 		return;
1895 
1896 	/* find last GTK that we used initially, if any */
1897 	gtkdata.find_phase = true;
1898 	iwl_mvm_iter_d0i3_ap_keys(mvm, vif, iwl_mvm_d3_update_keys, &gtkdata);
1899 
1900 	gtkdata.find_phase = false;
1901 	iwl_mvm_iter_d0i3_ap_keys(mvm, vif, iwl_mvm_d3_update_keys, &gtkdata);
1902 }
1903 
1904 struct iwl_mvm_nd_query_results {
1905 	u32 matched_profiles;
1906 	struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES];
1907 };
1908 
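/*
 * Query the net-detect (scan offload) match results from the firmware
 * and copy them into @results.  Returns 0 on success or a negative
 * error code.
 */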
1909 static int
1910 iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm,
1911 				struct iwl_mvm_nd_query_results *results)
1912 {
1913 	struct iwl_scan_offload_profiles_query *query;
1914 	struct iwl_host_cmd cmd = {
1915 		.id = SCAN_OFFLOAD_PROFILES_QUERY_CMD,
1916 		.flags = CMD_WANT_SKB,
1917 	};
1918 	int ret, len;
1919 
1920 	ret = iwl_mvm_send_cmd(mvm, &cmd);
1921 	if (ret) {
1922 		IWL_ERR(mvm, "failed to query matched profiles (%d)\n", ret);
1923 		return ret;
1924 	}
1925 
1926 	len = iwl_rx_packet_payload_len(cmd.resp_pkt);
1927 	if (len < sizeof(*query)) {
1928 		IWL_ERR(mvm, "Invalid scan offload profiles query response!\n");
1929 		ret = -EIO;
1930 		goto out_free_resp;
1931 	}
1932 
1933 	query = (void *)cmd.resp_pkt->data;
1934 
1935 	results->matched_profiles = le32_to_cpu(query->matched_profiles);
1936 	memcpy(results->matches, query->matches, sizeof(results->matches));
1937 
1938 #ifdef CONFIG_IWLWIFI_DEBUGFS
1939 	mvm->last_netdetect_scans = le32_to_cpu(query->n_scans_done);
1940 #endif
1941 
1942 out_free_resp:
1943 	iwl_free_resp(&cmd);
1944 	return ret;
1945 }
1946 
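/*
 * Work out why a net-detect sleep ended, build the cfg80211 wakeup
 * report (including matched SSIDs and channels, when available) and
 * hand it to mac80211.  Releases the MVM mutex.
 */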
1947 static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
1948 					    struct ieee80211_vif *vif)
1949 {
1950 	struct cfg80211_wowlan_nd_info *net_detect = NULL;
1951 	struct cfg80211_wowlan_wakeup wakeup = {
1952 		.pattern_idx = -1,
1953 	};
1954 	struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
1955 	struct iwl_mvm_nd_query_results query;
1956 	struct iwl_wowlan_status *fw_status;
1957 	unsigned long matched_profiles;
1958 	u32 reasons = 0;
1959 	int i, j, n_matches, ret;
1960 
1961 	fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
1962 	if (!IS_ERR_OR_NULL(fw_status)) {
1963 		reasons = le32_to_cpu(fw_status->wakeup_reasons);
1964 		kfree(fw_status);
1965 	}
1966 
1967 	if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
1968 		wakeup.rfkill_release = true;
1969 
1970 	if (reasons != IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS)
1971 		goto out;
1972 
1973 	ret = iwl_mvm_netdetect_query_results(mvm, &query);
1974 	if (ret || !query.matched_profiles) {
1975 		wakeup_report = NULL;
1976 		goto out;
1977 	}
1978 
1979 	matched_profiles = query.matched_profiles;
1980 	if (mvm->n_nd_match_sets) {
1981 		n_matches = hweight_long(matched_profiles);
1982 	} else {
1983 		IWL_ERR(mvm, "no net detect match information available\n");
1984 		n_matches = 0;
1985 	}
1986 
1987 	net_detect = kzalloc(sizeof(*net_detect) +
1988 			     (n_matches * sizeof(net_detect->matches[0])),
1989 			     GFP_KERNEL);
1990 	if (!net_detect || !n_matches)
1991 		goto out_report_nd;
1992 
1993 	for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) {
1994 		struct iwl_scan_offload_profile_match *fw_match;
1995 		struct cfg80211_wowlan_nd_match *match;
1996 		int idx, n_channels = 0;
1997 
1998 		fw_match = &query.matches[i];
1999 
2000 		for (j = 0; j < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; j++)
2001 			n_channels += hweight8(fw_match->matching_channels[j]);
2002 
2003 		match = kzalloc(sizeof(*match) +
2004 				(n_channels * sizeof(*match->channels)),
2005 				GFP_KERNEL);
2006 		if (!match)
2007 			goto out_report_nd;
2008 
2009 		net_detect->matches[net_detect->n_matches++] = match;
2010 
2011 		/* We inverted the order of the SSIDs in the scan
2012 		 * request, so invert the index here.
2013 		 */
2014 		idx = mvm->n_nd_match_sets - i - 1;
2015 		match->ssid.ssid_len = mvm->nd_match_sets[idx].ssid.ssid_len;
2016 		memcpy(match->ssid.ssid, mvm->nd_match_sets[idx].ssid.ssid,
2017 		       match->ssid.ssid_len);
2018 
2019 		if (mvm->n_nd_channels < n_channels)
2020 			continue;
2021 
2022 		for (j = 0; j < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; j++)
2023 			if (fw_match->matching_channels[j / 8] & (BIT(j % 8)))
2024 				match->channels[match->n_channels++] =
2025 					mvm->nd_channels[j]->center_freq;
2026 	}
2027 
2028 out_report_nd:
2029 	wakeup.net_detect = net_detect;
2030 out:
2031 	iwl_mvm_free_nd(mvm);
2032 
2033 	mutex_unlock(&mvm->mutex);
2034 	ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
2035 
2036 	if (net_detect) {
2037 		for (i = 0; i < net_detect->n_matches; i++)
2038 			kfree(net_detect->matches[i]);
2039 		kfree(net_detect);
2040 	}
2041 }
2042 
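/*
 * Snapshot the SRAM region backing the D3 (WoWLAN) image's data section
 * into mvm->d3_resume_sram so it can be inspected through debugfs; this
 * is a no-op unless CONFIG_IWLWIFI_DEBUGFS is set and
 * mvm->store_d3_resume_sram was enabled.
 */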
2043 static void iwl_mvm_read_d3_sram(struct iwl_mvm *mvm)
2044 {
2045 #ifdef CONFIG_IWLWIFI_DEBUGFS
2046 	const struct fw_img *img = &mvm->fw->img[IWL_UCODE_WOWLAN];
2047 	u32 len = img->sec[IWL_UCODE_SECTION_DATA].len;
2048 	u32 offs = img->sec[IWL_UCODE_SECTION_DATA].offset;
2049 
2050 	if (!mvm->store_d3_resume_sram)
2051 		return;
2052 
2053 	if (!mvm->d3_resume_sram) {
2054 		mvm->d3_resume_sram = kzalloc(len, GFP_KERNEL);
2055 		if (!mvm->d3_resume_sram)
2056 			return;
2057 	}
2058 
2059 	iwl_trans_read_mem_bytes(mvm->trans, offs, mvm->d3_resume_sram, len);
2060 #endif
2061 }
2062 
2063 static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
2064 				       struct ieee80211_vif *vif)
2065 {
2066 	/* skip the interface we kept the connection on */
2067 	if (data == vif)
2068 		return;
2069 
2070 	if (vif->type == NL80211_IFTYPE_STATION)
2071 		ieee80211_resume_disconnect(vif);
2072 }
2073 
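/*
 * Common resume path, used both for a real D3 resume and for the
 * debugfs d3_test case (@test).  Brings the transport out of D3, sends
 * D0I3_END_CMD when the firmware requires it, and queries either the
 * net-detect or the regular wakeup reasons.  Returns 0 if the firmware
 * can keep running as-is, or 1 to make mac80211 restart and reconfigure
 * the hardware.
 */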
2074 static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
2075 {
2076 	struct ieee80211_vif *vif = NULL;
2077 	int ret = 1;
2078 	enum iwl_d3_status d3_status;
2079 	bool keep = false;
2080 	bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
2081 					 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
2082 	bool d0i3_first = fw_has_capa(&mvm->fw->ucode_capa,
2083 				      IWL_UCODE_TLV_CAPA_D0I3_END_FIRST);
2084 
2085 	mutex_lock(&mvm->mutex);
2086 
2087 	/* get the BSS vif pointer again */
2088 	vif = iwl_mvm_get_bss_vif(mvm);
2089 	if (IS_ERR_OR_NULL(vif))
2090 		goto err;
2091 
2092 	ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !unified_image);
2093 	if (ret)
2094 		goto err;
2095 
2096 	if (d3_status != IWL_D3_STATUS_ALIVE) {
2097 		IWL_INFO(mvm, "Device was reset during suspend\n");
2098 		goto err;
2099 	}
2100 
2101 	/* query SRAM first in case we want event logging */
2102 	iwl_mvm_read_d3_sram(mvm);
2103 
2104 	if (d0i3_first) {
2105 		ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL);
2106 		if (ret < 0) {
2107 			IWL_ERR(mvm, "Failed to send D0I3_END_CMD first (%d)\n",
2108 				ret);
2109 			goto err;
2110 		}
2111 	}
2112 
2113 	/*
2114 	 * Query the current location and source from the D3 firmware so we
2115 	 * can play it back when we re-initialize the D0 firmware
2116 	 */
2117 	iwl_mvm_update_changed_regdom(mvm);
2118 
2119 	if (!unified_image)
2120 		/* Re-configure default SAR profile */
2121 		iwl_mvm_sar_select_profile(mvm, 1, 1);
2122 
2123 	if (mvm->net_detect) {
2124 		/* If this is a non-unified image, we restart the FW,
2125 		 * so no need to stop the netdetect scan.  If that
2126 		 * fails, continue and try to get the wake-up reasons,
2127 		 * but trigger a HW restart by keeping a failure code
2128 		 * in ret.
2129 		 */
2130 		if (unified_image)
2131 			ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_NETDETECT,
2132 						false);
2133 
2134 		iwl_mvm_query_netdetect_reasons(mvm, vif);
2135 		/* the above call unlocked the mutex, don't unlock it again */
2136 		goto out;
2137 	} else {
2138 		keep = iwl_mvm_query_wakeup_reasons(mvm, vif);
2139 #ifdef CONFIG_IWLWIFI_DEBUGFS
2140 		if (keep)
2141 			mvm->keep_vif = vif;
2142 #endif
2143 		/* the above call unlocked the mutex, don't unlock it again */
2144 		goto out_iterate;
2145 	}
2146 
2147 err:
2148 	iwl_mvm_free_nd(mvm);
2149 	mutex_unlock(&mvm->mutex);
2150 
2151 out_iterate:
2152 	if (!test)
2153 		ieee80211_iterate_active_interfaces_rtnl(mvm->hw,
2154 			IEEE80211_IFACE_ITER_NORMAL,
2155 			iwl_mvm_d3_disconnect_iter, keep ? vif : NULL);
2156 
2157 out:
2158 	/* no need to reset the device in unified images, if successful */
2159 	if (unified_image && !ret) {
2160 		/* nothing else to do if we already sent D0I3_END_CMD */
2161 		if (d0i3_first)
2162 			return 0;
2163 
2164 		ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL);
2165 		if (!ret)
2166 			return 0;
2167 	}
2168 
2169 	/*
2170 	 * Reconfigure the device in one of the following cases:
2171 	 * 1. We are not using a unified image
2172 	 * 2. We are using a unified image but had an error while exiting D3
2173 	 */
2174 	set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
2175 	set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status);
2176 	/*
2177 	 * When switching images we return 1, which causes mac80211
2178 	 * to do a reconfig with IEEE80211_RECONFIG_TYPE_RESTART.
2179 	 * This type of reconfig calls iwl_mvm_restart_complete(),
2180 	 * where we unref the IWL_MVM_REF_UCODE_DOWN, so we need
2181 	 * to take the reference here.
2182 	 */
2183 	iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
2184 
2185 	return 1;
2186 }
2187 
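/* resume from platform D3: wake the transport, then run the common path */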
2188 static int iwl_mvm_resume_d3(struct iwl_mvm *mvm)
2189 {
2190 	iwl_trans_resume(mvm->trans);
2191 
2192 	return __iwl_mvm_resume(mvm, false);
2193 }
2194 
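/*
 * Resume path used when the platform suspend was handled through d0i3
 * rather than D3: resume the transport, run any deferred d0i3 exit
 * work, and exit d0i3 again when iwl_mvm_enter_d0i3_on_suspend()
 * requires it.
 */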
2195 static int iwl_mvm_resume_d0i3(struct iwl_mvm *mvm)
2196 {
2197 	bool exit_now;
2198 	enum iwl_d3_status d3_status;
2199 	struct iwl_trans *trans = mvm->trans;
2200 
2201 	iwl_trans_d3_resume(trans, &d3_status, false, false);
2202 
2203 	/*
2204 	 * make sure to clear D0I3_DEFER_WAKEUP before
2205 	 * calling iwl_trans_resume(), which might wait
2206 	 * for d0i3 exit completion.
2207 	 */
2208 	mutex_lock(&mvm->d0i3_suspend_mutex);
2209 	__clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
2210 	exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP,
2211 					&mvm->d0i3_suspend_flags);
2212 	mutex_unlock(&mvm->d0i3_suspend_mutex);
2213 	if (exit_now) {
2214 		IWL_DEBUG_RPM(mvm, "Run deferred d0i3 exit\n");
2215 		_iwl_mvm_exit_d0i3(mvm);
2216 	}
2217 
2218 	iwl_trans_resume(trans);
2219 
2220 	if (iwl_mvm_enter_d0i3_on_suspend(mvm)) {
2221 		int ret = iwl_mvm_exit_d0i3(mvm->hw->priv);
2222 
2223 		if (ret)
2224 			return ret;
2225 		/*
2226 		 * d0i3 exit will be deferred until reconfig_complete;
2227 		 * make sure we are out of d0i3 by then.
2228 		 */
2229 	}
2230 	return 0;
2231 }
2232 
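/*
 * mac80211 resume callback: pick the d0i3 or D3 resume path based on
 * the platform PM mode used for the suspend, then reset the mode.
 */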
2233 int iwl_mvm_resume(struct ieee80211_hw *hw)
2234 {
2235 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2236 	int ret;
2237 
2238 	if (mvm->trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3)
2239 		ret = iwl_mvm_resume_d0i3(mvm);
2240 	else
2241 		ret = iwl_mvm_resume_d3(mvm);
2242 
2243 	mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
2244 
2245 	return ret;
2246 }
2247 
2248 void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled)
2249 {
2250 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2251 
2252 	device_set_wakeup_enable(mvm->trans->dev, enabled);
2253 }
2254 
2255 #ifdef CONFIG_IWLWIFI_DEBUGFS
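/*
 * debugfs d3_test support: opening the file puts the device into a
 * pseudo-D3 state (a suspend with test == true) without the platform
 * actually sleeping, so the WoWLAN firmware can be exercised on a
 * running system.
 */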
2256 static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
2257 {
2258 	struct iwl_mvm *mvm = inode->i_private;
2259 	int err;
2260 
2261 	if (mvm->d3_test_active)
2262 		return -EBUSY;
2263 
2264 	file->private_data = inode->i_private;
2265 
2266 	ieee80211_stop_queues(mvm->hw);
2267 	synchronize_net();
2268 
2269 	mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
2270 
2271 	/* start pseudo D3 */
2272 	rtnl_lock();
2273 	err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true);
2274 	rtnl_unlock();
2275 	if (err > 0)
2276 		err = -EINVAL;
2277 	if (err) {
2278 		ieee80211_wake_queues(mvm->hw);
2279 		return err;
2280 	}
2281 	mvm->d3_test_active = true;
2282 	mvm->keep_vif = NULL;
2283 	return 0;
2284 }
2285 
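/*
 * Reading the d3_test file blocks until the firmware asserts its PME
 * flag (polled through d3_test_pme_ptr every 100ms) or the sleep is
 * interrupted by a signal, then returns 0 (EOF).
 */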
2286 static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf,
2287 				    size_t count, loff_t *ppos)
2288 {
2289 	struct iwl_mvm *mvm = file->private_data;
2290 	u32 pme_asserted;
2291 
2292 	while (true) {
2293 		/* read pme_ptr if available */
2294 		if (mvm->d3_test_pme_ptr) {
2295 			pme_asserted = iwl_trans_read_mem32(mvm->trans,
2296 						mvm->d3_test_pme_ptr);
2297 			if (pme_asserted)
2298 				break;
2299 		}
2300 
2301 		if (msleep_interruptible(100))
2302 			break;
2303 	}
2304 
2305 	return 0;
2306 }
2307 
2308 static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac,
2309 					      struct ieee80211_vif *vif)
2310 {
2311 	/* skip the interface we kept the connection on */
2312 	if (_data == vif)
2313 		return;
2314 
2315 	if (vif->type == NL80211_IFTYPE_STATION)
2316 		ieee80211_connection_loss(vif);
2317 }
2318 
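/*
 * Closing the d3_test file resumes from the pseudo-D3 state; with a
 * non-unified image the hardware is restarted, and all station
 * interfaces except the one we kept the connection on are notified of
 * a connection loss.
 */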
2319 static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
2320 {
2321 	struct iwl_mvm *mvm = inode->i_private;
2322 	bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
2323 					 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
2324 
2325 	mvm->d3_test_active = false;
2326 
2327 	rtnl_lock();
2328 	__iwl_mvm_resume(mvm, true);
2329 	rtnl_unlock();
2330 
2331 	mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
2332 
2333 	iwl_abort_notification_waits(&mvm->notif_wait);
2334 	if (!unified_image) {
2335 		int remaining_time = 10;
2336 
2337 		ieee80211_restart_hw(mvm->hw);
2338 
2339 		/* wait for restart and disconnect all interfaces */
2340 		while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
2341 		       remaining_time > 0) {
2342 			remaining_time--;
2343 			msleep(1000);
2344 		}
2345 
2346 		if (remaining_time == 0)
2347 			IWL_ERR(mvm, "Timed out waiting for HW restart!\n");
2348 	}
2349 
2350 	ieee80211_iterate_active_interfaces_atomic(
2351 		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
2352 		iwl_mvm_d3_test_disconn_work_iter, mvm->keep_vif);
2353 
2354 	ieee80211_wake_queues(mvm->hw);
2355 
2356 	return 0;
2357 }
2358 
2359 const struct file_operations iwl_dbgfs_d3_test_ops = {
2360 	.llseek = no_llseek,
2361 	.open = iwl_mvm_d3_test_open,
2362 	.read = iwl_mvm_d3_test_read,
2363 	.release = iwl_mvm_d3_test_release,
2364 };
2365 #endif
2366