1 /******************************************************************************
2  *
3  * This file is provided under a dual BSD/GPLv2 license.  When using or
4  * redistributing this file, you may do so under either license.
5  *
6  * GPL LICENSE SUMMARY
7  *
8  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
11  * Copyright(c) 2018        Intel Corporation
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of version 2 of the GNU General Public License as
15  * published by the Free Software Foundation.
16  *
17  * This program is distributed in the hope that it will be useful, but
18  * WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
20  * General Public License for more details.
21  *
22  * The full GNU General Public License is included in this distribution
23  * in the file called COPYING.
24  *
25  * Contact Information:
26  *  Intel Linux Wireless <linuxwifi@intel.com>
27  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
28  *
29  * BSD LICENSE
30  *
31  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
32  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
33  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
34  * Copyright(c) 2018        Intel Corporation
35  * All rights reserved.
36  *
37  * Redistribution and use in source and binary forms, with or without
38  * modification, are permitted provided that the following conditions
39  * are met:
40  *
41  *  * Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  *  * Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in
45  *    the documentation and/or other materials provided with the
46  *    distribution.
47  *  * Neither the name Intel Corporation nor the names of its
48  *    contributors may be used to endorse or promote products derived
49  *    from this software without specific prior written permission.
50  *
51  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
52  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
53  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
54  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
55  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
56  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
57  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
58  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
59  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
60  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
61  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
62  *
63  *****************************************************************************/
64 #include <linux/kernel.h>
65 #include <linux/slab.h>
66 #include <linux/skbuff.h>
67 #include <linux/netdevice.h>
68 #include <linux/etherdevice.h>
69 #include <linux/ip.h>
70 #include <linux/if_arp.h>
71 #include <linux/time.h>
72 #include <net/mac80211.h>
73 #include <net/ieee80211_radiotap.h>
74 #include <net/tcp.h>
75 
76 #include "iwl-op-mode.h"
77 #include "iwl-io.h"
78 #include "mvm.h"
79 #include "sta.h"
80 #include "time-event.h"
81 #include "iwl-eeprom-parse.h"
82 #include "iwl-phy-db.h"
83 #include "testmode.h"
84 #include "fw/error-dump.h"
85 #include "iwl-prph.h"
86 #include "iwl-nvm-parse.h"
87 
88 static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
89 	{
90 		.max = 1,
91 		.types = BIT(NL80211_IFTYPE_STATION),
92 	},
93 	{
94 		.max = 1,
95 		.types = BIT(NL80211_IFTYPE_AP) |
96 			BIT(NL80211_IFTYPE_P2P_CLIENT) |
97 			BIT(NL80211_IFTYPE_P2P_GO),
98 	},
99 	{
100 		.max = 1,
101 		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
102 	},
103 };
104 
105 static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
106 	{
107 		.num_different_channels = 2,
108 		.max_interfaces = 3,
109 		.limits = iwl_mvm_limits,
110 		.n_limits = ARRAY_SIZE(iwl_mvm_limits),
111 	},
112 };
113 
114 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
115 /*
116  * Use the reserved field to indicate magic values.
 * These values are only used internally by the driver
 * and never make it to the fw (reserved will be 0).
 * BC_FILTER_MAGIC_IP - configure the val of this attribute to
 *	be the vif's IP address. If there is not exactly one
 *	IP address (none, or more than one), this attribute is
 *	skipped.
 * BC_FILTER_MAGIC_MAC - set the val of this attribute to
 *	the LSB bytes of the vif's MAC address.
125  */
126 enum {
127 	BC_FILTER_MAGIC_NONE = 0,
128 	BC_FILTER_MAGIC_IP,
129 	BC_FILTER_MAGIC_MAC,
130 };
131 
132 static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = {
133 	{
134 		/* arp */
135 		.discard = 0,
136 		.frame_type = BCAST_FILTER_FRAME_TYPE_ALL,
137 		.attrs = {
138 			{
139 				/* frame type - arp, hw type - ethernet */
140 				.offset_type =
141 					BCAST_FILTER_OFFSET_PAYLOAD_START,
142 				.offset = sizeof(rfc1042_header),
143 				.val = cpu_to_be32(0x08060001),
144 				.mask = cpu_to_be32(0xffffffff),
145 			},
146 			{
147 				/* arp dest ip */
148 				.offset_type =
149 					BCAST_FILTER_OFFSET_PAYLOAD_START,
150 				.offset = sizeof(rfc1042_header) + 2 +
151 					  sizeof(struct arphdr) +
152 					  ETH_ALEN + sizeof(__be32) +
153 					  ETH_ALEN,
154 				.mask = cpu_to_be32(0xffffffff),
155 				/* mark it as special field */
156 				.reserved1 = cpu_to_le16(BC_FILTER_MAGIC_IP),
157 			},
158 		},
159 	},
160 	{
161 		/* dhcp offer bcast */
162 		.discard = 0,
163 		.frame_type = BCAST_FILTER_FRAME_TYPE_IPV4,
164 		.attrs = {
165 			{
166 				/* udp dest port - 68 (bootp client)*/
167 				.offset_type = BCAST_FILTER_OFFSET_IP_END,
168 				.offset = offsetof(struct udphdr, dest),
169 				.val = cpu_to_be32(0x00440000),
170 				.mask = cpu_to_be32(0xffff0000),
171 			},
172 			{
173 				/* dhcp - lsb bytes of client hw address */
174 				.offset_type = BCAST_FILTER_OFFSET_IP_END,
175 				.offset = 38,
176 				.mask = cpu_to_be32(0xffffffff),
177 				/* mark it as special field */
178 				.reserved1 = cpu_to_le16(BC_FILTER_MAGIC_MAC),
179 			},
180 		},
181 	},
182 	/* last filter must be empty */
183 	{},
184 };
185 #endif
186 
187 static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = {
188 	.max_peers = IWL_MVM_TOF_MAX_APS,
189 	.report_ap_tsf = 1,
190 	.randomize_mac_addr = 1,
191 
192 	.ftm = {
193 		.supported = 1,
194 		.asap = 1,
195 		.non_asap = 1,
196 		.request_lci = 1,
197 		.request_civicloc = 1,
198 		.max_bursts_exponent = -1, /* all supported */
199 		.max_ftms_per_burst = 0, /* no limits */
200 		.bandwidths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
201 			      BIT(NL80211_CHAN_WIDTH_20) |
202 			      BIT(NL80211_CHAN_WIDTH_40) |
203 			      BIT(NL80211_CHAN_WIDTH_80),
204 		.preambles = BIT(NL80211_PREAMBLE_LEGACY) |
205 			     BIT(NL80211_PREAMBLE_HT) |
206 			     BIT(NL80211_PREAMBLE_VHT),
207 	},
208 };
209 
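/*
 * D0i3 (runtime power saving) reference accounting: each operation that
 * must keep the device awake takes a typed reference via iwl_mvm_ref() and
 * releases it with iwl_mvm_unref(); the counts are kept per ref_type in
 * mvm->refs[] and mirrored to the transport with iwl_trans_ref()/unref().
 * On devices without D0i3 support these calls are no-ops.  A typical pair
 * can be seen in iwl_mvm_defer_tx() below, which takes and drops
 * IWL_MVM_REF_TX just to trigger a wakeup.
 */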
210 void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
211 {
212 	if (!iwl_mvm_is_d0i3_supported(mvm))
213 		return;
214 
215 	IWL_DEBUG_RPM(mvm, "Take mvm reference - type %d\n", ref_type);
216 	spin_lock_bh(&mvm->refs_lock);
217 	mvm->refs[ref_type]++;
218 	spin_unlock_bh(&mvm->refs_lock);
219 	iwl_trans_ref(mvm->trans);
220 }
221 
222 void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
223 {
224 	if (!iwl_mvm_is_d0i3_supported(mvm))
225 		return;
226 
227 	IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type);
228 	spin_lock_bh(&mvm->refs_lock);
229 	if (WARN_ON(!mvm->refs[ref_type])) {
230 		spin_unlock_bh(&mvm->refs_lock);
231 		return;
232 	}
233 	mvm->refs[ref_type]--;
234 	spin_unlock_bh(&mvm->refs_lock);
235 	iwl_trans_unref(mvm->trans);
236 }
237 
238 static void iwl_mvm_unref_all_except(struct iwl_mvm *mvm,
239 				     enum iwl_mvm_ref_type except_ref)
240 {
241 	int i, j;
242 
243 	if (!iwl_mvm_is_d0i3_supported(mvm))
244 		return;
245 
246 	spin_lock_bh(&mvm->refs_lock);
247 	for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
248 		if (except_ref == i || !mvm->refs[i])
249 			continue;
250 
251 		IWL_DEBUG_RPM(mvm, "Cleanup: remove mvm ref type %d (%d)\n",
252 			      i, mvm->refs[i]);
253 		for (j = 0; j < mvm->refs[i]; j++)
254 			iwl_trans_unref(mvm->trans);
255 		mvm->refs[i] = 0;
256 	}
257 	spin_unlock_bh(&mvm->refs_lock);
258 }
259 
260 bool iwl_mvm_ref_taken(struct iwl_mvm *mvm)
261 {
262 	int i;
263 	bool taken = false;
264 
265 	if (!iwl_mvm_is_d0i3_supported(mvm))
266 		return true;
267 
268 	spin_lock_bh(&mvm->refs_lock);
269 	for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
270 		if (mvm->refs[i]) {
271 			taken = true;
272 			break;
273 		}
274 	}
275 	spin_unlock_bh(&mvm->refs_lock);
276 
277 	return taken;
278 }
279 
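/*
 * Take a reference like iwl_mvm_ref(), but additionally wait (for up to a
 * second) until the device has exited D0i3.  If the wait times out, the
 * reference is dropped again and -EIO is returned.
 */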
280 int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
281 {
282 	iwl_mvm_ref(mvm, ref_type);
283 
284 	if (!wait_event_timeout(mvm->d0i3_exit_waitq,
285 				!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status),
286 				HZ)) {
287 		WARN_ON_ONCE(1);
288 		iwl_mvm_unref(mvm, ref_type);
289 		return -EIO;
290 	}
291 
292 	return 0;
293 }
294 
295 static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
296 {
297 	int i;
298 
299 	memset(mvm->phy_ctxts, 0, sizeof(mvm->phy_ctxts));
300 	for (i = 0; i < NUM_PHY_CTX; i++) {
301 		mvm->phy_ctxts[i].id = i;
302 		mvm->phy_ctxts[i].ref = 0;
303 	}
304 }
305 
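/*
 * Ask the firmware for an MCC update for the given country code and parse
 * the response into an ieee80211_regdomain.  Must be called with
 * mvm->mutex held; on success *changed (if non-NULL) tells whether the
 * channel profile actually changed.
 */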
306 struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
307 						  const char *alpha2,
308 						  enum iwl_mcc_source src_id,
309 						  bool *changed)
310 {
311 	struct ieee80211_regdomain *regd = NULL;
312 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
313 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
314 	struct iwl_mcc_update_resp *resp;
315 
316 	IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2);
317 
318 	lockdep_assert_held(&mvm->mutex);
319 
320 	resp = iwl_mvm_update_mcc(mvm, alpha2, src_id);
321 	if (IS_ERR_OR_NULL(resp)) {
322 		IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n",
323 			      PTR_ERR_OR_ZERO(resp));
324 		goto out;
325 	}
326 
327 	if (changed) {
328 		u32 status = le32_to_cpu(resp->status);
329 
330 		*changed = (status == MCC_RESP_NEW_CHAN_PROFILE ||
331 			    status == MCC_RESP_ILLEGAL);
332 	}
333 
334 	regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg,
335 				      __le32_to_cpu(resp->n_channels),
336 				      resp->channels,
337 				      __le16_to_cpu(resp->mcc),
338 				      __le16_to_cpu(resp->geo_info));
339 	/* Store the return source id */
340 	src_id = resp->source_id;
341 	kfree(resp);
342 	if (IS_ERR_OR_NULL(regd)) {
		IWL_DEBUG_LAR(mvm, "Could not parse update from FW %d\n",
344 			      PTR_ERR_OR_ZERO(regd));
345 		goto out;
346 	}
347 
348 	IWL_DEBUG_LAR(mvm, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n",
349 		      regd->alpha2, regd->alpha2[0], regd->alpha2[1], src_id);
350 	mvm->lar_regdom_set = true;
351 	mvm->mcc_src = src_id;
352 
353 out:
354 	return regd;
355 }
356 
357 void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm)
358 {
359 	bool changed;
360 	struct ieee80211_regdomain *regd;
361 
362 	if (!iwl_mvm_is_lar_supported(mvm))
363 		return;
364 
365 	regd = iwl_mvm_get_current_regdomain(mvm, &changed);
366 	if (!IS_ERR_OR_NULL(regd)) {
367 		/* only update the regulatory core if changed */
368 		if (changed)
369 			regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
370 
371 		kfree(regd);
372 	}
373 }
374 
375 struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm,
376 							  bool *changed)
377 {
378 	return iwl_mvm_get_regdomain(mvm->hw->wiphy, "ZZ",
379 				     iwl_mvm_is_wifi_mcc_supported(mvm) ?
380 				     MCC_SOURCE_GET_CURRENT :
381 				     MCC_SOURCE_OLD_FW, changed);
382 }
383 
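/*
 * Replay the currently set regulatory domain to the firmware, using the
 * last stored MCC source, and push the result to cfg80211 only if the
 * channel profile changed.  Returns -ENOENT if no regdomain is set yet.
 */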
384 int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm)
385 {
386 	enum iwl_mcc_source used_src;
387 	struct ieee80211_regdomain *regd;
388 	int ret;
389 	bool changed;
390 	const struct ieee80211_regdomain *r =
391 			rtnl_dereference(mvm->hw->wiphy->regd);
392 
393 	if (!r)
394 		return -ENOENT;
395 
396 	/* save the last source in case we overwrite it below */
397 	used_src = mvm->mcc_src;
398 	if (iwl_mvm_is_wifi_mcc_supported(mvm)) {
399 		/* Notify the firmware we support wifi location updates */
400 		regd = iwl_mvm_get_current_regdomain(mvm, NULL);
401 		if (!IS_ERR_OR_NULL(regd))
402 			kfree(regd);
403 	}
404 
405 	/* Now set our last stored MCC and source */
406 	regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, r->alpha2, used_src,
407 				     &changed);
408 	if (IS_ERR_OR_NULL(regd))
409 		return -EIO;
410 
411 	/* update cfg80211 if the regdomain was changed */
412 	if (changed)
413 		ret = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd);
414 	else
415 		ret = 0;
416 
417 	kfree(regd);
418 	return ret;
419 }
420 
static const u8 he_if_types_ext_capa_sta[] = {
422 	 [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
423 	 [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
424 	 [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT,
425 };
426 
static const struct wiphy_iftype_ext_capab he_iftypes_ext_capa[] = {
428 	{
429 		.iftype = NL80211_IFTYPE_STATION,
430 		.extended_capabilities = he_if_types_ext_capa_sta,
431 		.extended_capabilities_mask = he_if_types_ext_capa_sta,
432 		.extended_capabilities_len = sizeof(he_if_types_ext_capa_sta),
433 	},
434 };
435 
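/*
 * Advertise the driver and firmware capabilities to mac80211 (hw flags,
 * cipher suites, interface combinations, scan limits, WoWLAN, etc.) based
 * on the firmware TLVs and NVM data, then register the hardware.
 */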
436 int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
437 {
438 	struct ieee80211_hw *hw = mvm->hw;
439 	int num_mac, ret, i;
440 	static const u32 mvm_ciphers[] = {
441 		WLAN_CIPHER_SUITE_WEP40,
442 		WLAN_CIPHER_SUITE_WEP104,
443 		WLAN_CIPHER_SUITE_TKIP,
444 		WLAN_CIPHER_SUITE_CCMP,
445 	};
446 #ifdef CONFIG_PM_SLEEP
447 	bool unified = fw_has_capa(&mvm->fw->ucode_capa,
448 				   IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
449 #endif
450 
451 	/* Tell mac80211 our characteristics */
452 	ieee80211_hw_set(hw, SIGNAL_DBM);
453 	ieee80211_hw_set(hw, SPECTRUM_MGMT);
454 	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
455 	ieee80211_hw_set(hw, WANT_MONITOR_VIF);
456 	ieee80211_hw_set(hw, SUPPORTS_PS);
457 	ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
458 	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
459 	ieee80211_hw_set(hw, TIMING_BEACON_ONLY);
460 	ieee80211_hw_set(hw, CONNECTION_MONITOR);
461 	ieee80211_hw_set(hw, CHANCTX_STA_CSA);
462 	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
463 	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
464 	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
465 	ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
466 	ieee80211_hw_set(hw, DEAUTH_NEED_MGD_TX_PREP);
467 	ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
468 	ieee80211_hw_set(hw, BUFF_MMPDU_TXQ);
469 	ieee80211_hw_set(hw, STA_MMPDU_TXQ);
470 	ieee80211_hw_set(hw, TX_AMSDU);
471 	ieee80211_hw_set(hw, TX_FRAG_LIST);
472 
473 	if (iwl_mvm_has_tlc_offload(mvm)) {
474 		ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
475 		ieee80211_hw_set(hw, HAS_RATE_CONTROL);
476 	}
477 
478 	if (iwl_mvm_has_new_rx_api(mvm))
479 		ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
480 
481 	if (fw_has_capa(&mvm->fw->ucode_capa,
482 			IWL_UCODE_TLV_CAPA_STA_PM_NOTIF)) {
483 		ieee80211_hw_set(hw, AP_LINK_PS);
484 	} else if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) {
485 		/*
486 		 * we absolutely need this for the new TX API since that comes
487 		 * with many more queues than the current code can deal with
488 		 * for station powersave
489 		 */
490 		return -EINVAL;
491 	}
492 
493 	if (mvm->trans->num_rx_queues > 1)
494 		ieee80211_hw_set(hw, USES_RSS);
495 
496 	if (mvm->trans->max_skb_frags)
497 		hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG;
498 
499 	hw->queues = IEEE80211_MAX_QUEUES;
500 	hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
501 	hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
502 				    IEEE80211_RADIOTAP_MCS_HAVE_STBC;
503 	hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
504 		IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED;
505 
506 	hw->radiotap_timestamp.units_pos =
507 		IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US |
508 		IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ;
	/* 22 is the accuracy for CCK frames; it's better (only 8) for OFDM */
510 	hw->radiotap_timestamp.accuracy = 22;
511 
512 	if (!iwl_mvm_has_tlc_offload(mvm))
513 		hw->rate_control_algorithm = RS_NAME;
514 
515 	hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
516 	hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
517 	hw->max_tx_fragments = mvm->trans->max_skb_frags;
518 
519 	BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 6);
520 	memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers));
521 	hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers);
522 	hw->wiphy->cipher_suites = mvm->ciphers;
523 
524 	if (iwl_mvm_has_new_rx_api(mvm)) {
525 		mvm->ciphers[hw->wiphy->n_cipher_suites] =
526 			WLAN_CIPHER_SUITE_GCMP;
527 		hw->wiphy->n_cipher_suites++;
528 		mvm->ciphers[hw->wiphy->n_cipher_suites] =
529 			WLAN_CIPHER_SUITE_GCMP_256;
530 		hw->wiphy->n_cipher_suites++;
531 	}
532 
533 	/* Enable 11w if software crypto is not enabled (as the
534 	 * firmware will interpret some mgmt packets, so enabling it
535 	 * with software crypto isn't safe).
536 	 */
537 	if (!iwlwifi_mod_params.swcrypto) {
538 		ieee80211_hw_set(hw, MFP_CAPABLE);
539 		mvm->ciphers[hw->wiphy->n_cipher_suites] =
540 			WLAN_CIPHER_SUITE_AES_CMAC;
541 		hw->wiphy->n_cipher_suites++;
542 		if (iwl_mvm_has_new_rx_api(mvm)) {
543 			mvm->ciphers[hw->wiphy->n_cipher_suites] =
544 				WLAN_CIPHER_SUITE_BIP_GMAC_128;
545 			hw->wiphy->n_cipher_suites++;
546 			mvm->ciphers[hw->wiphy->n_cipher_suites] =
547 				WLAN_CIPHER_SUITE_BIP_GMAC_256;
548 			hw->wiphy->n_cipher_suites++;
549 		}
550 	}
551 
552 	/* currently FW API supports only one optional cipher scheme */
553 	if (mvm->fw->cs[0].cipher) {
554 		const struct iwl_fw_cipher_scheme *fwcs = &mvm->fw->cs[0];
555 		struct ieee80211_cipher_scheme *cs = &mvm->cs[0];
556 
557 		mvm->hw->n_cipher_schemes = 1;
558 
559 		cs->cipher = le32_to_cpu(fwcs->cipher);
560 		cs->iftype = BIT(NL80211_IFTYPE_STATION);
561 		cs->hdr_len = fwcs->hdr_len;
562 		cs->pn_len = fwcs->pn_len;
563 		cs->pn_off = fwcs->pn_off;
564 		cs->key_idx_off = fwcs->key_idx_off;
565 		cs->key_idx_mask = fwcs->key_idx_mask;
566 		cs->key_idx_shift = fwcs->key_idx_shift;
567 		cs->mic_len = fwcs->mic_len;
568 
569 		mvm->hw->cipher_schemes = mvm->cs;
570 		mvm->ciphers[hw->wiphy->n_cipher_suites] = cs->cipher;
571 		hw->wiphy->n_cipher_suites++;
572 	}
573 
574 	if (fw_has_capa(&mvm->fw->ucode_capa,
575 			IWL_UCODE_TLV_CAPA_FTM_CALIBRATED)) {
576 		wiphy_ext_feature_set(hw->wiphy,
577 				      NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER);
578 		hw->wiphy->pmsr_capa = &iwl_mvm_pmsr_capa;
579 	}
580 
581 	ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
582 	hw->wiphy->features |=
583 		NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
584 		NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
585 		NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
586 
587 	hw->sta_data_size = sizeof(struct iwl_mvm_sta);
588 	hw->vif_data_size = sizeof(struct iwl_mvm_vif);
589 	hw->chanctx_data_size = sizeof(u16);
590 	hw->txq_data_size = sizeof(struct iwl_mvm_txq);
591 
592 	hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
593 		BIT(NL80211_IFTYPE_P2P_CLIENT) |
594 		BIT(NL80211_IFTYPE_AP) |
595 		BIT(NL80211_IFTYPE_P2P_GO) |
596 		BIT(NL80211_IFTYPE_P2P_DEVICE) |
597 		BIT(NL80211_IFTYPE_ADHOC);
598 
599 	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
600 	hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR;
601 	if (iwl_mvm_is_lar_supported(mvm))
602 		hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
603 	else
604 		hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
605 					       REGULATORY_DISABLE_BEACON_HINTS;
606 
607 	hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
608 	hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
609 
610 	hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
611 	hw->wiphy->n_iface_combinations =
612 		ARRAY_SIZE(iwl_mvm_iface_combinations);
613 
614 	hw->wiphy->max_remain_on_channel_duration = 10000;
615 	hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
616 
617 	/* Extract MAC address */
618 	memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
619 	hw->wiphy->addresses = mvm->addresses;
620 	hw->wiphy->n_addresses = 1;
621 
622 	/* Extract additional MAC addresses if available */
623 	num_mac = (mvm->nvm_data->n_hw_addrs > 1) ?
624 		min(IWL_MVM_MAX_ADDRESSES, mvm->nvm_data->n_hw_addrs) : 1;
625 
626 	for (i = 1; i < num_mac; i++) {
627 		memcpy(mvm->addresses[i].addr, mvm->addresses[i-1].addr,
628 		       ETH_ALEN);
629 		mvm->addresses[i].addr[5]++;
630 		hw->wiphy->n_addresses++;
631 	}
632 
633 	iwl_mvm_reset_phy_ctxts(mvm);
634 
635 	hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm);
636 
637 	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
638 
639 	BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK);
640 	BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) ||
641 		     IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK));
642 
643 	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
644 		mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS;
645 	else
646 		mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS;
647 
648 	if (mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels)
649 		hw->wiphy->bands[NL80211_BAND_2GHZ] =
650 			&mvm->nvm_data->bands[NL80211_BAND_2GHZ];
651 	if (mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels) {
652 		hw->wiphy->bands[NL80211_BAND_5GHZ] =
653 			&mvm->nvm_data->bands[NL80211_BAND_5GHZ];
654 
655 		if (fw_has_capa(&mvm->fw->ucode_capa,
656 				IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
657 		    fw_has_api(&mvm->fw->ucode_capa,
658 			       IWL_UCODE_TLV_API_LQ_SS_PARAMS))
659 			hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap.cap |=
660 				IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
661 	}
662 
663 	hw->wiphy->hw_version = mvm->trans->hw_id;
664 
665 	if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
666 		hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
667 	else
668 		hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
669 
670 	hw->wiphy->max_sched_scan_reqs = 1;
671 	hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
672 	hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
	/* we create the 802.11 header and a zero-length SSID IE. */
674 	hw->wiphy->max_sched_scan_ie_len =
675 		SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
676 	hw->wiphy->max_sched_scan_plans = IWL_MAX_SCHED_SCAN_PLANS;
677 	hw->wiphy->max_sched_scan_plan_interval = U16_MAX;
678 
679 	/*
	 * the firmware uses a u8 for the number of iterations, but 0xff is
	 * reserved for an infinite loop, so the maximum number of iterations
	 * is actually 254.
682 	 */
683 	hw->wiphy->max_sched_scan_plan_iterations = 254;
684 
685 	hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
686 			       NL80211_FEATURE_LOW_PRIORITY_SCAN |
687 			       NL80211_FEATURE_P2P_GO_OPPPS |
688 			       NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
689 			       NL80211_FEATURE_DYNAMIC_SMPS |
690 			       NL80211_FEATURE_STATIC_SMPS |
691 			       NL80211_FEATURE_SUPPORTS_WMM_ADMISSION;
692 
693 	if (fw_has_capa(&mvm->fw->ucode_capa,
694 			IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT))
695 		hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION;
696 	if (fw_has_capa(&mvm->fw->ucode_capa,
697 			IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT))
698 		hw->wiphy->features |= NL80211_FEATURE_QUIET;
699 
700 	if (fw_has_capa(&mvm->fw->ucode_capa,
701 			IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
702 		hw->wiphy->features |=
703 			NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES;
704 
705 	if (fw_has_capa(&mvm->fw->ucode_capa,
706 			IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
707 		hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;
708 
709 	if (fw_has_api(&mvm->fw->ucode_capa,
710 		       IWL_UCODE_TLV_API_SCAN_TSF_REPORT)) {
711 		wiphy_ext_feature_set(hw->wiphy,
712 				      NL80211_EXT_FEATURE_SCAN_START_TIME);
713 		wiphy_ext_feature_set(hw->wiphy,
714 				      NL80211_EXT_FEATURE_BSS_PARENT_TSF);
715 		wiphy_ext_feature_set(hw->wiphy,
716 				      NL80211_EXT_FEATURE_SET_SCAN_DWELL);
717 	}
718 
719 	if (iwl_mvm_is_oce_supported(mvm)) {
720 		wiphy_ext_feature_set(hw->wiphy,
721 			NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP);
722 		wiphy_ext_feature_set(hw->wiphy,
723 			NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME);
724 		wiphy_ext_feature_set(hw->wiphy,
725 			NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION);
726 		wiphy_ext_feature_set(hw->wiphy,
727 			NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE);
728 	}
729 
730 	if (mvm->nvm_data->sku_cap_11ax_enable &&
731 	    !iwlwifi_mod_params.disable_11ax) {
732 		hw->wiphy->iftype_ext_capab = he_iftypes_ext_capa;
733 		hw->wiphy->num_iftype_ext_capab =
734 			ARRAY_SIZE(he_iftypes_ext_capa);
735 	}
736 
737 	mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
738 
739 #ifdef CONFIG_PM_SLEEP
740 	if (iwl_mvm_is_d0i3_supported(mvm) &&
741 	    device_can_wakeup(mvm->trans->dev)) {
742 		mvm->wowlan.flags = WIPHY_WOWLAN_ANY;
743 		hw->wiphy->wowlan = &mvm->wowlan;
744 	}
745 
746 	if ((unified || mvm->fw->img[IWL_UCODE_WOWLAN].num_sec) &&
747 	    mvm->trans->ops->d3_suspend &&
748 	    mvm->trans->ops->d3_resume &&
749 	    device_can_wakeup(mvm->trans->dev)) {
750 		mvm->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT |
751 				     WIPHY_WOWLAN_DISCONNECT |
752 				     WIPHY_WOWLAN_EAP_IDENTITY_REQ |
753 				     WIPHY_WOWLAN_RFKILL_RELEASE |
754 				     WIPHY_WOWLAN_NET_DETECT;
755 		if (!iwlwifi_mod_params.swcrypto)
756 			mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
757 					     WIPHY_WOWLAN_GTK_REKEY_FAILURE |
758 					     WIPHY_WOWLAN_4WAY_HANDSHAKE;
759 
760 		mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS;
761 		mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
762 		mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
763 		mvm->wowlan.max_nd_match_sets = IWL_SCAN_MAX_PROFILES;
764 		hw->wiphy->wowlan = &mvm->wowlan;
765 	}
766 #endif
767 
768 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
769 	/* assign default bcast filtering configuration */
770 	mvm->bcast_filters = iwl_mvm_default_bcast_filters;
771 #endif
772 
773 	ret = iwl_mvm_leds_init(mvm);
774 	if (ret)
775 		return ret;
776 
777 	if (fw_has_capa(&mvm->fw->ucode_capa,
778 			IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) {
779 		IWL_DEBUG_TDLS(mvm, "TDLS supported\n");
780 		hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
781 		ieee80211_hw_set(hw, TDLS_WIDER_BW);
782 	}
783 
784 	if (fw_has_capa(&mvm->fw->ucode_capa,
785 			IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) {
786 		IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n");
787 		hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
788 	}
789 
790 	hw->netdev_features |= mvm->cfg->features;
791 	if (!iwl_mvm_is_csum_supported(mvm)) {
792 		hw->netdev_features &= ~(IWL_TX_CSUM_NETIF_FLAGS |
793 					 NETIF_F_RXCSUM);
794 		/* We may support SW TX CSUM */
795 		if (IWL_MVM_SW_TX_CSUM_OFFLOAD)
796 			hw->netdev_features |= IWL_TX_CSUM_NETIF_FLAGS;
797 	}
798 
799 	if (mvm->cfg->vht_mu_mimo_supported)
800 		wiphy_ext_feature_set(hw->wiphy,
801 				      NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER);
802 
803 	ret = ieee80211_register_hw(mvm->hw);
	if (ret)
		iwl_mvm_leds_exit(mvm);
807 
808 	return ret;
809 }
810 
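/*
 * While the device is in D0i3, frames for the AP station are queued on
 * mvm->d0i3_tx and a wakeup is triggered; the D0i3 exit path dequeues and
 * transmits them.  Returns true if the frame was deferred here.
 */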
811 static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm,
812 			     struct ieee80211_sta *sta,
813 			     struct sk_buff *skb)
814 {
815 	struct iwl_mvm_sta *mvmsta;
816 	bool defer = false;
817 
818 	/*
819 	 * double check the IN_D0I3 flag both before and after
820 	 * taking the spinlock, in order to prevent taking
821 	 * the spinlock when not needed.
822 	 */
823 	if (likely(!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)))
824 		return false;
825 
826 	spin_lock(&mvm->d0i3_tx_lock);
827 	/*
828 	 * testing the flag again ensures the skb dequeue
829 	 * loop (on d0i3 exit) hasn't run yet.
830 	 */
831 	if (!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
832 		goto out;
833 
834 	mvmsta = iwl_mvm_sta_from_mac80211(sta);
835 	if (mvmsta->sta_id == IWL_MVM_INVALID_STA ||
836 	    mvmsta->sta_id != mvm->d0i3_ap_sta_id)
837 		goto out;
838 
839 	__skb_queue_tail(&mvm->d0i3_tx, skb);
840 
841 	/* trigger wakeup */
842 	iwl_mvm_ref(mvm, IWL_MVM_REF_TX);
843 	iwl_mvm_unref(mvm, IWL_MVM_REF_TX);
844 
845 	defer = true;
846 out:
847 	spin_unlock(&mvm->d0i3_tx_lock);
848 	return defer;
849 }
850 
851 static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
852 			   struct ieee80211_tx_control *control,
853 			   struct sk_buff *skb)
854 {
855 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
856 	struct ieee80211_sta *sta = control->sta;
857 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
858 	struct ieee80211_hdr *hdr = (void *)skb->data;
859 	bool offchannel = IEEE80211_SKB_CB(skb)->flags &
860 		IEEE80211_TX_CTL_TX_OFFCHAN;
861 
862 	if (iwl_mvm_is_radio_killed(mvm)) {
863 		IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n");
864 		goto drop;
865 	}
866 
867 	if (offchannel &&
868 	    !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) &&
869 	    !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
870 		goto drop;
871 
872 	/* treat non-bufferable MMPDUs on AP interfaces as broadcast */
873 	if ((info->control.vif->type == NL80211_IFTYPE_AP ||
874 	     info->control.vif->type == NL80211_IFTYPE_ADHOC) &&
875 	    ieee80211_is_mgmt(hdr->frame_control) &&
876 	    !ieee80211_is_bufferable_mmpdu(hdr->frame_control))
877 		sta = NULL;
878 
879 	/* If there is no sta, and it's not offchannel - send through AP */
880 	if (!sta && info->control.vif->type == NL80211_IFTYPE_STATION &&
881 	    !offchannel) {
882 		struct iwl_mvm_vif *mvmvif =
883 			iwl_mvm_vif_from_mac80211(info->control.vif);
884 		u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id);
885 
886 		if (ap_sta_id < IWL_MVM_STATION_COUNT) {
887 			/* mac80211 holds rcu read lock */
888 			sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]);
889 			if (IS_ERR_OR_NULL(sta))
890 				goto drop;
891 		}
892 	}
893 
894 	if (sta) {
895 		if (iwl_mvm_defer_tx(mvm, sta, skb))
896 			return;
897 		if (iwl_mvm_tx_skb(mvm, skb, sta))
898 			goto drop;
899 		return;
900 	}
901 
902 	if (iwl_mvm_tx_skb_non_sta(mvm, skb))
903 		goto drop;
904 	return;
905  drop:
906 	ieee80211_free_txskb(hw, skb);
907 }
908 
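/*
 * Drain the given mac80211 TXQ.  Concurrent callers are serialized through
 * mvmtxq->tx_request as described in the comment below, so only one thread
 * actually dequeues and transmits frames at a time.
 */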
909 void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
910 {
911 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
912 	struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
913 	struct sk_buff *skb = NULL;
914 
915 	/*
	 * No need for threads to wait here; they can leave all the
	 * work to the first taker.
918 	 *
919 	 * mvmtxq->tx_request logic:
920 	 *
921 	 * If 0, no one is currently TXing, set to 1 to indicate current thread
922 	 * will now start TX and other threads should quit.
923 	 *
924 	 * If 1, another thread is currently TXing, set to 2 to indicate to
925 	 * that thread that there was another request. Since that request may
926 	 * have raced with the check whether the queue is empty, the TXing
927 	 * thread should check the queue's status one more time before leaving.
928 	 * This check is done in order to not leave any TX hanging in the queue
929 	 * until the next TX invocation (which may not even happen).
930 	 *
931 	 * If 2, another thread is currently TXing, and it will already double
932 	 * check the queue, so do nothing.
933 	 */
934 	if (atomic_fetch_add_unless(&mvmtxq->tx_request, 1, 2))
935 		return;
936 
937 	rcu_read_lock();
938 	do {
939 		while (likely(!mvmtxq->stopped &&
940 			      (mvm->trans->system_pm_mode ==
941 			       IWL_PLAT_PM_MODE_DISABLED))) {
942 			skb = ieee80211_tx_dequeue(hw, txq);
943 
944 			if (!skb) {
945 				if (txq->sta)
946 					IWL_DEBUG_TX(mvm,
947 						     "TXQ of sta %pM tid %d is now empty\n",
948 						     txq->sta->addr,
949 						     txq->tid);
950 				break;
951 			}
952 
953 			if (!txq->sta)
954 				iwl_mvm_tx_skb_non_sta(mvm, skb);
955 			else
956 				iwl_mvm_tx_skb(mvm, skb, txq->sta);
957 		}
958 	} while (atomic_dec_return(&mvmtxq->tx_request));
959 	rcu_read_unlock();
960 }
961 
962 static void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw *hw,
963 				      struct ieee80211_txq *txq)
964 {
965 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
966 	struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
967 
968 	/*
969 	 * Please note that racing is handled very carefully here:
970 	 * mvmtxq->txq_id is updated during allocation, and mvmtxq->list is
971 	 * deleted afterwards.
972 	 * This means that if:
973 	 * mvmtxq->txq_id != INVALID_QUEUE && list_empty(&mvmtxq->list):
974 	 *	queue is allocated and we can TX.
975 	 * mvmtxq->txq_id != INVALID_QUEUE && !list_empty(&mvmtxq->list):
976 	 *	a race, should defer the frame.
977 	 * mvmtxq->txq_id == INVALID_QUEUE && list_empty(&mvmtxq->list):
978 	 *	need to allocate the queue and defer the frame.
979 	 * mvmtxq->txq_id == INVALID_QUEUE && !list_empty(&mvmtxq->list):
980 	 *	queue is already scheduled for allocation, no need to allocate,
981 	 *	should defer the frame.
982 	 */
983 
	/* If the queue is allocated, TX and return. */
985 	if (!txq->sta || mvmtxq->txq_id != IWL_MVM_INVALID_QUEUE) {
986 		/*
987 		 * Check that list is empty to avoid a race where txq_id is
988 		 * already updated, but the queue allocation work wasn't
989 		 * finished
990 		 */
991 		if (unlikely(txq->sta && !list_empty(&mvmtxq->list)))
992 			return;
993 
994 		iwl_mvm_mac_itxq_xmit(hw, txq);
995 		return;
996 	}
997 
998 	/* The list is being deleted only after the queue is fully allocated. */
999 	if (!list_empty(&mvmtxq->list))
1000 		return;
1001 
1002 	list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs);
1003 	schedule_work(&mvm->add_stream_wk);
1004 }
1005 
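/*
 * Collect a firmware debug trigger for a BA event, but only if the TID is
 * set in the trigger's TID bitmap.
 */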
1006 #define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...)		\
1007 	do {								\
1008 		if (!(le16_to_cpu(_tid_bm) & BIT(_tid)))		\
1009 			break;						\
1010 		iwl_fw_dbg_collect_trig(&(_mvm)->fwrt, _trig, _fmt);	\
1011 	} while (0)
1012 
1013 static void
1014 iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1015 			    struct ieee80211_sta *sta, u16 tid, u16 rx_ba_ssn,
1016 			    enum ieee80211_ampdu_mlme_action action)
1017 {
1018 	struct iwl_fw_dbg_trigger_tlv *trig;
1019 	struct iwl_fw_dbg_trigger_ba *ba_trig;
1020 
1021 	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
1022 				     FW_DBG_TRIGGER_BA);
1023 	if (!trig)
1024 		return;
1025 
1026 	ba_trig = (void *)trig->data;
1027 
1028 	switch (action) {
1029 	case IEEE80211_AMPDU_TX_OPERATIONAL: {
1030 		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1031 		struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
1032 
1033 		CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_start, tid,
1034 				 "TX AGG START: MAC %pM tid %d ssn %d\n",
1035 				 sta->addr, tid, tid_data->ssn);
1036 		break;
1037 		}
1038 	case IEEE80211_AMPDU_TX_STOP_CONT:
1039 		CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_stop, tid,
1040 				 "TX AGG STOP: MAC %pM tid %d\n",
1041 				 sta->addr, tid);
1042 		break;
1043 	case IEEE80211_AMPDU_RX_START:
1044 		CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_start, tid,
1045 				 "RX AGG START: MAC %pM tid %d ssn %d\n",
1046 				 sta->addr, tid, rx_ba_ssn);
1047 		break;
1048 	case IEEE80211_AMPDU_RX_STOP:
1049 		CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_stop, tid,
1050 				 "RX AGG STOP: MAC %pM tid %d\n",
1051 				 sta->addr, tid);
1052 		break;
1053 	default:
1054 		break;
1055 	}
1056 }
1057 
1058 static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
1059 				    struct ieee80211_vif *vif,
1060 				    struct ieee80211_ampdu_params *params)
1061 {
1062 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1063 	int ret;
1064 	bool tx_agg_ref = false;
1065 	struct ieee80211_sta *sta = params->sta;
1066 	enum ieee80211_ampdu_mlme_action action = params->action;
1067 	u16 tid = params->tid;
1068 	u16 *ssn = &params->ssn;
1069 	u16 buf_size = params->buf_size;
1070 	bool amsdu = params->amsdu;
1071 	u16 timeout = params->timeout;
1072 
1073 	IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
1074 		     sta->addr, tid, action);
1075 
1076 	if (!(mvm->nvm_data->sku_cap_11n_enable))
1077 		return -EACCES;
1078 
1079 	/* return from D0i3 before starting a new Tx aggregation */
1080 	switch (action) {
1081 	case IEEE80211_AMPDU_TX_START:
1082 	case IEEE80211_AMPDU_TX_STOP_CONT:
1083 	case IEEE80211_AMPDU_TX_STOP_FLUSH:
1084 	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
1085 	case IEEE80211_AMPDU_TX_OPERATIONAL:
1086 		/*
1087 		 * for tx start, wait synchronously until D0i3 exit to
1088 		 * get the correct sequence number for the tid.
1089 		 * additionally, some other ampdu actions use direct
1090 		 * target access, which is not handled automatically
1091 		 * by the trans layer (unlike commands), so wait for
1092 		 * d0i3 exit in these cases as well.
1093 		 */
1094 		ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_TX_AGG);
1095 		if (ret)
1096 			return ret;
1097 
1098 		tx_agg_ref = true;
1099 		break;
1100 	default:
1101 		break;
1102 	}
1103 
1104 	mutex_lock(&mvm->mutex);
1105 
1106 	switch (action) {
1107 	case IEEE80211_AMPDU_RX_START:
1108 		if (iwl_mvm_vif_from_mac80211(vif)->ap_sta_id ==
1109 				iwl_mvm_sta_from_mac80211(sta)->sta_id) {
1110 			struct iwl_mvm_vif *mvmvif;
1111 			u16 macid = iwl_mvm_vif_from_mac80211(vif)->id;
1112 			struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[macid];
1113 
1114 			mdata->opened_rx_ba_sessions = true;
1115 			mvmvif = iwl_mvm_vif_from_mac80211(vif);
1116 			cancel_delayed_work(&mvmvif->uapsd_nonagg_detected_wk);
1117 		}
1118 		if (!iwl_enable_rx_ampdu()) {
1119 			ret = -EINVAL;
1120 			break;
1121 		}
1122 		ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true, buf_size,
1123 					 timeout);
1124 		break;
1125 	case IEEE80211_AMPDU_RX_STOP:
1126 		ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, buf_size,
1127 					 timeout);
1128 		break;
1129 	case IEEE80211_AMPDU_TX_START:
1130 		if (!iwl_enable_tx_ampdu()) {
1131 			ret = -EINVAL;
1132 			break;
1133 		}
1134 		ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn);
1135 		break;
1136 	case IEEE80211_AMPDU_TX_STOP_CONT:
1137 		ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid);
1138 		break;
1139 	case IEEE80211_AMPDU_TX_STOP_FLUSH:
1140 	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
1141 		ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid);
1142 		break;
1143 	case IEEE80211_AMPDU_TX_OPERATIONAL:
1144 		ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid,
1145 					      buf_size, amsdu);
1146 		break;
1147 	default:
1148 		WARN_ON_ONCE(1);
1149 		ret = -EINVAL;
1150 		break;
1151 	}
1152 
1153 	if (!ret) {
1154 		u16 rx_ba_ssn = 0;
1155 
1156 		if (action == IEEE80211_AMPDU_RX_START)
1157 			rx_ba_ssn = *ssn;
1158 
1159 		iwl_mvm_ampdu_check_trigger(mvm, vif, sta, tid,
1160 					    rx_ba_ssn, action);
1161 	}
1162 	mutex_unlock(&mvm->mutex);
1163 
1164 	/*
1165 	 * If the tid is marked as started, we won't use it for offloaded
1166 	 * traffic on the next D0i3 entry. It's safe to unref.
1167 	 */
1168 	if (tx_agg_ref)
1169 		iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG);
1170 
1171 	return ret;
1172 }
1173 
1174 static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
1175 				     struct ieee80211_vif *vif)
1176 {
1177 	struct iwl_mvm *mvm = data;
1178 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1179 
1180 	mvmvif->uploaded = false;
1181 	mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
1182 
1183 	spin_lock_bh(&mvm->time_event_lock);
1184 	iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
1185 	spin_unlock_bh(&mvm->time_event_lock);
1186 
1187 	mvmvif->phy_ctxt = NULL;
1188 	memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data));
1189 	memset(&mvmvif->probe_resp_data, 0, sizeof(mvmvif->probe_resp_data));
1190 }
1191 
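/*
 * Bring the driver and mac80211 state back to a clean baseline before
 * reprogramming the device after a firmware restart: drop stale D0i3
 * references, stop the device and reset the scan, PHY-context, key and
 * BT-coex bookkeeping.
 */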
1192 static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
1193 {
	/* Clear the D3 reconfig flag; we only need it to avoid dumping a
	 * firmware coredump on reconfiguration, and we shouldn't do that
	 * on a D3->D0 transition.
1197 	 */
1198 	if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status)) {
1199 		mvm->fwrt.dump.desc = &iwl_dump_desc_assert;
1200 		iwl_fw_error_dump(&mvm->fwrt);
1201 	}
1202 
1203 	/* cleanup all stale references (scan, roc), but keep the
1204 	 * ucode_down ref until reconfig is complete
1205 	 */
1206 	iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN);
1207 
1208 	iwl_mvm_stop_device(mvm);
1209 
1210 	mvm->cur_aid = 0;
1211 
1212 	mvm->scan_status = 0;
1213 	mvm->ps_disabled = false;
1214 	mvm->calibrating = false;
1215 
1216 	/* just in case one was running */
1217 	iwl_mvm_cleanup_roc_te(mvm);
1218 	ieee80211_remain_on_channel_expired(mvm->hw);
1219 
1220 	iwl_mvm_ftm_restart(mvm);
1221 
1222 	/*
	 * Clean up all interfaces, even inactive ones, as some might have
	 * gone down during the HW restart.
1225 	 */
1226 	ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm);
1227 
1228 	mvm->p2p_device_vif = NULL;
1229 	mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
1230 
1231 	iwl_mvm_reset_phy_ctxts(mvm);
1232 	memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
1233 	memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
1234 	memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
1235 
1236 	ieee80211_wake_queues(mvm->hw);
1237 
1238 	/* clear any stale d0i3 state */
1239 	clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
1240 
1241 	mvm->vif_count = 0;
1242 	mvm->rx_ba_sessions = 0;
1243 	mvm->fwrt.dump.conf = FW_DBG_INVALID;
1244 	mvm->monitor_on = false;
1245 
1246 	/* keep statistics ticking */
1247 	iwl_mvm_accu_radio_stats(mvm);
1248 }
1249 
1250 int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
1251 {
1252 	int ret;
1253 
1254 	lockdep_assert_held(&mvm->mutex);
1255 
1256 	if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) {
1257 		/*
1258 		 * Now convert the HW_RESTART_REQUESTED flag to IN_HW_RESTART
1259 		 * so later code will - from now on - see that we're doing it.
1260 		 */
1261 		set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1262 		clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
1263 		/* Clean up some internal and mac80211 state on restart */
1264 		iwl_mvm_restart_cleanup(mvm);
1265 	} else {
1266 		/* Hold the reference to prevent runtime suspend while
		 * the start procedure runs. It's a bit confusing
		 * that the UCODE_DOWN reference is taken, but it just
		 * means "UCODE is not UP yet" (TODO: rename this
		 * reference).
1271 		 */
1272 		iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
1273 	}
1274 	ret = iwl_mvm_up(mvm);
1275 
1276 	iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_POST_INIT);
1277 
1278 	if (ret && test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1279 		/* Something went wrong - we need to finish some cleanup
1280 		 * that normally iwl_mvm_mac_restart_complete() below
1281 		 * would do.
1282 		 */
1283 		clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1284 #ifdef CONFIG_PM
1285 		iwl_mvm_d0i3_enable_tx(mvm, NULL);
1286 #endif
1287 	}
1288 
1289 	return ret;
1290 }
1291 
1292 static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
1293 {
1294 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1295 	int ret;
1296 
1297 	/* Some hw restart cleanups must not hold the mutex */
1298 	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1299 		/*
1300 		 * Make sure we are out of d0i3. This is needed
1301 		 * to make sure the reference accounting is correct
1302 		 * (and there is no stale d0i3_exit_work).
1303 		 */
1304 		wait_event_timeout(mvm->d0i3_exit_waitq,
1305 				   !test_bit(IWL_MVM_STATUS_IN_D0I3,
1306 					     &mvm->status),
1307 				   HZ);
1308 	}
1309 
1310 	mutex_lock(&mvm->mutex);
1311 	ret = __iwl_mvm_mac_start(mvm);
1312 	mutex_unlock(&mvm->mutex);
1313 
1314 	return ret;
1315 }
1316 
1317 static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
1318 {
1319 	int ret;
1320 
1321 	mutex_lock(&mvm->mutex);
1322 
1323 	clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1324 #ifdef CONFIG_PM
1325 	iwl_mvm_d0i3_enable_tx(mvm, NULL);
1326 #endif
1327 	ret = iwl_mvm_update_quotas(mvm, true, NULL);
1328 	if (ret)
1329 		IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
1330 			ret);
1331 
1332 	/* allow transport/FW low power modes */
1333 	iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
1334 
1335 	iwl_mvm_send_recovery_cmd(mvm, ERROR_RECOVERY_END_OF_RECOVERY);
1336 
1337 	/*
1338 	 * If we have TDLS peers, remove them. We don't know the last seqno/PN
1339 	 * of packets the FW sent out, so we must reconnect.
1340 	 */
1341 	iwl_mvm_teardown_tdls_peers(mvm);
1342 
1343 	mutex_unlock(&mvm->mutex);
1344 }
1345 
1346 static void iwl_mvm_resume_complete(struct iwl_mvm *mvm)
1347 {
1348 	if (iwl_mvm_is_d0i3_supported(mvm) &&
1349 	    iwl_mvm_enter_d0i3_on_suspend(mvm))
1350 		WARN_ONCE(!wait_event_timeout(mvm->d0i3_exit_waitq,
1351 					      !test_bit(IWL_MVM_STATUS_IN_D0I3,
1352 							&mvm->status),
1353 					      HZ),
1354 			  "D0i3 exit on resume timed out\n");
1355 }
1356 
1357 static void
1358 iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw,
1359 			      enum ieee80211_reconfig_type reconfig_type)
1360 {
1361 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1362 
1363 	switch (reconfig_type) {
1364 	case IEEE80211_RECONFIG_TYPE_RESTART:
1365 		iwl_mvm_restart_complete(mvm);
1366 		break;
1367 	case IEEE80211_RECONFIG_TYPE_SUSPEND:
1368 		iwl_mvm_resume_complete(mvm);
1369 		break;
1370 	}
1371 }
1372 
1373 void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
1374 {
1375 	lockdep_assert_held(&mvm->mutex);
1376 
	/* The firmware counters are reset now; we shouldn't keep partially
	 * tracked data, so also clear the accumulated radio statistics.
1379 	 */
1380 	memset(&mvm->accu_radio_stats, 0, sizeof(mvm->accu_radio_stats));
1381 
1382 	/* async_handlers_wk is now blocked */
1383 
1384 	/*
1385 	 * The work item could be running or queued if the
1386 	 * ROC time event stops just as we get here.
1387 	 */
1388 	flush_work(&mvm->roc_done_wk);
1389 
1390 	iwl_mvm_stop_device(mvm);
1391 
1392 	iwl_mvm_async_handlers_purge(mvm);
1393 	/* async_handlers_list is empty and will stay empty: HW is stopped */
1394 
1395 	/* the fw is stopped, the aux sta is dead: clean up driver state */
1396 	iwl_mvm_del_aux_sta(mvm);
1397 
1398 	/*
	 * Clear the IN_HW_RESTART and HW_RESTART_REQUESTED flags when
	 * stopping the hw, as restart_complete() won't be called in this
	 * case and mac80211 won't execute the restart.
	 * But make sure to clean up interfaces that went down before or
	 * while the HW restart was requested.
1404 	 */
1405 	if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
1406 	    test_and_clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
1407 			       &mvm->status))
1408 		ieee80211_iterate_interfaces(mvm->hw, 0,
1409 					     iwl_mvm_cleanup_iterator, mvm);
1410 
1411 	/* We shouldn't have any UIDs still set.  Loop over all the UIDs to
1412 	 * make sure there's nothing left there and warn if any is found.
1413 	 */
1414 	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
1415 		int i;
1416 
1417 		for (i = 0; i < mvm->max_scans; i++) {
1418 			if (WARN_ONCE(mvm->scan_uid_status[i],
1419 				      "UMAC scan UID %d status was not cleaned\n",
1420 				      i))
1421 				mvm->scan_uid_status[i] = 0;
1422 		}
1423 	}
1424 }
1425 
1426 static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
1427 {
1428 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1429 
1430 	flush_work(&mvm->d0i3_exit_work);
1431 	flush_work(&mvm->async_handlers_wk);
1432 	flush_work(&mvm->add_stream_wk);
1433 
1434 	/*
1435 	 * Lock and clear the firmware running bit here already, so that
1436 	 * new commands coming in elsewhere, e.g. from debugfs, will not
1437 	 * be able to proceed. This is important here because one of those
1438 	 * debugfs files causes the firmware dump to be triggered, and if we
1439 	 * don't stop debugfs accesses before canceling that it could be
1440 	 * retriggered after we flush it but before we've cleared the bit.
1441 	 */
1442 	clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
1443 
1444 	iwl_fw_cancel_dump(&mvm->fwrt);
1445 	cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork);
1446 	cancel_delayed_work_sync(&mvm->scan_timeout_dwork);
1447 	iwl_fw_free_dump_desc(&mvm->fwrt);
1448 
1449 	mutex_lock(&mvm->mutex);
1450 	__iwl_mvm_mac_stop(mvm);
1451 	mutex_unlock(&mvm->mutex);
1452 
1453 	/*
	 * The worker might have been waiting for the mutex; let it run and
1455 	 * discover that its list is now empty.
1456 	 */
1457 	cancel_work_sync(&mvm->async_handlers_wk);
1458 }
1459 
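/* Return the first PHY context with no references, or NULL if all are in
 * use.
 */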
1460 static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
1461 {
1462 	u16 i;
1463 
1464 	lockdep_assert_held(&mvm->mutex);
1465 
1466 	for (i = 0; i < NUM_PHY_CTX; i++)
1467 		if (!mvm->phy_ctxts[i].ref)
1468 			return &mvm->phy_ctxts[i];
1469 
1470 	IWL_ERR(mvm, "No available PHY context\n");
1471 	return NULL;
1472 }
1473 
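/*
 * Send REDUCE_TX_POWER_CMD to limit the TX power of the given vif's MAC
 * context.  The command size depends on the firmware API/capability bits,
 * and the requested tx_power is encoded as 8 * tx_power in the command
 * (IWL_DEFAULT_MAX_TX_POWER maps to IWL_DEV_MAX_TX_POWER).
 */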
1474 static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1475 				s16 tx_power)
1476 {
1477 	int len;
1478 	union {
1479 		struct iwl_dev_tx_power_cmd v5;
1480 		struct iwl_dev_tx_power_cmd_v4 v4;
1481 	} cmd = {
1482 		.v5.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
1483 		.v5.v3.mac_context_id =
1484 			cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
1485 		.v5.v3.pwr_restriction = cpu_to_le16(8 * tx_power),
1486 	};
1487 
1488 	if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
1489 		cmd.v5.v3.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
1490 
1491 	if (fw_has_api(&mvm->fw->ucode_capa,
1492 		       IWL_UCODE_TLV_API_REDUCE_TX_POWER))
1493 		len = sizeof(cmd.v5);
1494 	else if (fw_has_capa(&mvm->fw->ucode_capa,
1495 			     IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
1496 		len = sizeof(cmd.v4);
1497 	else
1498 		len = sizeof(cmd.v4.v3);
1499 
1500 	return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
1501 }
1502 
1503 static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
1504 				     struct ieee80211_vif *vif)
1505 {
1506 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1507 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1508 	int ret;
1509 
1510 	mvmvif->mvm = mvm;
1511 	RCU_INIT_POINTER(mvmvif->probe_resp_data, NULL);
1512 
1513 	/*
1514 	 * make sure D0i3 exit is completed, otherwise a target access
1515 	 * during tx queue configuration could be done when still in
1516 	 * D0i3 state.
1517 	 */
1518 	ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_ADD_IF);
1519 	if (ret)
1520 		return ret;
1521 
1522 	/*
1523 	 * Not much to do here. The stack will not allow interface
1524 	 * types or combinations that we didn't advertise, so we
1525 	 * don't really have to check the types.
1526 	 */
1527 
1528 	mutex_lock(&mvm->mutex);
1529 
1530 	/* make sure that beacon statistics don't go backwards with FW reset */
1531 	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1532 		mvmvif->beacon_stats.accu_num_beacons +=
1533 			mvmvif->beacon_stats.num_beacons;
1534 
1535 	/* Allocate resources for the MAC context, and add it to the fw  */
1536 	ret = iwl_mvm_mac_ctxt_init(mvm, vif);
1537 	if (ret)
1538 		goto out_unlock;
1539 
1540 	rcu_assign_pointer(mvm->vif_id_to_mac[mvmvif->id], vif);
1541 
1542 	/* Counting number of interfaces is needed for legacy PM */
1543 	if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
1544 		mvm->vif_count++;
1545 
1546 	/*
1547 	 * The AP binding flow can be done only after the beacon
1548 	 * template is configured (which happens only in the mac80211
1549 	 * start_ap() flow), and adding the broadcast station can happen
1550 	 * only after the binding.
1551 	 * In addition, since modifying the MAC before adding a bcast
	 * station is not allowed by the FW, delay adding the MAC context
	 * until the point where we can also add the bcast station.
1554 	 * In short: there's not much we can do at this point, other than
1555 	 * allocating resources :)
1556 	 */
1557 	if (vif->type == NL80211_IFTYPE_AP ||
1558 	    vif->type == NL80211_IFTYPE_ADHOC) {
1559 		ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
1560 		if (ret) {
1561 			IWL_ERR(mvm, "Failed to allocate bcast sta\n");
1562 			goto out_release;
1563 		}
1564 
1565 		/*
		 * The only queue for this station is the mcast queue,
		 * which shouldn't be in the TFD mask anyway.
1568 		 */
1569 		ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->mcast_sta,
1570 					       0, vif->type,
1571 					       IWL_STA_MULTICAST);
1572 		if (ret)
1573 			goto out_release;
1574 
1575 		iwl_mvm_vif_dbgfs_register(mvm, vif);
1576 		goto out_unlock;
1577 	}
1578 
1579 	mvmvif->features |= hw->netdev_features;
1580 
1581 	ret = iwl_mvm_mac_ctxt_add(mvm, vif);
1582 	if (ret)
1583 		goto out_release;
1584 
1585 	ret = iwl_mvm_power_update_mac(mvm);
1586 	if (ret)
1587 		goto out_remove_mac;
1588 
1589 	/* beacon filtering */
1590 	ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
1591 	if (ret)
1592 		goto out_remove_mac;
1593 
1594 	if (!mvm->bf_allowed_vif &&
1595 	    vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
1596 		mvm->bf_allowed_vif = mvmvif;
1597 		vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
1598 				     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
1599 	}
1600 
1601 	/*
1602 	 * P2P_DEVICE interface does not have a channel context assigned to it,
1603 	 * so a dedicated PHY context is allocated to it and the corresponding
1604 	 * MAC context is bound to it at this stage.
1605 	 */
1606 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1607 
1608 		mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
1609 		if (!mvmvif->phy_ctxt) {
1610 			ret = -ENOSPC;
1611 			goto out_free_bf;
1612 		}
1613 
1614 		iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
1615 		ret = iwl_mvm_binding_add_vif(mvm, vif);
1616 		if (ret)
1617 			goto out_unref_phy;
1618 
1619 		ret = iwl_mvm_add_p2p_bcast_sta(mvm, vif);
1620 		if (ret)
1621 			goto out_unbind;
1622 
		/* Save a pointer to the p2p device vif, so it can later be
		 * used to update the p2p device MAC when a GO is
		 * started/stopped.
		 */
1625 		mvm->p2p_device_vif = vif;
1626 	}
1627 
1628 	iwl_mvm_tcm_add_vif(mvm, vif);
1629 
1630 	if (vif->type == NL80211_IFTYPE_MONITOR)
1631 		mvm->monitor_on = true;
1632 
1633 	iwl_mvm_vif_dbgfs_register(mvm, vif);
1634 	goto out_unlock;
1635 
1636  out_unbind:
1637 	iwl_mvm_binding_remove_vif(mvm, vif);
1638  out_unref_phy:
1639 	iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
1640  out_free_bf:
1641 	if (mvm->bf_allowed_vif == mvmvif) {
1642 		mvm->bf_allowed_vif = NULL;
1643 		vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
1644 				       IEEE80211_VIF_SUPPORTS_CQM_RSSI);
1645 	}
1646  out_remove_mac:
1647 	mvmvif->phy_ctxt = NULL;
1648 	iwl_mvm_mac_ctxt_remove(mvm, vif);
1649  out_release:
1650 	if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
1651 		mvm->vif_count--;
1652  out_unlock:
1653 	mutex_unlock(&mvm->mutex);
1654 
1655 	iwl_mvm_unref(mvm, IWL_MVM_REF_ADD_IF);
1656 
1657 	return ret;
1658 }
1659 
1660 static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
1661 					struct ieee80211_vif *vif)
1662 {
1663 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1664 		/*
1665 		 * Flush the ROC worker which will flush the OFFCHANNEL queue.
1666 		 * We assume here that all the packets sent to the OFFCHANNEL
1667 		 * queue are sent in ROC session.
1668 		 */
1669 		flush_work(&mvm->roc_done_wk);
1670 	}
1671 }
1672 
1673 static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
1674 					 struct ieee80211_vif *vif)
1675 {
1676 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1677 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1678 	struct iwl_probe_resp_data *probe_data;
1679 
1680 	iwl_mvm_prepare_mac_removal(mvm, vif);
1681 
1682 	if (!(vif->type == NL80211_IFTYPE_AP ||
1683 	      vif->type == NL80211_IFTYPE_ADHOC))
1684 		iwl_mvm_tcm_rm_vif(mvm, vif);
1685 
1686 	mutex_lock(&mvm->mutex);
1687 
1688 	probe_data = rcu_dereference_protected(mvmvif->probe_resp_data,
1689 					       lockdep_is_held(&mvm->mutex));
1690 	RCU_INIT_POINTER(mvmvif->probe_resp_data, NULL);
1691 	if (probe_data)
1692 		kfree_rcu(probe_data, rcu_head);
1693 
1694 	if (mvm->bf_allowed_vif == mvmvif) {
1695 		mvm->bf_allowed_vif = NULL;
1696 		vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
1697 				       IEEE80211_VIF_SUPPORTS_CQM_RSSI);
1698 	}
1699 
1700 	if (vif->bss_conf.ftm_responder)
1701 		memset(&mvm->ftm_resp_stats, 0, sizeof(mvm->ftm_resp_stats));
1702 
1703 	iwl_mvm_vif_dbgfs_clean(mvm, vif);
1704 
1705 	/*
	 * For AP/GO interfaces, the teardown of the resources allocated to
	 * the interface is handled as part of the stop_ap flow.
1708 	 */
1709 	if (vif->type == NL80211_IFTYPE_AP ||
1710 	    vif->type == NL80211_IFTYPE_ADHOC) {
1711 #ifdef CONFIG_NL80211_TESTMODE
1712 		if (vif == mvm->noa_vif) {
1713 			mvm->noa_vif = NULL;
1714 			mvm->noa_duration = 0;
1715 		}
1716 #endif
1717 		iwl_mvm_dealloc_int_sta(mvm, &mvmvif->mcast_sta);
1718 		iwl_mvm_dealloc_bcast_sta(mvm, vif);
1719 		goto out_release;
1720 	}
1721 
1722 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1723 		mvm->p2p_device_vif = NULL;
1724 		iwl_mvm_rm_p2p_bcast_sta(mvm, vif);
1725 		iwl_mvm_binding_remove_vif(mvm, vif);
1726 		iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
1727 		mvmvif->phy_ctxt = NULL;
1728 	}
1729 
1730 	if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE)
1731 		mvm->vif_count--;
1732 
1733 	iwl_mvm_power_update_mac(mvm);
1734 	iwl_mvm_mac_ctxt_remove(mvm, vif);
1735 
1736 	RCU_INIT_POINTER(mvm->vif_id_to_mac[mvmvif->id], NULL);
1737 
1738 	if (vif->type == NL80211_IFTYPE_MONITOR)
1739 		mvm->monitor_on = false;
1740 
1741 out_release:
1742 	mutex_unlock(&mvm->mutex);
1743 }
1744 
1745 static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed)
1746 {
1747 	return 0;
1748 }
1749 
1750 struct iwl_mvm_mc_iter_data {
1751 	struct iwl_mvm *mvm;
1752 	int port_id;
1753 };
1754 
1755 static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
1756 				      struct ieee80211_vif *vif)
1757 {
1758 	struct iwl_mvm_mc_iter_data *data = _data;
1759 	struct iwl_mvm *mvm = data->mvm;
1760 	struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd;
1761 	struct iwl_host_cmd hcmd = {
1762 		.id = MCAST_FILTER_CMD,
1763 		.flags = CMD_ASYNC,
1764 		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
1765 	};
1766 	int ret, len;
1767 
1768 	/* if we don't have free ports, mcast frames will be dropped */
1769 	if (WARN_ON_ONCE(data->port_id >= MAX_PORT_ID_NUM))
1770 		return;
1771 
1772 	if (vif->type != NL80211_IFTYPE_STATION ||
1773 	    !vif->bss_conf.assoc)
1774 		return;
1775 
1776 	cmd->port_id = data->port_id++;
1777 	memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
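	/* the command is variable-length; round it up to a dword boundary */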
1778 	len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
1779 
1780 	hcmd.len[0] = len;
1781 	hcmd.data[0] = cmd;
1782 
1783 	ret = iwl_mvm_send_cmd(mvm, &hcmd);
1784 	if (ret)
1785 		IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
1786 }
1787 
1788 static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
1789 {
1790 	struct iwl_mvm_mc_iter_data iter_data = {
1791 		.mvm = mvm,
1792 	};
1793 
1794 	lockdep_assert_held(&mvm->mutex);
1795 
1796 	if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
1797 		return;
1798 
1799 	ieee80211_iterate_active_interfaces_atomic(
1800 		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1801 		iwl_mvm_mc_iface_iterator, &iter_data);
1802 }
1803 
1804 static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw,
1805 				     struct netdev_hw_addr_list *mc_list)
1806 {
1807 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1808 	struct iwl_mcast_filter_cmd *cmd;
1809 	struct netdev_hw_addr *addr;
1810 	int addr_count;
1811 	bool pass_all;
1812 	int len;
1813 
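	/*
	 * The value we return here is handed back to us by mac80211 as the
	 * 'multicast' argument of iwl_mvm_configure_filter(), so the
	 * allocated command is passed around as an opaque cookie.
	 */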
1814 	addr_count = netdev_hw_addr_list_count(mc_list);
1815 	pass_all = addr_count > MAX_MCAST_FILTERING_ADDRESSES ||
1816 		   IWL_MVM_FW_MCAST_FILTER_PASS_ALL;
1817 	if (pass_all)
1818 		addr_count = 0;
1819 
1820 	len = roundup(sizeof(*cmd) + addr_count * ETH_ALEN, 4);
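	/* may be called in atomic context, hence GFP_ATOMIC */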
1821 	cmd = kzalloc(len, GFP_ATOMIC);
1822 	if (!cmd)
1823 		return 0;
1824 
1825 	if (pass_all) {
1826 		cmd->pass_all = 1;
1827 		return (u64)(unsigned long)cmd;
1828 	}
1829 
1830 	netdev_hw_addr_list_for_each(addr, mc_list) {
1831 		IWL_DEBUG_MAC80211(mvm, "mcast addr (%d): %pM\n",
1832 				   cmd->count, addr->addr);
1833 		memcpy(&cmd->addr_list[cmd->count * ETH_ALEN],
1834 		       addr->addr, ETH_ALEN);
1835 		cmd->count++;
1836 	}
1837 
1838 	return (u64)(unsigned long)cmd;
1839 }
1840 
1841 static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
1842 				     unsigned int changed_flags,
1843 				     unsigned int *total_flags,
1844 				     u64 multicast)
1845 {
1846 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1847 	struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast;
1848 
1849 	mutex_lock(&mvm->mutex);
1850 
1851 	/* replace previous configuration */
1852 	kfree(mvm->mcast_filter_cmd);
1853 	mvm->mcast_filter_cmd = cmd;
1854 
1855 	if (!cmd)
1856 		goto out;
1857 
1858 	if (changed_flags & FIF_ALLMULTI)
1859 		cmd->pass_all = !!(*total_flags & FIF_ALLMULTI);
1860 
1861 	if (cmd->pass_all)
1862 		cmd->count = 0;
1863 
1864 	iwl_mvm_recalc_multicast(mvm);
1865 out:
1866 	mutex_unlock(&mvm->mutex);
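	/* report back that none of the requested FIF_* flags are handled */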
1867 	*total_flags = 0;
1868 }
1869 
1870 static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw,
1871 					struct ieee80211_vif *vif,
1872 					unsigned int filter_flags,
1873 					unsigned int changed_flags)
1874 {
1875 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1876 
	/* We only support filtering of probe requests */
1878 	if (!(changed_flags & FIF_PROBE_REQ))
1879 		return;
1880 
1881 	/* Supported only for p2p client interfaces */
1882 	if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc ||
1883 	    !vif->p2p)
1884 		return;
1885 
1886 	mutex_lock(&mvm->mutex);
1887 	iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
1888 	mutex_unlock(&mvm->mutex);
1889 }
1890 
1891 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
1892 struct iwl_bcast_iter_data {
1893 	struct iwl_mvm *mvm;
1894 	struct iwl_bcast_filter_cmd *cmd;
1895 	u8 current_filter;
1896 };
1897 
1898 static void
1899 iwl_mvm_set_bcast_filter(struct ieee80211_vif *vif,
1900 			 const struct iwl_fw_bcast_filter *in_filter,
1901 			 struct iwl_fw_bcast_filter *out_filter)
1902 {
1903 	struct iwl_fw_bcast_filter_attr *attr;
1904 	int i;
1905 
1906 	memcpy(out_filter, in_filter, sizeof(*out_filter));
1907 
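	/*
	 * Attributes whose reserved1 field carries a BC_FILTER_MAGIC_* value
	 * are placeholders: they are filled here with per-vif data (the ARP
	 * address or the vif MAC address), and reserved1 is cleared before
	 * the command is sent to the firmware.
	 */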
1908 	for (i = 0; i < ARRAY_SIZE(out_filter->attrs); i++) {
1909 		attr = &out_filter->attrs[i];
1910 
1911 		if (!attr->mask)
1912 			break;
1913 
1914 		switch (attr->reserved1) {
1915 		case cpu_to_le16(BC_FILTER_MAGIC_IP):
1916 			if (vif->bss_conf.arp_addr_cnt != 1) {
1917 				attr->mask = 0;
1918 				continue;
1919 			}
1920 
1921 			attr->val = vif->bss_conf.arp_addr_list[0];
1922 			break;
1923 		case cpu_to_le16(BC_FILTER_MAGIC_MAC):
1924 			attr->val = *(__be32 *)&vif->addr[2];
1925 			break;
1926 		default:
1927 			break;
1928 		}
1929 		attr->reserved1 = 0;
1930 		out_filter->num_attrs++;
1931 	}
1932 }
1933 
1934 static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac,
1935 					  struct ieee80211_vif *vif)
1936 {
1937 	struct iwl_bcast_iter_data *data = _data;
1938 	struct iwl_mvm *mvm = data->mvm;
1939 	struct iwl_bcast_filter_cmd *cmd = data->cmd;
1940 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1941 	struct iwl_fw_bcast_mac *bcast_mac;
1942 	int i;
1943 
1944 	if (WARN_ON(mvmvif->id >= ARRAY_SIZE(cmd->macs)))
1945 		return;
1946 
1947 	bcast_mac = &cmd->macs[mvmvif->id];
1948 
1949 	/*
	 * Enable filtering only for associated stations; P2P clients are
	 * excluded.
1952 	 */
1953 	if (vif->type != NL80211_IFTYPE_STATION || vif->p2p ||
1954 	    !vif->bss_conf.assoc)
1955 		return;
1956 
1957 	bcast_mac->default_discard = 1;
1958 
1959 	/* copy all configured filters */
1960 	for (i = 0; mvm->bcast_filters[i].attrs[0].mask; i++) {
1961 		/*
		 * Make sure we don't exceed our filter limit.
		 * If there is still a valid filter to be configured,
1964 		 * be on the safe side and just allow bcast for this mac.
1965 		 */
1966 		if (WARN_ON_ONCE(data->current_filter >=
1967 				 ARRAY_SIZE(cmd->filters))) {
1968 			bcast_mac->default_discard = 0;
1969 			bcast_mac->attached_filters = 0;
1970 			break;
1971 		}
1972 
1973 		iwl_mvm_set_bcast_filter(vif,
1974 					 &mvm->bcast_filters[i],
1975 					 &cmd->filters[data->current_filter]);
1976 
1977 		/* skip current filter if it contains no attributes */
1978 		if (!cmd->filters[data->current_filter].num_attrs)
1979 			continue;
1980 
1981 		/* attach the filter to current mac */
1982 		bcast_mac->attached_filters |=
1983 				cpu_to_le16(BIT(data->current_filter));
1984 
1985 		data->current_filter++;
1986 	}
1987 }
1988 
1989 bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
1990 				    struct iwl_bcast_filter_cmd *cmd)
1991 {
1992 	struct iwl_bcast_iter_data iter_data = {
1993 		.mvm = mvm,
1994 		.cmd = cmd,
1995 	};
1996 
1997 	if (IWL_MVM_FW_BCAST_FILTER_PASS_ALL)
1998 		return false;
1999 
2000 	memset(cmd, 0, sizeof(*cmd));
2001 	cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters);
2002 	cmd->max_macs = ARRAY_SIZE(cmd->macs);
2003 
2004 #ifdef CONFIG_IWLWIFI_DEBUGFS
2005 	/* use debugfs filters/macs if override is configured */
2006 	if (mvm->dbgfs_bcast_filtering.override) {
2007 		memcpy(cmd->filters, &mvm->dbgfs_bcast_filtering.cmd.filters,
2008 		       sizeof(cmd->filters));
2009 		memcpy(cmd->macs, &mvm->dbgfs_bcast_filtering.cmd.macs,
2010 		       sizeof(cmd->macs));
2011 		return true;
2012 	}
2013 #endif
2014 
2015 	/* if no filters are configured, do nothing */
2016 	if (!mvm->bcast_filters)
2017 		return false;
2018 
2019 	/* configure and attach these filters for each associated sta vif */
2020 	ieee80211_iterate_active_interfaces(
2021 		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
2022 		iwl_mvm_bcast_filter_iterator, &iter_data);
2023 
2024 	return true;
2025 }
2026 
2027 static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm)
2028 {
2029 	struct iwl_bcast_filter_cmd cmd;
2030 
2031 	if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
2032 		return 0;
2033 
2034 	if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
2035 		return 0;
2036 
2037 	return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
2038 				    sizeof(cmd), &cmd);
2039 }
2040 #else
2041 static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm)
2042 {
2043 	return 0;
2044 }
2045 #endif
2046 
2047 static int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm,
2048 				    struct ieee80211_vif *vif)
2049 {
2050 	struct iwl_mu_group_mgmt_cmd cmd = {};
2051 
2052 	memcpy(cmd.membership_status, vif->bss_conf.mu_group.membership,
2053 	       WLAN_MEMBERSHIP_LEN);
2054 	memcpy(cmd.user_position, vif->bss_conf.mu_group.position,
2055 	       WLAN_USER_POSITION_LEN);
2056 
2057 	return iwl_mvm_send_cmd_pdu(mvm,
2058 				    WIDE_ID(DATA_PATH_GROUP,
2059 					    UPDATE_MU_GROUPS_CMD),
2060 				    0, sizeof(cmd), &cmd);
2061 }
2062 
2063 static void iwl_mvm_mu_mimo_iface_iterator(void *_data, u8 *mac,
2064 					   struct ieee80211_vif *vif)
2065 {
2066 	if (vif->mu_mimo_owner) {
2067 		struct iwl_mu_group_mgmt_notif *notif = _data;
2068 
2069 		/*
2070 		 * MU-MIMO Group Id action frame is little endian. We treat
2071 		 * the data received from firmware as if it came from the
2072 		 * action frame, so no conversion is needed.
2073 		 */
2074 		ieee80211_update_mu_groups(vif,
2075 					   (u8 *)&notif->membership_status,
2076 					   (u8 *)&notif->user_position);
2077 	}
2078 }
2079 
2080 void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm,
2081 			       struct iwl_rx_cmd_buffer *rxb)
2082 {
2083 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
2084 	struct iwl_mu_group_mgmt_notif *notif = (void *)pkt->data;
2085 
2086 	ieee80211_iterate_active_interfaces_atomic(
2087 			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
2088 			iwl_mvm_mu_mimo_iface_iterator, notif);
2089 }
2090 
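/*
 * Extract a single PPE threshold value from the PPE Thresholds field.
 * Values are IEEE80211_PPE_THRES_INFO_PPET_SIZE bits wide and packed back to
 * back after the 7-bit header, so a value may straddle a byte boundary; in
 * that case the bits of two adjacent bytes are combined.
 */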
2091 static u8 iwl_mvm_he_get_ppe_val(u8 *ppe, u8 ppe_pos_bit)
2092 {
2093 	u8 byte_num = ppe_pos_bit / 8;
2094 	u8 bit_num = ppe_pos_bit % 8;
2095 	u8 residue_bits;
2096 	u8 res;
2097 
2098 	if (bit_num <= 5)
2099 		return (ppe[byte_num] >> bit_num) &
2100 		       (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE) - 1);
2101 
2102 	/*
	 * If bit_num > 5, we have to combine bits with the next byte.
	 * Calculate how many bits we need to take from the current byte
	 * (called here "residue_bits"), and add them to the bits from the
	 * next byte.
2106 	 */
2107 
2108 	residue_bits = 8 - bit_num;
2109 
2110 	res = (ppe[byte_num + 1] &
2111 	       (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE - residue_bits) - 1)) <<
2112 	      residue_bits;
2113 	res += (ppe[byte_num] >> bit_num) & (BIT(residue_bits) - 1);
2114 
2115 	return res;
2116 }
2117 
2118 static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
2119 			       struct ieee80211_vif *vif, u8 sta_id)
2120 {
2121 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2122 	struct iwl_he_sta_context_cmd sta_ctxt_cmd = {
2123 		.sta_id = sta_id,
2124 		.tid_limit = IWL_MAX_TID_COUNT,
2125 		.bss_color = vif->bss_conf.bss_color,
2126 		.htc_trig_based_pkt_ext = vif->bss_conf.htc_trig_based_pkt_ext,
2127 		.frame_time_rts_th =
2128 			cpu_to_le16(vif->bss_conf.frame_time_rts_th),
2129 	};
2130 	struct ieee80211_sta *sta;
2131 	u32 flags;
2132 	int i;
2133 
2134 	rcu_read_lock();
2135 
2136 	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_ctxt_cmd.sta_id]);
	if (IS_ERR_OR_NULL(sta)) {
2138 		rcu_read_unlock();
2139 		WARN(1, "Can't find STA to configure HE\n");
2140 		return;
2141 	}
2142 
2143 	if (!sta->he_cap.has_he) {
2144 		rcu_read_unlock();
2145 		return;
2146 	}
2147 
2148 	flags = 0;
2149 
2150 	/* HTC flags */
2151 	if (sta->he_cap.he_cap_elem.mac_cap_info[0] &
2152 	    IEEE80211_HE_MAC_CAP0_HTC_HE)
2153 		sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_SUPPORT);
2154 	if ((sta->he_cap.he_cap_elem.mac_cap_info[1] &
2155 	      IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION) ||
2156 	    (sta->he_cap.he_cap_elem.mac_cap_info[2] &
2157 	      IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION)) {
2158 		u8 link_adap =
2159 			((sta->he_cap.he_cap_elem.mac_cap_info[2] &
2160 			  IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION) << 1) +
2161 			 (sta->he_cap.he_cap_elem.mac_cap_info[1] &
2162 			  IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION);
2163 
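		/*
		 * link_adap holds the 2-bit HE link adaptation value; 2 maps
		 * to unsolicited feedback only and 3 to both, matching the
		 * IWL_HE_HTC_LINK_ADAP_* flags set below.
		 */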
2164 		if (link_adap == 2)
2165 			sta_ctxt_cmd.htc_flags |=
2166 				cpu_to_le32(IWL_HE_HTC_LINK_ADAP_UNSOLICITED);
2167 		else if (link_adap == 3)
2168 			sta_ctxt_cmd.htc_flags |=
2169 				cpu_to_le32(IWL_HE_HTC_LINK_ADAP_BOTH);
2170 	}
2171 	if (sta->he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR)
2172 		sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BSR_SUPP);
2173 	if (sta->he_cap.he_cap_elem.mac_cap_info[3] &
2174 	    IEEE80211_HE_MAC_CAP3_OMI_CONTROL)
2175 		sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_OMI_SUPP);
2176 	if (sta->he_cap.he_cap_elem.mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR)
2177 		sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BQR_SUPP);
2178 
2179 	/*
	 * Initialize the PPE thresholds to "None" (7), as described in Table
	 * 9-262ac of 802.11ax/D3.0.
2182 	 */
2183 	memset(&sta_ctxt_cmd.pkt_ext, 7, sizeof(sta_ctxt_cmd.pkt_ext));
2184 
2185 	/* If PPE Thresholds exist, parse them into a FW-familiar format. */
2186 	if (sta->he_cap.he_cap_elem.phy_cap_info[6] &
2187 	    IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
2188 		u8 nss = (sta->he_cap.ppe_thres[0] &
2189 			  IEEE80211_PPE_THRES_NSS_MASK) + 1;
2190 		u8 ru_index_bitmap =
2191 			(sta->he_cap.ppe_thres[0] &
2192 			 IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK) >>
2193 			IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS;
2194 		u8 *ppe = &sta->he_cap.ppe_thres[0];
2195 		u8 ppe_pos_bit = 7; /* Starting after PPE header */
2196 
2197 		/*
2198 		 * FW currently supports only nss == MAX_HE_SUPP_NSS
2199 		 *
2200 		 * If nss > MAX: we can ignore values we don't support
2201 		 * If nss < MAX: we can set zeros in other streams
2202 		 */
2203 		if (nss > MAX_HE_SUPP_NSS) {
2204 			IWL_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss,
2205 				 MAX_HE_SUPP_NSS);
2206 			nss = MAX_HE_SUPP_NSS;
2207 		}
2208 
2209 		for (i = 0; i < nss; i++) {
2210 			u8 ru_index_tmp = ru_index_bitmap << 1;
2211 			u8 bw;
2212 
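			/*
			 * Each set bit in ru_index_bitmap means PPET values
			 * are present for that bandwidth/RU index; entries
			 * whose bit is clear keep the "None" value set by the
			 * memset above.
			 */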
2213 			for (bw = 0; bw < MAX_HE_CHANNEL_BW_INDX; bw++) {
2214 				ru_index_tmp >>= 1;
2215 				if (!(ru_index_tmp & 1))
2216 					continue;
2217 
2218 				sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][1] =
2219 					iwl_mvm_he_get_ppe_val(ppe,
2220 							       ppe_pos_bit);
2221 				ppe_pos_bit +=
2222 					IEEE80211_PPE_THRES_INFO_PPET_SIZE;
2223 				sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][0] =
2224 					iwl_mvm_he_get_ppe_val(ppe,
2225 							       ppe_pos_bit);
2226 				ppe_pos_bit +=
2227 					IEEE80211_PPE_THRES_INFO_PPET_SIZE;
2228 			}
2229 		}
2230 
2231 		flags |= STA_CTXT_HE_PACKET_EXT;
2232 	} else if ((sta->he_cap.he_cap_elem.phy_cap_info[9] &
2233 		    IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_MASK) !=
2234 		  IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED) {
2235 		int low_th = -1;
2236 		int high_th = -1;
2237 
2238 		/* Take the PPE thresholds from the nominal padding info */
2239 		switch (sta->he_cap.he_cap_elem.phy_cap_info[9] &
2240 			IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_MASK) {
2241 		case IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_0US:
2242 			low_th = IWL_HE_PKT_EXT_NONE;
2243 			high_th = IWL_HE_PKT_EXT_NONE;
2244 			break;
2245 		case IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_8US:
2246 			low_th = IWL_HE_PKT_EXT_BPSK;
2247 			high_th = IWL_HE_PKT_EXT_NONE;
2248 			break;
2249 		case IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_16US:
2250 			low_th = IWL_HE_PKT_EXT_NONE;
2251 			high_th = IWL_HE_PKT_EXT_BPSK;
2252 			break;
2253 		}
2254 
2255 		/* Set the PPE thresholds accordingly */
2256 		if (low_th >= 0 && high_th >= 0) {
			for (i = 0; i < MAX_HE_SUPP_NSS; i++) {
				u8 bw;

				for (bw = 0; bw < MAX_HE_CHANNEL_BW_INDX;
				     bw++) {
					sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][0] =
						low_th;
					sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][1] =
						high_th;
				}
2267 				}
2268 			}
2269 
2270 			flags |= STA_CTXT_HE_PACKET_EXT;
2271 		}
2272 	}
2273 	rcu_read_unlock();
2274 
	/* Mark MU EDCA as enabled, unless params are missing for some AC */
2276 	flags |= STA_CTXT_HE_MU_EDCA_CW;
2277 	for (i = 0; i < AC_NUM; i++) {
2278 		struct ieee80211_he_mu_edca_param_ac_rec *mu_edca =
2279 			&mvmvif->queue_params[i].mu_edca_param_rec;
2280 
2281 		if (!mvmvif->queue_params[i].mu_edca) {
2282 			flags &= ~STA_CTXT_HE_MU_EDCA_CW;
2283 			break;
2284 		}
2285 
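		/*
		 * ecw_min_max packs ECWmin in the low nibble and ECWmax in
		 * the high nibble.
		 */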
2286 		sta_ctxt_cmd.trig_based_txf[i].cwmin =
2287 			cpu_to_le16(mu_edca->ecw_min_max & 0xf);
2288 		sta_ctxt_cmd.trig_based_txf[i].cwmax =
2289 			cpu_to_le16((mu_edca->ecw_min_max & 0xf0) >> 4);
2290 		sta_ctxt_cmd.trig_based_txf[i].aifsn =
2291 			cpu_to_le16(mu_edca->aifsn);
2292 		sta_ctxt_cmd.trig_based_txf[i].mu_time =
2293 			cpu_to_le16(mu_edca->mu_edca_timer);
2294 	}
2295 
2296 	if (vif->bss_conf.multi_sta_back_32bit)
2297 		flags |= STA_CTXT_HE_32BIT_BA_BITMAP;
2298 
2299 	if (vif->bss_conf.ack_enabled)
2300 		flags |= STA_CTXT_HE_ACK_ENABLED;
2301 
2302 	if (vif->bss_conf.uora_exists) {
2303 		flags |= STA_CTXT_HE_TRIG_RND_ALLOC;
2304 
2305 		sta_ctxt_cmd.rand_alloc_ecwmin =
2306 			vif->bss_conf.uora_ocw_range & 0x7;
2307 		sta_ctxt_cmd.rand_alloc_ecwmax =
2308 			(vif->bss_conf.uora_ocw_range >> 3) & 0x7;
2309 	}
2310 
2311 	/* TODO: support Multi BSSID IE */
2312 
2313 	sta_ctxt_cmd.flags = cpu_to_le32(flags);
2314 
2315 	if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(STA_HE_CTXT_CMD,
2316 						 DATA_PATH_GROUP, 0),
2317 				 0, sizeof(sta_ctxt_cmd), &sta_ctxt_cmd))
2318 		IWL_ERR(mvm, "Failed to config FW to work HE!\n");
2319 }
2320 
2321 static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
2322 					     struct ieee80211_vif *vif,
2323 					     struct ieee80211_bss_conf *bss_conf,
2324 					     u32 changes)
2325 {
2326 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2327 	int ret;
2328 
2329 	/*
2330 	 * Re-calculate the tsf id, as the master-slave relations depend on the
2331 	 * beacon interval, which was not known when the station interface was
2332 	 * added.
2333 	 */
2334 	if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) {
2335 		if (vif->bss_conf.he_support &&
2336 		    !iwlwifi_mod_params.disable_11ax)
2337 			iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id);
2338 
2339 		iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
2340 	}
2341 
2342 	/* Update MU EDCA params */
2343 	if (changes & BSS_CHANGED_QOS && mvmvif->associated &&
2344 	    bss_conf->assoc && vif->bss_conf.he_support &&
2345 	    !iwlwifi_mod_params.disable_11ax)
2346 		iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id);
2347 
2348 	/*
2349 	 * If we're not associated yet, take the (new) BSSID before associating
2350 	 * so the firmware knows. If we're already associated, then use the old
2351 	 * BSSID here, and we'll send a cleared one later in the CHANGED_ASSOC
2352 	 * branch for disassociation below.
2353 	 */
2354 	if (changes & BSS_CHANGED_BSSID && !mvmvif->associated)
2355 		memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN);
2356 
2357 	ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, mvmvif->bssid);
2358 	if (ret)
2359 		IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
2360 
2361 	/* after sending it once, adopt mac80211 data */
2362 	memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN);
2363 	mvmvif->associated = bss_conf->assoc;
2364 
2365 	if (changes & BSS_CHANGED_ASSOC) {
2366 		if (bss_conf->assoc) {
2367 			/* clear statistics to get clean beacon counter */
2368 			iwl_mvm_request_statistics(mvm, true);
2369 			memset(&mvmvif->beacon_stats, 0,
2370 			       sizeof(mvmvif->beacon_stats));
2371 
2372 			/* add quota for this interface */
2373 			ret = iwl_mvm_update_quotas(mvm, true, NULL);
2374 			if (ret) {
2375 				IWL_ERR(mvm, "failed to update quotas\n");
2376 				return;
2377 			}
2378 
2379 			if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
2380 				     &mvm->status)) {
2381 				/*
2382 				 * If we're restarting then the firmware will
2383 				 * obviously have lost synchronisation with
2384 				 * the AP. It will attempt to synchronise by
2385 				 * itself, but we can make it more reliable by
2386 				 * scheduling a session protection time event.
2387 				 *
2388 				 * The firmware needs to receive a beacon to
				 * catch up with synchronisation; use 110% of
2390 				 * the beacon interval.
2391 				 *
2392 				 * Set a large maximum delay to allow for more
2393 				 * than a single interface.
2394 				 */
2395 				u32 dur = (11 * vif->bss_conf.beacon_int) / 10;
2396 				iwl_mvm_protect_session(mvm, vif, dur, dur,
2397 							5 * dur, false);
2398 			}
2399 
2400 			iwl_mvm_sf_update(mvm, vif, false);
2401 			iwl_mvm_power_vif_assoc(mvm, vif);
2402 			if (vif->p2p) {
2403 				iwl_mvm_ref(mvm, IWL_MVM_REF_P2P_CLIENT);
2404 				iwl_mvm_update_smps(mvm, vif,
2405 						    IWL_MVM_SMPS_REQ_PROT,
2406 						    IEEE80211_SMPS_DYNAMIC);
2407 			}
2408 		} else if (mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
2409 			/*
			 * If the update fails, SF might be left running in
			 * associated mode while disassociated, which is
			 * forbidden.
2412 			 */
2413 			ret = iwl_mvm_sf_update(mvm, vif, false);
2414 			WARN_ONCE(ret &&
2415 				  !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
2416 					    &mvm->status),
2417 				  "Failed to update SF upon disassociation\n");
2418 
2419 			/*
2420 			 * If we get an assert during the connection (after the
2421 			 * station has been added, but before the vif is set
2422 			 * to associated), mac80211 will re-add the station and
2423 			 * then configure the vif. Since the vif is not
2424 			 * associated, we would remove the station here and
2425 			 * this would fail the recovery.
2426 			 */
2427 			if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
2428 				      &mvm->status)) {
2429 				/*
2430 				 * Remove AP station now that
2431 				 * the MAC is unassoc
2432 				 */
2433 				ret = iwl_mvm_rm_sta_id(mvm, vif,
2434 							mvmvif->ap_sta_id);
2435 				if (ret)
2436 					IWL_ERR(mvm,
2437 						"failed to remove AP station\n");
2438 
2439 				if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
2440 					mvm->d0i3_ap_sta_id =
2441 						IWL_MVM_INVALID_STA;
2442 				mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
2443 			}
2444 
2445 			/* remove quota for this interface */
2446 			ret = iwl_mvm_update_quotas(mvm, false, NULL);
2447 			if (ret)
2448 				IWL_ERR(mvm, "failed to update quotas\n");
2449 
2450 			if (vif->p2p)
2451 				iwl_mvm_unref(mvm, IWL_MVM_REF_P2P_CLIENT);
2452 
2453 			/* this will take the cleared BSSID from bss_conf */
2454 			ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2455 			if (ret)
2456 				IWL_ERR(mvm,
2457 					"failed to update MAC %pM (clear after unassoc)\n",
2458 					vif->addr);
2459 		}
2460 
2461 		/*
2462 		 * The firmware tracks the MU-MIMO group on its own.
2463 		 * However, on HW restart we should restore this data.
2464 		 */
2465 		if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
2466 		    (changes & BSS_CHANGED_MU_GROUPS) && vif->mu_mimo_owner) {
2467 			ret = iwl_mvm_update_mu_groups(mvm, vif);
2468 			if (ret)
2469 				IWL_ERR(mvm,
2470 					"failed to update VHT MU_MIMO groups\n");
2471 		}
2472 
2473 		iwl_mvm_recalc_multicast(mvm);
2474 		iwl_mvm_configure_bcast_filter(mvm);
2475 
2476 		/* reset rssi values */
2477 		mvmvif->bf_data.ave_beacon_signal = 0;
2478 
2479 		iwl_mvm_bt_coex_vif_change(mvm);
2480 		iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT,
2481 				    IEEE80211_SMPS_AUTOMATIC);
2482 		if (fw_has_capa(&mvm->fw->ucode_capa,
2483 				IWL_UCODE_TLV_CAPA_UMAC_SCAN))
2484 			iwl_mvm_config_scan(mvm);
2485 	}
2486 
2487 	if (changes & BSS_CHANGED_BEACON_INFO) {
2488 		/*
		 * We received a beacon from the associated AP, so
2490 		 * remove the session protection.
2491 		 */
2492 		iwl_mvm_stop_session_protection(mvm, vif);
2493 
2494 		iwl_mvm_sf_update(mvm, vif, false);
2495 		WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
2496 	}
2497 
2498 	if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS |
2499 		       /*
2500 			* Send power command on every beacon change,
			* because we may not have enabled beacon abort yet.
2502 			*/
2503 		       BSS_CHANGED_BEACON_INFO)) {
2504 		ret = iwl_mvm_power_update_mac(mvm);
2505 		if (ret)
2506 			IWL_ERR(mvm, "failed to update power mode\n");
2507 	}
2508 
2509 	if (changes & BSS_CHANGED_TXPOWER) {
2510 		IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
2511 				bss_conf->txpower);
2512 		iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
2513 	}
2514 
2515 	if (changes & BSS_CHANGED_CQM) {
2516 		IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n");
2517 		/* reset cqm events tracking */
2518 		mvmvif->bf_data.last_cqm_event = 0;
2519 		if (mvmvif->bf_data.bf_enabled) {
2520 			ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
2521 			if (ret)
2522 				IWL_ERR(mvm,
2523 					"failed to update CQM thresholds\n");
2524 		}
2525 	}
2526 
2527 	if (changes & BSS_CHANGED_ARP_FILTER) {
2528 		IWL_DEBUG_MAC80211(mvm, "arp filter changed\n");
2529 		iwl_mvm_configure_bcast_filter(mvm);
2530 	}
2531 }
2532 
2533 static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
2534 				 struct ieee80211_vif *vif)
2535 {
2536 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2537 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2538 	int ret;
2539 
2540 	/*
2541 	 * iwl_mvm_mac_ctxt_add() might read directly from the device
2542 	 * (the system time), so make sure it is available.
2543 	 */
2544 	ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_START_AP);
2545 	if (ret)
2546 		return ret;
2547 
2548 	mutex_lock(&mvm->mutex);
2549 
2550 	/* Send the beacon template */
2551 	ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif);
2552 	if (ret)
2553 		goto out_unlock;
2554 
2555 	/*
2556 	 * Re-calculate the tsf id, as the master-slave relations depend on the
2557 	 * beacon interval, which was not known when the AP interface was added.
2558 	 */
2559 	if (vif->type == NL80211_IFTYPE_AP)
2560 		iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
2561 
2562 	mvmvif->ap_assoc_sta_count = 0;
2563 
2564 	/* Add the mac context */
2565 	ret = iwl_mvm_mac_ctxt_add(mvm, vif);
2566 	if (ret)
2567 		goto out_unlock;
2568 
2569 	/* Perform the binding */
2570 	ret = iwl_mvm_binding_add_vif(mvm, vif);
2571 	if (ret)
2572 		goto out_remove;
2573 
2574 	/*
2575 	 * This is not very nice, but the simplest:
2576 	 * For older FWs adding the mcast sta before the bcast station may
2577 	 * cause assert 0x2b00.
	 * This is fixed in later FW, so make the order of addition depend on
	 * the TLV.
2580 	 */
2581 	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
2582 		ret = iwl_mvm_add_mcast_sta(mvm, vif);
2583 		if (ret)
2584 			goto out_unbind;
2585 		/*
2586 		 * Send the bcast station. At this stage the TBTT and DTIM time
2587 		 * events are added and applied to the scheduler
2588 		 */
2589 		ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
2590 		if (ret) {
2591 			iwl_mvm_rm_mcast_sta(mvm, vif);
2592 			goto out_unbind;
2593 		}
2594 	} else {
2595 		/*
2596 		 * Send the bcast station. At this stage the TBTT and DTIM time
2597 		 * events are added and applied to the scheduler
2598 		 */
2599 		ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
2600 		if (ret)
2601 			goto out_unbind;
2602 		ret = iwl_mvm_add_mcast_sta(mvm, vif);
2603 		if (ret) {
2604 			iwl_mvm_send_rm_bcast_sta(mvm, vif);
2605 			goto out_unbind;
2606 		}
2607 	}
2608 
2609 	/* must be set before quota calculations */
2610 	mvmvif->ap_ibss_active = true;
2611 
2612 	if (vif->type == NL80211_IFTYPE_AP && !vif->p2p) {
2613 		iwl_mvm_vif_set_low_latency(mvmvif, true,
2614 					    LOW_LATENCY_VIF_TYPE);
2615 		iwl_mvm_send_low_latency_cmd(mvm, true, mvmvif->id);
2616 	}
2617 
	/* the power update needs to be done before quotas */
2619 	iwl_mvm_power_update_mac(mvm);
2620 
2621 	ret = iwl_mvm_update_quotas(mvm, false, NULL);
2622 	if (ret)
2623 		goto out_quota_failed;
2624 
2625 	/* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
2626 	if (vif->p2p && mvm->p2p_device_vif)
2627 		iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
2628 
2629 	iwl_mvm_ref(mvm, IWL_MVM_REF_AP_IBSS);
2630 
2631 	iwl_mvm_bt_coex_vif_change(mvm);
2632 
2633 	/* we don't support TDLS during DCM */
2634 	if (iwl_mvm_phy_ctx_count(mvm) > 1)
2635 		iwl_mvm_teardown_tdls_peers(mvm);
2636 
2637 	iwl_mvm_ftm_restart_responder(mvm, vif);
2638 
2639 	goto out_unlock;
2640 
2641 out_quota_failed:
2642 	iwl_mvm_power_update_mac(mvm);
2643 	mvmvif->ap_ibss_active = false;
2644 	iwl_mvm_send_rm_bcast_sta(mvm, vif);
2645 	iwl_mvm_rm_mcast_sta(mvm, vif);
2646 out_unbind:
2647 	iwl_mvm_binding_remove_vif(mvm, vif);
2648 out_remove:
2649 	iwl_mvm_mac_ctxt_remove(mvm, vif);
2650 out_unlock:
2651 	mutex_unlock(&mvm->mutex);
2652 	iwl_mvm_unref(mvm, IWL_MVM_REF_START_AP);
2653 	return ret;
2654 }
2655 
2656 static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
2657 				 struct ieee80211_vif *vif)
2658 {
2659 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2660 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2661 
2662 	iwl_mvm_prepare_mac_removal(mvm, vif);
2663 
2664 	mutex_lock(&mvm->mutex);
2665 
2666 	/* Handle AP stop while in CSA */
2667 	if (rcu_access_pointer(mvm->csa_vif) == vif) {
2668 		iwl_mvm_remove_time_event(mvm, mvmvif,
2669 					  &mvmvif->time_event_data);
2670 		RCU_INIT_POINTER(mvm->csa_vif, NULL);
2671 		mvmvif->csa_countdown = false;
2672 	}
2673 
2674 	if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) {
2675 		RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
2676 		mvm->csa_tx_block_bcn_timeout = 0;
2677 	}
2678 
2679 	mvmvif->ap_ibss_active = false;
2680 	mvm->ap_last_beacon_gp2 = 0;
2681 
2682 	if (vif->type == NL80211_IFTYPE_AP && !vif->p2p) {
2683 		iwl_mvm_vif_set_low_latency(mvmvif, false,
2684 					    LOW_LATENCY_VIF_TYPE);
		iwl_mvm_send_low_latency_cmd(mvm, false, mvmvif->id);
2686 	}
2687 
2688 	iwl_mvm_bt_coex_vif_change(mvm);
2689 
2690 	iwl_mvm_unref(mvm, IWL_MVM_REF_AP_IBSS);
2691 
2692 	/* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
2693 	if (vif->p2p && mvm->p2p_device_vif)
2694 		iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
2695 
2696 	iwl_mvm_update_quotas(mvm, false, NULL);
2697 
2698 	/*
2699 	 * This is not very nice, but the simplest:
2700 	 * For older FWs removing the mcast sta before the bcast station may
2701 	 * cause assert 0x2b00.
	 * This is fixed in later FW (which will stop beaconing when removing
	 * the bcast station).
	 * So make the order of removal depend on the TLV.
2705 	 */
2706 	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
2707 		iwl_mvm_rm_mcast_sta(mvm, vif);
2708 	iwl_mvm_send_rm_bcast_sta(mvm, vif);
2709 	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
2710 		iwl_mvm_rm_mcast_sta(mvm, vif);
2711 	iwl_mvm_binding_remove_vif(mvm, vif);
2712 
2713 	iwl_mvm_power_update_mac(mvm);
2714 
2715 	iwl_mvm_mac_ctxt_remove(mvm, vif);
2716 
2717 	mutex_unlock(&mvm->mutex);
2718 }
2719 
2720 static void
2721 iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
2722 				 struct ieee80211_vif *vif,
2723 				 struct ieee80211_bss_conf *bss_conf,
2724 				 u32 changes)
2725 {
2726 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2727 
2728 	/* Changes will be applied when the AP/IBSS is started */
2729 	if (!mvmvif->ap_ibss_active)
2730 		return;
2731 
2732 	if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT |
2733 		       BSS_CHANGED_BANDWIDTH | BSS_CHANGED_QOS) &&
2734 	    iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL))
2735 		IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
2736 
2737 	/* Need to send a new beacon template to the FW */
2738 	if (changes & BSS_CHANGED_BEACON &&
2739 	    iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
2740 		IWL_WARN(mvm, "Failed updating beacon data\n");
2741 
2742 	if (changes & BSS_CHANGED_TXPOWER) {
2743 		IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
2744 				bss_conf->txpower);
2745 		iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
2746 	}
2747 
2748 	if (changes & BSS_CHANGED_FTM_RESPONDER) {
2749 		int ret = iwl_mvm_ftm_start_responder(mvm, vif);
2750 
2751 		if (ret)
2752 			IWL_WARN(mvm, "Failed to enable FTM responder (%d)\n",
2753 				 ret);
2754 	}
}
2757 
2758 static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
2759 				     struct ieee80211_vif *vif,
2760 				     struct ieee80211_bss_conf *bss_conf,
2761 				     u32 changes)
2762 {
2763 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2764 
2765 	/*
2766 	 * iwl_mvm_bss_info_changed_station() might call
2767 	 * iwl_mvm_protect_session(), which reads directly from
2768 	 * the device (the system time), so make sure it is available.
2769 	 */
2770 	if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_BSS_CHANGED))
2771 		return;
2772 
2773 	mutex_lock(&mvm->mutex);
2774 
2775 	if (changes & BSS_CHANGED_IDLE && !bss_conf->idle)
2776 		iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
2777 
2778 	switch (vif->type) {
2779 	case NL80211_IFTYPE_STATION:
2780 		iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes);
2781 		break;
2782 	case NL80211_IFTYPE_AP:
2783 	case NL80211_IFTYPE_ADHOC:
2784 		iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes);
2785 		break;
2786 	case NL80211_IFTYPE_MONITOR:
2787 		if (changes & BSS_CHANGED_MU_GROUPS)
2788 			iwl_mvm_update_mu_groups(mvm, vif);
2789 		break;
2790 	default:
2791 		/* shouldn't happen */
2792 		WARN_ON_ONCE(1);
2793 	}
2794 
2795 	mutex_unlock(&mvm->mutex);
2796 	iwl_mvm_unref(mvm, IWL_MVM_REF_BSS_CHANGED);
2797 }
2798 
2799 static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
2800 			       struct ieee80211_vif *vif,
2801 			       struct ieee80211_scan_request *hw_req)
2802 {
2803 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2804 	int ret;
2805 
2806 	if (hw_req->req.n_channels == 0 ||
2807 	    hw_req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels)
2808 		return -EINVAL;
2809 
2810 	mutex_lock(&mvm->mutex);
2811 	ret = iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies);
2812 	mutex_unlock(&mvm->mutex);
2813 
2814 	return ret;
2815 }
2816 
2817 static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
2818 				       struct ieee80211_vif *vif)
2819 {
2820 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2821 
2822 	mutex_lock(&mvm->mutex);
2823 
	/* Due to a race condition, it's possible that mac80211 asks
	 * us to stop a hw_scan when it's already stopped.  This can
	 * happen, for instance, if we stopped the scan ourselves,
	 * called ieee80211_scan_completed() and the userspace called
	 * cancel scan before ieee80211_scan_work() could run.
	 * To handle that, simply return if the scan is not running.
	 */
2831 	if (mvm->scan_status & IWL_MVM_SCAN_REGULAR)
2832 		iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
2833 
2834 	mutex_unlock(&mvm->mutex);
2835 }
2836 
2837 static void
2838 iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw,
2839 				  struct ieee80211_sta *sta, u16 tids,
2840 				  int num_frames,
2841 				  enum ieee80211_frame_release_type reason,
2842 				  bool more_data)
2843 {
2844 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2845 
2846 	/* Called when we need to transmit (a) frame(s) from mac80211 */
2847 
2848 	iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
2849 					  tids, more_data, false);
2850 }
2851 
2852 static void
2853 iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw,
2854 				    struct ieee80211_sta *sta, u16 tids,
2855 				    int num_frames,
2856 				    enum ieee80211_frame_release_type reason,
2857 				    bool more_data)
2858 {
2859 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2860 
2861 	/* Called when we need to transmit (a) frame(s) from agg or dqa queue */
2862 
2863 	iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
2864 					  tids, more_data, true);
2865 }
2866 
2867 static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
2868 				     enum sta_notify_cmd cmd,
2869 				     struct ieee80211_sta *sta)
2870 {
2871 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2872 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2873 	unsigned long txqs = 0, tids = 0;
2874 	int tid;
2875 
2876 	/*
	 * If we have TVQM, then we get queue numbers that are too high -
	 * luckily we really shouldn't get here with that, because such
	 * hardware should have firmware supporting buffer station offload.
2880 	 */
2881 	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
2882 		return;
2883 
2884 	spin_lock_bh(&mvmsta->lock);
2885 	for (tid = 0; tid < ARRAY_SIZE(mvmsta->tid_data); tid++) {
2886 		struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2887 
2888 		if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE)
2889 			continue;
2890 
2891 		__set_bit(tid_data->txq_id, &txqs);
2892 
2893 		if (iwl_mvm_tid_queued(mvm, tid_data) == 0)
2894 			continue;
2895 
2896 		__set_bit(tid, &tids);
2897 	}
2898 
2899 	switch (cmd) {
2900 	case STA_NOTIFY_SLEEP:
2901 		for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT)
2902 			ieee80211_sta_set_buffered(sta, tid, true);
2903 
2904 		if (txqs)
2905 			iwl_trans_freeze_txq_timer(mvm->trans, txqs, true);
2906 		/*
2907 		 * The fw updates the STA to be asleep. Tx packets on the Tx
2908 		 * queues to this station will not be transmitted. The fw will
2909 		 * send a Tx response with TX_STATUS_FAIL_DEST_PS.
2910 		 */
2911 		break;
2912 	case STA_NOTIFY_AWAKE:
2913 		if (WARN_ON(mvmsta->sta_id == IWL_MVM_INVALID_STA))
2914 			break;
2915 
2916 		if (txqs)
2917 			iwl_trans_freeze_txq_timer(mvm->trans, txqs, false);
2918 		iwl_mvm_sta_modify_ps_wake(mvm, sta);
2919 		break;
2920 	default:
2921 		break;
2922 	}
2923 	spin_unlock_bh(&mvmsta->lock);
2924 }
2925 
2926 static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
2927 				   struct ieee80211_vif *vif,
2928 				   enum sta_notify_cmd cmd,
2929 				   struct ieee80211_sta *sta)
2930 {
2931 	__iwl_mvm_mac_sta_notify(hw, cmd, sta);
2932 }
2933 
2934 void iwl_mvm_sta_pm_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
2935 {
2936 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
2937 	struct iwl_mvm_pm_state_notification *notif = (void *)pkt->data;
2938 	struct ieee80211_sta *sta;
2939 	struct iwl_mvm_sta *mvmsta;
2940 	bool sleeping = (notif->type != IWL_MVM_PM_EVENT_AWAKE);
2941 
2942 	if (WARN_ON(notif->sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id)))
2943 		return;
2944 
2945 	rcu_read_lock();
2946 	sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]);
2947 	if (WARN_ON(IS_ERR_OR_NULL(sta))) {
2948 		rcu_read_unlock();
2949 		return;
2950 	}
2951 
2952 	mvmsta = iwl_mvm_sta_from_mac80211(sta);
2953 
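	/* only clients of one of our own AP interfaces are of interest */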
2954 	if (!mvmsta->vif ||
2955 	    mvmsta->vif->type != NL80211_IFTYPE_AP) {
2956 		rcu_read_unlock();
2957 		return;
2958 	}
2959 
2960 	if (mvmsta->sleeping != sleeping) {
2961 		mvmsta->sleeping = sleeping;
2962 		__iwl_mvm_mac_sta_notify(mvm->hw,
2963 			sleeping ? STA_NOTIFY_SLEEP : STA_NOTIFY_AWAKE,
2964 			sta);
2965 		ieee80211_sta_ps_transition(sta, sleeping);
2966 	}
2967 
2968 	if (sleeping) {
2969 		switch (notif->type) {
2970 		case IWL_MVM_PM_EVENT_AWAKE:
2971 		case IWL_MVM_PM_EVENT_ASLEEP:
2972 			break;
2973 		case IWL_MVM_PM_EVENT_UAPSD:
2974 			ieee80211_sta_uapsd_trigger(sta, IEEE80211_NUM_TIDS);
2975 			break;
2976 		case IWL_MVM_PM_EVENT_PS_POLL:
2977 			ieee80211_sta_pspoll(sta);
2978 			break;
2979 		default:
2980 			break;
2981 		}
2982 	}
2983 
2984 	rcu_read_unlock();
2985 }
2986 
2987 static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
2988 				       struct ieee80211_vif *vif,
2989 				       struct ieee80211_sta *sta)
2990 {
2991 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2992 	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2993 
2994 	/*
2995 	 * This is called before mac80211 does RCU synchronisation,
2996 	 * so here we already invalidate our internal RCU-protected
2997 	 * station pointer. The rest of the code will thus no longer
2998 	 * be able to find the station this way, and we don't rely
2999 	 * on further RCU synchronisation after the sta_state()
3000 	 * callback deleted the station.
3001 	 */
3002 	mutex_lock(&mvm->mutex);
3003 	if (sta == rcu_access_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id]))
3004 		rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
3005 				   ERR_PTR(-ENOENT));
3006 
3007 	mutex_unlock(&mvm->mutex);
3008 }
3009 
3010 static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3011 				const u8 *bssid)
3012 {
3013 	int i;
3014 
3015 	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
3016 		struct iwl_mvm_tcm_mac *mdata;
3017 
3018 		mdata = &mvm->tcm.data[iwl_mvm_vif_from_mac80211(vif)->id];
3019 		ewma_rate_init(&mdata->uapsd_nonagg_detect.rate);
3020 		mdata->opened_rx_ba_sessions = false;
3021 	}
3022 
3023 	if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT))
3024 		return;
3025 
3026 	if (vif->p2p && !iwl_mvm_is_p2p_scm_uapsd_supported(mvm)) {
3027 		vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
3028 		return;
3029 	}
3030 
3031 	if (!vif->p2p &&
3032 	    (iwlwifi_mod_params.uapsd_disable & IWL_DISABLE_UAPSD_BSS)) {
3033 		vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
3034 		return;
3035 	}
3036 
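	/* don't enable U-APSD for BSSIDs on the U-APSD no-aggregation list */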
3037 	for (i = 0; i < IWL_MVM_UAPSD_NOAGG_LIST_LEN; i++) {
3038 		if (ether_addr_equal(mvm->uapsd_noagg_bssids[i].addr, bssid)) {
3039 			vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
3040 			return;
3041 		}
3042 	}
3043 
3044 	vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
3045 }
3046 
3047 static void
3048 iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm,
3049 			   struct ieee80211_vif *vif, u8 *peer_addr,
3050 			   enum nl80211_tdls_operation action)
3051 {
3052 	struct iwl_fw_dbg_trigger_tlv *trig;
3053 	struct iwl_fw_dbg_trigger_tdls *tdls_trig;
3054 
3055 	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
3056 				     FW_DBG_TRIGGER_TDLS);
3057 	if (!trig)
3058 		return;
3059 
3060 	tdls_trig = (void *)trig->data;
3061 
3062 	if (!(tdls_trig->action_bitmap & BIT(action)))
3063 		return;
3064 
3065 	if (tdls_trig->peer_mode &&
3066 	    memcmp(tdls_trig->peer, peer_addr, ETH_ALEN) != 0)
3067 		return;
3068 
3069 	iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
3070 				"TDLS event occurred, peer %pM, action %d",
3071 				peer_addr, action);
3072 }
3073 
3074 static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
3075 				 struct ieee80211_vif *vif,
3076 				 struct ieee80211_sta *sta,
3077 				 enum ieee80211_sta_state old_state,
3078 				 enum ieee80211_sta_state new_state)
3079 {
3080 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3081 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3082 	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3083 	int ret;
3084 
3085 	IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n",
3086 			   sta->addr, old_state, new_state);
3087 
3088 	/* this would be a mac80211 bug ... but don't crash */
3089 	if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
3090 		return -EINVAL;
3091 
3092 	/*
3093 	 * If we are in a STA removal flow and in DQA mode:
3094 	 *
3095 	 * This is after the sync_rcu part, so the queues have already been
3096 	 * flushed. No more TXs on their way in mac80211's path, and no more in
3097 	 * the queues.
3098 	 * Also, we won't be getting any new TX frames for this station.
3099 	 * What we might have are deferred TX frames that need to be taken care
3100 	 * of.
3101 	 *
3102 	 * Drop any still-queued deferred-frame before removing the STA, and
3103 	 * make sure the worker is no longer handling frames for this STA.
3104 	 */
3105 	if (old_state == IEEE80211_STA_NONE &&
3106 	    new_state == IEEE80211_STA_NOTEXIST) {
3107 		flush_work(&mvm->add_stream_wk);
3108 
3109 		/*
3110 		 * No need to make sure deferred TX indication is off since the
3111 		 * worker will already remove it if it was on
3112 		 */
3113 	}
3114 
3115 	mutex_lock(&mvm->mutex);
3116 	/* track whether or not the station is associated */
3117 	mvm_sta->sta_state = new_state;
3118 
3119 	if (old_state == IEEE80211_STA_NOTEXIST &&
3120 	    new_state == IEEE80211_STA_NONE) {
3121 		/*
3122 		 * Firmware bug - it'll crash if the beacon interval is less
		 * than 16. We can't avoid connecting at all, so refuse the
		 * station state change; this will cause mac80211 to abandon
		 * attempts to connect to this AP, and eventually wpa_s will
3126 		 * blacklist the AP...
3127 		 */
3128 		if (vif->type == NL80211_IFTYPE_STATION &&
3129 		    vif->bss_conf.beacon_int < 16) {
3130 			IWL_ERR(mvm,
3131 				"AP %pM beacon interval is %d, refusing due to firmware bug!\n",
3132 				sta->addr, vif->bss_conf.beacon_int);
3133 			ret = -EINVAL;
3134 			goto out_unlock;
3135 		}
3136 
3137 		if (sta->tdls &&
3138 		    (vif->p2p ||
3139 		     iwl_mvm_tdls_sta_count(mvm, NULL) ==
3140 						IWL_MVM_TDLS_STA_COUNT ||
3141 		     iwl_mvm_phy_ctx_count(mvm) > 1)) {
3142 			IWL_DEBUG_MAC80211(mvm, "refusing TDLS sta\n");
3143 			ret = -EBUSY;
3144 			goto out_unlock;
3145 		}
3146 
3147 		ret = iwl_mvm_add_sta(mvm, vif, sta);
3148 		if (sta->tdls && ret == 0) {
3149 			iwl_mvm_recalc_tdls_state(mvm, vif, true);
3150 			iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
3151 						   NL80211_TDLS_SETUP);
3152 		}
3153 
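		/*
		 * Keep A-MSDU effectively disabled until rate scaling raises
		 * the limit.
		 */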
3154 		sta->max_rc_amsdu_len = 1;
3155 	} else if (old_state == IEEE80211_STA_NONE &&
3156 		   new_state == IEEE80211_STA_AUTH) {
3157 		/*
3158 		 * EBS may be disabled due to previous failures reported by FW.
		 * Reset the EBS status here, assuming the environment has changed.
3160 		 */
3161 		mvm->last_ebs_successful = true;
3162 		iwl_mvm_check_uapsd(mvm, vif, sta->addr);
3163 		ret = 0;
3164 	} else if (old_state == IEEE80211_STA_AUTH &&
3165 		   new_state == IEEE80211_STA_ASSOC) {
3166 		if (vif->type == NL80211_IFTYPE_AP) {
3167 			vif->bss_conf.he_support = sta->he_cap.has_he;
3168 			mvmvif->ap_assoc_sta_count++;
3169 			iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
3170 			if (vif->bss_conf.he_support &&
3171 			    !iwlwifi_mod_params.disable_11ax)
3172 				iwl_mvm_cfg_he_sta(mvm, vif, mvm_sta->sta_id);
3173 		} else if (vif->type == NL80211_IFTYPE_STATION) {
3174 			vif->bss_conf.he_support = sta->he_cap.has_he;
3175 			iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
3176 		}
3177 
3178 		iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
3179 				     false);
3180 		ret = iwl_mvm_update_sta(mvm, vif, sta);
3181 	} else if (old_state == IEEE80211_STA_ASSOC &&
3182 		   new_state == IEEE80211_STA_AUTHORIZED) {
3183 		ret = 0;
3184 
3185 		/* we don't support TDLS during DCM */
3186 		if (iwl_mvm_phy_ctx_count(mvm) > 1)
3187 			iwl_mvm_teardown_tdls_peers(mvm);
3188 
3189 		if (sta->tdls)
3190 			iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
3191 						   NL80211_TDLS_ENABLE_LINK);
3192 
3193 		/* enable beacon filtering */
3194 		WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
3195 
3196 		iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
3197 				     true);
3198 	} else if (old_state == IEEE80211_STA_AUTHORIZED &&
3199 		   new_state == IEEE80211_STA_ASSOC) {
3200 		/* disable beacon filtering */
3201 		ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
3202 		WARN_ON(ret &&
3203 			!test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
3204 				  &mvm->status));
3205 		ret = 0;
3206 	} else if (old_state == IEEE80211_STA_ASSOC &&
3207 		   new_state == IEEE80211_STA_AUTH) {
3208 		if (vif->type == NL80211_IFTYPE_AP) {
3209 			mvmvif->ap_assoc_sta_count--;
3210 			iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
3211 		}
3212 		ret = 0;
3213 	} else if (old_state == IEEE80211_STA_AUTH &&
3214 		   new_state == IEEE80211_STA_NONE) {
3215 		ret = 0;
3216 	} else if (old_state == IEEE80211_STA_NONE &&
3217 		   new_state == IEEE80211_STA_NOTEXIST) {
3218 		ret = iwl_mvm_rm_sta(mvm, vif, sta);
3219 		if (sta->tdls) {
3220 			iwl_mvm_recalc_tdls_state(mvm, vif, false);
3221 			iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
3222 						   NL80211_TDLS_DISABLE_LINK);
3223 		}
3224 
3225 		if (unlikely(ret &&
3226 			     test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
3227 				      &mvm->status)))
3228 			ret = 0;
3229 	} else {
3230 		ret = -EIO;
3231 	}
3232  out_unlock:
3233 	mutex_unlock(&mvm->mutex);
3234 
3235 	if (sta->tdls && ret == 0) {
3236 		if (old_state == IEEE80211_STA_NOTEXIST &&
3237 		    new_state == IEEE80211_STA_NONE)
3238 			ieee80211_reserve_tid(sta, IWL_MVM_TDLS_FW_TID);
3239 		else if (old_state == IEEE80211_STA_NONE &&
3240 			 new_state == IEEE80211_STA_NOTEXIST)
3241 			ieee80211_unreserve_tid(sta, IWL_MVM_TDLS_FW_TID);
3242 	}
3243 
3244 	return ret;
3245 }
3246 
3247 static int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3248 {
3249 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3250 
3251 	mvm->rts_threshold = value;
3252 
3253 	return 0;
3254 }
3255 
3256 static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw,
3257 				  struct ieee80211_vif *vif,
3258 				  struct ieee80211_sta *sta, u32 changed)
3259 {
3260 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3261 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3262 
3263 	if (changed & (IEEE80211_RC_BW_CHANGED |
3264 		       IEEE80211_RC_SUPP_RATES_CHANGED |
3265 		       IEEE80211_RC_NSS_CHANGED))
3266 		iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
3267 				     true);
3268 
3269 	if (vif->type == NL80211_IFTYPE_STATION &&
3270 	    changed & IEEE80211_RC_NSS_CHANGED)
3271 		iwl_mvm_sf_update(mvm, vif, false);
3272 }
3273 
3274 static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw,
3275 			       struct ieee80211_vif *vif, u16 ac,
3276 			       const struct ieee80211_tx_queue_params *params)
3277 {
3278 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3279 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3280 
3281 	mvmvif->queue_params[ac] = *params;
3282 
3283 	/*
	 * No need to update right away, we'll get BSS_CHANGED_QOS.
	 * The exception is the P2P_DEVICE interface, which needs an
	 * immediate update.
3286 	 */
3287 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
3288 		int ret;
3289 
3290 		mutex_lock(&mvm->mutex);
3291 		ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
3292 		mutex_unlock(&mvm->mutex);
3293 		return ret;
3294 	}
3295 	return 0;
3296 }
3297 
3298 static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
3299 				       struct ieee80211_vif *vif,
3300 				       u16 req_duration)
3301 {
3302 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3303 	u32 duration = IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
3304 	u32 min_duration = IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS;
3305 
3306 	/*
3307 	 * iwl_mvm_protect_session() reads directly from the device
3308 	 * (the system time), so make sure it is available.
3309 	 */
3310 	if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PREPARE_TX))
3311 		return;
3312 
3313 	if (req_duration > duration)
3314 		duration = req_duration;
3315 
3316 	mutex_lock(&mvm->mutex);
3317 	/* Try really hard to protect the session and hear a beacon */
3318 	iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500, false);
3319 	mutex_unlock(&mvm->mutex);
3320 
3321 	iwl_mvm_unref(mvm, IWL_MVM_REF_PREPARE_TX);
3322 }
3323 
3324 static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
3325 					struct ieee80211_vif *vif,
3326 					struct cfg80211_sched_scan_request *req,
3327 					struct ieee80211_scan_ies *ies)
3328 {
3329 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3330 
3331 	int ret;
3332 
3333 	mutex_lock(&mvm->mutex);
3334 
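	/* scheduled scan is only started while the vif is idle */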
3335 	if (!vif->bss_conf.idle) {
3336 		ret = -EBUSY;
3337 		goto out;
3338 	}
3339 
3340 	ret = iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED);
3341 
3342 out:
3343 	mutex_unlock(&mvm->mutex);
3344 	return ret;
3345 }
3346 
3347 static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
3348 				       struct ieee80211_vif *vif)
3349 {
3350 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3351 	int ret;
3352 
3353 	mutex_lock(&mvm->mutex);
3354 
	/* Due to a race condition, it's possible that mac80211 asks
	 * us to stop a sched_scan when it's already stopped.  This
	 * can happen, for instance, if we stopped the scan ourselves,
	 * called ieee80211_sched_scan_stopped() and the userspace called
	 * stop sched scan before ieee80211_sched_scan_stopped_work()
	 * could run.  To handle this, simply return if the scan is
	 * not running.
	 */
3363 	if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) {
3364 		mutex_unlock(&mvm->mutex);
3365 		return 0;
3366 	}
3367 
3368 	ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, false);
3369 	mutex_unlock(&mvm->mutex);
3370 	iwl_mvm_wait_for_async_handlers(mvm);
3371 
3372 	return ret;
3373 }
3374 
3375 static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
3376 			       enum set_key_cmd cmd,
3377 			       struct ieee80211_vif *vif,
3378 			       struct ieee80211_sta *sta,
3379 			       struct ieee80211_key_conf *key)
3380 {
3381 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3382 	struct iwl_mvm_sta *mvmsta;
3383 	struct iwl_mvm_key_pn *ptk_pn;
3384 	int keyidx = key->keyidx;
3385 	int ret;
3386 	u8 key_offset;
3387 
3388 	if (iwlwifi_mod_params.swcrypto) {
3389 		IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n");
3390 		return -EOPNOTSUPP;
3391 	}
3392 
3393 	switch (key->cipher) {
3394 	case WLAN_CIPHER_SUITE_TKIP:
3395 		if (!mvm->trans->cfg->gen2) {
3396 			key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
3397 			key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3398 		} else if (vif->type == NL80211_IFTYPE_STATION) {
3399 			key->flags |= IEEE80211_KEY_FLAG_PUT_MIC_SPACE;
3400 		} else {
3401 			IWL_DEBUG_MAC80211(mvm, "Use SW encryption for TKIP\n");
3402 			return -EOPNOTSUPP;
3403 		}
3404 		break;
3405 	case WLAN_CIPHER_SUITE_CCMP:
3406 	case WLAN_CIPHER_SUITE_GCMP:
3407 	case WLAN_CIPHER_SUITE_GCMP_256:
3408 		if (!iwl_mvm_has_new_tx_api(mvm))
3409 			key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3410 		break;
3411 	case WLAN_CIPHER_SUITE_AES_CMAC:
3412 	case WLAN_CIPHER_SUITE_BIP_GMAC_128:
3413 	case WLAN_CIPHER_SUITE_BIP_GMAC_256:
3414 		WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE));
3415 		break;
3416 	case WLAN_CIPHER_SUITE_WEP40:
3417 	case WLAN_CIPHER_SUITE_WEP104:
3418 		if (vif->type == NL80211_IFTYPE_STATION)
3419 			break;
3420 		if (iwl_mvm_has_new_tx_api(mvm))
3421 			return -EOPNOTSUPP;
3422 		/* support HW crypto on TX */
3423 		return 0;
3424 	default:
3425 		/* currently FW supports only one optional cipher scheme */
3426 		if (hw->n_cipher_schemes &&
3427 		    hw->cipher_schemes->cipher == key->cipher)
3428 			key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3429 		else
3430 			return -EOPNOTSUPP;
3431 	}
3432 
3433 	mutex_lock(&mvm->mutex);
3434 
3435 	switch (cmd) {
3436 	case SET_KEY:
3437 		if ((vif->type == NL80211_IFTYPE_ADHOC ||
3438 		     vif->type == NL80211_IFTYPE_AP) && !sta) {
3439 			/*
3440 			 * GTK on AP interface is a TX-only key, return 0;
3441 			 * on IBSS they're per-station and because we're lazy
3442 			 * we don't support them for RX, so do the same.
3443 			 * CMAC/GMAC in AP/IBSS modes must be done in software.
3444 			 */
3445 			if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3446 			    key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3447 			    key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
3448 				ret = -EOPNOTSUPP;
3449 			else
3450 				ret = 0;
3451 
3452 			if (key->cipher != WLAN_CIPHER_SUITE_GCMP &&
3453 			    key->cipher != WLAN_CIPHER_SUITE_GCMP_256 &&
3454 			    !iwl_mvm_has_new_tx_api(mvm)) {
3455 				key->hw_key_idx = STA_KEY_IDX_INVALID;
3456 				break;
3457 			}
3458 		}
3459 
3460 		/* During FW restart, in order to restore the state as it was,
3461 		 * don't try to reprogram keys we previously failed for.
3462 		 */
3463 		if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
3464 		    key->hw_key_idx == STA_KEY_IDX_INVALID) {
3465 			IWL_DEBUG_MAC80211(mvm,
3466 					   "skip invalid idx key programming during restart\n");
3467 			ret = 0;
3468 			break;
3469 		}
3470 
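		/* For devices with the new RX API, RX PN checking for
		 * pairwise CCMP/GCMP keys is done per RX queue in the
		 * driver, so allocate a per-queue PN table and seed it
		 * with mac80211's current RX sequence counters.
		 */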
3471 		if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
3472 		    sta && iwl_mvm_has_new_rx_api(mvm) &&
3473 		    key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
3474 		    (key->cipher == WLAN_CIPHER_SUITE_CCMP ||
3475 		     key->cipher == WLAN_CIPHER_SUITE_GCMP ||
3476 		     key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) {
3477 			struct ieee80211_key_seq seq;
3478 			int tid, q;
3479 
3480 			mvmsta = iwl_mvm_sta_from_mac80211(sta);
3481 			WARN_ON(rcu_access_pointer(mvmsta->ptk_pn[keyidx]));
3482 			ptk_pn = kzalloc(struct_size(ptk_pn, q,
3483 						     mvm->trans->num_rx_queues),
3484 					 GFP_KERNEL);
3485 			if (!ptk_pn) {
3486 				ret = -ENOMEM;
3487 				break;
3488 			}
3489 
3490 			for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
3491 				ieee80211_get_key_rx_seq(key, tid, &seq);
3492 				for (q = 0; q < mvm->trans->num_rx_queues; q++)
3493 					memcpy(ptk_pn->q[q].pn[tid],
3494 					       seq.ccmp.pn,
3495 					       IEEE80211_CCMP_PN_LEN);
3496 			}
3497 
3498 			rcu_assign_pointer(mvmsta->ptk_pn[keyidx], ptk_pn);
3499 		}
3500 
3501 		/* in HW restart reuse the index, otherwise request a new one */
3502 		if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
3503 			key_offset = key->hw_key_idx;
3504 		else
3505 			key_offset = STA_KEY_IDX_INVALID;
3506 
3507 		IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n");
3508 		ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset);
3509 		if (ret) {
3510 			IWL_WARN(mvm, "set key failed\n");
3511 			key->hw_key_idx = STA_KEY_IDX_INVALID;
3512 			/*
3513 			 * can't add key for RX, but we don't need it
3514 			 * in the device for TX so still return 0,
3515 			 * unless we have new TX API where we cannot
3516 			 * put key material into the TX_CMD
3517 			 */
3518 			if (iwl_mvm_has_new_tx_api(mvm))
3519 				ret = -EOPNOTSUPP;
3520 			else
3521 				ret = 0;
3522 		}
3523 
3524 		break;
3525 	case DISABLE_KEY:
3526 		if (key->hw_key_idx == STA_KEY_IDX_INVALID) {
3527 			ret = 0;
3528 			break;
3529 		}
3530 
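		/* free the per-queue PN table that was allocated on SET_KEY */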
3531 		if (sta && iwl_mvm_has_new_rx_api(mvm) &&
3532 		    key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
3533 		    (key->cipher == WLAN_CIPHER_SUITE_CCMP ||
3534 		     key->cipher == WLAN_CIPHER_SUITE_GCMP ||
3535 		     key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) {
3536 			mvmsta = iwl_mvm_sta_from_mac80211(sta);
3537 			ptk_pn = rcu_dereference_protected(
3538 						mvmsta->ptk_pn[keyidx],
3539 						lockdep_is_held(&mvm->mutex));
3540 			RCU_INIT_POINTER(mvmsta->ptk_pn[keyidx], NULL);
3541 			if (ptk_pn)
3542 				kfree_rcu(ptk_pn, rcu_head);
3543 		}
3544 
3545 		IWL_DEBUG_MAC80211(mvm, "disable hwcrypto key\n");
3546 		ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key);
3547 		break;
3548 	default:
3549 		ret = -EINVAL;
3550 	}
3551 
3552 	mutex_unlock(&mvm->mutex);
3553 	return ret;
3554 }
3555 
3556 static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw,
3557 					struct ieee80211_vif *vif,
3558 					struct ieee80211_key_conf *keyconf,
3559 					struct ieee80211_sta *sta,
3560 					u32 iv32, u16 *phase1key)
3561 {
3562 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3563 
3564 	if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID)
3565 		return;
3566 
3567 	iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key);
3568 }
3569 
static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait,
3572 			       struct iwl_rx_packet *pkt, void *data)
3573 {
3574 	struct iwl_mvm *mvm =
3575 		container_of(notif_wait, struct iwl_mvm, notif_wait);
3576 	struct iwl_hs20_roc_res *resp;
3577 	int resp_len = iwl_rx_packet_payload_len(pkt);
3578 	struct iwl_mvm_time_event_data *te_data = data;
3579 
3580 	if (WARN_ON(pkt->hdr.cmd != HOT_SPOT_CMD))
3581 		return true;
3582 
3583 	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
3584 		IWL_ERR(mvm, "Invalid HOT_SPOT_CMD response\n");
3585 		return true;
3586 	}
3587 
3588 	resp = (void *)pkt->data;
3589 
3590 	IWL_DEBUG_TE(mvm,
3591 		     "Aux ROC: Received response from ucode: status=%d uid=%d\n",
3592 		     resp->status, resp->event_unique_id);
3593 
3594 	te_data->uid = le32_to_cpu(resp->event_unique_id);
3595 	IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
3596 		     te_data->uid);
3597 
3598 	spin_lock_bh(&mvm->time_event_lock);
3599 	list_add_tail(&te_data->list, &mvm->aux_roc_te_list);
3600 	spin_unlock_bh(&mvm->time_event_lock);
3601 
3602 	return true;
3603 }
3604 
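/* Aux ROC time-event timing parameters, in TUs (converted from msec) */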
3605 #define AUX_ROC_MIN_DURATION MSEC_TO_TU(100)
3606 #define AUX_ROC_MIN_DELAY MSEC_TO_TU(200)
3607 #define AUX_ROC_MAX_DELAY MSEC_TO_TU(600)
3608 #define AUX_ROC_SAFETY_BUFFER MSEC_TO_TU(20)
3609 #define AUX_ROC_MIN_SAFETY_BUFFER MSEC_TO_TU(10)
3610 static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
3611 				    struct ieee80211_channel *channel,
3612 				    struct ieee80211_vif *vif,
3613 				    int duration)
3614 {
3615 	int res, time_reg = DEVICE_SYSTEM_TIME_REG;
3616 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3617 	struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data;
3618 	static const u16 time_event_response[] = { HOT_SPOT_CMD };
3619 	struct iwl_notification_wait wait_time_event;
3620 	u32 dtim_interval = vif->bss_conf.dtim_period *
3621 		vif->bss_conf.beacon_int;
3622 	u32 req_dur, delay;
3623 	struct iwl_hs20_roc_req aux_roc_req = {
3624 		.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
3625 		.id_and_color =
3626 			cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)),
3627 		.sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id),
3628 	};
3629 	struct iwl_hs20_roc_req_tail *tail = iwl_mvm_chan_info_cmd_tail(mvm,
3630 		&aux_roc_req.channel_info);
3631 	u16 len = sizeof(aux_roc_req) - iwl_mvm_chan_info_padding(mvm);
3632 
3633 	/* Set the channel info data */
3634 	iwl_mvm_set_chan_info(mvm, &aux_roc_req.channel_info, channel->hw_value,
3635 			      (channel->band == NL80211_BAND_2GHZ) ?
3636 			       PHY_BAND_24 : PHY_BAND_5,
3637 			      PHY_VHT_CHANNEL_MODE20,
3638 			      0);
3639 
3640 	/* Set the time and duration */
3641 	tail->apply_time = cpu_to_le32(iwl_read_prph(mvm->trans, time_reg));
3642 
3643 	delay = AUX_ROC_MIN_DELAY;
3644 	req_dur = MSEC_TO_TU(duration);
3645 
3646 	/*
3647 	 * If we are associated we want the delay time to be at least one
3648 	 * dtim interval so that the FW can wait until after the DTIM and
3649 	 * then start the time event, this will potentially allow us to
3650 	 * remain off-channel for the max duration.
3651 	 * Since we want to use almost a whole dtim interval we would also
3652 	 * like the delay to be for 2-3 dtim intervals, in case there are
3653 	 * other time events with higher priority.
3654 	 */
3655 	if (vif->bss_conf.assoc) {
3656 		delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY);
3657 		/* We cannot remain off-channel longer than the DTIM interval */
3658 		if (dtim_interval <= req_dur) {
3659 			req_dur = dtim_interval - AUX_ROC_SAFETY_BUFFER;
3660 			if (req_dur <= AUX_ROC_MIN_DURATION)
3661 				req_dur = dtim_interval -
3662 					AUX_ROC_MIN_SAFETY_BUFFER;
3663 		}
3664 	}
3665 
3666 	tail->duration = cpu_to_le32(req_dur);
3667 	tail->apply_time_max_delay = cpu_to_le32(delay);
3668 
3669 	IWL_DEBUG_TE(mvm,
3670 		     "ROC: Requesting to remain on channel %u for %ums (requested = %ums, max_delay = %ums, dtim_interval = %ums)\n",
3671 		     channel->hw_value, req_dur, duration, delay,
3672 		     dtim_interval);
3673 	/* Set the node address */
3674 	memcpy(tail->node_addr, vif->addr, ETH_ALEN);
3675 
3676 	lockdep_assert_held(&mvm->mutex);
3677 
3678 	spin_lock_bh(&mvm->time_event_lock);
3679 
3680 	if (WARN_ON(te_data->id == HOT_SPOT_CMD)) {
3681 		spin_unlock_bh(&mvm->time_event_lock);
3682 		return -EIO;
3683 	}
3684 
3685 	te_data->vif = vif;
3686 	te_data->duration = duration;
3687 	te_data->id = HOT_SPOT_CMD;
3688 
3689 	spin_unlock_bh(&mvm->time_event_lock);
3690 
3691 	/*
3692 	 * Use a notification wait, which really just processes the
3693 	 * command response and doesn't wait for anything, in order
3694 	 * to be able to process the response and get the UID inside
3695 	 * the RX path. Using CMD_WANT_SKB doesn't work because it
3696 	 * stores the buffer and then wakes up this thread, by which
3697 	 * time another notification (that the time event started)
3698 	 * might already be processed unsuccessfully.
3699 	 */
3700 	iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
3701 				   time_event_response,
3702 				   ARRAY_SIZE(time_event_response),
3703 				   iwl_mvm_rx_aux_roc, te_data);
3704 
3705 	res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, len,
3706 				   &aux_roc_req);
3707 
3708 	if (res) {
3709 		IWL_ERR(mvm, "Couldn't send HOT_SPOT_CMD: %d\n", res);
3710 		iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
3711 		goto out_clear_te;
3712 	}
3713 
3714 	/* No need to wait for anything, so just pass 1 (0 isn't valid) */
3715 	res = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
3716 	/* should never fail */
3717 	WARN_ON_ONCE(res);
3718 
3719 	if (res) {
3720  out_clear_te:
3721 		spin_lock_bh(&mvm->time_event_lock);
3722 		iwl_mvm_te_clear_data(mvm, te_data);
3723 		spin_unlock_bh(&mvm->time_event_lock);
3724 	}
3725 
3726 	return res;
3727 }
3728 
3729 static int iwl_mvm_roc(struct ieee80211_hw *hw,
3730 		       struct ieee80211_vif *vif,
3731 		       struct ieee80211_channel *channel,
3732 		       int duration,
3733 		       enum ieee80211_roc_type type)
3734 {
3735 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3736 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3737 	struct cfg80211_chan_def chandef;
3738 	struct iwl_mvm_phy_ctxt *phy_ctxt;
3739 	int ret, i;
3740 
3741 	IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
3742 			   duration, type);
3743 
3744 	/*
3745 	 * Flush the done work, just in case it's still pending, so that
3746 	 * the work it does can complete and we can accept new frames.
3747 	 */
3748 	flush_work(&mvm->roc_done_wk);
3749 
3750 	mutex_lock(&mvm->mutex);
3751 
3752 	switch (vif->type) {
3753 	case NL80211_IFTYPE_STATION:
3754 		if (fw_has_capa(&mvm->fw->ucode_capa,
3755 				IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) {
3756 			/* Use aux roc framework (HS20) */
3757 			ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
3758 						       vif, duration);
3759 			goto out_unlock;
3760 		}
3761 		IWL_ERR(mvm, "hotspot not supported\n");
3762 		ret = -EINVAL;
3763 		goto out_unlock;
3764 	case NL80211_IFTYPE_P2P_DEVICE:
3765 		/* handle below */
3766 		break;
3767 	default:
3768 		IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type);
3769 		ret = -EINVAL;
3770 		goto out_unlock;
3771 	}
3772 
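	/* Look for an existing PHY context that already uses the requested
	 * channel; if one is found, re-bind the P2P_DEVICE to it instead of
	 * reconfiguring its own PHY context.
	 */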
3773 	for (i = 0; i < NUM_PHY_CTX; i++) {
3774 		phy_ctxt = &mvm->phy_ctxts[i];
3775 		if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt)
3776 			continue;
3777 
3778 		if (phy_ctxt->ref && channel == phy_ctxt->channel) {
3779 			/*
3780 			 * Unbind the P2P_DEVICE from the current PHY context,
3781 			 * and if the PHY context is not used remove it.
3782 			 */
3783 			ret = iwl_mvm_binding_remove_vif(mvm, vif);
3784 			if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
3785 				goto out_unlock;
3786 
3787 			iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
3788 
3789 			/* Bind the P2P_DEVICE to the current PHY Context */
3790 			mvmvif->phy_ctxt = phy_ctxt;
3791 
3792 			ret = iwl_mvm_binding_add_vif(mvm, vif);
3793 			if (WARN(ret, "Failed binding P2P_DEVICE\n"))
3794 				goto out_unlock;
3795 
3796 			iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
3797 			goto schedule_time_event;
3798 		}
3799 	}
3800 
3801 	/* Need to update the PHY context only if the ROC channel changed */
3802 	if (channel == mvmvif->phy_ctxt->channel)
3803 		goto schedule_time_event;
3804 
3805 	cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
3806 
3807 	/*
3808 	 * Change the PHY context configuration as it is currently referenced
3809 	 * only by the P2P Device MAC
3810 	 */
3811 	if (mvmvif->phy_ctxt->ref == 1) {
3812 		ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt,
3813 					       &chandef, 1, 1);
3814 		if (ret)
3815 			goto out_unlock;
3816 	} else {
3817 		/*
3818 		 * The PHY context is shared with other MACs. Need to remove the
		 * P2P Device from the binding, allocate a new PHY context and
		 * create a new binding.
3821 		 */
3822 		phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
3823 		if (!phy_ctxt) {
3824 			ret = -ENOSPC;
3825 			goto out_unlock;
3826 		}
3827 
3828 		ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef,
3829 					       1, 1);
3830 		if (ret) {
3831 			IWL_ERR(mvm, "Failed to change PHY context\n");
3832 			goto out_unlock;
3833 		}
3834 
3835 		/* Unbind the P2P_DEVICE from the current PHY context */
3836 		ret = iwl_mvm_binding_remove_vif(mvm, vif);
3837 		if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
3838 			goto out_unlock;
3839 
3840 		iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
3841 
3842 		/* Bind the P2P_DEVICE to the new allocated PHY context */
3843 		mvmvif->phy_ctxt = phy_ctxt;
3844 
3845 		ret = iwl_mvm_binding_add_vif(mvm, vif);
3846 		if (WARN(ret, "Failed binding P2P_DEVICE\n"))
3847 			goto out_unlock;
3848 
3849 		iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
3850 	}
3851 
3852 schedule_time_event:
3853 	/* Schedule the time events */
3854 	ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type);
3855 
3856 out_unlock:
3857 	mutex_unlock(&mvm->mutex);
3858 	IWL_DEBUG_MAC80211(mvm, "leave\n");
3859 	return ret;
3860 }
3861 
3862 static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw)
3863 {
3864 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3865 
3866 	IWL_DEBUG_MAC80211(mvm, "enter\n");
3867 
3868 	mutex_lock(&mvm->mutex);
3869 	iwl_mvm_stop_roc(mvm);
3870 	mutex_unlock(&mvm->mutex);
3871 
3872 	IWL_DEBUG_MAC80211(mvm, "leave\n");
3873 	return 0;
3874 }
3875 
3876 struct iwl_mvm_ftm_responder_iter_data {
3877 	bool responder;
3878 	struct ieee80211_chanctx_conf *ctx;
3879 };
3880 
3881 static void iwl_mvm_ftm_responder_chanctx_iter(void *_data, u8 *mac,
3882 					       struct ieee80211_vif *vif)
3883 {
3884 	struct iwl_mvm_ftm_responder_iter_data *data = _data;
3885 
3886 	if (rcu_access_pointer(vif->chanctx_conf) == data->ctx &&
3887 	    vif->type == NL80211_IFTYPE_AP && vif->bss_conf.ftmr_params)
3888 		data->responder = true;
3889 }
3890 
3891 static bool iwl_mvm_is_ftm_responder_chanctx(struct iwl_mvm *mvm,
3892 					     struct ieee80211_chanctx_conf *ctx)
3893 {
3894 	struct iwl_mvm_ftm_responder_iter_data data = {
3895 		.responder = false,
3896 		.ctx = ctx,
3897 	};
3898 
3899 	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
3900 					IEEE80211_IFACE_ITER_NORMAL,
3901 					iwl_mvm_ftm_responder_chanctx_iter,
3902 					&data);
3903 	return data.responder;
3904 }
3905 
3906 static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm,
3907 				 struct ieee80211_chanctx_conf *ctx)
3908 {
3909 	u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3910 	struct iwl_mvm_phy_ctxt *phy_ctxt;
3911 	bool responder = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx);
3912 	struct cfg80211_chan_def *def = responder ? &ctx->def : &ctx->min_def;
3913 	int ret;
3914 
3915 	lockdep_assert_held(&mvm->mutex);
3916 
3917 	IWL_DEBUG_MAC80211(mvm, "Add channel context\n");
3918 
3919 	phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
3920 	if (!phy_ctxt) {
3921 		ret = -ENOSPC;
3922 		goto out;
3923 	}
3924 
3925 	ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, def,
3926 				       ctx->rx_chains_static,
3927 				       ctx->rx_chains_dynamic);
3928 	if (ret) {
3929 		IWL_ERR(mvm, "Failed to add PHY context\n");
3930 		goto out;
3931 	}
3932 
3933 	iwl_mvm_phy_ctxt_ref(mvm, phy_ctxt);
3934 	*phy_ctxt_id = phy_ctxt->id;
3935 out:
3936 	return ret;
3937 }
3938 
3939 static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw,
3940 			       struct ieee80211_chanctx_conf *ctx)
3941 {
3942 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3943 	int ret;
3944 
3945 	mutex_lock(&mvm->mutex);
3946 	ret = __iwl_mvm_add_chanctx(mvm, ctx);
3947 	mutex_unlock(&mvm->mutex);
3948 
3949 	return ret;
3950 }
3951 
3952 static void __iwl_mvm_remove_chanctx(struct iwl_mvm *mvm,
3953 				     struct ieee80211_chanctx_conf *ctx)
3954 {
3955 	u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3956 	struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
3957 
3958 	lockdep_assert_held(&mvm->mutex);
3959 
3960 	iwl_mvm_phy_ctxt_unref(mvm, phy_ctxt);
3961 }
3962 
3963 static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw,
3964 				   struct ieee80211_chanctx_conf *ctx)
3965 {
3966 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3967 
3968 	mutex_lock(&mvm->mutex);
3969 	__iwl_mvm_remove_chanctx(mvm, ctx);
3970 	mutex_unlock(&mvm->mutex);
3971 }
3972 
3973 static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
3974 				   struct ieee80211_chanctx_conf *ctx,
3975 				   u32 changed)
3976 {
3977 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3978 	u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3979 	struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
3980 	bool responder = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx);
3981 	struct cfg80211_chan_def *def = responder ? &ctx->def : &ctx->min_def;
3982 
3983 	if (WARN_ONCE((phy_ctxt->ref > 1) &&
3984 		      (changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH |
3985 				   IEEE80211_CHANCTX_CHANGE_RX_CHAINS |
3986 				   IEEE80211_CHANCTX_CHANGE_RADAR |
3987 				   IEEE80211_CHANCTX_CHANGE_MIN_WIDTH)),
3988 		      "Cannot change PHY. Ref=%d, changed=0x%X\n",
3989 		      phy_ctxt->ref, changed))
3990 		return;
3991 
3992 	mutex_lock(&mvm->mutex);
3993 
	/* we are only changing the min_width, which may be a no-op */
3995 	if (changed == IEEE80211_CHANCTX_CHANGE_MIN_WIDTH) {
3996 		if (phy_ctxt->width == def->width)
3997 			goto out_unlock;
3998 
3999 		/* we are just toggling between 20_NOHT and 20 */
4000 		if (phy_ctxt->width <= NL80211_CHAN_WIDTH_20 &&
4001 		    def->width <= NL80211_CHAN_WIDTH_20)
4002 			goto out_unlock;
4003 	}
4004 
4005 	iwl_mvm_bt_coex_vif_change(mvm);
4006 	iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, def,
4007 				 ctx->rx_chains_static,
4008 				 ctx->rx_chains_dynamic);
4009 
4010 out_unlock:
4011 	mutex_unlock(&mvm->mutex);
4012 }
4013 
4014 static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
4015 					struct ieee80211_vif *vif,
4016 					struct ieee80211_chanctx_conf *ctx,
4017 					bool switching_chanctx)
4018 {
4019 	u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
4020 	struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
4021 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
4022 	int ret;
4023 
4024 	lockdep_assert_held(&mvm->mutex);
4025 
4026 	mvmvif->phy_ctxt = phy_ctxt;
4027 
4028 	switch (vif->type) {
4029 	case NL80211_IFTYPE_AP:
4030 		/* only needed if we're switching chanctx (i.e. during CSA) */
4031 		if (switching_chanctx) {
4032 			mvmvif->ap_ibss_active = true;
4033 			break;
4034 		}
4035 		/* fall through */
4036 	case NL80211_IFTYPE_ADHOC:
4037 		/*
4038 		 * The AP binding flow is handled as part of the start_ap flow
4039 		 * (in bss_info_changed), similarly for IBSS.
4040 		 */
4041 		ret = 0;
4042 		goto out;
4043 	case NL80211_IFTYPE_STATION:
4044 		mvmvif->csa_bcn_pending = false;
4045 		break;
4046 	case NL80211_IFTYPE_MONITOR:
4047 		/* always disable PS when a monitor interface is active */
4048 		mvmvif->ps_disabled = true;
4049 		break;
4050 	default:
4051 		ret = -EINVAL;
4052 		goto out;
4053 	}
4054 
4055 	ret = iwl_mvm_binding_add_vif(mvm, vif);
4056 	if (ret)
4057 		goto out;
4058 
4059 	/*
4060 	 * Power state must be updated before quotas,
4061 	 * otherwise fw will complain.
4062 	 */
4063 	iwl_mvm_power_update_mac(mvm);
4064 
4065 	/* Setting the quota at this stage is only required for monitor
4066 	 * interfaces. For the other types, the bss_info changed flow
4067 	 * will handle quota settings.
4068 	 */
4069 	if (vif->type == NL80211_IFTYPE_MONITOR) {
4070 		mvmvif->monitor_active = true;
4071 		ret = iwl_mvm_update_quotas(mvm, false, NULL);
4072 		if (ret)
4073 			goto out_remove_binding;
4074 
4075 		ret = iwl_mvm_add_snif_sta(mvm, vif);
4076 		if (ret)
4077 			goto out_remove_binding;
	}
4080 
4081 	/* Handle binding during CSA */
4082 	if (vif->type == NL80211_IFTYPE_AP) {
4083 		iwl_mvm_update_quotas(mvm, false, NULL);
4084 		iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
4085 	}
4086 
4087 	if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) {
4088 		mvmvif->csa_bcn_pending = true;
4089 
4090 		if (!fw_has_capa(&mvm->fw->ucode_capa,
4091 				 IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) {
4092 			u32 duration = 3 * vif->bss_conf.beacon_int;
4093 
			/* iwl_mvm_protect_session() reads directly from the
4096 			 * device (the system time), so make sure it is
4097 			 * available.
4098 			 */
4099 			ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_CSA);
4100 			if (ret)
4101 				goto out_remove_binding;
4102 
4103 			/* Protect the session to make sure we hear the first
4104 			 * beacon on the new channel.
4105 			 */
4106 			iwl_mvm_protect_session(mvm, vif, duration, duration,
4107 						vif->bss_conf.beacon_int / 2,
4108 						true);
4109 
4110 			iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_CSA);
4111 		}
4112 
4113 		iwl_mvm_update_quotas(mvm, false, NULL);
4114 	}
4115 
4116 	goto out;
4117 
4118 out_remove_binding:
4119 	iwl_mvm_binding_remove_vif(mvm, vif);
4120 	iwl_mvm_power_update_mac(mvm);
4121 out:
4122 	if (ret)
4123 		mvmvif->phy_ctxt = NULL;
4124 	return ret;
}

static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
4127 				      struct ieee80211_vif *vif,
4128 				      struct ieee80211_chanctx_conf *ctx)
4129 {
4130 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4131 	int ret;
4132 
4133 	mutex_lock(&mvm->mutex);
4134 	ret = __iwl_mvm_assign_vif_chanctx(mvm, vif, ctx, false);
4135 	mutex_unlock(&mvm->mutex);
4136 
4137 	return ret;
4138 }
4139 
4140 static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
4141 					   struct ieee80211_vif *vif,
4142 					   struct ieee80211_chanctx_conf *ctx,
4143 					   bool switching_chanctx)
4144 {
4145 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
4146 	struct ieee80211_vif *disabled_vif = NULL;
4147 
4148 	lockdep_assert_held(&mvm->mutex);
4149 
4150 	iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data);
4151 
4152 	switch (vif->type) {
4153 	case NL80211_IFTYPE_ADHOC:
4154 		goto out;
4155 	case NL80211_IFTYPE_MONITOR:
4156 		mvmvif->monitor_active = false;
4157 		mvmvif->ps_disabled = false;
4158 		iwl_mvm_rm_snif_sta(mvm, vif);
4159 		break;
4160 	case NL80211_IFTYPE_AP:
4161 		/* This part is triggered only during CSA */
4162 		if (!switching_chanctx || !mvmvif->ap_ibss_active)
4163 			goto out;
4164 
4165 		mvmvif->csa_countdown = false;
4166 
4167 		/* Set CS bit on all the stations */
4168 		iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true);
4169 
4170 		/* Save blocked iface, the timeout is set on the next beacon */
4171 		rcu_assign_pointer(mvm->csa_tx_blocked_vif, vif);
4172 
4173 		mvmvif->ap_ibss_active = false;
4174 		break;
4175 	case NL80211_IFTYPE_STATION:
4176 		if (!switching_chanctx)
4177 			break;
4178 
4179 		disabled_vif = vif;
4180 
4181 		if (!fw_has_capa(&mvm->fw->ucode_capa,
4182 				 IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD))
4183 			iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL);
4184 		break;
4185 	default:
4186 		break;
4187 	}
4188 
4189 	iwl_mvm_update_quotas(mvm, false, disabled_vif);
4190 	iwl_mvm_binding_remove_vif(mvm, vif);
4191 
4192 out:
4193 	mvmvif->phy_ctxt = NULL;
4194 	iwl_mvm_power_update_mac(mvm);
4195 }
4196 
4197 static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
4198 					 struct ieee80211_vif *vif,
4199 					 struct ieee80211_chanctx_conf *ctx)
4200 {
4201 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4202 
4203 	mutex_lock(&mvm->mutex);
4204 	__iwl_mvm_unassign_vif_chanctx(mvm, vif, ctx, false);
4205 	mutex_unlock(&mvm->mutex);
4206 }
4207 
4208 static int
4209 iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm,
4210 				struct ieee80211_vif_chanctx_switch *vifs)
4211 {
4212 	int ret;
4213 
4214 	mutex_lock(&mvm->mutex);
4215 	__iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);
4216 	__iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx);
4217 
4218 	ret = __iwl_mvm_add_chanctx(mvm, vifs[0].new_ctx);
4219 	if (ret) {
4220 		IWL_ERR(mvm, "failed to add new_ctx during channel switch\n");
4221 		goto out_reassign;
4222 	}
4223 
4224 	ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx,
4225 					   true);
4226 	if (ret) {
4227 		IWL_ERR(mvm,
4228 			"failed to assign new_ctx during channel switch\n");
4229 		goto out_remove;
4230 	}
4231 
4232 	/* we don't support TDLS during DCM - can be caused by channel switch */
4233 	if (iwl_mvm_phy_ctx_count(mvm) > 1)
4234 		iwl_mvm_teardown_tdls_peers(mvm);
4235 
4236 	goto out;
4237 
4238 out_remove:
4239 	__iwl_mvm_remove_chanctx(mvm, vifs[0].new_ctx);
4240 
4241 out_reassign:
4242 	if (__iwl_mvm_add_chanctx(mvm, vifs[0].old_ctx)) {
4243 		IWL_ERR(mvm, "failed to add old_ctx back after failure.\n");
4244 		goto out_restart;
4245 	}
4246 
4247 	if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
4248 					 true)) {
4249 		IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
4250 		goto out_restart;
4251 	}
4252 
4253 	goto out;
4254 
4255 out_restart:
4256 	/* things keep failing, better restart the hw */
4257 	iwl_mvm_nic_restart(mvm, false);
4258 
4259 out:
4260 	mutex_unlock(&mvm->mutex);
4261 
4262 	return ret;
4263 }
4264 
4265 static int
4266 iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm,
4267 				    struct ieee80211_vif_chanctx_switch *vifs)
4268 {
4269 	int ret;
4270 
4271 	mutex_lock(&mvm->mutex);
4272 	__iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);
4273 
4274 	ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx,
4275 					   true);
4276 	if (ret) {
4277 		IWL_ERR(mvm,
4278 			"failed to assign new_ctx during channel switch\n");
4279 		goto out_reassign;
4280 	}
4281 
4282 	goto out;
4283 
4284 out_reassign:
4285 	if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
4286 					 true)) {
4287 		IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
4288 		goto out_restart;
4289 	}
4290 
4291 	goto out;
4292 
4293 out_restart:
4294 	/* things keep failing, better restart the hw */
4295 	iwl_mvm_nic_restart(mvm, false);
4296 
4297 out:
4298 	mutex_unlock(&mvm->mutex);
4299 
4300 	return ret;
4301 }
4302 
4303 static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw,
4304 				      struct ieee80211_vif_chanctx_switch *vifs,
4305 				      int n_vifs,
4306 				      enum ieee80211_chanctx_switch_mode mode)
4307 {
4308 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4309 	int ret;
4310 
	/* we only support a single vif right now */
4312 	if (n_vifs > 1)
4313 		return -EOPNOTSUPP;
4314 
4315 	switch (mode) {
4316 	case CHANCTX_SWMODE_SWAP_CONTEXTS:
4317 		ret = iwl_mvm_switch_vif_chanctx_swap(mvm, vifs);
4318 		break;
4319 	case CHANCTX_SWMODE_REASSIGN_VIF:
4320 		ret = iwl_mvm_switch_vif_chanctx_reassign(mvm, vifs);
4321 		break;
4322 	default:
4323 		ret = -EOPNOTSUPP;
4324 		break;
4325 	}
4326 
4327 	return ret;
4328 }
4329 
4330 static int iwl_mvm_tx_last_beacon(struct ieee80211_hw *hw)
4331 {
4332 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4333 
4334 	return mvm->ibss_manager;
4335 }
4336 
4337 static int iwl_mvm_set_tim(struct ieee80211_hw *hw,
4338 			   struct ieee80211_sta *sta,
4339 			   bool set)
4340 {
4341 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4342 	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
4343 
4344 	if (!mvm_sta || !mvm_sta->vif) {
4345 		IWL_ERR(mvm, "Station is not associated to a vif\n");
4346 		return -EINVAL;
4347 	}
4348 
4349 	return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif);
4350 }
4351 
4352 #ifdef CONFIG_NL80211_TESTMODE
4353 static const struct nla_policy iwl_mvm_tm_policy[IWL_MVM_TM_ATTR_MAX + 1] = {
4354 	[IWL_MVM_TM_ATTR_CMD] = { .type = NLA_U32 },
4355 	[IWL_MVM_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 },
4356 	[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 },
4357 };
4358 
4359 static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
4360 				      struct ieee80211_vif *vif,
4361 				      void *data, int len)
4362 {
4363 	struct nlattr *tb[IWL_MVM_TM_ATTR_MAX + 1];
4364 	int err;
4365 	u32 noa_duration;
4366 
4367 	err = nla_parse(tb, IWL_MVM_TM_ATTR_MAX, data, len, iwl_mvm_tm_policy,
4368 			NULL);
4369 	if (err)
4370 		return err;
4371 
4372 	if (!tb[IWL_MVM_TM_ATTR_CMD])
4373 		return -EINVAL;
4374 
4375 	switch (nla_get_u32(tb[IWL_MVM_TM_ATTR_CMD])) {
4376 	case IWL_MVM_TM_CMD_SET_NOA:
4377 		if (!vif || vif->type != NL80211_IFTYPE_AP || !vif->p2p ||
4378 		    !vif->bss_conf.enable_beacon ||
4379 		    !tb[IWL_MVM_TM_ATTR_NOA_DURATION])
4380 			return -EINVAL;
4381 
4382 		noa_duration = nla_get_u32(tb[IWL_MVM_TM_ATTR_NOA_DURATION]);
4383 		if (noa_duration >= vif->bss_conf.beacon_int)
4384 			return -EINVAL;
4385 
4386 		mvm->noa_duration = noa_duration;
4387 		mvm->noa_vif = vif;
4388 
4389 		return iwl_mvm_update_quotas(mvm, true, NULL);
4390 	case IWL_MVM_TM_CMD_SET_BEACON_FILTER:
4391 		/* must be associated client vif - ignore authorized */
4392 		if (!vif || vif->type != NL80211_IFTYPE_STATION ||
4393 		    !vif->bss_conf.assoc || !vif->bss_conf.dtim_period ||
4394 		    !tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])
4395 			return -EINVAL;
4396 
4397 		if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
4398 			return iwl_mvm_enable_beacon_filter(mvm, vif, 0);
4399 		return iwl_mvm_disable_beacon_filter(mvm, vif, 0);
4400 	}
4401 
4402 	return -EOPNOTSUPP;
4403 }
4404 
4405 static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw,
4406 				    struct ieee80211_vif *vif,
4407 				    void *data, int len)
4408 {
4409 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4410 	int err;
4411 
4412 	mutex_lock(&mvm->mutex);
4413 	err = __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len);
4414 	mutex_unlock(&mvm->mutex);
4415 
4416 	return err;
4417 }
4418 #endif
4419 
4420 static void iwl_mvm_channel_switch(struct ieee80211_hw *hw,
4421 				   struct ieee80211_vif *vif,
4422 				   struct ieee80211_channel_switch *chsw)
4423 {
4424 	/* By implementing this operation, we prevent mac80211 from
4425 	 * starting its own channel switch timer, so that we can call
4426 	 * ieee80211_chswitch_done() ourselves at the right time
4427 	 * (which is when the absence time event starts).
4428 	 */
4429 
4430 	IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw),
4431 			   "dummy channel switch op\n");
4432 }
4433 
4434 static int iwl_mvm_schedule_client_csa(struct iwl_mvm *mvm,
4435 				       struct ieee80211_vif *vif,
4436 				       struct ieee80211_channel_switch *chsw)
4437 {
4438 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
4439 	struct iwl_chan_switch_te_cmd cmd = {
4440 		.mac_id = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
4441 							  mvmvif->color)),
4442 		.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
4443 		.tsf = cpu_to_le32(chsw->timestamp),
4444 		.cs_count = chsw->count,
4445 	};
4446 
4447 	lockdep_assert_held(&mvm->mutex);
4448 
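	/* ask the firmware to schedule the channel-switch time event based
	 * on the beacon timestamp and CSA count reported by mac80211
	 */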
4449 	return iwl_mvm_send_cmd_pdu(mvm,
4450 				    WIDE_ID(MAC_CONF_GROUP,
4451 					    CHANNEL_SWITCH_TIME_EVENT_CMD),
4452 				    0, sizeof(cmd), &cmd);
4453 }
4454 
4455 static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
4456 				      struct ieee80211_vif *vif,
4457 				      struct ieee80211_channel_switch *chsw)
4458 {
4459 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4460 	struct ieee80211_vif *csa_vif;
4461 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
4462 	u32 apply_time;
4463 	int ret;
4464 
4465 	mutex_lock(&mvm->mutex);
4466 
4467 	mvmvif->csa_failed = false;
4468 
4469 	IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n",
4470 			   chsw->chandef.center_freq1);
4471 
4472 	iwl_fw_dbg_trigger_simple_stop(&mvm->fwrt,
4473 				       ieee80211_vif_to_wdev(vif),
4474 				       FW_DBG_TRIGGER_CHANNEL_SWITCH);
4475 
4476 	switch (vif->type) {
4477 	case NL80211_IFTYPE_AP:
4478 		csa_vif =
4479 			rcu_dereference_protected(mvm->csa_vif,
4480 						  lockdep_is_held(&mvm->mutex));
4481 		if (WARN_ONCE(csa_vif && csa_vif->csa_active,
4482 			      "Another CSA is already in progress")) {
4483 			ret = -EBUSY;
4484 			goto out_unlock;
4485 		}
4486 
		/* we haven't unblocked TX yet; prevent a new CS meanwhile */
4488 		if (rcu_dereference_protected(mvm->csa_tx_blocked_vif,
4489 					      lockdep_is_held(&mvm->mutex))) {
4490 			ret = -EBUSY;
4491 			goto out_unlock;
4492 		}
4493 
4494 		rcu_assign_pointer(mvm->csa_vif, vif);
4495 
4496 		if (WARN_ONCE(mvmvif->csa_countdown,
4497 			      "Previous CSA countdown didn't complete")) {
4498 			ret = -EBUSY;
4499 			goto out_unlock;
4500 		}
4501 
4502 		mvmvif->csa_target_freq = chsw->chandef.chan->center_freq;
4503 
4504 		break;
4505 	case NL80211_IFTYPE_STATION:
		/* Schedule the time event a bit before beacon 1,
		 * to make sure we're on the new channel when the
		 * GO/AP arrives. If count <= 1, schedule the TE
		 * immediately (this might result in some packet loss or
		 * connection loss).
		 */
4512 		if (chsw->count <= 1)
4513 			apply_time = 0;
4514 		else
4515 			apply_time = chsw->device_timestamp +
4516 				((vif->bss_conf.beacon_int * (chsw->count - 1) -
4517 				  IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024);
4518 
4519 		if (chsw->block_tx)
4520 			iwl_mvm_csa_client_absent(mvm, vif);
4521 
4522 		if (mvmvif->bf_data.bf_enabled) {
4523 			ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
4524 			if (ret)
4525 				goto out_unlock;
4526 		}
4527 
4528 		if (fw_has_capa(&mvm->fw->ucode_capa,
4529 				IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD))
4530 			iwl_mvm_schedule_client_csa(mvm, vif, chsw);
4531 		else
4532 			iwl_mvm_schedule_csa_period(mvm, vif,
4533 						    vif->bss_conf.beacon_int,
4534 						    apply_time);
4535 		break;
4536 	default:
4537 		break;
4538 	}
4539 
4540 	mvmvif->ps_disabled = true;
4541 
4542 	ret = iwl_mvm_power_update_ps(mvm);
4543 	if (ret)
4544 		goto out_unlock;
4545 
4546 	/* we won't be on this channel any longer */
4547 	iwl_mvm_teardown_tdls_peers(mvm);
4548 
4549 out_unlock:
4550 	mutex_unlock(&mvm->mutex);
4551 
4552 	return ret;
4553 }
4554 
4555 static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
4556 				       struct ieee80211_vif *vif)
4557 {
4558 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
4559 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4560 	int ret;
4561 
4562 	mutex_lock(&mvm->mutex);
4563 
4564 	if (mvmvif->csa_failed) {
4565 		mvmvif->csa_failed = false;
4566 		ret = -EIO;
4567 		goto out_unlock;
4568 	}
4569 
4570 	if (vif->type == NL80211_IFTYPE_STATION) {
4571 		struct iwl_mvm_sta *mvmsta;
4572 
4573 		mvmvif->csa_bcn_pending = false;
4574 		mvmsta = iwl_mvm_sta_from_staid_protected(mvm,
4575 							  mvmvif->ap_sta_id);
4576 
4577 		if (WARN_ON(!mvmsta)) {
4578 			ret = -EIO;
4579 			goto out_unlock;
4580 		}
4581 
4582 		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);
4583 
4584 		iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
4585 
4586 		ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
4587 		if (ret)
4588 			goto out_unlock;
4589 
4590 		iwl_mvm_stop_session_protection(mvm, vif);
4591 	}
4592 
4593 	mvmvif->ps_disabled = false;
4594 
4595 	ret = iwl_mvm_power_update_ps(mvm);
4596 
4597 out_unlock:
4598 	mutex_unlock(&mvm->mutex);
4599 
4600 	return ret;
4601 }
4602 
4603 static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
4604 {
4605 	int i;
4606 
4607 	if (!iwl_mvm_has_new_tx_api(mvm)) {
4608 		if (drop) {
4609 			mutex_lock(&mvm->mutex);
4610 			iwl_mvm_flush_tx_path(mvm,
4611 				iwl_mvm_flushable_queues(mvm) & queues, 0);
4612 			mutex_unlock(&mvm->mutex);
4613 		} else {
4614 			iwl_trans_wait_tx_queues_empty(mvm->trans, queues);
4615 		}
4616 		return;
4617 	}
4618 
4619 	mutex_lock(&mvm->mutex);
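	/* with the new TX API, queues are allocated per station/TID, so
	 * flush or drain them per station rather than by a queue mask
	 */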
4620 	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
4621 		struct ieee80211_sta *sta;
4622 
4623 		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
4624 						lockdep_is_held(&mvm->mutex));
4625 		if (IS_ERR_OR_NULL(sta))
4626 			continue;
4627 
4628 		if (drop)
4629 			iwl_mvm_flush_sta_tids(mvm, i, 0xFF, 0);
4630 		else
4631 			iwl_mvm_wait_sta_queues_empty(mvm,
4632 					iwl_mvm_sta_from_mac80211(sta));
4633 	}
4634 	mutex_unlock(&mvm->mutex);
4635 }
4636 
4637 static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
4638 			      struct ieee80211_vif *vif, u32 queues, bool drop)
4639 {
4640 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4641 	struct iwl_mvm_vif *mvmvif;
4642 	struct iwl_mvm_sta *mvmsta;
4643 	struct ieee80211_sta *sta;
4644 	int i;
4645 	u32 msk = 0;
4646 
4647 	if (!vif) {
4648 		iwl_mvm_flush_no_vif(mvm, queues, drop);
4649 		return;
4650 	}
4651 
4652 	if (vif->type != NL80211_IFTYPE_STATION)
4653 		return;
4654 
4655 	/* Make sure we're done with the deferred traffic before flushing */
4656 	flush_work(&mvm->add_stream_wk);
4657 
4658 	mutex_lock(&mvm->mutex);
4659 	mvmvif = iwl_mvm_vif_from_mac80211(vif);
4660 
4661 	/* flush the AP-station and all TDLS peers */
4662 	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
4663 		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
4664 						lockdep_is_held(&mvm->mutex));
4665 		if (IS_ERR_OR_NULL(sta))
4666 			continue;
4667 
4668 		mvmsta = iwl_mvm_sta_from_mac80211(sta);
4669 		if (mvmsta->vif != vif)
4670 			continue;
4671 
4672 		/* make sure only TDLS peers or the AP are flushed */
4673 		WARN_ON(i != mvmvif->ap_sta_id && !sta->tdls);
4674 
4675 		if (drop) {
4676 			if (iwl_mvm_flush_sta(mvm, mvmsta, false, 0))
4677 				IWL_ERR(mvm, "flush request fail\n");
4678 		} else {
4679 			msk |= mvmsta->tfd_queue_msk;
4680 			if (iwl_mvm_has_new_tx_api(mvm))
4681 				iwl_mvm_wait_sta_queues_empty(mvm, mvmsta);
4682 		}
4683 	}
4684 
4685 	mutex_unlock(&mvm->mutex);
4686 
4687 	/* this can take a while, and we may need/want other operations
4688 	 * to succeed while doing this, so do it without the mutex held
4689 	 */
4690 	if (!drop && !iwl_mvm_has_new_tx_api(mvm))
4691 		iwl_trans_wait_tx_queues_empty(mvm->trans, msk);
4692 }
4693 
4694 static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
4695 				  struct survey_info *survey)
4696 {
4697 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4698 	int ret;
4699 
4700 	memset(survey, 0, sizeof(*survey));
4701 
4702 	/* only support global statistics right now */
4703 	if (idx != 0)
4704 		return -ENOENT;
4705 
4706 	if (!fw_has_capa(&mvm->fw->ucode_capa,
4707 			 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
4708 		return -ENOENT;
4709 
4710 	mutex_lock(&mvm->mutex);
4711 
4712 	if (iwl_mvm_firmware_running(mvm)) {
4713 		ret = iwl_mvm_request_statistics(mvm, false);
4714 		if (ret)
4715 			goto out;
4716 	}
4717 
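	/* the firmware reports radio on-time in usec; convert to msec */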
4718 	survey->filled = SURVEY_INFO_TIME |
4719 			 SURVEY_INFO_TIME_RX |
4720 			 SURVEY_INFO_TIME_TX |
4721 			 SURVEY_INFO_TIME_SCAN;
4722 	survey->time = mvm->accu_radio_stats.on_time_rf +
4723 		       mvm->radio_stats.on_time_rf;
4724 	do_div(survey->time, USEC_PER_MSEC);
4725 
4726 	survey->time_rx = mvm->accu_radio_stats.rx_time +
4727 			  mvm->radio_stats.rx_time;
4728 	do_div(survey->time_rx, USEC_PER_MSEC);
4729 
4730 	survey->time_tx = mvm->accu_radio_stats.tx_time +
4731 			  mvm->radio_stats.tx_time;
4732 	do_div(survey->time_tx, USEC_PER_MSEC);
4733 
4734 	survey->time_scan = mvm->accu_radio_stats.on_time_scan +
4735 			    mvm->radio_stats.on_time_scan;
4736 	do_div(survey->time_scan, USEC_PER_MSEC);
4737 
4738 	ret = 0;
4739  out:
4740 	mutex_unlock(&mvm->mutex);
4741 	return ret;
4742 }
4743 
4744 static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
4745 				       struct ieee80211_vif *vif,
4746 				       struct ieee80211_sta *sta,
4747 				       struct station_info *sinfo)
4748 {
4749 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4750 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
4751 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
4752 
4753 	if (mvmsta->avg_energy) {
4754 		sinfo->signal_avg = mvmsta->avg_energy;
4755 		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
4756 	}
4757 
	/* if beacon filtering isn't on, mac80211 does it anyway */
4759 	if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
4760 		return;
4761 
4762 	if (!vif->bss_conf.assoc)
4763 		return;
4764 
4765 	mutex_lock(&mvm->mutex);
4766 
4767 	if (mvmvif->ap_sta_id != mvmsta->sta_id)
4768 		goto unlock;
4769 
4770 	if (iwl_mvm_request_statistics(mvm, false))
4771 		goto unlock;
4772 
4773 	sinfo->rx_beacon = mvmvif->beacon_stats.num_beacons +
4774 			   mvmvif->beacon_stats.accu_num_beacons;
4775 	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX);
4776 	if (mvmvif->beacon_stats.avg_signal) {
4777 		/* firmware only reports a value after RXing a few beacons */
4778 		sinfo->rx_beacon_signal_avg = mvmvif->beacon_stats.avg_signal;
4779 		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
4780 	}
4781  unlock:
4782 	mutex_unlock(&mvm->mutex);
4783 }
4784 
4785 static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm,
4786 					struct ieee80211_vif *vif,
4787 					const struct ieee80211_event *event)
4788 {
4789 #define CHECK_MLME_TRIGGER(_cnt, _fmt...)				\
4790 	do {								\
4791 		if ((trig_mlme->_cnt) && --(trig_mlme->_cnt))		\
4792 			break;						\
4793 		iwl_fw_dbg_collect_trig(&(mvm)->fwrt, trig, _fmt);	\
4794 	} while (0)
4795 
4796 	struct iwl_fw_dbg_trigger_tlv *trig;
4797 	struct iwl_fw_dbg_trigger_mlme *trig_mlme;
4798 
4799 	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
4800 				     FW_DBG_TRIGGER_MLME);
4801 	if (!trig)
4802 		return;
4803 
4804 	trig_mlme = (void *)trig->data;
4805 
4806 	if (event->u.mlme.data == ASSOC_EVENT) {
4807 		if (event->u.mlme.status == MLME_DENIED)
4808 			CHECK_MLME_TRIGGER(stop_assoc_denied,
4809 					   "DENIED ASSOC: reason %d",
4810 					    event->u.mlme.reason);
4811 		else if (event->u.mlme.status == MLME_TIMEOUT)
4812 			CHECK_MLME_TRIGGER(stop_assoc_timeout,
4813 					   "ASSOC TIMEOUT");
4814 	} else if (event->u.mlme.data == AUTH_EVENT) {
4815 		if (event->u.mlme.status == MLME_DENIED)
4816 			CHECK_MLME_TRIGGER(stop_auth_denied,
4817 					   "DENIED AUTH: reason %d",
4818 					   event->u.mlme.reason);
4819 		else if (event->u.mlme.status == MLME_TIMEOUT)
4820 			CHECK_MLME_TRIGGER(stop_auth_timeout,
4821 					   "AUTH TIMEOUT");
4822 	} else if (event->u.mlme.data == DEAUTH_RX_EVENT) {
4823 		CHECK_MLME_TRIGGER(stop_rx_deauth,
4824 				   "DEAUTH RX %d", event->u.mlme.reason);
4825 	} else if (event->u.mlme.data == DEAUTH_TX_EVENT) {
4826 		CHECK_MLME_TRIGGER(stop_tx_deauth,
4827 				   "DEAUTH TX %d", event->u.mlme.reason);
4828 	}
4829 #undef CHECK_MLME_TRIGGER
4830 }
4831 
4832 static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm,
4833 					  struct ieee80211_vif *vif,
4834 					  const struct ieee80211_event *event)
4835 {
4836 	struct iwl_fw_dbg_trigger_tlv *trig;
4837 	struct iwl_fw_dbg_trigger_ba *ba_trig;
4838 
4839 	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
4840 				     FW_DBG_TRIGGER_BA);
4841 	if (!trig)
4842 		return;
4843 
4844 	ba_trig = (void *)trig->data;
4845 
4846 	if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid)))
4847 		return;
4848 
4849 	iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
4850 				"BAR received from %pM, tid %d, ssn %d",
4851 				event->u.ba.sta->addr, event->u.ba.tid,
4852 				event->u.ba.ssn);
4853 }
4854 
4855 static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
4856 				       struct ieee80211_vif *vif,
4857 				       const struct ieee80211_event *event)
4858 {
4859 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4860 
4861 	switch (event->type) {
4862 	case MLME_EVENT:
4863 		iwl_mvm_event_mlme_callback(mvm, vif, event);
4864 		break;
4865 	case BAR_RX_EVENT:
4866 		iwl_mvm_event_bar_rx_callback(mvm, vif, event);
4867 		break;
4868 	case BA_FRAME_TIMEOUT:
4869 		iwl_mvm_event_frame_timeout_callback(mvm, vif, event->u.ba.sta,
4870 						     event->u.ba.tid);
4871 		break;
4872 	default:
4873 		break;
4874 	}
4875 }
4876 
4877 void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
4878 				     struct iwl_mvm_internal_rxq_notif *notif,
4879 				     u32 size)
4880 {
4881 	u32 qmask = BIT(mvm->trans->num_rx_queues) - 1;
4882 	int ret;
4883 
4884 	lockdep_assert_held(&mvm->mutex);
4885 
4886 	if (!iwl_mvm_has_new_rx_api(mvm))
4887 		return;
4888 
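	/* tag the notification with the current cookie so the RX-side
	 * handler can match responses to this sync request
	 */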
4889 	notif->cookie = mvm->queue_sync_cookie;
4890 
4891 	if (notif->sync)
4892 		atomic_set(&mvm->queue_sync_counter,
4893 			   mvm->trans->num_rx_queues);
4894 
4895 	ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif, size);
4896 	if (ret) {
4897 		IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret);
4898 		goto out;
4899 	}
4900 
4901 	if (notif->sync) {
4902 		ret = wait_event_timeout(mvm->rx_sync_waitq,
4903 					 atomic_read(&mvm->queue_sync_counter) == 0 ||
4904 					 iwl_mvm_is_radio_killed(mvm),
4905 					 HZ);
4906 		WARN_ON_ONCE(!ret && !iwl_mvm_is_radio_killed(mvm));
4907 	}
4908 
4909 out:
4910 	atomic_set(&mvm->queue_sync_counter, 0);
4911 	mvm->queue_sync_cookie++;
4912 }
4913 
4914 static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw)
4915 {
4916 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4917 	struct iwl_mvm_internal_rxq_notif data = {
4918 		.type = IWL_MVM_RXQ_EMPTY,
4919 		.sync = 1,
4920 	};
4921 
4922 	mutex_lock(&mvm->mutex);
4923 	iwl_mvm_sync_rx_queues_internal(mvm, &data, sizeof(data));
4924 	mutex_unlock(&mvm->mutex);
4925 }
4926 
4927 static int
4928 iwl_mvm_mac_get_ftm_responder_stats(struct ieee80211_hw *hw,
4929 				    struct ieee80211_vif *vif,
4930 				    struct cfg80211_ftm_responder_stats *stats)
4931 {
4932 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4933 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
4934 
4935 	if (vif->p2p || vif->type != NL80211_IFTYPE_AP ||
4936 	    !mvmvif->ap_ibss_active || !vif->bss_conf.ftm_responder)
4937 		return -EINVAL;
4938 
4939 	mutex_lock(&mvm->mutex);
4940 	*stats = mvm->ftm_resp_stats;
4941 	mutex_unlock(&mvm->mutex);
4942 
4943 	stats->filled = BIT(NL80211_FTM_STATS_SUCCESS_NUM) |
4944 			BIT(NL80211_FTM_STATS_PARTIAL_NUM) |
4945 			BIT(NL80211_FTM_STATS_FAILED_NUM) |
4946 			BIT(NL80211_FTM_STATS_ASAP_NUM) |
4947 			BIT(NL80211_FTM_STATS_NON_ASAP_NUM) |
4948 			BIT(NL80211_FTM_STATS_TOTAL_DURATION_MSEC) |
4949 			BIT(NL80211_FTM_STATS_UNKNOWN_TRIGGERS_NUM) |
4950 			BIT(NL80211_FTM_STATS_RESCHEDULE_REQUESTS_NUM) |
4951 			BIT(NL80211_FTM_STATS_OUT_OF_WINDOW_TRIGGERS_NUM);
4952 
4953 	return 0;
4954 }
4955 
4956 static int iwl_mvm_start_pmsr(struct ieee80211_hw *hw,
4957 			      struct ieee80211_vif *vif,
4958 			      struct cfg80211_pmsr_request *request)
4959 {
4960 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4961 	int ret;
4962 
4963 	mutex_lock(&mvm->mutex);
4964 	ret = iwl_mvm_ftm_start(mvm, vif, request);
4965 	mutex_unlock(&mvm->mutex);
4966 
4967 	return ret;
4968 }
4969 
4970 static void iwl_mvm_abort_pmsr(struct ieee80211_hw *hw,
4971 			       struct ieee80211_vif *vif,
4972 			       struct cfg80211_pmsr_request *request)
4973 {
4974 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4975 
4976 	mutex_lock(&mvm->mutex);
4977 	iwl_mvm_ftm_abort(mvm, request);
4978 	mutex_unlock(&mvm->mutex);
4979 }
4980 
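/* returns true if HW checksum offload can be used for this frame
 * (TCP or UDP over IPv4)
 */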
4981 static bool iwl_mvm_can_hw_csum(struct sk_buff *skb)
4982 {
4983 	u8 protocol = ip_hdr(skb)->protocol;
4984 
4985 	if (!IS_ENABLED(CONFIG_INET))
4986 		return false;
4987 
4988 	return protocol == IPPROTO_TCP || protocol == IPPROTO_UDP;
4989 }
4990 
4991 static bool iwl_mvm_mac_can_aggregate(struct ieee80211_hw *hw,
4992 				      struct sk_buff *head,
4993 				      struct sk_buff *skb)
4994 {
4995 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4996 
4997 	/* For now don't aggregate IPv6 in AMSDU */
4998 	if (skb->protocol != htons(ETH_P_IP))
4999 		return false;
5000 
5001 	if (!iwl_mvm_is_csum_supported(mvm))
5002 		return true;
5003 
5004 	return iwl_mvm_can_hw_csum(skb) == iwl_mvm_can_hw_csum(head);
5005 }
5006 
5007 const struct ieee80211_ops iwl_mvm_hw_ops = {
5008 	.tx = iwl_mvm_mac_tx,
5009 	.wake_tx_queue = iwl_mvm_mac_wake_tx_queue,
5010 	.ampdu_action = iwl_mvm_mac_ampdu_action,
5011 	.start = iwl_mvm_mac_start,
5012 	.reconfig_complete = iwl_mvm_mac_reconfig_complete,
5013 	.stop = iwl_mvm_mac_stop,
5014 	.add_interface = iwl_mvm_mac_add_interface,
5015 	.remove_interface = iwl_mvm_mac_remove_interface,
5016 	.config = iwl_mvm_mac_config,
5017 	.prepare_multicast = iwl_mvm_prepare_multicast,
5018 	.configure_filter = iwl_mvm_configure_filter,
5019 	.config_iface_filter = iwl_mvm_config_iface_filter,
5020 	.bss_info_changed = iwl_mvm_bss_info_changed,
5021 	.hw_scan = iwl_mvm_mac_hw_scan,
5022 	.cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan,
5023 	.sta_pre_rcu_remove = iwl_mvm_sta_pre_rcu_remove,
5024 	.sta_state = iwl_mvm_mac_sta_state,
5025 	.sta_notify = iwl_mvm_mac_sta_notify,
5026 	.allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames,
5027 	.release_buffered_frames = iwl_mvm_mac_release_buffered_frames,
5028 	.set_rts_threshold = iwl_mvm_mac_set_rts_threshold,
5029 	.sta_rc_update = iwl_mvm_sta_rc_update,
5030 	.conf_tx = iwl_mvm_mac_conf_tx,
5031 	.mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx,
5032 	.mgd_protect_tdls_discover = iwl_mvm_mac_mgd_protect_tdls_discover,
5033 	.flush = iwl_mvm_mac_flush,
5034 	.sched_scan_start = iwl_mvm_mac_sched_scan_start,
5035 	.sched_scan_stop = iwl_mvm_mac_sched_scan_stop,
5036 	.set_key = iwl_mvm_mac_set_key,
5037 	.update_tkip_key = iwl_mvm_mac_update_tkip_key,
5038 	.remain_on_channel = iwl_mvm_roc,
5039 	.cancel_remain_on_channel = iwl_mvm_cancel_roc,
5040 	.add_chanctx = iwl_mvm_add_chanctx,
5041 	.remove_chanctx = iwl_mvm_remove_chanctx,
5042 	.change_chanctx = iwl_mvm_change_chanctx,
5043 	.assign_vif_chanctx = iwl_mvm_assign_vif_chanctx,
5044 	.unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx,
5045 	.switch_vif_chanctx = iwl_mvm_switch_vif_chanctx,
5046 
5047 	.start_ap = iwl_mvm_start_ap_ibss,
5048 	.stop_ap = iwl_mvm_stop_ap_ibss,
5049 	.join_ibss = iwl_mvm_start_ap_ibss,
5050 	.leave_ibss = iwl_mvm_stop_ap_ibss,
5051 
5052 	.tx_last_beacon = iwl_mvm_tx_last_beacon,
5053 
5054 	.set_tim = iwl_mvm_set_tim,
5055 
5056 	.channel_switch = iwl_mvm_channel_switch,
5057 	.pre_channel_switch = iwl_mvm_pre_channel_switch,
5058 	.post_channel_switch = iwl_mvm_post_channel_switch,
5059 
5060 	.tdls_channel_switch = iwl_mvm_tdls_channel_switch,
5061 	.tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch,
5062 	.tdls_recv_channel_switch = iwl_mvm_tdls_recv_channel_switch,
5063 
5064 	.event_callback = iwl_mvm_mac_event_callback,
5065 
5066 	.sync_rx_queues = iwl_mvm_sync_rx_queues,
5067 
5068 	CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)
5069 
5070 #ifdef CONFIG_PM_SLEEP
5071 	/* look at d3.c */
5072 	.suspend = iwl_mvm_suspend,
5073 	.resume = iwl_mvm_resume,
5074 	.set_wakeup = iwl_mvm_set_wakeup,
5075 	.set_rekey_data = iwl_mvm_set_rekey_data,
5076 #if IS_ENABLED(CONFIG_IPV6)
5077 	.ipv6_addr_change = iwl_mvm_ipv6_addr_change,
5078 #endif
5079 	.set_default_unicast_key = iwl_mvm_set_default_unicast_key,
5080 #endif
5081 	.get_survey = iwl_mvm_mac_get_survey,
5082 	.sta_statistics = iwl_mvm_mac_sta_statistics,
5083 	.get_ftm_responder_stats = iwl_mvm_mac_get_ftm_responder_stats,
5084 	.start_pmsr = iwl_mvm_start_pmsr,
5085 	.abort_pmsr = iwl_mvm_abort_pmsr,
5086 
5087 	.can_aggregate_in_amsdu = iwl_mvm_mac_can_aggregate,
5088 #ifdef CONFIG_IWLWIFI_DEBUGFS
5089 	.sta_add_debugfs = iwl_mvm_sta_add_debugfs,
5090 #endif
5091 };
5092