xref: /openbmc/linux/drivers/net/wireless/ath/ath10k/mac.c (revision b240b419db5d624ce7a5a397d6f62a1a686009ec)
1 /*
2  * Copyright (c) 2005-2011 Atheros Communications Inc.
3  * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
4  * Copyright (c) 2018, The Linux Foundation. All rights reserved.
5  *
6  * Permission to use, copy, modify, and/or distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "mac.h"
20 
21 #include <net/mac80211.h>
22 #include <linux/etherdevice.h>
23 #include <linux/acpi.h>
24 
25 #include "hif.h"
26 #include "core.h"
27 #include "debug.h"
28 #include "wmi.h"
29 #include "htt.h"
30 #include "txrx.h"
31 #include "testmode.h"
33 #include "wmi-tlv.h"
34 #include "wmi-ops.h"
35 #include "wow.h"
36 
37 /*********/
38 /* Rates */
39 /*********/
40 
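/* Legacy rate tables advertised to mac80211. Bitrates are in units of
 * 100 kbps; the four CCK entries come first (with separate short
 * preamble hw values), followed by the eight OFDM rates shared by both
 * the original and the rev2 table.
 */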
41 static struct ieee80211_rate ath10k_rates[] = {
42 	{ .bitrate = 10,
43 	  .hw_value = ATH10K_HW_RATE_CCK_LP_1M },
44 	{ .bitrate = 20,
45 	  .hw_value = ATH10K_HW_RATE_CCK_LP_2M,
46 	  .hw_value_short = ATH10K_HW_RATE_CCK_SP_2M,
47 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
48 	{ .bitrate = 55,
49 	  .hw_value = ATH10K_HW_RATE_CCK_LP_5_5M,
50 	  .hw_value_short = ATH10K_HW_RATE_CCK_SP_5_5M,
51 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
52 	{ .bitrate = 110,
53 	  .hw_value = ATH10K_HW_RATE_CCK_LP_11M,
54 	  .hw_value_short = ATH10K_HW_RATE_CCK_SP_11M,
55 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
56 
57 	{ .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
58 	{ .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
59 	{ .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
60 	{ .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
61 	{ .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
62 	{ .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
63 	{ .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
64 	{ .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
65 };
66 
67 static struct ieee80211_rate ath10k_rates_rev2[] = {
68 	{ .bitrate = 10,
69 	  .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_1M },
70 	{ .bitrate = 20,
71 	  .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_2M,
72 	  .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_2M,
73 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
74 	{ .bitrate = 55,
75 	  .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_5_5M,
76 	  .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_5_5M,
77 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
78 	{ .bitrate = 110,
79 	  .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_11M,
80 	  .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_11M,
81 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
82 
83 	{ .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
84 	{ .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
85 	{ .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
86 	{ .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
87 	{ .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
88 	{ .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
89 	{ .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
90 	{ .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
91 };
92 
93 #define ATH10K_MAC_FIRST_OFDM_RATE_IDX 4
94 
95 #define ath10k_a_rates (ath10k_rates + ATH10K_MAC_FIRST_OFDM_RATE_IDX)
96 #define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - \
97 			     ATH10K_MAC_FIRST_OFDM_RATE_IDX)
98 #define ath10k_g_rates (ath10k_rates + 0)
99 #define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))
100 
101 #define ath10k_g_rates_rev2 (ath10k_rates_rev2 + 0)
102 #define ath10k_g_rates_rev2_size (ARRAY_SIZE(ath10k_rates_rev2))
103 
104 static bool ath10k_mac_bitrate_is_cck(int bitrate)
105 {
106 	switch (bitrate) {
107 	case 10:
108 	case 20:
109 	case 55:
110 	case 110:
111 		return true;
112 	}
113 
114 	return false;
115 }
116 
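/* Convert a bitrate (in 100 kbps) into the rate code used towards
 * firmware: the rate in 500 kbps units, with BIT(7) apparently marking
 * CCK rates.
 */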
117 static u8 ath10k_mac_bitrate_to_rate(int bitrate)
118 {
119 	return DIV_ROUND_UP(bitrate, 5) |
120 	       (ath10k_mac_bitrate_is_cck(bitrate) ? BIT(7) : 0);
121 }
122 
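/* Map a firmware hw rate code back to an index into the band's bitrate
 * table, matching either the long or the short preamble hw value.
 * Falls back to index 0 when nothing matches.
 */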
123 u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
124 			     u8 hw_rate, bool cck)
125 {
126 	const struct ieee80211_rate *rate;
127 	int i;
128 
129 	for (i = 0; i < sband->n_bitrates; i++) {
130 		rate = &sband->bitrates[i];
131 
132 		if (ath10k_mac_bitrate_is_cck(rate->bitrate) != cck)
133 			continue;
134 
135 		if (rate->hw_value == hw_rate)
136 			return i;
137 		else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE &&
138 			 rate->hw_value_short == hw_rate)
139 			return i;
140 	}
141 
142 	return 0;
143 }
144 
145 u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
146 			     u32 bitrate)
147 {
148 	int i;
149 
150 	for (i = 0; i < sband->n_bitrates; i++)
151 		if (sband->bitrates[i].bitrate == bitrate)
152 			return i;
153 
154 	return 0;
155 }
156 
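/* Extract the 2-bit VHT MCS support field for the given spatial stream
 * from an MCS map and expand it into a bitmask of the supported MCS
 * indices (0-7, 0-8 or 0-9). Unsupported streams yield 0.
 */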
157 static int ath10k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss)
158 {
159 	switch ((mcs_map >> (2 * nss)) & 0x3) {
160 	case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1;
161 	case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1;
162 	case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1;
163 	}
164 	return 0;
165 }
166 
167 static u32
168 ath10k_mac_max_ht_nss(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
169 {
170 	int nss;
171 
172 	for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--)
173 		if (ht_mcs_mask[nss])
174 			return nss + 1;
175 
176 	return 1;
177 }
178 
179 static u32
180 ath10k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
181 {
182 	int nss;
183 
184 	for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--)
185 		if (vht_mcs_mask[nss])
186 			return nss + 1;
187 
188 	return 1;
189 }
190 
191 int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val)
192 {
193 	enum wmi_host_platform_type platform_type;
194 	int ret;
195 
196 	if (test_bit(WMI_SERVICE_TX_MODE_DYNAMIC, ar->wmi.svc_map))
197 		platform_type = WMI_HOST_PLATFORM_LOW_PERF;
198 	else
199 		platform_type = WMI_HOST_PLATFORM_HIGH_PERF;
200 
201 	ret = ath10k_wmi_ext_resource_config(ar, platform_type, val);
202 
203 	if (ret && ret != -EOPNOTSUPP) {
204 		ath10k_warn(ar, "failed to configure ext resource: %d\n", ret);
205 		return ret;
206 	}
207 
208 	return 0;
209 }
210 
211 /**********/
212 /* Crypto */
213 /**********/
214 
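/* Translate a mac80211 key into a WMI vdev install key command. Cipher
 * suites are mapped to their WMI counterparts; management frame ciphers
 * (CMAC/GMAC) are never sent to firmware here. For DISABLE_KEY the
 * cipher is set to WMI_CIPHER_NONE and the key data is cleared.
 */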
215 static int ath10k_send_key(struct ath10k_vif *arvif,
216 			   struct ieee80211_key_conf *key,
217 			   enum set_key_cmd cmd,
218 			   const u8 *macaddr, u32 flags)
219 {
220 	struct ath10k *ar = arvif->ar;
221 	struct wmi_vdev_install_key_arg arg = {
222 		.vdev_id = arvif->vdev_id,
223 		.key_idx = key->keyidx,
224 		.key_len = key->keylen,
225 		.key_data = key->key,
226 		.key_flags = flags,
227 		.macaddr = macaddr,
228 	};
229 
230 	lockdep_assert_held(&arvif->ar->conf_mutex);
231 
232 	switch (key->cipher) {
233 	case WLAN_CIPHER_SUITE_CCMP:
234 		arg.key_cipher = WMI_CIPHER_AES_CCM;
235 		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
236 		break;
237 	case WLAN_CIPHER_SUITE_TKIP:
238 		arg.key_cipher = WMI_CIPHER_TKIP;
239 		arg.key_txmic_len = 8;
240 		arg.key_rxmic_len = 8;
241 		break;
242 	case WLAN_CIPHER_SUITE_WEP40:
243 	case WLAN_CIPHER_SUITE_WEP104:
244 		arg.key_cipher = WMI_CIPHER_WEP;
245 		break;
246 	case WLAN_CIPHER_SUITE_CCMP_256:
247 		arg.key_cipher = WMI_CIPHER_AES_CCM;
248 		break;
249 	case WLAN_CIPHER_SUITE_GCMP:
250 	case WLAN_CIPHER_SUITE_GCMP_256:
251 		arg.key_cipher = WMI_CIPHER_AES_GCM;
252 		break;
253 	case WLAN_CIPHER_SUITE_BIP_GMAC_128:
254 	case WLAN_CIPHER_SUITE_BIP_GMAC_256:
255 	case WLAN_CIPHER_SUITE_BIP_CMAC_256:
256 	case WLAN_CIPHER_SUITE_AES_CMAC:
257 		WARN_ON(1);
258 		return -EINVAL;
259 	default:
260 		ath10k_warn(ar, "cipher %d is not supported\n", key->cipher);
261 		return -EOPNOTSUPP;
262 	}
263 
264 	if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
265 		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
266 
267 	if (cmd == DISABLE_KEY) {
268 		arg.key_cipher = WMI_CIPHER_NONE;
269 		arg.key_data = NULL;
270 	}
271 
272 	return ath10k_wmi_vdev_install_key(arvif->ar, &arg);
273 }
274 
275 static int ath10k_install_key(struct ath10k_vif *arvif,
276 			      struct ieee80211_key_conf *key,
277 			      enum set_key_cmd cmd,
278 			      const u8 *macaddr, u32 flags)
279 {
280 	struct ath10k *ar = arvif->ar;
281 	int ret;
282 	unsigned long time_left;
283 
284 	lockdep_assert_held(&ar->conf_mutex);
285 
286 	reinit_completion(&ar->install_key_done);
287 
288 	if (arvif->nohwcrypt)
289 		return 1;
290 
291 	ret = ath10k_send_key(arvif, key, cmd, macaddr, flags);
292 	if (ret)
293 		return ret;
294 
295 	time_left = wait_for_completion_timeout(&ar->install_key_done, 3 * HZ);
296 	if (time_left == 0)
297 		return -ETIMEDOUT;
298 
299 	return 0;
300 }
301 
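/* Install all configured WEP keys of the vif for the given peer. In AP
 * mode the default TX key is additionally flagged with WMI_KEY_TX_USAGE;
 * in IBSS mode every key is installed both as a pairwise and as a group
 * key (see also the def_keyid workaround below).
 */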
302 static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
303 					const u8 *addr)
304 {
305 	struct ath10k *ar = arvif->ar;
306 	struct ath10k_peer *peer;
307 	int ret;
308 	int i;
309 	u32 flags;
310 
311 	lockdep_assert_held(&ar->conf_mutex);
312 
313 	if (WARN_ON(arvif->vif->type != NL80211_IFTYPE_AP &&
314 		    arvif->vif->type != NL80211_IFTYPE_ADHOC &&
315 		    arvif->vif->type != NL80211_IFTYPE_MESH_POINT))
316 		return -EINVAL;
317 
318 	spin_lock_bh(&ar->data_lock);
319 	peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
320 	spin_unlock_bh(&ar->data_lock);
321 
322 	if (!peer)
323 		return -ENOENT;
324 
325 	for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) {
326 		if (arvif->wep_keys[i] == NULL)
327 			continue;
328 
329 		switch (arvif->vif->type) {
330 		case NL80211_IFTYPE_AP:
331 			flags = WMI_KEY_PAIRWISE;
332 
333 			if (arvif->def_wep_key_idx == i)
334 				flags |= WMI_KEY_TX_USAGE;
335 
336 			ret = ath10k_install_key(arvif, arvif->wep_keys[i],
337 						 SET_KEY, addr, flags);
338 			if (ret < 0)
339 				return ret;
340 			break;
341 		case NL80211_IFTYPE_ADHOC:
342 			ret = ath10k_install_key(arvif, arvif->wep_keys[i],
343 						 SET_KEY, addr,
344 						 WMI_KEY_PAIRWISE);
345 			if (ret < 0)
346 				return ret;
347 
348 			ret = ath10k_install_key(arvif, arvif->wep_keys[i],
349 						 SET_KEY, addr, WMI_KEY_GROUP);
350 			if (ret < 0)
351 				return ret;
352 			break;
353 		default:
354 			WARN_ON(1);
355 			return -EINVAL;
356 		}
357 
358 		spin_lock_bh(&ar->data_lock);
359 		peer->keys[i] = arvif->wep_keys[i];
360 		spin_unlock_bh(&ar->data_lock);
361 	}
362 
363 	/* In some cases (notably with static WEP IBSS with multiple keys)
364 	 * multicast Tx becomes broken. Both pairwise and groupwise keys are
365 	 * installed already. Using WMI_KEY_TX_USAGE in different combinations
366 	 * didn't seem to help. Using the def_keyid vdev parameter seems to
367 	 * be effective, so use that.
368 	 *
369 	 * FIXME: Revisit. Perhaps this can be done in a less hacky way.
370 	 */
371 	if (arvif->vif->type != NL80211_IFTYPE_ADHOC)
372 		return 0;
373 
374 	if (arvif->def_wep_key_idx == -1)
375 		return 0;
376 
377 	ret = ath10k_wmi_vdev_set_param(arvif->ar,
378 					arvif->vdev_id,
379 					arvif->ar->wmi.vdev_param->def_keyid,
380 					arvif->def_wep_key_idx);
381 	if (ret) {
382 		ath10k_warn(ar, "failed to re-set default wep key idx on vdev %i: %d\n",
383 			    arvif->vdev_id, ret);
384 		return ret;
385 	}
386 
387 	return 0;
388 }
389 
390 static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
391 				  const u8 *addr)
392 {
393 	struct ath10k *ar = arvif->ar;
394 	struct ath10k_peer *peer;
395 	int first_errno = 0;
396 	int ret;
397 	int i;
398 	u32 flags = 0;
399 
400 	lockdep_assert_held(&ar->conf_mutex);
401 
402 	spin_lock_bh(&ar->data_lock);
403 	peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
404 	spin_unlock_bh(&ar->data_lock);
405 
406 	if (!peer)
407 		return -ENOENT;
408 
409 	for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
410 		if (peer->keys[i] == NULL)
411 			continue;
412 
413 		/* key flags are not required to delete the key */
414 		ret = ath10k_install_key(arvif, peer->keys[i],
415 					 DISABLE_KEY, addr, flags);
416 		if (ret < 0 && first_errno == 0)
417 			first_errno = ret;
418 
419 		if (ret < 0)
420 			ath10k_warn(ar, "failed to remove peer wep key %d: %d\n",
421 				    i, ret);
422 
423 		spin_lock_bh(&ar->data_lock);
424 		peer->keys[i] = NULL;
425 		spin_unlock_bh(&ar->data_lock);
426 	}
427 
428 	return first_errno;
429 }
430 
431 bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr,
432 				    u8 keyidx)
433 {
434 	struct ath10k_peer *peer;
435 	int i;
436 
437 	lockdep_assert_held(&ar->data_lock);
438 
439 	/* We don't know which vdev this peer belongs to,
440 	 * since WMI doesn't give us that information.
441 	 *
442 	 * FIXME: multi-bss needs to be handled.
443 	 */
444 	peer = ath10k_peer_find(ar, 0, addr);
445 	if (!peer)
446 		return false;
447 
448 	for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
449 		if (peer->keys[i] && peer->keys[i]->keyidx == keyidx)
450 			return true;
451 	}
452 
453 	return false;
454 }
455 
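/* Remove all peer references to the given key. The peer list is only
 * walked under data_lock; because the actual key removal can sleep, the
 * lock is dropped and the peers are processed one at a time until no
 * peer holds the key anymore. The first error (if any) is returned.
 */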
456 static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
457 				 struct ieee80211_key_conf *key)
458 {
459 	struct ath10k *ar = arvif->ar;
460 	struct ath10k_peer *peer;
461 	u8 addr[ETH_ALEN];
462 	int first_errno = 0;
463 	int ret;
464 	int i;
465 	u32 flags = 0;
466 
467 	lockdep_assert_held(&ar->conf_mutex);
468 
469 	for (;;) {
470 		/* since ath10k_install_key() may sleep we can't hold data_lock
471 		 * all the time, so we try to remove the keys incrementally
472 		 */
473 		spin_lock_bh(&ar->data_lock);
474 		i = 0;
475 		list_for_each_entry(peer, &ar->peers, list) {
476 			for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
477 				if (peer->keys[i] == key) {
478 					ether_addr_copy(addr, peer->addr);
479 					peer->keys[i] = NULL;
480 					break;
481 				}
482 			}
483 
484 			if (i < ARRAY_SIZE(peer->keys))
485 				break;
486 		}
487 		spin_unlock_bh(&ar->data_lock);
488 
489 		if (i == ARRAY_SIZE(peer->keys))
490 			break;
491 		/* key flags are not required to delete the key */
492 		ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, flags);
493 		if (ret < 0 && first_errno == 0)
494 			first_errno = ret;
495 
496 		if (ret)
497 			ath10k_warn(ar, "failed to remove key for %pM: %d\n",
498 				    addr, ret);
499 	}
500 
501 	return first_errno;
502 }
503 
504 static int ath10k_mac_vif_update_wep_key(struct ath10k_vif *arvif,
505 					 struct ieee80211_key_conf *key)
506 {
507 	struct ath10k *ar = arvif->ar;
508 	struct ath10k_peer *peer;
509 	int ret;
510 
511 	lockdep_assert_held(&ar->conf_mutex);
512 
513 	list_for_each_entry(peer, &ar->peers, list) {
514 		if (ether_addr_equal(peer->addr, arvif->vif->addr))
515 			continue;
516 
517 		if (ether_addr_equal(peer->addr, arvif->bssid))
518 			continue;
519 
520 		if (peer->keys[key->keyidx] == key)
521 			continue;
522 
523 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vif vdev %i update key %i needs update\n",
524 			   arvif->vdev_id, key->keyidx);
525 
526 		ret = ath10k_install_peer_wep_keys(arvif, peer->addr);
527 		if (ret) {
528 			ath10k_warn(ar, "failed to update wep keys on vdev %i for peer %pM: %d\n",
529 				    arvif->vdev_id, peer->addr, ret);
530 			return ret;
531 		}
532 	}
533 
534 	return 0;
535 }
536 
537 /*********************/
538 /* General utilities */
539 /*********************/
540 
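/* Map a cfg80211 channel definition (band + width) onto the matching
 * WMI phy mode. Widths without a corresponding mode (5/10 MHz, or VHT
 * widths on 2 GHz) are left as MODE_UNKNOWN and trip the WARN_ON below.
 */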
541 static inline enum wmi_phy_mode
542 chan_to_phymode(const struct cfg80211_chan_def *chandef)
543 {
544 	enum wmi_phy_mode phymode = MODE_UNKNOWN;
545 
546 	switch (chandef->chan->band) {
547 	case NL80211_BAND_2GHZ:
548 		switch (chandef->width) {
549 		case NL80211_CHAN_WIDTH_20_NOHT:
550 			if (chandef->chan->flags & IEEE80211_CHAN_NO_OFDM)
551 				phymode = MODE_11B;
552 			else
553 				phymode = MODE_11G;
554 			break;
555 		case NL80211_CHAN_WIDTH_20:
556 			phymode = MODE_11NG_HT20;
557 			break;
558 		case NL80211_CHAN_WIDTH_40:
559 			phymode = MODE_11NG_HT40;
560 			break;
561 		case NL80211_CHAN_WIDTH_5:
562 		case NL80211_CHAN_WIDTH_10:
563 		case NL80211_CHAN_WIDTH_80:
564 		case NL80211_CHAN_WIDTH_80P80:
565 		case NL80211_CHAN_WIDTH_160:
566 			phymode = MODE_UNKNOWN;
567 			break;
568 		}
569 		break;
570 	case NL80211_BAND_5GHZ:
571 		switch (chandef->width) {
572 		case NL80211_CHAN_WIDTH_20_NOHT:
573 			phymode = MODE_11A;
574 			break;
575 		case NL80211_CHAN_WIDTH_20:
576 			phymode = MODE_11NA_HT20;
577 			break;
578 		case NL80211_CHAN_WIDTH_40:
579 			phymode = MODE_11NA_HT40;
580 			break;
581 		case NL80211_CHAN_WIDTH_80:
582 			phymode = MODE_11AC_VHT80;
583 			break;
584 		case NL80211_CHAN_WIDTH_160:
585 			phymode = MODE_11AC_VHT160;
586 			break;
587 		case NL80211_CHAN_WIDTH_80P80:
588 			phymode = MODE_11AC_VHT80_80;
589 			break;
590 		case NL80211_CHAN_WIDTH_5:
591 		case NL80211_CHAN_WIDTH_10:
592 			phymode = MODE_UNKNOWN;
593 			break;
594 		}
595 		break;
596 	default:
597 		break;
598 	}
599 
600 	WARN_ON(phymode == MODE_UNKNOWN);
601 	return phymode;
602 }
603 
604 static u8 ath10k_parse_mpdudensity(u8 mpdudensity)
605 {
606 /*
607  * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
608  *   0 for no restriction
609  *   1 for 1/4 us
610  *   2 for 1/2 us
611  *   3 for 1 us
612  *   4 for 2 us
613  *   5 for 4 us
614  *   6 for 8 us
615  *   7 for 16 us
616  */
617 	switch (mpdudensity) {
618 	case 0:
619 		return 0;
620 	case 1:
621 	case 2:
622 	case 3:
623 	/* Our lower layer calculations limit our precision to
624 	 * 1 microsecond
625 	 */
626 		return 1;
627 	case 4:
628 		return 2;
629 	case 5:
630 		return 4;
631 	case 6:
632 		return 8;
633 	case 7:
634 		return 16;
635 	default:
636 		return 0;
637 	}
638 }
639 
640 int ath10k_mac_vif_chan(struct ieee80211_vif *vif,
641 			struct cfg80211_chan_def *def)
642 {
643 	struct ieee80211_chanctx_conf *conf;
644 
645 	rcu_read_lock();
646 	conf = rcu_dereference(vif->chanctx_conf);
647 	if (!conf) {
648 		rcu_read_unlock();
649 		return -ENOENT;
650 	}
651 
652 	*def = conf->def;
653 	rcu_read_unlock();
654 
655 	return 0;
656 }
657 
658 static void ath10k_mac_num_chanctxs_iter(struct ieee80211_hw *hw,
659 					 struct ieee80211_chanctx_conf *conf,
660 					 void *data)
661 {
662 	int *num = data;
663 
664 	(*num)++;
665 }
666 
667 static int ath10k_mac_num_chanctxs(struct ath10k *ar)
668 {
669 	int num = 0;
670 
671 	ieee80211_iter_chan_contexts_atomic(ar->hw,
672 					    ath10k_mac_num_chanctxs_iter,
673 					    &num);
674 
675 	return num;
676 }
677 
678 static void
679 ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
680 				struct ieee80211_chanctx_conf *conf,
681 				void *data)
682 {
683 	struct cfg80211_chan_def **def = data;
684 
685 	*def = &conf->def;
686 }
687 
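/* Create a firmware peer entry and bind it to the given vif/sta. Each
 * vdev consumes a peer entry of its own, which is accounted for when
 * checking against max_num_peers. The call waits for the firmware
 * peer-created event before looking the peer up in the host peer list.
 */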
688 static int ath10k_peer_create(struct ath10k *ar,
689 			      struct ieee80211_vif *vif,
690 			      struct ieee80211_sta *sta,
691 			      u32 vdev_id,
692 			      const u8 *addr,
693 			      enum wmi_peer_type peer_type)
694 {
695 	struct ath10k_vif *arvif;
696 	struct ath10k_peer *peer;
697 	int num_peers = 0;
698 	int ret;
699 
700 	lockdep_assert_held(&ar->conf_mutex);
701 
702 	num_peers = ar->num_peers;
703 
704 	/* Each vdev consumes a peer entry as well */
705 	list_for_each_entry(arvif, &ar->arvifs, list)
706 		num_peers++;
707 
708 	if (num_peers >= ar->max_num_peers)
709 		return -ENOBUFS;
710 
711 	ret = ath10k_wmi_peer_create(ar, vdev_id, addr, peer_type);
712 	if (ret) {
713 		ath10k_warn(ar, "failed to create wmi peer %pM on vdev %i: %i\n",
714 			    addr, vdev_id, ret);
715 		return ret;
716 	}
717 
718 	ret = ath10k_wait_for_peer_created(ar, vdev_id, addr);
719 	if (ret) {
720 		ath10k_warn(ar, "failed to wait for created wmi peer %pM on vdev %i: %i\n",
721 			    addr, vdev_id, ret);
722 		return ret;
723 	}
724 
725 	spin_lock_bh(&ar->data_lock);
726 
727 	peer = ath10k_peer_find(ar, vdev_id, addr);
728 	if (!peer) {
729 		spin_unlock_bh(&ar->data_lock);
730 		ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n",
731 			    addr, vdev_id);
732 		ath10k_wmi_peer_delete(ar, vdev_id, addr);
733 		return -ENOENT;
734 	}
735 
736 	peer->vif = vif;
737 	peer->sta = sta;
738 
739 	spin_unlock_bh(&ar->data_lock);
740 
741 	ar->num_peers++;
742 
743 	return 0;
744 }
745 
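/* Program the (pdev-wide) station kickout threshold and the per-vdev AP
 * keepalive idle/unresponsive timers that firmware uses to detect
 * unresponsive stations.
 */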
746 static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
747 {
748 	struct ath10k *ar = arvif->ar;
749 	u32 param;
750 	int ret;
751 
752 	param = ar->wmi.pdev_param->sta_kickout_th;
753 	ret = ath10k_wmi_pdev_set_param(ar, param,
754 					ATH10K_KICKOUT_THRESHOLD);
755 	if (ret) {
756 		ath10k_warn(ar, "failed to set kickout threshold on vdev %i: %d\n",
757 			    arvif->vdev_id, ret);
758 		return ret;
759 	}
760 
761 	param = ar->wmi.vdev_param->ap_keepalive_min_idle_inactive_time_secs;
762 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
763 					ATH10K_KEEPALIVE_MIN_IDLE);
764 	if (ret) {
765 		ath10k_warn(ar, "failed to set keepalive minimum idle time on vdev %i: %d\n",
766 			    arvif->vdev_id, ret);
767 		return ret;
768 	}
769 
770 	param = ar->wmi.vdev_param->ap_keepalive_max_idle_inactive_time_secs;
771 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
772 					ATH10K_KEEPALIVE_MAX_IDLE);
773 	if (ret) {
774 		ath10k_warn(ar, "failed to set keepalive maximum idle time on vdev %i: %d\n",
775 			    arvif->vdev_id, ret);
776 		return ret;
777 	}
778 
779 	param = ar->wmi.vdev_param->ap_keepalive_max_unresponsive_time_secs;
780 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
781 					ATH10K_KEEPALIVE_MAX_UNRESPONSIVE);
782 	if (ret) {
783 		ath10k_warn(ar, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
784 			    arvif->vdev_id, ret);
785 		return ret;
786 	}
787 
788 	return 0;
789 }
790 
791 static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
792 {
793 	struct ath10k *ar = arvif->ar;
794 	u32 vdev_param;
795 
796 	vdev_param = ar->wmi.vdev_param->rts_threshold;
797 	return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
798 }
799 
800 static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
801 {
802 	int ret;
803 
804 	lockdep_assert_held(&ar->conf_mutex);
805 
806 	ret = ath10k_wmi_peer_delete(ar, vdev_id, addr);
807 	if (ret)
808 		return ret;
809 
810 	ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr);
811 	if (ret)
812 		return ret;
813 
814 	ar->num_peers--;
815 
816 	return 0;
817 }
818 
819 static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
820 {
821 	struct ath10k_peer *peer, *tmp;
822 	int peer_id;
823 	int i;
824 
825 	lockdep_assert_held(&ar->conf_mutex);
826 
827 	spin_lock_bh(&ar->data_lock);
828 	list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
829 		if (peer->vdev_id != vdev_id)
830 			continue;
831 
832 		ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n",
833 			    peer->addr, vdev_id);
834 
835 		for_each_set_bit(peer_id, peer->peer_ids,
836 				 ATH10K_MAX_NUM_PEER_IDS) {
837 			ar->peer_map[peer_id] = NULL;
838 		}
839 
840 		/* Double check that peer is properly un-referenced from
841 		 * the peer_map
842 		 */
843 		for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
844 			if (ar->peer_map[i] == peer) {
845 				ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %pK idx %d)\n",
846 					    peer->addr, peer, i);
847 				ar->peer_map[i] = NULL;
848 			}
849 		}
850 
851 		list_del(&peer->list);
852 		kfree(peer);
853 		ar->num_peers--;
854 	}
855 	spin_unlock_bh(&ar->data_lock);
856 }
857 
858 static void ath10k_peer_cleanup_all(struct ath10k *ar)
859 {
860 	struct ath10k_peer *peer, *tmp;
861 	int i;
862 
863 	lockdep_assert_held(&ar->conf_mutex);
864 
865 	spin_lock_bh(&ar->data_lock);
866 	list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
867 		list_del(&peer->list);
868 		kfree(peer);
869 	}
870 
871 	for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++)
872 		ar->peer_map[i] = NULL;
873 
874 	spin_unlock_bh(&ar->data_lock);
875 
876 	ar->num_peers = 0;
877 	ar->num_stations = 0;
878 }
879 
880 static int ath10k_mac_tdls_peer_update(struct ath10k *ar, u32 vdev_id,
881 				       struct ieee80211_sta *sta,
882 				       enum wmi_tdls_peer_state state)
883 {
884 	int ret;
885 	struct wmi_tdls_peer_update_cmd_arg arg = {};
886 	struct wmi_tdls_peer_capab_arg cap = {};
887 	struct wmi_channel_arg chan_arg = {};
888 
889 	lockdep_assert_held(&ar->conf_mutex);
890 
891 	arg.vdev_id = vdev_id;
892 	arg.peer_state = state;
893 	ether_addr_copy(arg.addr, sta->addr);
894 
895 	cap.peer_max_sp = sta->max_sp;
896 	cap.peer_uapsd_queues = sta->uapsd_queues;
897 
898 	if (state == WMI_TDLS_PEER_STATE_CONNECTED &&
899 	    !sta->tdls_initiator)
900 		cap.is_peer_responder = 1;
901 
902 	ret = ath10k_wmi_tdls_peer_update(ar, &arg, &cap, &chan_arg);
903 	if (ret) {
904 		ath10k_warn(ar, "failed to update tdls peer %pM on vdev %i: %i\n",
905 			    arg.addr, vdev_id, ret);
906 		return ret;
907 	}
908 
909 	return 0;
910 }
911 
912 /************************/
913 /* Interface management */
914 /************************/
915 
916 void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif)
917 {
918 	struct ath10k *ar = arvif->ar;
919 
920 	lockdep_assert_held(&ar->data_lock);
921 
922 	if (!arvif->beacon)
923 		return;
924 
925 	if (!arvif->beacon_buf)
926 		dma_unmap_single(ar->dev, ATH10K_SKB_CB(arvif->beacon)->paddr,
927 				 arvif->beacon->len, DMA_TO_DEVICE);
928 
929 	if (WARN_ON(arvif->beacon_state != ATH10K_BEACON_SCHEDULED &&
930 		    arvif->beacon_state != ATH10K_BEACON_SENT))
931 		return;
932 
933 	dev_kfree_skb_any(arvif->beacon);
934 
935 	arvif->beacon = NULL;
936 	arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
937 }
938 
939 static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif)
940 {
941 	struct ath10k *ar = arvif->ar;
942 
943 	lockdep_assert_held(&ar->data_lock);
944 
945 	ath10k_mac_vif_beacon_free(arvif);
946 
947 	if (arvif->beacon_buf) {
948 		dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
949 				  arvif->beacon_buf, arvif->beacon_paddr);
950 		arvif->beacon_buf = NULL;
951 	}
952 }
953 
954 static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
955 {
956 	unsigned long time_left;
957 
958 	lockdep_assert_held(&ar->conf_mutex);
959 
960 	if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
961 		return -ESHUTDOWN;
962 
963 	time_left = wait_for_completion_timeout(&ar->vdev_setup_done,
964 						ATH10K_VDEV_SETUP_TIMEOUT_HZ);
965 	if (time_left == 0)
966 		return -ETIMEDOUT;
967 
968 	return 0;
969 }
970 
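/* Start and bring up the monitor vdev. The channel is borrowed from
 * whichever channel context currently exists (see the TODO below); the
 * power values are scaled by 2, presumably because firmware expects
 * them in 0.5 dBm units.
 */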
971 static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
972 {
973 	struct cfg80211_chan_def *chandef = NULL;
974 	struct ieee80211_channel *channel = NULL;
975 	struct wmi_vdev_start_request_arg arg = {};
976 	int ret = 0;
977 
978 	lockdep_assert_held(&ar->conf_mutex);
979 
980 	ieee80211_iter_chan_contexts_atomic(ar->hw,
981 					    ath10k_mac_get_any_chandef_iter,
982 					    &chandef);
983 	if (WARN_ON_ONCE(!chandef))
984 		return -ENOENT;
985 
986 	channel = chandef->chan;
987 
988 	arg.vdev_id = vdev_id;
989 	arg.channel.freq = channel->center_freq;
990 	arg.channel.band_center_freq1 = chandef->center_freq1;
991 	arg.channel.band_center_freq2 = chandef->center_freq2;
992 
993 	/* TODO: set this up dynamically; what if we don't
994 	 * have any vifs?
995 	 */
996 	arg.channel.mode = chan_to_phymode(chandef);
997 	arg.channel.chan_radar =
998 			!!(channel->flags & IEEE80211_CHAN_RADAR);
999 
1000 	arg.channel.min_power = 0;
1001 	arg.channel.max_power = channel->max_power * 2;
1002 	arg.channel.max_reg_power = channel->max_reg_power * 2;
1003 	arg.channel.max_antenna_gain = channel->max_antenna_gain * 2;
1004 
1005 	reinit_completion(&ar->vdev_setup_done);
1006 
1007 	ret = ath10k_wmi_vdev_start(ar, &arg);
1008 	if (ret) {
1009 		ath10k_warn(ar, "failed to request monitor vdev %i start: %d\n",
1010 			    vdev_id, ret);
1011 		return ret;
1012 	}
1013 
1014 	ret = ath10k_vdev_setup_sync(ar);
1015 	if (ret) {
1016 		ath10k_warn(ar, "failed to synchronize setup for monitor vdev %i start: %d\n",
1017 			    vdev_id, ret);
1018 		return ret;
1019 	}
1020 
1021 	ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
1022 	if (ret) {
1023 		ath10k_warn(ar, "failed to put up monitor vdev %i: %d\n",
1024 			    vdev_id, ret);
1025 		goto vdev_stop;
1026 	}
1027 
1028 	ar->monitor_vdev_id = vdev_id;
1029 
1030 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i started\n",
1031 		   ar->monitor_vdev_id);
1032 	return 0;
1033 
1034 vdev_stop:
1035 	ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
1036 	if (ret)
1037 		ath10k_warn(ar, "failed to stop monitor vdev %i after start failure: %d\n",
1038 			    ar->monitor_vdev_id, ret);
1039 
1040 	return ret;
1041 }
1042 
1043 static int ath10k_monitor_vdev_stop(struct ath10k *ar)
1044 {
1045 	int ret = 0;
1046 
1047 	lockdep_assert_held(&ar->conf_mutex);
1048 
1049 	ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
1050 	if (ret)
1051 		ath10k_warn(ar, "failed to put down monitor vdev %i: %d\n",
1052 			    ar->monitor_vdev_id, ret);
1053 
1054 	reinit_completion(&ar->vdev_setup_done);
1055 
1056 	ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
1057 	if (ret)
1058 		ath10k_warn(ar, "failed to request monitor vdev %i stop: %d\n",
1059 			    ar->monitor_vdev_id, ret);
1060 
1061 	ret = ath10k_vdev_setup_sync(ar);
1062 	if (ret)
1063 		ath10k_warn(ar, "failed to synchronize monitor vdev %i stop: %d\n",
1064 			    ar->monitor_vdev_id, ret);
1065 
1066 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n",
1067 		   ar->monitor_vdev_id);
1068 	return ret;
1069 }
1070 
1071 static int ath10k_monitor_vdev_create(struct ath10k *ar)
1072 {
1073 	int bit, ret = 0;
1074 
1075 	lockdep_assert_held(&ar->conf_mutex);
1076 
1077 	if (ar->free_vdev_map == 0) {
1078 		ath10k_warn(ar, "failed to find free vdev id for monitor vdev\n");
1079 		return -ENOMEM;
1080 	}
1081 
1082 	bit = __ffs64(ar->free_vdev_map);
1083 
1084 	ar->monitor_vdev_id = bit;
1085 
1086 	ret = ath10k_wmi_vdev_create(ar, ar->monitor_vdev_id,
1087 				     WMI_VDEV_TYPE_MONITOR,
1088 				     0, ar->mac_addr);
1089 	if (ret) {
1090 		ath10k_warn(ar, "failed to request monitor vdev %i creation: %d\n",
1091 			    ar->monitor_vdev_id, ret);
1092 		return ret;
1093 	}
1094 
1095 	ar->free_vdev_map &= ~(1LL << ar->monitor_vdev_id);
1096 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
1097 		   ar->monitor_vdev_id);
1098 
1099 	return 0;
1100 }
1101 
1102 static int ath10k_monitor_vdev_delete(struct ath10k *ar)
1103 {
1104 	int ret = 0;
1105 
1106 	lockdep_assert_held(&ar->conf_mutex);
1107 
1108 	ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
1109 	if (ret) {
1110 		ath10k_warn(ar, "failed to request wmi monitor vdev %i removal: %d\n",
1111 			    ar->monitor_vdev_id, ret);
1112 		return ret;
1113 	}
1114 
1115 	ar->free_vdev_map |= 1LL << ar->monitor_vdev_id;
1116 
1117 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
1118 		   ar->monitor_vdev_id);
1119 	return ret;
1120 }
1121 
1122 static int ath10k_monitor_start(struct ath10k *ar)
1123 {
1124 	int ret;
1125 
1126 	lockdep_assert_held(&ar->conf_mutex);
1127 
1128 	ret = ath10k_monitor_vdev_create(ar);
1129 	if (ret) {
1130 		ath10k_warn(ar, "failed to create monitor vdev: %d\n", ret);
1131 		return ret;
1132 	}
1133 
1134 	ret = ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id);
1135 	if (ret) {
1136 		ath10k_warn(ar, "failed to start monitor vdev: %d\n", ret);
1137 		ath10k_monitor_vdev_delete(ar);
1138 		return ret;
1139 	}
1140 
1141 	ar->monitor_started = true;
1142 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor started\n");
1143 
1144 	return 0;
1145 }
1146 
1147 static int ath10k_monitor_stop(struct ath10k *ar)
1148 {
1149 	int ret;
1150 
1151 	lockdep_assert_held(&ar->conf_mutex);
1152 
1153 	ret = ath10k_monitor_vdev_stop(ar);
1154 	if (ret) {
1155 		ath10k_warn(ar, "failed to stop monitor vdev: %d\n", ret);
1156 		return ret;
1157 	}
1158 
1159 	ret = ath10k_monitor_vdev_delete(ar);
1160 	if (ret) {
1161 		ath10k_warn(ar, "failed to delete monitor vdev: %d\n", ret);
1162 		return ret;
1163 	}
1164 
1165 	ar->monitor_started = false;
1166 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopped\n");
1167 
1168 	return 0;
1169 }
1170 
1171 static bool ath10k_mac_monitor_vdev_is_needed(struct ath10k *ar)
1172 {
1173 	int num_ctx;
1174 
1175 	/* At least one chanctx is required to derive a channel to start
1176 	 * monitor vdev on.
1177 	 */
1178 	num_ctx = ath10k_mac_num_chanctxs(ar);
1179 	if (num_ctx == 0)
1180 		return false;
1181 
1182 	/* If there's already an existing special monitor interface then don't
1183 	 * bother creating another monitor vdev.
1184 	 */
1185 	if (ar->monitor_arvif)
1186 		return false;
1187 
1188 	return ar->monitor ||
1189 	       (!test_bit(ATH10K_FW_FEATURE_ALLOWS_MESH_BCAST,
1190 			  ar->running_fw->fw_file.fw_features) &&
1191 		(ar->filter_flags & FIF_OTHER_BSS)) ||
1192 	       test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
1193 }
1194 
1195 static bool ath10k_mac_monitor_vdev_is_allowed(struct ath10k *ar)
1196 {
1197 	int num_ctx;
1198 
1199 	num_ctx = ath10k_mac_num_chanctxs(ar);
1200 
1201 	/* FIXME: Current interface combinations and cfg80211/mac80211 code
1202 	 * shouldn't allow this, but make sure to prevent handling the following
1203 	 * case anyway since multi-channel DFS hasn't been tested at all.
1204 	 */
1205 	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags) && num_ctx > 1)
1206 		return false;
1207 
1208 	return true;
1209 }
1210 
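/* Reconcile the monitor vdev state with what is currently needed: a
 * monitor interface, FIF_OTHER_BSS filtering on firmware lacking
 * ATH10K_FW_FEATURE_ALLOWS_MESH_BCAST, or a running CAC. Returns -EPERM
 * when a monitor vdev is needed but not allowed.
 */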
1211 static int ath10k_monitor_recalc(struct ath10k *ar)
1212 {
1213 	bool needed;
1214 	bool allowed;
1215 	int ret;
1216 
1217 	lockdep_assert_held(&ar->conf_mutex);
1218 
1219 	needed = ath10k_mac_monitor_vdev_is_needed(ar);
1220 	allowed = ath10k_mac_monitor_vdev_is_allowed(ar);
1221 
1222 	ath10k_dbg(ar, ATH10K_DBG_MAC,
1223 		   "mac monitor recalc started? %d needed? %d allowed? %d\n",
1224 		   ar->monitor_started, needed, allowed);
1225 
1226 	if (WARN_ON(needed && !allowed)) {
1227 		if (ar->monitor_started) {
1228 			ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopping disallowed monitor\n");
1229 
1230 			ret = ath10k_monitor_stop(ar);
1231 			if (ret)
1232 				ath10k_warn(ar, "failed to stop disallowed monitor: %d\n",
1233 					    ret);
1234 				/* not serious */
1235 		}
1236 
1237 		return -EPERM;
1238 	}
1239 
1240 	if (needed == ar->monitor_started)
1241 		return 0;
1242 
1243 	if (needed)
1244 		return ath10k_monitor_start(ar);
1245 	else
1246 		return ath10k_monitor_stop(ar);
1247 }
1248 
1249 static bool ath10k_mac_can_set_cts_prot(struct ath10k_vif *arvif)
1250 {
1251 	struct ath10k *ar = arvif->ar;
1252 
1253 	lockdep_assert_held(&ar->conf_mutex);
1254 
1255 	if (!arvif->is_started) {
1256 		ath10k_dbg(ar, ATH10K_DBG_MAC, "defer cts setup, vdev is not ready yet\n");
1257 		return false;
1258 	}
1259 
1260 	return true;
1261 }
1262 
1263 static int ath10k_mac_set_cts_prot(struct ath10k_vif *arvif)
1264 {
1265 	struct ath10k *ar = arvif->ar;
1266 	u32 vdev_param;
1267 
1268 	lockdep_assert_held(&ar->conf_mutex);
1269 
1270 	vdev_param = ar->wmi.vdev_param->protection_mode;
1271 
1272 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_protection %d\n",
1273 		   arvif->vdev_id, arvif->use_cts_prot);
1274 
1275 	return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
1276 					 arvif->use_cts_prot ? 1 : 0);
1277 }
1278 
1279 static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
1280 {
1281 	struct ath10k *ar = arvif->ar;
1282 	u32 vdev_param, rts_cts = 0;
1283 
1284 	lockdep_assert_held(&ar->conf_mutex);
1285 
1286 	vdev_param = ar->wmi.vdev_param->enable_rtscts;
1287 
1288 	rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET);
1289 
1290 	if (arvif->num_legacy_stations > 0)
1291 		rts_cts |= SM(WMI_RTSCTS_ACROSS_SW_RETRIES,
1292 			      WMI_RTSCTS_PROFILE);
1293 	else
1294 		rts_cts |= SM(WMI_RTSCTS_FOR_SECOND_RATESERIES,
1295 			      WMI_RTSCTS_PROFILE);
1296 
1297 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d recalc rts/cts prot %d\n",
1298 		   arvif->vdev_id, rts_cts);
1299 
1300 	return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
1301 					 rts_cts);
1302 }
1303 
1304 static int ath10k_start_cac(struct ath10k *ar)
1305 {
1306 	int ret;
1307 
1308 	lockdep_assert_held(&ar->conf_mutex);
1309 
1310 	set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
1311 
1312 	ret = ath10k_monitor_recalc(ar);
1313 	if (ret) {
1314 		ath10k_warn(ar, "failed to start monitor (cac): %d\n", ret);
1315 		clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
1316 		return ret;
1317 	}
1318 
1319 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n",
1320 		   ar->monitor_vdev_id);
1321 
1322 	return 0;
1323 }
1324 
1325 static int ath10k_stop_cac(struct ath10k *ar)
1326 {
1327 	lockdep_assert_held(&ar->conf_mutex);
1328 
1329 	/* CAC is not running - do nothing */
1330 	if (!test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags))
1331 		return 0;
1332 
1333 	clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
1334 	ath10k_monitor_stop(ar);
1335 
1336 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac finished\n");
1337 
1338 	return 0;
1339 }
1340 
1341 static void ath10k_mac_has_radar_iter(struct ieee80211_hw *hw,
1342 				      struct ieee80211_chanctx_conf *conf,
1343 				      void *data)
1344 {
1345 	bool *ret = data;
1346 
1347 	if (!*ret && conf->radar_enabled)
1348 		*ret = true;
1349 }
1350 
1351 static bool ath10k_mac_has_radar_enabled(struct ath10k *ar)
1352 {
1353 	bool has_radar = false;
1354 
1355 	ieee80211_iter_chan_contexts_atomic(ar->hw,
1356 					    ath10k_mac_has_radar_iter,
1357 					    &has_radar);
1358 
1359 	return has_radar;
1360 }
1361 
1362 static void ath10k_recalc_radar_detection(struct ath10k *ar)
1363 {
1364 	int ret;
1365 
1366 	lockdep_assert_held(&ar->conf_mutex);
1367 
1368 	ath10k_stop_cac(ar);
1369 
1370 	if (!ath10k_mac_has_radar_enabled(ar))
1371 		return;
1372 
1373 	if (ar->num_started_vdevs > 0)
1374 		return;
1375 
1376 	ret = ath10k_start_cac(ar);
1377 	if (ret) {
1378 		/*
1379 		 * Not possible to start CAC on the current channel, so starting
1380 		 * radiation is not allowed; make this channel DFS_UNAVAILABLE
1381 		 * by indicating that radar was detected.
1382 		 */
1383 		ath10k_warn(ar, "failed to start CAC: %d\n", ret);
1384 		ieee80211_radar_detected(ar->hw);
1385 	}
1386 }
1387 
1388 static int ath10k_vdev_stop(struct ath10k_vif *arvif)
1389 {
1390 	struct ath10k *ar = arvif->ar;
1391 	int ret;
1392 
1393 	lockdep_assert_held(&ar->conf_mutex);
1394 
1395 	reinit_completion(&ar->vdev_setup_done);
1396 
1397 	ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
1398 	if (ret) {
1399 		ath10k_warn(ar, "failed to stop WMI vdev %i: %d\n",
1400 			    arvif->vdev_id, ret);
1401 		return ret;
1402 	}
1403 
1404 	ret = ath10k_vdev_setup_sync(ar);
1405 	if (ret) {
1406 		ath10k_warn(ar, "failed to synchronize setup for vdev %i: %d\n",
1407 			    arvif->vdev_id, ret);
1408 		return ret;
1409 	}
1410 
1411 	WARN_ON(ar->num_started_vdevs == 0);
1412 
1413 	if (ar->num_started_vdevs != 0) {
1414 		ar->num_started_vdevs--;
1415 		ath10k_recalc_radar_detection(ar);
1416 	}
1417 
1418 	return ret;
1419 }
1420 
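/* Common worker for vdev start and restart: build the WMI start request
 * from the channel definition and vif state (SSID for AP/IBSS, radar
 * flag for AP), issue the command and wait for vdev setup completion
 * before recalculating the radar detection state.
 */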
1421 static int ath10k_vdev_start_restart(struct ath10k_vif *arvif,
1422 				     const struct cfg80211_chan_def *chandef,
1423 				     bool restart)
1424 {
1425 	struct ath10k *ar = arvif->ar;
1426 	struct wmi_vdev_start_request_arg arg = {};
1427 	int ret = 0;
1428 
1429 	lockdep_assert_held(&ar->conf_mutex);
1430 
1431 	reinit_completion(&ar->vdev_setup_done);
1432 
1433 	arg.vdev_id = arvif->vdev_id;
1434 	arg.dtim_period = arvif->dtim_period;
1435 	arg.bcn_intval = arvif->beacon_interval;
1436 
1437 	arg.channel.freq = chandef->chan->center_freq;
1438 	arg.channel.band_center_freq1 = chandef->center_freq1;
1439 	arg.channel.band_center_freq2 = chandef->center_freq2;
1440 	arg.channel.mode = chan_to_phymode(chandef);
1441 
1442 	arg.channel.min_power = 0;
1443 	arg.channel.max_power = chandef->chan->max_power * 2;
1444 	arg.channel.max_reg_power = chandef->chan->max_reg_power * 2;
1445 	arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
1446 
1447 	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
1448 		arg.ssid = arvif->u.ap.ssid;
1449 		arg.ssid_len = arvif->u.ap.ssid_len;
1450 		arg.hidden_ssid = arvif->u.ap.hidden_ssid;
1451 
1452 		/* For now allow DFS for AP mode */
1453 		arg.channel.chan_radar =
1454 			!!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
1455 	} else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
1456 		arg.ssid = arvif->vif->bss_conf.ssid;
1457 		arg.ssid_len = arvif->vif->bss_conf.ssid_len;
1458 	}
1459 
1460 	ath10k_dbg(ar, ATH10K_DBG_MAC,
1461 		   "mac vdev %d start center_freq %d phymode %s\n",
1462 		   arg.vdev_id, arg.channel.freq,
1463 		   ath10k_wmi_phymode_str(arg.channel.mode));
1464 
1465 	if (restart)
1466 		ret = ath10k_wmi_vdev_restart(ar, &arg);
1467 	else
1468 		ret = ath10k_wmi_vdev_start(ar, &arg);
1469 
1470 	if (ret) {
1471 		ath10k_warn(ar, "failed to start WMI vdev %i: %d\n",
1472 			    arg.vdev_id, ret);
1473 		return ret;
1474 	}
1475 
1476 	ret = ath10k_vdev_setup_sync(ar);
1477 	if (ret) {
1478 		ath10k_warn(ar,
1479 			    "failed to synchronize setup for vdev %i restart %d: %d\n",
1480 			    arg.vdev_id, restart, ret);
1481 		return ret;
1482 	}
1483 
1484 	ar->num_started_vdevs++;
1485 	ath10k_recalc_radar_detection(ar);
1486 
1487 	return ret;
1488 }
1489 
1490 static int ath10k_vdev_start(struct ath10k_vif *arvif,
1491 			     const struct cfg80211_chan_def *def)
1492 {
1493 	return ath10k_vdev_start_restart(arvif, def, false);
1494 }
1495 
1496 static int ath10k_vdev_restart(struct ath10k_vif *arvif,
1497 			       const struct cfg80211_chan_def *def)
1498 {
1499 	return ath10k_vdev_start_restart(arvif, def, true);
1500 }
1501 
1502 static int ath10k_mac_setup_bcn_p2p_ie(struct ath10k_vif *arvif,
1503 				       struct sk_buff *bcn)
1504 {
1505 	struct ath10k *ar = arvif->ar;
1506 	struct ieee80211_mgmt *mgmt;
1507 	const u8 *p2p_ie;
1508 	int ret;
1509 
1510 	if (arvif->vif->type != NL80211_IFTYPE_AP || !arvif->vif->p2p)
1511 		return 0;
1512 
1513 	mgmt = (void *)bcn->data;
1514 	p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
1515 					 mgmt->u.beacon.variable,
1516 					 bcn->len - (mgmt->u.beacon.variable -
1517 						     bcn->data));
1518 	if (!p2p_ie)
1519 		return -ENOENT;
1520 
1521 	ret = ath10k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie);
1522 	if (ret) {
1523 		ath10k_warn(ar, "failed to submit p2p go bcn ie for vdev %i: %d\n",
1524 			    arvif->vdev_id, ret);
1525 		return ret;
1526 	}
1527 
1528 	return 0;
1529 }
1530 
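/* Find a vendor IE (by OUI and type) at or after ie_offset and strip it
 * from the skb by shifting the remaining data down and trimming the
 * tail.
 */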
1531 static int ath10k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui,
1532 				       u8 oui_type, size_t ie_offset)
1533 {
1534 	size_t len;
1535 	const u8 *next;
1536 	const u8 *end;
1537 	u8 *ie;
1538 
1539 	if (WARN_ON(skb->len < ie_offset))
1540 		return -EINVAL;
1541 
1542 	ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
1543 					   skb->data + ie_offset,
1544 					   skb->len - ie_offset);
1545 	if (!ie)
1546 		return -ENOENT;
1547 
1548 	len = ie[1] + 2;
1549 	end = skb->data + skb->len;
1550 	next = ie + len;
1551 
1552 	if (WARN_ON(next > end))
1553 		return -EINVAL;
1554 
1555 	memmove(ie, next, end - next);
1556 	skb_trim(skb, skb->len - len);
1557 
1558 	return 0;
1559 }
1560 
1561 static int ath10k_mac_setup_bcn_tmpl(struct ath10k_vif *arvif)
1562 {
1563 	struct ath10k *ar = arvif->ar;
1564 	struct ieee80211_hw *hw = ar->hw;
1565 	struct ieee80211_vif *vif = arvif->vif;
1566 	struct ieee80211_mutable_offsets offs = {};
1567 	struct sk_buff *bcn;
1568 	int ret;
1569 
1570 	if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
1571 		return 0;
1572 
1573 	if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
1574 	    arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
1575 		return 0;
1576 
1577 	bcn = ieee80211_beacon_get_template(hw, vif, &offs);
1578 	if (!bcn) {
1579 		ath10k_warn(ar, "failed to get beacon template from mac80211\n");
1580 		return -EPERM;
1581 	}
1582 
1583 	ret = ath10k_mac_setup_bcn_p2p_ie(arvif, bcn);
1584 	if (ret) {
1585 		ath10k_warn(ar, "failed to setup p2p go bcn ie: %d\n", ret);
1586 		kfree_skb(bcn);
1587 		return ret;
1588 	}
1589 
1590 	/* P2P IE is inserted by firmware automatically (as configured above)
1591 	 * so remove it from the base beacon template to avoid duplicate P2P
1592 	 * IEs in beacon frames.
1593 	 */
1594 	ath10k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
1595 				    offsetof(struct ieee80211_mgmt,
1596 					     u.beacon.variable));
1597 
1598 	ret = ath10k_wmi_bcn_tmpl(ar, arvif->vdev_id, offs.tim_offset, bcn, 0,
1599 				  0, NULL, 0);
1600 	kfree_skb(bcn);
1601 
1602 	if (ret) {
1603 		ath10k_warn(ar, "failed to submit beacon template command: %d\n",
1604 			    ret);
1605 		return ret;
1606 	}
1607 
1608 	return 0;
1609 }
1610 
1611 static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif)
1612 {
1613 	struct ath10k *ar = arvif->ar;
1614 	struct ieee80211_hw *hw = ar->hw;
1615 	struct ieee80211_vif *vif = arvif->vif;
1616 	struct sk_buff *prb;
1617 	int ret;
1618 
1619 	if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
1620 		return 0;
1621 
1622 	if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
1623 		return 0;
1624 
1625 	prb = ieee80211_proberesp_get(hw, vif);
1626 	if (!prb) {
1627 		ath10k_warn(ar, "failed to get probe resp template from mac80211\n");
1628 		return -EPERM;
1629 	}
1630 
1631 	ret = ath10k_wmi_prb_tmpl(ar, arvif->vdev_id, prb);
1632 	kfree_skb(prb);
1633 
1634 	if (ret) {
1635 		ath10k_warn(ar, "failed to submit probe resp template command: %d\n",
1636 			    ret);
1637 		return ret;
1638 	}
1639 
1640 	return 0;
1641 }
1642 
1643 static int ath10k_mac_vif_fix_hidden_ssid(struct ath10k_vif *arvif)
1644 {
1645 	struct ath10k *ar = arvif->ar;
1646 	struct cfg80211_chan_def def;
1647 	int ret;
1648 
1649 	/* When the vdev is originally started during assign_vif_chanctx() some
1650 	 * information is missing, notably SSID. Firmware revisions with beacon
1651 	 * offloading require the SSID to be provided during vdev (re)start to
1652 	 * handle hidden SSID properly.
1653 	 *
1654 	 * Vdev restart must be done after vdev has been both started and
1655 	 * upped. Otherwise some firmware revisions (at least 10.2) fail to
1656 	 * deliver vdev restart response event causing timeouts during vdev
1657 	 * syncing in ath10k.
1658 	 *
1659 	 * Note: The vdev down/up and template reinstallation could be skipped
1660 	 * since only wmi-tlv firmware are known to have beacon offload and
1661 	 * wmi-tlv doesn't seem to misbehave like 10.2 wrt vdev restart
1662 	 * response delivery. It's probably more robust to keep it as is.
1663 	 */
1664 	if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
1665 		return 0;
1666 
1667 	if (WARN_ON(!arvif->is_started))
1668 		return -EINVAL;
1669 
1670 	if (WARN_ON(!arvif->is_up))
1671 		return -EINVAL;
1672 
1673 	if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
1674 		return -EINVAL;
1675 
1676 	ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
1677 	if (ret) {
1678 		ath10k_warn(ar, "failed to bring down ap vdev %i: %d\n",
1679 			    arvif->vdev_id, ret);
1680 		return ret;
1681 	}
1682 
1683 	/* Vdev down resets beacon & presp templates. Reinstall them, otherwise
1684 	 * firmware will crash upon vdev up.
1685 	 */
1686 
1687 	ret = ath10k_mac_setup_bcn_tmpl(arvif);
1688 	if (ret) {
1689 		ath10k_warn(ar, "failed to update beacon template: %d\n", ret);
1690 		return ret;
1691 	}
1692 
1693 	ret = ath10k_mac_setup_prb_tmpl(arvif);
1694 	if (ret) {
1695 		ath10k_warn(ar, "failed to update presp template: %d\n", ret);
1696 		return ret;
1697 	}
1698 
1699 	ret = ath10k_vdev_restart(arvif, &def);
1700 	if (ret) {
1701 		ath10k_warn(ar, "failed to restart ap vdev %i: %d\n",
1702 			    arvif->vdev_id, ret);
1703 		return ret;
1704 	}
1705 
1706 	ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
1707 				 arvif->bssid);
1708 	if (ret) {
1709 		ath10k_warn(ar, "failed to bring up ap vdev %i: %d\n",
1710 			    arvif->vdev_id, ret);
1711 		return ret;
1712 	}
1713 
1714 	return 0;
1715 }
1716 
1717 static void ath10k_control_beaconing(struct ath10k_vif *arvif,
1718 				     struct ieee80211_bss_conf *info)
1719 {
1720 	struct ath10k *ar = arvif->ar;
1721 	int ret = 0;
1722 
1723 	lockdep_assert_held(&arvif->ar->conf_mutex);
1724 
1725 	if (!info->enable_beacon) {
1726 		ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
1727 		if (ret)
1728 			ath10k_warn(ar, "failed to down vdev_id %i: %d\n",
1729 				    arvif->vdev_id, ret);
1730 
1731 		arvif->is_up = false;
1732 
1733 		spin_lock_bh(&arvif->ar->data_lock);
1734 		ath10k_mac_vif_beacon_free(arvif);
1735 		spin_unlock_bh(&arvif->ar->data_lock);
1736 
1737 		return;
1738 	}
1739 
1740 	arvif->tx_seq_no = 0x1000;
1741 
1742 	arvif->aid = 0;
1743 	ether_addr_copy(arvif->bssid, info->bssid);
1744 
1745 	ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
1746 				 arvif->bssid);
1747 	if (ret) {
1748 		ath10k_warn(ar, "failed to bring up vdev %d: %i\n",
1749 			    arvif->vdev_id, ret);
1750 		return;
1751 	}
1752 
1753 	arvif->is_up = true;
1754 
1755 	ret = ath10k_mac_vif_fix_hidden_ssid(arvif);
1756 	if (ret) {
1757 		ath10k_warn(ar, "failed to fix hidden ssid for vdev %i, expect trouble: %d\n",
1758 			    arvif->vdev_id, ret);
1759 		return;
1760 	}
1761 
1762 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
1763 }
1764 
1765 static void ath10k_control_ibss(struct ath10k_vif *arvif,
1766 				struct ieee80211_bss_conf *info,
1767 				const u8 self_peer[ETH_ALEN])
1768 {
1769 	struct ath10k *ar = arvif->ar;
1770 	u32 vdev_param;
1771 	int ret = 0;
1772 
1773 	lockdep_assert_held(&arvif->ar->conf_mutex);
1774 
1775 	if (!info->ibss_joined) {
1776 		if (is_zero_ether_addr(arvif->bssid))
1777 			return;
1778 
1779 		eth_zero_addr(arvif->bssid);
1780 
1781 		return;
1782 	}
1783 
1784 	vdev_param = arvif->ar->wmi.vdev_param->atim_window;
1785 	ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param,
1786 					ATH10K_DEFAULT_ATIM);
1787 	if (ret)
1788 		ath10k_warn(ar, "failed to set IBSS ATIM for vdev %d: %d\n",
1789 			    arvif->vdev_id, ret);
1790 }
1791 
1792 static int ath10k_mac_vif_recalc_ps_wake_threshold(struct ath10k_vif *arvif)
1793 {
1794 	struct ath10k *ar = arvif->ar;
1795 	u32 param;
1796 	u32 value;
1797 	int ret;
1798 
1799 	lockdep_assert_held(&arvif->ar->conf_mutex);
1800 
1801 	if (arvif->u.sta.uapsd)
1802 		value = WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER;
1803 	else
1804 		value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
1805 
1806 	param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
1807 	ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, value);
1808 	if (ret) {
1809 		ath10k_warn(ar, "failed to submit ps wake threshold %u on vdev %i: %d\n",
1810 			    value, arvif->vdev_id, ret);
1811 		return ret;
1812 	}
1813 
1814 	return 0;
1815 }
1816 
1817 static int ath10k_mac_vif_recalc_ps_poll_count(struct ath10k_vif *arvif)
1818 {
1819 	struct ath10k *ar = arvif->ar;
1820 	u32 param;
1821 	u32 value;
1822 	int ret;
1823 
1824 	lockdep_assert_held(&arvif->ar->conf_mutex);
1825 
1826 	if (arvif->u.sta.uapsd)
1827 		value = WMI_STA_PS_PSPOLL_COUNT_UAPSD;
1828 	else
1829 		value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
1830 
1831 	param = WMI_STA_PS_PARAM_PSPOLL_COUNT;
1832 	ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
1833 					  param, value);
1834 	if (ret) {
1835 		ath10k_warn(ar, "failed to submit ps poll count %u on vdev %i: %d\n",
1836 			    value, arvif->vdev_id, ret);
1837 		return ret;
1838 	}
1839 
1840 	return 0;
1841 }
1842 
1843 static int ath10k_mac_num_vifs_started(struct ath10k *ar)
1844 {
1845 	struct ath10k_vif *arvif;
1846 	int num = 0;
1847 
1848 	lockdep_assert_held(&ar->conf_mutex);
1849 
1850 	list_for_each_entry(arvif, &ar->arvifs, list)
1851 		if (arvif->is_started)
1852 			num++;
1853 
1854 	return num;
1855 }
1856 
1857 static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
1858 {
1859 	struct ath10k *ar = arvif->ar;
1860 	struct ieee80211_vif *vif = arvif->vif;
1861 	struct ieee80211_conf *conf = &ar->hw->conf;
1862 	enum wmi_sta_powersave_param param;
1863 	enum wmi_sta_ps_mode psmode;
1864 	int ret;
1865 	int ps_timeout;
1866 	bool enable_ps;
1867 
1868 	lockdep_assert_held(&arvif->ar->conf_mutex);
1869 
1870 	if (arvif->vif->type != NL80211_IFTYPE_STATION)
1871 		return 0;
1872 
1873 	enable_ps = arvif->ps;
1874 
1875 	if (enable_ps && ath10k_mac_num_vifs_started(ar) > 1 &&
1876 	    !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT,
1877 		      ar->running_fw->fw_file.fw_features)) {
1878 		ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n",
1879 			    arvif->vdev_id);
1880 		enable_ps = false;
1881 	}
1882 
1883 	if (!arvif->is_started) {
1884 		/* mac80211 can update vif powersave state while disconnected.
1885 		 * Firmware doesn't behave nicely and consumes more power than
1886 		 * necessary if PS is disabled on a non-started vdev. Hence
1887 		 * force-enable PS for non-running vdevs.
1888 		 */
1889 		psmode = WMI_STA_PS_MODE_ENABLED;
1890 	} else if (enable_ps) {
1891 		psmode = WMI_STA_PS_MODE_ENABLED;
1892 		param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
1893 
1894 		ps_timeout = conf->dynamic_ps_timeout;
1895 		if (ps_timeout == 0) {
1896 			/* Firmware doesn't like 0 */
1897 			ps_timeout = ieee80211_tu_to_usec(
1898 				vif->bss_conf.beacon_int) / 1000;
1899 		}
1900 
1901 		ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
1902 						  ps_timeout);
1903 		if (ret) {
1904 			ath10k_warn(ar, "failed to set inactivity time for vdev %d: %i\n",
1905 				    arvif->vdev_id, ret);
1906 			return ret;
1907 		}
1908 	} else {
1909 		psmode = WMI_STA_PS_MODE_DISABLED;
1910 	}
1911 
1912 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d psmode %s\n",
1913 		   arvif->vdev_id, psmode ? "enable" : "disable");
1914 
1915 	ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode);
1916 	if (ret) {
1917 		ath10k_warn(ar, "failed to set PS Mode %d for vdev %d: %d\n",
1918 			    psmode, arvif->vdev_id, ret);
1919 		return ret;
1920 	}
1921 
1922 	return 0;
1923 }
1924 
1925 static int ath10k_mac_vif_disable_keepalive(struct ath10k_vif *arvif)
1926 {
1927 	struct ath10k *ar = arvif->ar;
1928 	struct wmi_sta_keepalive_arg arg = {};
1929 	int ret;
1930 
1931 	lockdep_assert_held(&arvif->ar->conf_mutex);
1932 
1933 	if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
1934 		return 0;
1935 
1936 	if (!test_bit(WMI_SERVICE_STA_KEEP_ALIVE, ar->wmi.svc_map))
1937 		return 0;
1938 
1939 	/* Some firmware revisions have a bug and ignore the `enabled` field.
1940 	 * Instead use the interval to disable the keepalive.
1941 	 */
1942 	arg.vdev_id = arvif->vdev_id;
1943 	arg.enabled = 1;
1944 	arg.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME;
1945 	arg.interval = WMI_STA_KEEPALIVE_INTERVAL_DISABLE;
1946 
1947 	ret = ath10k_wmi_sta_keepalive(ar, &arg);
1948 	if (ret) {
1949 		ath10k_warn(ar, "failed to submit keepalive on vdev %i: %d\n",
1950 			    arvif->vdev_id, ret);
1951 		return ret;
1952 	}
1953 
1954 	return 0;
1955 }
1956 
1957 static void ath10k_mac_vif_ap_csa_count_down(struct ath10k_vif *arvif)
1958 {
1959 	struct ath10k *ar = arvif->ar;
1960 	struct ieee80211_vif *vif = arvif->vif;
1961 	int ret;
1962 
1963 	lockdep_assert_held(&arvif->ar->conf_mutex);
1964 
1965 	if (WARN_ON(!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)))
1966 		return;
1967 
1968 	if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
1969 		return;
1970 
1971 	if (!vif->csa_active)
1972 		return;
1973 
1974 	if (!arvif->is_up)
1975 		return;
1976 
1977 	if (!ieee80211_csa_is_complete(vif)) {
1978 		ieee80211_csa_update_counter(vif);
1979 
1980 		ret = ath10k_mac_setup_bcn_tmpl(arvif);
1981 		if (ret)
1982 			ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
1983 				    ret);
1984 
1985 		ret = ath10k_mac_setup_prb_tmpl(arvif);
1986 		if (ret)
1987 			ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
1988 				    ret);
1989 	} else {
1990 		ieee80211_csa_finish(vif);
1991 	}
1992 }
1993 
1994 static void ath10k_mac_vif_ap_csa_work(struct work_struct *work)
1995 {
1996 	struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
1997 						ap_csa_work);
1998 	struct ath10k *ar = arvif->ar;
1999 
2000 	mutex_lock(&ar->conf_mutex);
2001 	ath10k_mac_vif_ap_csa_count_down(arvif);
2002 	mutex_unlock(&ar->conf_mutex);
2003 }
2004 
2005 static void ath10k_mac_handle_beacon_iter(void *data, u8 *mac,
2006 					  struct ieee80211_vif *vif)
2007 {
2008 	struct sk_buff *skb = data;
2009 	struct ieee80211_mgmt *mgmt = (void *)skb->data;
2010 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
2011 
2012 	if (vif->type != NL80211_IFTYPE_STATION)
2013 		return;
2014 
2015 	if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid))
2016 		return;
2017 
2018 	cancel_delayed_work(&arvif->connection_loss_work);
2019 }
2020 
2021 void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb)
2022 {
2023 	ieee80211_iterate_active_interfaces_atomic(ar->hw,
2024 						   IEEE80211_IFACE_ITER_NORMAL,
2025 						   ath10k_mac_handle_beacon_iter,
2026 						   skb);
2027 }
2028 
2029 static void ath10k_mac_handle_beacon_miss_iter(void *data, u8 *mac,
2030 					       struct ieee80211_vif *vif)
2031 {
2032 	u32 *vdev_id = data;
2033 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
2034 	struct ath10k *ar = arvif->ar;
2035 	struct ieee80211_hw *hw = ar->hw;
2036 
2037 	if (arvif->vdev_id != *vdev_id)
2038 		return;
2039 
2040 	if (!arvif->is_up)
2041 		return;
2042 
2043 	ieee80211_beacon_loss(vif);
2044 
2045 	/* Firmware doesn't report beacon loss events repeatedly. If AP probe
2046 	 * (done by mac80211) succeeds but beacons do not resume then it
2047 	 * doesn't make sense to continue operation. Queue connection loss work
2048 	 * which can be cancelled when beacon is received.
2049 	 * which can be cancelled when a beacon is received.
2050 	ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work,
2051 				     ATH10K_CONNECTION_LOSS_HZ);
2052 }
2053 
2054 void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id)
2055 {
2056 	ieee80211_iterate_active_interfaces_atomic(ar->hw,
2057 						   IEEE80211_IFACE_ITER_NORMAL,
2058 						   ath10k_mac_handle_beacon_miss_iter,
2059 						   &vdev_id);
2060 }
2061 
2062 static void ath10k_mac_vif_sta_connection_loss_work(struct work_struct *work)
2063 {
2064 	struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
2065 						connection_loss_work.work);
2066 	struct ieee80211_vif *vif = arvif->vif;
2067 
2068 	if (!arvif->is_up)
2069 		return;
2070 
2071 	ieee80211_connection_loss(vif);
2072 }
2073 
2074 /**********************/
2075 /* Station management */
2076 /**********************/
2077 
2078 static u32 ath10k_peer_assoc_h_listen_intval(struct ath10k *ar,
2079 					     struct ieee80211_vif *vif)
2080 {
2081 	/* Some firmware revisions have unstable STA powersave when listen
2082 	 * interval is set too high (e.g. 5). The symptom is that the firmware
2083 	 * doesn't generate NullFunc frames properly even if buffered frames have
2084 	 * been indicated in the Beacon TIM. The firmware would seldom wake up to
2085 	 * pull buffered frames; often pinging the device from the AP simply fails.
2086 	 *
2087 	 * As a workaround set it to 1.
2088 	 */
2089 	if (vif->type == NL80211_IFTYPE_STATION)
2090 		return 1;
2091 
2092 	return ar->hw->conf.listen_interval;
2093 }
2094 
2095 static void ath10k_peer_assoc_h_basic(struct ath10k *ar,
2096 				      struct ieee80211_vif *vif,
2097 				      struct ieee80211_sta *sta,
2098 				      struct wmi_peer_assoc_complete_arg *arg)
2099 {
2100 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
2101 	u32 aid;
2102 
2103 	lockdep_assert_held(&ar->conf_mutex);
2104 
2105 	if (vif->type == NL80211_IFTYPE_STATION)
2106 		aid = vif->bss_conf.aid;
2107 	else
2108 		aid = sta->aid;
2109 
2110 	ether_addr_copy(arg->addr, sta->addr);
2111 	arg->vdev_id = arvif->vdev_id;
2112 	arg->peer_aid = aid;
2113 	arg->peer_flags |= arvif->ar->wmi.peer_flags->auth;
2114 	arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif);
2115 	arg->peer_num_spatial_streams = 1;
2116 	arg->peer_caps = vif->bss_conf.assoc_capability;
2117 }
2118 
2119 static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
2120 				       struct ieee80211_vif *vif,
2121 				       struct ieee80211_sta *sta,
2122 				       struct wmi_peer_assoc_complete_arg *arg)
2123 {
2124 	struct ieee80211_bss_conf *info = &vif->bss_conf;
2125 	struct cfg80211_chan_def def;
2126 	struct cfg80211_bss *bss;
2127 	const u8 *rsnie = NULL;
2128 	const u8 *wpaie = NULL;
2129 
2130 	lockdep_assert_held(&ar->conf_mutex);
2131 
2132 	if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2133 		return;
2134 
2135 	bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0,
2136 			       IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
2137 	if (bss) {
2138 		const struct cfg80211_bss_ies *ies;
2139 
2140 		rcu_read_lock();
2141 		rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN);
2142 
2143 		ies = rcu_dereference(bss->ies);
2144 
2145 		wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
2146 						WLAN_OUI_TYPE_MICROSOFT_WPA,
2147 						ies->data,
2148 						ies->len);
2149 		rcu_read_unlock();
2150 		cfg80211_put_bss(ar->hw->wiphy, bss);
2151 	}
2152 
2153 	/* FIXME: is keying this off the RSN IE / WPA IE presence a correct idea? */
2154 	if (rsnie || wpaie) {
2155 		ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__);
2156 		arg->peer_flags |= ar->wmi.peer_flags->need_ptk_4_way;
2157 	}
2158 
2159 	if (wpaie) {
2160 		ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__);
2161 		arg->peer_flags |= ar->wmi.peer_flags->need_gtk_2_way;
2162 	}
2163 
2164 	if (sta->mfp &&
2165 	    test_bit(ATH10K_FW_FEATURE_MFP_SUPPORT,
2166 		     ar->running_fw->fw_file.fw_features)) {
2167 		arg->peer_flags |= ar->wmi.peer_flags->pmf;
2168 	}
2169 }
2170 
2171 static void ath10k_peer_assoc_h_rates(struct ath10k *ar,
2172 				      struct ieee80211_vif *vif,
2173 				      struct ieee80211_sta *sta,
2174 				      struct wmi_peer_assoc_complete_arg *arg)
2175 {
2176 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
2177 	struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
2178 	struct cfg80211_chan_def def;
2179 	const struct ieee80211_supported_band *sband;
2180 	const struct ieee80211_rate *rates;
2181 	enum nl80211_band band;
2182 	u32 ratemask;
2183 	u8 rate;
2184 	int i;
2185 
2186 	lockdep_assert_held(&ar->conf_mutex);
2187 
2188 	if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2189 		return;
2190 
2191 	band = def.chan->band;
2192 	sband = ar->hw->wiphy->bands[band];
2193 	ratemask = sta->supp_rates[band];
2194 	ratemask &= arvif->bitrate_mask.control[band].legacy;
2195 	rates = sband->bitrates;
2196 
2197 	rateset->num_rates = 0;
2198 
2199 	for (i = 0; i < 32; i++, ratemask >>= 1, rates++) {
2200 		if (!(ratemask & 1))
2201 			continue;
2202 
2203 		rate = ath10k_mac_bitrate_to_rate(rates->bitrate);
2204 		rateset->rates[rateset->num_rates] = rate;
2205 		rateset->num_rates++;
2206 	}
2207 }
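
/* The loop above walks the 32-bit legacy rate bitmap that survives both the
 * STA's supported set and the user-configured bitrate mask. A small
 * standalone sketch of the same idea (names and types are illustrative):
 */
#include <stddef.h>
#include <stdint.h>

static size_t collect_rate_indices(uint32_t supported, uint32_t allowed,
				   uint8_t *out, size_t max_out)
{
	uint32_t mask = supported & allowed;
	size_t n = 0;
	int i;

	for (i = 0; i < 32 && n < max_out; i++)
		if (mask & (1u << i))
			out[n++] = (uint8_t)i;

	return n;
}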
2208 
2209 static bool
2210 ath10k_peer_assoc_h_ht_masked(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
2211 {
2212 	int nss;
2213 
2214 	for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++)
2215 		if (ht_mcs_mask[nss])
2216 			return false;
2217 
2218 	return true;
2219 }
2220 
2221 static bool
2222 ath10k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
2223 {
2224 	int nss;
2225 
2226 	for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++)
2227 		if (vht_mcs_mask[nss])
2228 			return false;
2229 
2230 	return true;
2231 }
2232 
2233 static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
2234 				   struct ieee80211_vif *vif,
2235 				   struct ieee80211_sta *sta,
2236 				   struct wmi_peer_assoc_complete_arg *arg)
2237 {
2238 	const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2239 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
2240 	struct cfg80211_chan_def def;
2241 	enum nl80211_band band;
2242 	const u8 *ht_mcs_mask;
2243 	const u16 *vht_mcs_mask;
2244 	int i, n;
2245 	u8 max_nss;
2246 	u32 stbc;
2247 
2248 	lockdep_assert_held(&ar->conf_mutex);
2249 
2250 	if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2251 		return;
2252 
2253 	if (!ht_cap->ht_supported)
2254 		return;
2255 
2256 	band = def.chan->band;
2257 	ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
2258 	vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
2259 
2260 	if (ath10k_peer_assoc_h_ht_masked(ht_mcs_mask) &&
2261 	    ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
2262 		return;
2263 
2264 	arg->peer_flags |= ar->wmi.peer_flags->ht;
2265 	arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
2266 				    ht_cap->ampdu_factor)) - 1;
2267 
2268 	arg->peer_mpdu_density =
2269 		ath10k_parse_mpdudensity(ht_cap->ampdu_density);
2270 
2271 	arg->peer_ht_caps = ht_cap->cap;
2272 	arg->peer_rate_caps |= WMI_RC_HT_FLAG;
2273 
2274 	if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)
2275 		arg->peer_flags |= ar->wmi.peer_flags->ldbc;
2276 
2277 	if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
2278 		arg->peer_flags |= ar->wmi.peer_flags->bw40;
2279 		arg->peer_rate_caps |= WMI_RC_CW40_FLAG;
2280 	}
2281 
2282 	if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) {
2283 		if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
2284 			arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
2285 
2286 		if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
2287 			arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
2288 	}
2289 
2290 	if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) {
2291 		arg->peer_rate_caps |= WMI_RC_TX_STBC_FLAG;
2292 		arg->peer_flags |= ar->wmi.peer_flags->stbc;
2293 	}
2294 
2295 	if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) {
2296 		stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC;
2297 		stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT;
2298 		stbc = stbc << WMI_RC_RX_STBC_FLAG_S;
2299 		arg->peer_rate_caps |= stbc;
2300 		arg->peer_flags |= ar->wmi.peer_flags->stbc;
2301 	}
2302 
2303 	if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2])
2304 		arg->peer_rate_caps |= WMI_RC_TS_FLAG;
2305 	else if (ht_cap->mcs.rx_mask[1])
2306 		arg->peer_rate_caps |= WMI_RC_DS_FLAG;
2307 
2308 	for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++)
2309 		if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) &&
2310 		    (ht_mcs_mask[i / 8] & BIT(i % 8))) {
2311 			max_nss = (i / 8) + 1;
2312 			arg->peer_ht_rates.rates[n++] = i;
2313 		}
2314 
2315 	/*
2316 	 * This is a workaround for HT-enabled STAs which break the spec
2317 	 * and have no HT capabilities RX mask (no HT RX MCS map).
2318 	 *
2319 	 * As per spec, in section 20.3.5 Modulation and coding scheme (MCS),
2320 	 * MCS 0 through 7 are mandatory in 20 MHz with 800 ns GI at all STAs.
2321 	 *
2322 	 * Firmware asserts if such a situation occurs.
2323 	 */
2324 	if (n == 0) {
2325 		arg->peer_ht_rates.num_rates = 8;
2326 		for (i = 0; i < arg->peer_ht_rates.num_rates; i++)
2327 			arg->peer_ht_rates.rates[i] = i;
2328 	} else {
2329 		arg->peer_ht_rates.num_rates = n;
2330 		arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss);
2331 	}
2332 
2333 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
2334 		   arg->addr,
2335 		   arg->peer_ht_rates.num_rates,
2336 		   arg->peer_num_spatial_streams);
2337 }
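
/* The MCS loop above derives the spatial stream count from the HT RX MCS
 * bitmap: MCS 0-7 map to 1 stream, 8-15 to 2 streams, and so on, and only
 * rates present in both the STA bitmap and the user mask count. A standalone
 * sketch under those assumptions:
 */
#include <stdint.h>

static int ht_max_nss(const uint8_t *rx_mask, const uint8_t *user_mask,
		      int mask_len)
{
	int i, max_nss = 0;

	for (i = 0; i < mask_len * 8; i++)
		if ((rx_mask[i / 8] & (1 << (i % 8))) &&
		    (user_mask[i / 8] & (1 << (i % 8))))
			max_nss = i / 8 + 1;

	return max_nss;
}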
2338 
2339 static int ath10k_peer_assoc_qos_ap(struct ath10k *ar,
2340 				    struct ath10k_vif *arvif,
2341 				    struct ieee80211_sta *sta)
2342 {
2343 	u32 uapsd = 0;
2344 	u32 max_sp = 0;
2345 	int ret = 0;
2346 
2347 	lockdep_assert_held(&ar->conf_mutex);
2348 
2349 	if (sta->wme && sta->uapsd_queues) {
2350 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",
2351 			   sta->uapsd_queues, sta->max_sp);
2352 
2353 		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
2354 			uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
2355 				 WMI_AP_PS_UAPSD_AC3_TRIGGER_EN;
2356 		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
2357 			uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN |
2358 				 WMI_AP_PS_UAPSD_AC2_TRIGGER_EN;
2359 		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
2360 			uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN |
2361 				 WMI_AP_PS_UAPSD_AC1_TRIGGER_EN;
2362 		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
2363 			uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN |
2364 				 WMI_AP_PS_UAPSD_AC0_TRIGGER_EN;
2365 
2366 		if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP)
2367 			max_sp = sta->max_sp;
2368 
2369 		ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
2370 						 sta->addr,
2371 						 WMI_AP_PS_PEER_PARAM_UAPSD,
2372 						 uapsd);
2373 		if (ret) {
2374 			ath10k_warn(ar, "failed to set ap ps peer param uapsd for vdev %i: %d\n",
2375 				    arvif->vdev_id, ret);
2376 			return ret;
2377 		}
2378 
2379 		ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
2380 						 sta->addr,
2381 						 WMI_AP_PS_PEER_PARAM_MAX_SP,
2382 						 max_sp);
2383 		if (ret) {
2384 			ath10k_warn(ar, "failed to set ap ps peer param max sp for vdev %i: %d\n",
2385 				    arvif->vdev_id, ret);
2386 			return ret;
2387 		}
2388 
2389 		/* TODO: set this up based on the STA listen interval and the
2390 		 * beacon interval. We don't know sta->listen_interval yet -
2391 		 * a mac80211 patch is required.
2392 		 * Use 10 seconds for now.
2393 		 */
2394 		ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr,
2395 						 WMI_AP_PS_PEER_PARAM_AGEOUT_TIME,
2396 						 10);
2397 		if (ret) {
2398 			ath10k_warn(ar, "failed to set ap ps peer param ageout time for vdev %i: %d\n",
2399 				    arvif->vdev_id, ret);
2400 			return ret;
2401 		}
2402 	}
2403 
2404 	return 0;
2405 }
2406 
2407 static u16
2408 ath10k_peer_assoc_h_vht_limit(u16 tx_mcs_set,
2409 			      const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX])
2410 {
2411 	int idx_limit;
2412 	int nss;
2413 	u16 mcs_map;
2414 	u16 mcs;
2415 
2416 	for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) {
2417 		mcs_map = ath10k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) &
2418 			  vht_mcs_limit[nss];
2419 
2420 		if (mcs_map)
2421 			idx_limit = fls(mcs_map) - 1;
2422 		else
2423 			idx_limit = -1;
2424 
2425 		switch (idx_limit) {
2426 		case 0: /* fall through */
2427 		case 1: /* fall through */
2428 		case 2: /* fall through */
2429 		case 3: /* fall through */
2430 		case 4: /* fall through */
2431 		case 5: /* fall through */
2432 		case 6: /* fall through */
2433 		default:
2434 			/* see ath10k_mac_can_set_bitrate_mask() */
2435 			WARN_ON(1);
2436 			/* fall through */
2437 		case -1:
2438 			mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED;
2439 			break;
2440 		case 7:
2441 			mcs = IEEE80211_VHT_MCS_SUPPORT_0_7;
2442 			break;
2443 		case 8:
2444 			mcs = IEEE80211_VHT_MCS_SUPPORT_0_8;
2445 			break;
2446 		case 9:
2447 			mcs = IEEE80211_VHT_MCS_SUPPORT_0_9;
2448 			break;
2449 		}
2450 
2451 		tx_mcs_set &= ~(0x3 << (nss * 2));
2452 		tx_mcs_set |= mcs << (nss * 2);
2453 	}
2454 
2455 	return tx_mcs_set;
2456 }
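
/* The helper above edits the 16-bit VHT MCS map in which every spatial
 * stream owns a 2-bit field (3 meaning "not supported"). A standalone sketch
 * of the field accessors it relies on (names are illustrative):
 */
#include <stdint.h>

static uint16_t vht_mcs_map_get(uint16_t map, int nss)
{
	return (map >> (nss * 2)) & 0x3;
}

static uint16_t vht_mcs_map_set(uint16_t map, int nss, uint16_t mcs)
{
	map &= ~(0x3 << (nss * 2));
	map |= (mcs & 0x3) << (nss * 2);
	return map;
}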
2457 
2458 static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
2459 				    struct ieee80211_vif *vif,
2460 				    struct ieee80211_sta *sta,
2461 				    struct wmi_peer_assoc_complete_arg *arg)
2462 {
2463 	const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
2464 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
2465 	struct cfg80211_chan_def def;
2466 	enum nl80211_band band;
2467 	const u16 *vht_mcs_mask;
2468 	u8 ampdu_factor;
2469 	u8 max_nss, vht_mcs;
2470 	int i;
2471 
2472 	if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2473 		return;
2474 
2475 	if (!vht_cap->vht_supported)
2476 		return;
2477 
2478 	band = def.chan->band;
2479 	vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
2480 
2481 	if (ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
2482 		return;
2483 
2484 	arg->peer_flags |= ar->wmi.peer_flags->vht;
2485 
2486 	if (def.chan->band == NL80211_BAND_2GHZ)
2487 		arg->peer_flags |= ar->wmi.peer_flags->vht_2g;
2488 
2489 	arg->peer_vht_caps = vht_cap->cap;
2490 
2491 	ampdu_factor = (vht_cap->cap &
2492 			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >>
2493 		       IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
2494 
2495 	/* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to
2496 	 * zero in VHT IE. Using it would result in degraded throughput.
2497 	 * arg->peer_max_mpdu at this point contains HT max_mpdu so keep
2498 	 * it if VHT max_mpdu is smaller.
2499 	 */
2500 	arg->peer_max_mpdu = max(arg->peer_max_mpdu,
2501 				 (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
2502 					ampdu_factor)) - 1);
2503 
2504 	if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
2505 		arg->peer_flags |= ar->wmi.peer_flags->bw80;
2506 
2507 	if (sta->bandwidth == IEEE80211_STA_RX_BW_160)
2508 		arg->peer_flags |= ar->wmi.peer_flags->bw160;
2509 
2510 	/* Calculate peer NSS capability from VHT capabilities if STA
2511 	 * supports VHT.
2512 	 */
2513 	for (i = 0, max_nss = 0, vht_mcs = 0; i < NL80211_VHT_NSS_MAX; i++) {
2514 		vht_mcs = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) >>
2515 			  (2 * i) & 3;
2516 
2517 		if ((vht_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED) &&
2518 		    vht_mcs_mask[i])
2519 			max_nss = i + 1;
2520 	}
2521 	arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss);
2522 	arg->peer_vht_rates.rx_max_rate =
2523 		__le16_to_cpu(vht_cap->vht_mcs.rx_highest);
2524 	arg->peer_vht_rates.rx_mcs_set =
2525 		__le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
2526 	arg->peer_vht_rates.tx_max_rate =
2527 		__le16_to_cpu(vht_cap->vht_mcs.tx_highest);
2528 	arg->peer_vht_rates.tx_mcs_set = ath10k_peer_assoc_h_vht_limit(
2529 		__le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask);
2530 
2531 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
2532 		   sta->addr, arg->peer_max_mpdu, arg->peer_flags);
2533 
2534 	if (arg->peer_vht_rates.rx_max_rate &&
2535 	    (sta->vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK)) {
2536 		switch (arg->peer_vht_rates.rx_max_rate) {
2537 		case 1560:
2538 			/* Must be 2x2 at 160 MHz; that is all it can do. */
2539 			arg->peer_bw_rxnss_override = 2;
2540 			break;
2541 		case 780:
2542 			/* Can only do 1x1 at 160 MHz (Long Guard Interval) */
2543 			arg->peer_bw_rxnss_override = 1;
2544 			break;
2545 		}
2546 	}
2547 }
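
/* The A-MPDU workaround above boils down to max_len = 2^(13 + exponent) - 1
 * octets, where 13 corresponds to IEEE80211_HT_MAX_AMPDU_FACTOR, and the
 * larger of the HT- and VHT-derived values is kept. A standalone sketch:
 */
#include <stdint.h>

static uint32_t max_ampdu_len(uint32_t exponent)
{
	/* HT exponents 0..3 give 8191..65535, VHT up to 7 gives 1048575 */
	return (1u << (13 + exponent)) - 1;
}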
2548 
2549 static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
2550 				    struct ieee80211_vif *vif,
2551 				    struct ieee80211_sta *sta,
2552 				    struct wmi_peer_assoc_complete_arg *arg)
2553 {
2554 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
2555 
2556 	switch (arvif->vdev_type) {
2557 	case WMI_VDEV_TYPE_AP:
2558 		if (sta->wme)
2559 			arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
2560 
2561 		if (sta->wme && sta->uapsd_queues) {
2562 			arg->peer_flags |= arvif->ar->wmi.peer_flags->apsd;
2563 			arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG;
2564 		}
2565 		break;
2566 	case WMI_VDEV_TYPE_STA:
2567 		if (sta->wme)
2568 			arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
2569 		break;
2570 	case WMI_VDEV_TYPE_IBSS:
2571 		if (sta->wme)
2572 			arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
2573 		break;
2574 	default:
2575 		break;
2576 	}
2577 
2578 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM qos %d\n",
2579 		   sta->addr, !!(arg->peer_flags &
2580 		   arvif->ar->wmi.peer_flags->qos));
2581 }
2582 
2583 static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
2584 {
2585 	return sta->supp_rates[NL80211_BAND_2GHZ] >>
2586 	       ATH10K_MAC_FIRST_OFDM_RATE_IDX;
2587 }
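
/* The check above simply shifts the CCK bits out of the 2 GHz legacy rate
 * bitmap; anything left over means at least one OFDM rate is supported. A
 * standalone sketch assuming the four CCK rates occupy indices 0-3:
 */
#include <stdbool.h>
#include <stdint.h>

static bool has_ofdm_rates(uint16_t supp_rates_2ghz)
{
	return (supp_rates_2ghz >> 4) != 0;
}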
2588 
2589 static enum wmi_phy_mode ath10k_mac_get_phymode_vht(struct ath10k *ar,
2590 						    struct ieee80211_sta *sta)
2591 {
2592 	if (sta->bandwidth == IEEE80211_STA_RX_BW_160) {
2593 		switch (sta->vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
2594 		case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
2595 			return MODE_11AC_VHT160;
2596 		case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
2597 			return MODE_11AC_VHT80_80;
2598 		default:
2599 			/* not sure if this is a valid case? */
2600 			return MODE_11AC_VHT160;
2601 		}
2602 	}
2603 
2604 	if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
2605 		return MODE_11AC_VHT80;
2606 
2607 	if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
2608 		return MODE_11AC_VHT40;
2609 
2610 	if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
2611 		return MODE_11AC_VHT20;
2612 
2613 	return MODE_UNKNOWN;
2614 }
2615 
2616 static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
2617 					struct ieee80211_vif *vif,
2618 					struct ieee80211_sta *sta,
2619 					struct wmi_peer_assoc_complete_arg *arg)
2620 {
2621 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
2622 	struct cfg80211_chan_def def;
2623 	enum nl80211_band band;
2624 	const u8 *ht_mcs_mask;
2625 	const u16 *vht_mcs_mask;
2626 	enum wmi_phy_mode phymode = MODE_UNKNOWN;
2627 
2628 	if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2629 		return;
2630 
2631 	band = def.chan->band;
2632 	ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
2633 	vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
2634 
2635 	switch (band) {
2636 	case NL80211_BAND_2GHZ:
2637 		if (sta->vht_cap.vht_supported &&
2638 		    !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
2639 			if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
2640 				phymode = MODE_11AC_VHT40;
2641 			else
2642 				phymode = MODE_11AC_VHT20;
2643 		} else if (sta->ht_cap.ht_supported &&
2644 			   !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
2645 			if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
2646 				phymode = MODE_11NG_HT40;
2647 			else
2648 				phymode = MODE_11NG_HT20;
2649 		} else if (ath10k_mac_sta_has_ofdm_only(sta)) {
2650 			phymode = MODE_11G;
2651 		} else {
2652 			phymode = MODE_11B;
2653 		}
2654 
2655 		break;
2656 	case NL80211_BAND_5GHZ:
2657 		/*
2658 		 * Check VHT first.
2659 		 */
2660 		if (sta->vht_cap.vht_supported &&
2661 		    !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
2662 			phymode = ath10k_mac_get_phymode_vht(ar, sta);
2663 		} else if (sta->ht_cap.ht_supported &&
2664 			   !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
2665 			if (sta->bandwidth >= IEEE80211_STA_RX_BW_40)
2666 				phymode = MODE_11NA_HT40;
2667 			else
2668 				phymode = MODE_11NA_HT20;
2669 		} else {
2670 			phymode = MODE_11A;
2671 		}
2672 
2673 		break;
2674 	default:
2675 		break;
2676 	}
2677 
2678 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM phymode %s\n",
2679 		   sta->addr, ath10k_wmi_phymode_str(phymode));
2680 
2681 	arg->peer_phymode = phymode;
2682 	WARN_ON(phymode == MODE_UNKNOWN);
2683 }
2684 
2685 static int ath10k_peer_assoc_prepare(struct ath10k *ar,
2686 				     struct ieee80211_vif *vif,
2687 				     struct ieee80211_sta *sta,
2688 				     struct wmi_peer_assoc_complete_arg *arg)
2689 {
2690 	lockdep_assert_held(&ar->conf_mutex);
2691 
2692 	memset(arg, 0, sizeof(*arg));
2693 
2694 	ath10k_peer_assoc_h_basic(ar, vif, sta, arg);
2695 	ath10k_peer_assoc_h_crypto(ar, vif, sta, arg);
2696 	ath10k_peer_assoc_h_rates(ar, vif, sta, arg);
2697 	ath10k_peer_assoc_h_ht(ar, vif, sta, arg);
2698 	ath10k_peer_assoc_h_vht(ar, vif, sta, arg);
2699 	ath10k_peer_assoc_h_qos(ar, vif, sta, arg);
2700 	ath10k_peer_assoc_h_phymode(ar, vif, sta, arg);
2701 
2702 	return 0;
2703 }
2704 
2705 static const u32 ath10k_smps_map[] = {
2706 	[WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC,
2707 	[WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC,
2708 	[WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE,
2709 	[WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE,
2710 };
2711 
2712 static int ath10k_setup_peer_smps(struct ath10k *ar, struct ath10k_vif *arvif,
2713 				  const u8 *addr,
2714 				  const struct ieee80211_sta_ht_cap *ht_cap)
2715 {
2716 	int smps;
2717 
2718 	if (!ht_cap->ht_supported)
2719 		return 0;
2720 
2721 	smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
2722 	smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
2723 
2724 	if (smps >= ARRAY_SIZE(ath10k_smps_map))
2725 		return -EINVAL;
2726 
2727 	return ath10k_wmi_peer_set_param(ar, arvif->vdev_id, addr,
2728 					 WMI_PEER_SMPS_STATE,
2729 					 ath10k_smps_map[smps]);
2730 }
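
/* The SM power save subfield sits in bits 2-3 of the HT capability info
 * field (0 static, 1 dynamic, 2 reserved/invalid, 3 disabled) and is then
 * mapped through ath10k_smps_map above. A standalone sketch of the
 * extraction, with the mask and shift written out as assumptions:
 */
#include <stdint.h>

static unsigned int ht_cap_sm_ps(uint16_t ht_cap_info)
{
	return (ht_cap_info & 0x000C) >> 2;	/* IEEE80211_HT_CAP_SM_PS */
}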
2731 
2732 static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar,
2733 				      struct ieee80211_vif *vif,
2734 				      struct ieee80211_sta_vht_cap vht_cap)
2735 {
2736 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
2737 	int ret;
2738 	u32 param;
2739 	u32 value;
2740 
2741 	if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_AFTER_ASSOC)
2742 		return 0;
2743 
2744 	if (!(ar->vht_cap_info &
2745 	      (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
2746 	       IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
2747 	       IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
2748 	       IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)))
2749 		return 0;
2750 
2751 	param = ar->wmi.vdev_param->txbf;
2752 	value = 0;
2753 
2754 	if (WARN_ON(param == WMI_VDEV_PARAM_UNSUPPORTED))
2755 		return 0;
2756 
2757 	/* The following logic is correct: if a remote STA advertises support
2758 	 * for acting as a beamformer then we should enable our beamformee role.
2759 	 */
2760 
2761 	if (ar->vht_cap_info &
2762 	    (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
2763 	     IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
2764 		if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
2765 			value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
2766 
2767 		if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
2768 			value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE;
2769 	}
2770 
2771 	if (ar->vht_cap_info &
2772 	    (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
2773 	     IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
2774 		if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
2775 			value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
2776 
2777 		if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
2778 			value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER;
2779 	}
2780 
2781 	if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFEE)
2782 		value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
2783 
2784 	if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFER)
2785 		value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
2786 
2787 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, value);
2788 	if (ret) {
2789 		ath10k_warn(ar, "failed to submit vdev param txbf 0x%x: %d\n",
2790 			    value, ret);
2791 		return ret;
2792 	}
2793 
2794 	return 0;
2795 }
2796 
2797 /* can be called only in mac80211 callbacks due to `key_count` usage */
2798 static void ath10k_bss_assoc(struct ieee80211_hw *hw,
2799 			     struct ieee80211_vif *vif,
2800 			     struct ieee80211_bss_conf *bss_conf)
2801 {
2802 	struct ath10k *ar = hw->priv;
2803 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
2804 	struct ieee80211_sta_ht_cap ht_cap;
2805 	struct ieee80211_sta_vht_cap vht_cap;
2806 	struct wmi_peer_assoc_complete_arg peer_arg;
2807 	struct ieee80211_sta *ap_sta;
2808 	int ret;
2809 
2810 	lockdep_assert_held(&ar->conf_mutex);
2811 
2812 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n",
2813 		   arvif->vdev_id, arvif->bssid, arvif->aid);
2814 
2815 	rcu_read_lock();
2816 
2817 	ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
2818 	if (!ap_sta) {
2819 		ath10k_warn(ar, "failed to find station entry for bss %pM vdev %i\n",
2820 			    bss_conf->bssid, arvif->vdev_id);
2821 		rcu_read_unlock();
2822 		return;
2823 	}
2824 
2825 	/* ap_sta must be accessed only within the RCU section, which must be
2826 	 * left before calling ath10k_setup_peer_smps() as it might sleep.
2827 	 */
2828 	ht_cap = ap_sta->ht_cap;
2829 	vht_cap = ap_sta->vht_cap;
2830 
2831 	ret = ath10k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg);
2832 	if (ret) {
2833 		ath10k_warn(ar, "failed to prepare peer assoc for %pM vdev %i: %d\n",
2834 			    bss_conf->bssid, arvif->vdev_id, ret);
2835 		rcu_read_unlock();
2836 		return;
2837 	}
2838 
2839 	rcu_read_unlock();
2840 
2841 	ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
2842 	if (ret) {
2843 		ath10k_warn(ar, "failed to run peer assoc for %pM vdev %i: %d\n",
2844 			    bss_conf->bssid, arvif->vdev_id, ret);
2845 		return;
2846 	}
2847 
2848 	ret = ath10k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ht_cap);
2849 	if (ret) {
2850 		ath10k_warn(ar, "failed to setup peer SMPS for vdev %i: %d\n",
2851 			    arvif->vdev_id, ret);
2852 		return;
2853 	}
2854 
2855 	ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
2856 	if (ret) {
2857 		ath10k_warn(ar, "failed to recalc txbf for vdev %i on bss %pM: %d\n",
2858 			    arvif->vdev_id, bss_conf->bssid, ret);
2859 		return;
2860 	}
2861 
2862 	ath10k_dbg(ar, ATH10K_DBG_MAC,
2863 		   "mac vdev %d up (associated) bssid %pM aid %d\n",
2864 		   arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
2865 
2866 	WARN_ON(arvif->is_up);
2867 
2868 	arvif->aid = bss_conf->aid;
2869 	ether_addr_copy(arvif->bssid, bss_conf->bssid);
2870 
2871 	ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
2872 	if (ret) {
2873 		ath10k_warn(ar, "failed to set vdev %d up: %d\n",
2874 			    arvif->vdev_id, ret);
2875 		return;
2876 	}
2877 
2878 	arvif->is_up = true;
2879 
2880 	/* Workaround: Some firmware revisions (tested with qca6174
2881 	 * WLAN.RM.2.0-00073) have buggy powersave state machine and must be
2882 	 * poked with peer param command.
2883 	 */
2884 	ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, arvif->bssid,
2885 					WMI_PEER_DUMMY_VAR, 1);
2886 	if (ret) {
2887 		ath10k_warn(ar, "failed to poke peer %pM param for ps workaround on vdev %i: %d\n",
2888 			    arvif->bssid, arvif->vdev_id, ret);
2889 		return;
2890 	}
2891 }
2892 
2893 static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
2894 				struct ieee80211_vif *vif)
2895 {
2896 	struct ath10k *ar = hw->priv;
2897 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
2898 	struct ieee80211_sta_vht_cap vht_cap = {};
2899 	int ret;
2900 
2901 	lockdep_assert_held(&ar->conf_mutex);
2902 
2903 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n",
2904 		   arvif->vdev_id, arvif->bssid);
2905 
2906 	ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
2907 	if (ret)
2908 		ath10k_warn(ar, "failed to down vdev %i: %d\n",
2909 			    arvif->vdev_id, ret);
2910 
2911 	arvif->def_wep_key_idx = -1;
2912 
2913 	ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
2914 	if (ret) {
2915 		ath10k_warn(ar, "failed to recalc txbf for vdev %i: %d\n",
2916 			    arvif->vdev_id, ret);
2917 		return;
2918 	}
2919 
2920 	arvif->is_up = false;
2921 
2922 	cancel_delayed_work_sync(&arvif->connection_loss_work);
2923 }
2924 
2925 static int ath10k_station_assoc(struct ath10k *ar,
2926 				struct ieee80211_vif *vif,
2927 				struct ieee80211_sta *sta,
2928 				bool reassoc)
2929 {
2930 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
2931 	struct wmi_peer_assoc_complete_arg peer_arg;
2932 	int ret = 0;
2933 
2934 	lockdep_assert_held(&ar->conf_mutex);
2935 
2936 	ret = ath10k_peer_assoc_prepare(ar, vif, sta, &peer_arg);
2937 	if (ret) {
2938 		ath10k_warn(ar, "failed to prepare WMI peer assoc for %pM vdev %i: %i\n",
2939 			    sta->addr, arvif->vdev_id, ret);
2940 		return ret;
2941 	}
2942 
2943 	ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
2944 	if (ret) {
2945 		ath10k_warn(ar, "failed to run peer assoc for STA %pM vdev %i: %d\n",
2946 			    sta->addr, arvif->vdev_id, ret);
2947 		return ret;
2948 	}
2949 
2950 	/* Re-assoc is run only to update the supported rates for a given
2951 	 * station. It doesn't make much sense to reconfigure the peer completely.
2952 	 */
2953 	if (!reassoc) {
2954 		ret = ath10k_setup_peer_smps(ar, arvif, sta->addr,
2955 					     &sta->ht_cap);
2956 		if (ret) {
2957 			ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n",
2958 				    arvif->vdev_id, ret);
2959 			return ret;
2960 		}
2961 
2962 		ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta);
2963 		if (ret) {
2964 			ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n",
2965 				    sta->addr, arvif->vdev_id, ret);
2966 			return ret;
2967 		}
2968 
2969 		if (!sta->wme) {
2970 			arvif->num_legacy_stations++;
2971 			ret = ath10k_recalc_rtscts_prot(arvif);
2972 			if (ret) {
2973 				ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
2974 					    arvif->vdev_id, ret);
2975 				return ret;
2976 			}
2977 		}
2978 
2979 		/* Plumb cached keys only for static WEP */
2980 		if ((arvif->def_wep_key_idx != -1) && (!sta->tdls)) {
2981 			ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
2982 			if (ret) {
2983 				ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n",
2984 					    arvif->vdev_id, ret);
2985 				return ret;
2986 			}
2987 		}
2988 	}
2989 
2990 	return ret;
2991 }
2992 
2993 static int ath10k_station_disassoc(struct ath10k *ar,
2994 				   struct ieee80211_vif *vif,
2995 				   struct ieee80211_sta *sta)
2996 {
2997 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
2998 	int ret = 0;
2999 
3000 	lockdep_assert_held(&ar->conf_mutex);
3001 
3002 	if (!sta->wme) {
3003 		arvif->num_legacy_stations--;
3004 		ret = ath10k_recalc_rtscts_prot(arvif);
3005 		if (ret) {
3006 			ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
3007 				    arvif->vdev_id, ret);
3008 			return ret;
3009 		}
3010 	}
3011 
3012 	ret = ath10k_clear_peer_keys(arvif, sta->addr);
3013 	if (ret) {
3014 		ath10k_warn(ar, "failed to clear all peer wep keys for vdev %i: %d\n",
3015 			    arvif->vdev_id, ret);
3016 		return ret;
3017 	}
3018 
3019 	return ret;
3020 }
3021 
3022 /**************/
3023 /* Regulatory */
3024 /**************/
3025 
3026 static int ath10k_update_channel_list(struct ath10k *ar)
3027 {
3028 	struct ieee80211_hw *hw = ar->hw;
3029 	struct ieee80211_supported_band **bands;
3030 	enum nl80211_band band;
3031 	struct ieee80211_channel *channel;
3032 	struct wmi_scan_chan_list_arg arg = {0};
3033 	struct wmi_channel_arg *ch;
3034 	bool passive;
3035 	int len;
3036 	int ret;
3037 	int i;
3038 
3039 	lockdep_assert_held(&ar->conf_mutex);
3040 
3041 	bands = hw->wiphy->bands;
3042 	for (band = 0; band < NUM_NL80211_BANDS; band++) {
3043 		if (!bands[band])
3044 			continue;
3045 
3046 		for (i = 0; i < bands[band]->n_channels; i++) {
3047 			if (bands[band]->channels[i].flags &
3048 			    IEEE80211_CHAN_DISABLED)
3049 				continue;
3050 
3051 			arg.n_channels++;
3052 		}
3053 	}
3054 
3055 	len = sizeof(struct wmi_channel_arg) * arg.n_channels;
3056 	arg.channels = kzalloc(len, GFP_KERNEL);
3057 	if (!arg.channels)
3058 		return -ENOMEM;
3059 
3060 	ch = arg.channels;
3061 	for (band = 0; band < NUM_NL80211_BANDS; band++) {
3062 		if (!bands[band])
3063 			continue;
3064 
3065 		for (i = 0; i < bands[band]->n_channels; i++) {
3066 			channel = &bands[band]->channels[i];
3067 
3068 			if (channel->flags & IEEE80211_CHAN_DISABLED)
3069 				continue;
3070 
3071 			ch->allow_ht = true;
3072 
3073 			/* FIXME: when should we really allow VHT? */
3074 			ch->allow_vht = true;
3075 
3076 			ch->allow_ibss =
3077 				!(channel->flags & IEEE80211_CHAN_NO_IR);
3078 
3079 			ch->ht40plus =
3080 				!(channel->flags & IEEE80211_CHAN_NO_HT40PLUS);
3081 
3082 			ch->chan_radar =
3083 				!!(channel->flags & IEEE80211_CHAN_RADAR);
3084 
3085 			passive = channel->flags & IEEE80211_CHAN_NO_IR;
3086 			ch->passive = passive;
3087 
3088 			ch->freq = channel->center_freq;
3089 			ch->band_center_freq1 = channel->center_freq;
3090 			ch->min_power = 0;
3091 			ch->max_power = channel->max_power * 2;
3092 			ch->max_reg_power = channel->max_reg_power * 2;
3093 			ch->max_antenna_gain = channel->max_antenna_gain * 2;
3094 			ch->reg_class_id = 0; /* FIXME */
3095 
3096 			/* FIXME: why use only legacy modes, why not any
3097 			 * HT/VHT modes? Would that even make any
3098 			 * difference?
3099 			 */
3100 			if (channel->band == NL80211_BAND_2GHZ)
3101 				ch->mode = MODE_11G;
3102 			else
3103 				ch->mode = MODE_11A;
3104 
3105 			if (WARN_ON_ONCE(ch->mode == MODE_UNKNOWN))
3106 				continue;
3107 
3108 			ath10k_dbg(ar, ATH10K_DBG_WMI,
3109 				   "mac channel [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
3110 				   ch - arg.channels, arg.n_channels,
3111 				   ch->freq, ch->max_power, ch->max_reg_power,
3112 				   ch->max_antenna_gain, ch->mode);
3113 
3114 			ch++;
3115 		}
3116 	}
3117 
3118 	ret = ath10k_wmi_scan_chan_list(ar, &arg);
3119 	kfree(arg.channels);
3120 
3121 	return ret;
3122 }
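
/* ath10k_update_channel_list() uses the classic two-pass pattern: count the
 * usable entries first, allocate exactly that many, then fill them in a
 * second walk over the same data. A generic standalone sketch (userspace
 * allocation used purely for illustration):
 */
#include <stdlib.h>

static int *collect_enabled(const int *disabled_flags, int n, int *out_n)
{
	int i, count = 0, *out;

	for (i = 0; i < n; i++)
		if (!disabled_flags[i])
			count++;

	out = calloc(count, sizeof(*out));
	if (!out)
		return NULL;

	*out_n = 0;
	for (i = 0; i < n; i++)
		if (!disabled_flags[i])
			out[(*out_n)++] = i;

	return out;
}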
3123 
3124 static enum wmi_dfs_region
3125 ath10k_mac_get_dfs_region(enum nl80211_dfs_regions dfs_region)
3126 {
3127 	switch (dfs_region) {
3128 	case NL80211_DFS_UNSET:
3129 		return WMI_UNINIT_DFS_DOMAIN;
3130 	case NL80211_DFS_FCC:
3131 		return WMI_FCC_DFS_DOMAIN;
3132 	case NL80211_DFS_ETSI:
3133 		return WMI_ETSI_DFS_DOMAIN;
3134 	case NL80211_DFS_JP:
3135 		return WMI_MKK4_DFS_DOMAIN;
3136 	}
3137 	return WMI_UNINIT_DFS_DOMAIN;
3138 }
3139 
3140 static void ath10k_regd_update(struct ath10k *ar)
3141 {
3142 	struct reg_dmn_pair_mapping *regpair;
3143 	int ret;
3144 	enum wmi_dfs_region wmi_dfs_reg;
3145 	enum nl80211_dfs_regions nl_dfs_reg;
3146 
3147 	lockdep_assert_held(&ar->conf_mutex);
3148 
3149 	ret = ath10k_update_channel_list(ar);
3150 	if (ret)
3151 		ath10k_warn(ar, "failed to update channel list: %d\n", ret);
3152 
3153 	regpair = ar->ath_common.regulatory.regpair;
3154 
3155 	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
3156 		nl_dfs_reg = ar->dfs_detector->region;
3157 		wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg);
3158 	} else {
3159 		wmi_dfs_reg = WMI_UNINIT_DFS_DOMAIN;
3160 	}
3161 
3162 	/* The target allows setting a per-band regdomain but ath_common
3163 	 * provides only a combined one.
3164 	 */
3165 	ret = ath10k_wmi_pdev_set_regdomain(ar,
3166 					    regpair->reg_domain,
3167 					    regpair->reg_domain, /* 2ghz */
3168 					    regpair->reg_domain, /* 5ghz */
3169 					    regpair->reg_2ghz_ctl,
3170 					    regpair->reg_5ghz_ctl,
3171 					    wmi_dfs_reg);
3172 	if (ret)
3173 		ath10k_warn(ar, "failed to set pdev regdomain: %d\n", ret);
3174 }
3175 
3176 static void ath10k_mac_update_channel_list(struct ath10k *ar,
3177 					   struct ieee80211_supported_band *band)
3178 {
3179 	int i;
3180 
3181 	if (ar->low_5ghz_chan && ar->high_5ghz_chan) {
3182 		for (i = 0; i < band->n_channels; i++) {
3183 			if (band->channels[i].center_freq < ar->low_5ghz_chan ||
3184 			    band->channels[i].center_freq > ar->high_5ghz_chan)
3185 				band->channels[i].flags |=
3186 					IEEE80211_CHAN_DISABLED;
3187 		}
3188 	}
3189 }
3190 
3191 static void ath10k_reg_notifier(struct wiphy *wiphy,
3192 				struct regulatory_request *request)
3193 {
3194 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
3195 	struct ath10k *ar = hw->priv;
3196 	bool result;
3197 
3198 	ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory);
3199 
3200 	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
3201 		ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs region 0x%x\n",
3202 			   request->dfs_region);
3203 		result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector,
3204 							  request->dfs_region);
3205 		if (!result)
3206 			ath10k_warn(ar, "DFS region 0x%X not supported, will trigger radar for every pulse\n",
3207 				    request->dfs_region);
3208 	}
3209 
3210 	mutex_lock(&ar->conf_mutex);
3211 	if (ar->state == ATH10K_STATE_ON)
3212 		ath10k_regd_update(ar);
3213 	mutex_unlock(&ar->conf_mutex);
3214 
3215 	if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY)
3216 		ath10k_mac_update_channel_list(ar,
3217 					       ar->hw->wiphy->bands[NL80211_BAND_5GHZ]);
3218 }
3219 
3220 /***************/
3221 /* TX handlers */
3222 /***************/
3223 
3224 enum ath10k_mac_tx_path {
3225 	ATH10K_MAC_TX_HTT,
3226 	ATH10K_MAC_TX_HTT_MGMT,
3227 	ATH10K_MAC_TX_WMI_MGMT,
3228 	ATH10K_MAC_TX_UNKNOWN,
3229 };
3230 
3231 void ath10k_mac_tx_lock(struct ath10k *ar, int reason)
3232 {
3233 	lockdep_assert_held(&ar->htt.tx_lock);
3234 
3235 	WARN_ON(reason >= ATH10K_TX_PAUSE_MAX);
3236 	ar->tx_paused |= BIT(reason);
3237 	ieee80211_stop_queues(ar->hw);
3238 }
3239 
3240 static void ath10k_mac_tx_unlock_iter(void *data, u8 *mac,
3241 				      struct ieee80211_vif *vif)
3242 {
3243 	struct ath10k *ar = data;
3244 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
3245 
3246 	if (arvif->tx_paused)
3247 		return;
3248 
3249 	ieee80211_wake_queue(ar->hw, arvif->vdev_id);
3250 }
3251 
3252 void ath10k_mac_tx_unlock(struct ath10k *ar, int reason)
3253 {
3254 	lockdep_assert_held(&ar->htt.tx_lock);
3255 
3256 	WARN_ON(reason >= ATH10K_TX_PAUSE_MAX);
3257 	ar->tx_paused &= ~BIT(reason);
3258 
3259 	if (ar->tx_paused)
3260 		return;
3261 
3262 	ieee80211_iterate_active_interfaces_atomic(ar->hw,
3263 						   IEEE80211_IFACE_ITER_RESUME_ALL,
3264 						   ath10k_mac_tx_unlock_iter,
3265 						   ar);
3266 
3267 	ieee80211_wake_queue(ar->hw, ar->hw->offchannel_tx_hw_queue);
3268 }
3269 
3270 void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason)
3271 {
3272 	struct ath10k *ar = arvif->ar;
3273 
3274 	lockdep_assert_held(&ar->htt.tx_lock);
3275 
3276 	WARN_ON(reason >= BITS_PER_LONG);
3277 	arvif->tx_paused |= BIT(reason);
3278 	ieee80211_stop_queue(ar->hw, arvif->vdev_id);
3279 }
3280 
3281 void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason)
3282 {
3283 	struct ath10k *ar = arvif->ar;
3284 
3285 	lockdep_assert_held(&ar->htt.tx_lock);
3286 
3287 	WARN_ON(reason >= BITS_PER_LONG);
3288 	arvif->tx_paused &= ~BIT(reason);
3289 
3290 	if (ar->tx_paused)
3291 		return;
3292 
3293 	if (arvif->tx_paused)
3294 		return;
3295 
3296 	ieee80211_wake_queue(ar->hw, arvif->vdev_id);
3297 }
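
/* Pause reasons are tracked as bits in a mask, both per-device and per-vif;
 * a queue may only be woken once every reason has been cleared, which is
 * what the two early returns above encode. A standalone sketch:
 */
#include <stdbool.h>

static void pause_reason_set(unsigned long *mask, int reason)
{
	*mask |= 1UL << reason;
}

static void pause_reason_clear(unsigned long *mask, int reason)
{
	*mask &= ~(1UL << reason);
}

static bool may_wake_queue(unsigned long dev_mask, unsigned long vif_mask)
{
	return dev_mask == 0 && vif_mask == 0;
}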
3298 
3299 static void ath10k_mac_vif_handle_tx_pause(struct ath10k_vif *arvif,
3300 					   enum wmi_tlv_tx_pause_id pause_id,
3301 					   enum wmi_tlv_tx_pause_action action)
3302 {
3303 	struct ath10k *ar = arvif->ar;
3304 
3305 	lockdep_assert_held(&ar->htt.tx_lock);
3306 
3307 	switch (action) {
3308 	case WMI_TLV_TX_PAUSE_ACTION_STOP:
3309 		ath10k_mac_vif_tx_lock(arvif, pause_id);
3310 		break;
3311 	case WMI_TLV_TX_PAUSE_ACTION_WAKE:
3312 		ath10k_mac_vif_tx_unlock(arvif, pause_id);
3313 		break;
3314 	default:
3315 		ath10k_dbg(ar, ATH10K_DBG_BOOT,
3316 			   "received unknown tx pause action %d on vdev %i, ignoring\n",
3317 			    action, arvif->vdev_id);
3318 		break;
3319 	}
3320 }
3321 
3322 struct ath10k_mac_tx_pause {
3323 	u32 vdev_id;
3324 	enum wmi_tlv_tx_pause_id pause_id;
3325 	enum wmi_tlv_tx_pause_action action;
3326 };
3327 
3328 static void ath10k_mac_handle_tx_pause_iter(void *data, u8 *mac,
3329 					    struct ieee80211_vif *vif)
3330 {
3331 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
3332 	struct ath10k_mac_tx_pause *arg = data;
3333 
3334 	if (arvif->vdev_id != arg->vdev_id)
3335 		return;
3336 
3337 	ath10k_mac_vif_handle_tx_pause(arvif, arg->pause_id, arg->action);
3338 }
3339 
3340 void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id,
3341 				     enum wmi_tlv_tx_pause_id pause_id,
3342 				     enum wmi_tlv_tx_pause_action action)
3343 {
3344 	struct ath10k_mac_tx_pause arg = {
3345 		.vdev_id = vdev_id,
3346 		.pause_id = pause_id,
3347 		.action = action,
3348 	};
3349 
3350 	spin_lock_bh(&ar->htt.tx_lock);
3351 	ieee80211_iterate_active_interfaces_atomic(ar->hw,
3352 						   IEEE80211_IFACE_ITER_RESUME_ALL,
3353 						   ath10k_mac_handle_tx_pause_iter,
3354 						   &arg);
3355 	spin_unlock_bh(&ar->htt.tx_lock);
3356 }
3357 
3358 static enum ath10k_hw_txrx_mode
3359 ath10k_mac_tx_h_get_txmode(struct ath10k *ar,
3360 			   struct ieee80211_vif *vif,
3361 			   struct ieee80211_sta *sta,
3362 			   struct sk_buff *skb)
3363 {
3364 	const struct ieee80211_hdr *hdr = (void *)skb->data;
3365 	__le16 fc = hdr->frame_control;
3366 
3367 	if (!vif || vif->type == NL80211_IFTYPE_MONITOR)
3368 		return ATH10K_HW_TXRX_RAW;
3369 
3370 	if (ieee80211_is_mgmt(fc))
3371 		return ATH10K_HW_TXRX_MGMT;
3372 
3373 	/* Workaround:
3374 	 *
3375 	 * NullFunc frames are mostly used to check whether a client or AP is
3376 	 * still reachable and responsive. This implies that tx status reports
3377 	 * must be accurate - otherwise either mac80211 or userspace (e.g.
3378 	 * hostapd) may conclude that the other end disappeared and tear down
3379 	 * the BSS connection, or may never disconnect from the BSS/client
3380 	 * (which is the case here).
3381 	 *
3382 	 * Firmware with HTT older than 3.0 delivers incorrect tx status for
3383 	 * NullFunc frames to the driver. However there is an HTT Mgmt Tx
3384 	 * command which seems to deliver correct tx reports for NullFunc
3385 	 * frames. The downside of using it is that it ignores the client
3386 	 * powersave state and can end up disconnecting sleeping clients in AP
3387 	 * mode. It should fix STA mode though, because APs don't sleep.
3388 	 */
3389 	if (ar->htt.target_version_major < 3 &&
3390 	    (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) &&
3391 	    !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
3392 		      ar->running_fw->fw_file.fw_features))
3393 		return ATH10K_HW_TXRX_MGMT;
3394 
3395 	/* Workaround:
3396 	 *
3397 	 * Some wmi-tlv firmwares for qca6174 have broken Tx key selection for
3398 	 * NativeWifi txmode - it selects AP key instead of peer key. It seems
3399 	 * to work with Ethernet txmode so use it.
3400 	 *
3401 	 * FIXME: Check if raw mode works with TDLS.
3402 	 */
3403 	if (ieee80211_is_data_present(fc) && sta && sta->tdls)
3404 		return ATH10K_HW_TXRX_ETHERNET;
3405 
3406 	if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
3407 		return ATH10K_HW_TXRX_RAW;
3408 
3409 	return ATH10K_HW_TXRX_NATIVE_WIFI;
3410 }
3411 
3412 static bool ath10k_tx_h_use_hwcrypto(struct ieee80211_vif *vif,
3413 				     struct sk_buff *skb)
3414 {
3415 	const struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
3416 	const struct ieee80211_hdr *hdr = (void *)skb->data;
3417 	const u32 mask = IEEE80211_TX_INTFL_DONT_ENCRYPT |
3418 			 IEEE80211_TX_CTL_INJECTED;
3419 
3420 	if (!ieee80211_has_protected(hdr->frame_control))
3421 		return false;
3422 
3423 	if ((info->flags & mask) == mask)
3424 		return false;
3425 
3426 	if (vif)
3427 		return !((struct ath10k_vif *)vif->drv_priv)->nohwcrypt;
3428 
3429 	return true;
3430 }
3431 
3432 /* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS
3433  * Control in the header.
3434  */
3435 static void ath10k_tx_h_nwifi(struct ieee80211_hw *hw, struct sk_buff *skb)
3436 {
3437 	struct ieee80211_hdr *hdr = (void *)skb->data;
3438 	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
3439 	u8 *qos_ctl;
3440 
3441 	if (!ieee80211_is_data_qos(hdr->frame_control))
3442 		return;
3443 
3444 	qos_ctl = ieee80211_get_qos_ctl(hdr);
3445 	memmove(skb->data + IEEE80211_QOS_CTL_LEN,
3446 		skb->data, (void *)qos_ctl - (void *)skb->data);
3447 	skb_pull(skb, IEEE80211_QOS_CTL_LEN);
3448 
3449 	/* Some firmware revisions don't handle sending QoS NullFunc well.
3450 	 * These frames are mainly used for CQM purposes so it doesn't really
3451 	 * matter whether a QoS NullFunc or a plain NullFunc is sent.
3452 	 */
3453 	hdr = (void *)skb->data;
3454 	if (ieee80211_is_qos_nullfunc(hdr->frame_control))
3455 		cb->flags &= ~ATH10K_SKB_F_QOS;
3456 
3457 	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
3458 }
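
/* Stripping the 2-byte QoS Control field amounts to sliding the bytes in
 * front of it forward by two and then advancing the frame start, exactly
 * the memmove()/skb_pull() pair above. A standalone sketch where
 * `hdr_before_qos` is the number of header bytes preceding QoS Control:
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static uint8_t *strip_qos_ctl(uint8_t *frame, size_t hdr_before_qos)
{
	memmove(frame + 2, frame, hdr_before_qos);
	return frame + 2;	/* new start of the 802.11 frame */
}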
3459 
3460 static void ath10k_tx_h_8023(struct sk_buff *skb)
3461 {
3462 	struct ieee80211_hdr *hdr;
3463 	struct rfc1042_hdr *rfc1042;
3464 	struct ethhdr *eth;
3465 	size_t hdrlen;
3466 	u8 da[ETH_ALEN];
3467 	u8 sa[ETH_ALEN];
3468 	__be16 type;
3469 
3470 	hdr = (void *)skb->data;
3471 	hdrlen = ieee80211_hdrlen(hdr->frame_control);
3472 	rfc1042 = (void *)skb->data + hdrlen;
3473 
3474 	ether_addr_copy(da, ieee80211_get_DA(hdr));
3475 	ether_addr_copy(sa, ieee80211_get_SA(hdr));
3476 	type = rfc1042->snap_type;
3477 
3478 	skb_pull(skb, hdrlen + sizeof(*rfc1042));
3479 	skb_push(skb, sizeof(*eth));
3480 
3481 	eth = (void *)skb->data;
3482 	ether_addr_copy(eth->h_dest, da);
3483 	ether_addr_copy(eth->h_source, sa);
3484 	eth->h_proto = type;
3485 }
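
/* The conversion above drops the 802.11 header plus the 8-byte LLC/SNAP
 * (RFC 1042) header and prepends a 14-byte Ethernet header built from the
 * saved DA/SA and the SNAP ethertype. A standalone sketch under those
 * assumptions; `out` is a separate buffer rather than in-place headroom:
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static size_t dot11_to_8023(uint8_t *out, const uint8_t *frame, size_t len,
			    size_t hdrlen, const uint8_t da[6],
			    const uint8_t sa[6])
{
	const uint8_t *snap_type = frame + hdrlen + 6; /* last 2 of 8 bytes */
	size_t payload_len = len - hdrlen - 8;

	memcpy(out, da, 6);			/* destination */
	memcpy(out + 6, sa, 6);			/* source */
	memcpy(out + 12, snap_type, 2);		/* ethertype */
	memcpy(out + 14, frame + hdrlen + 8, payload_len);

	return 14 + payload_len;
}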
3486 
3487 static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar,
3488 				       struct ieee80211_vif *vif,
3489 				       struct sk_buff *skb)
3490 {
3491 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
3492 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
3493 
3494 	/* This case applies only to a P2P GO */
3495 	if (vif->type != NL80211_IFTYPE_AP || !vif->p2p)
3496 		return;
3497 
3498 	if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) {
3499 		spin_lock_bh(&ar->data_lock);
3500 		if (arvif->u.ap.noa_data)
3501 			if (!pskb_expand_head(skb, 0, arvif->u.ap.noa_len,
3502 					      GFP_ATOMIC))
3503 				skb_put_data(skb, arvif->u.ap.noa_data,
3504 					     arvif->u.ap.noa_len);
3505 		spin_unlock_bh(&ar->data_lock);
3506 	}
3507 }
3508 
3509 static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar,
3510 				    struct ieee80211_vif *vif,
3511 				    struct ieee80211_txq *txq,
3512 				    struct sk_buff *skb)
3513 {
3514 	struct ieee80211_hdr *hdr = (void *)skb->data;
3515 	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
3516 
3517 	cb->flags = 0;
3518 	if (!ath10k_tx_h_use_hwcrypto(vif, skb))
3519 		cb->flags |= ATH10K_SKB_F_NO_HWCRYPT;
3520 
3521 	if (ieee80211_is_mgmt(hdr->frame_control))
3522 		cb->flags |= ATH10K_SKB_F_MGMT;
3523 
3524 	if (ieee80211_is_data_qos(hdr->frame_control))
3525 		cb->flags |= ATH10K_SKB_F_QOS;
3526 
3527 	cb->vif = vif;
3528 	cb->txq = txq;
3529 }
3530 
3531 bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar)
3532 {
3533 	/* FIXME: Not really sure when exactly the behaviour changed. At some
3534 	 * point new firmware stopped requiring creation of peer entries for
3535 	 * offchannel tx (and actually creating them causes issues with wmi-htc
3536 	 * tx credit replenishment and reliability). Assuming it's at least 3.4
3537 	 * because that's when the `freq` was introduced to TX_FRM HTT command.
3538 	 */
3539 	return (ar->htt.target_version_major >= 3 &&
3540 		ar->htt.target_version_minor >= 4 &&
3541 		ar->running_fw->fw_file.htt_op_version == ATH10K_FW_HTT_OP_VERSION_TLV);
3542 }
3543 
3544 static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb)
3545 {
3546 	struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
3547 	int ret = 0;
3548 
3549 	spin_lock_bh(&ar->data_lock);
3550 
3551 	if (skb_queue_len(q) == ATH10K_MAX_NUM_MGMT_PENDING) {
3552 		ath10k_warn(ar, "wmi mgmt tx queue is full\n");
3553 		ret = -ENOSPC;
3554 		goto unlock;
3555 	}
3556 
3557 	__skb_queue_tail(q, skb);
3558 	ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
3559 
3560 unlock:
3561 	spin_unlock_bh(&ar->data_lock);
3562 
3563 	return ret;
3564 }
3565 
3566 static enum ath10k_mac_tx_path
3567 ath10k_mac_tx_h_get_txpath(struct ath10k *ar,
3568 			   struct sk_buff *skb,
3569 			   enum ath10k_hw_txrx_mode txmode)
3570 {
3571 	switch (txmode) {
3572 	case ATH10K_HW_TXRX_RAW:
3573 	case ATH10K_HW_TXRX_NATIVE_WIFI:
3574 	case ATH10K_HW_TXRX_ETHERNET:
3575 		return ATH10K_MAC_TX_HTT;
3576 	case ATH10K_HW_TXRX_MGMT:
3577 		if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
3578 			     ar->running_fw->fw_file.fw_features) ||
3579 			     test_bit(WMI_SERVICE_MGMT_TX_WMI,
3580 				      ar->wmi.svc_map))
3581 			return ATH10K_MAC_TX_WMI_MGMT;
3582 		else if (ar->htt.target_version_major >= 3)
3583 			return ATH10K_MAC_TX_HTT;
3584 		else
3585 			return ATH10K_MAC_TX_HTT_MGMT;
3586 	}
3587 
3588 	return ATH10K_MAC_TX_UNKNOWN;
3589 }
3590 
3591 static int ath10k_mac_tx_submit(struct ath10k *ar,
3592 				enum ath10k_hw_txrx_mode txmode,
3593 				enum ath10k_mac_tx_path txpath,
3594 				struct sk_buff *skb)
3595 {
3596 	struct ath10k_htt *htt = &ar->htt;
3597 	int ret = -EINVAL;
3598 
3599 	switch (txpath) {
3600 	case ATH10K_MAC_TX_HTT:
3601 		ret = htt->tx_ops->htt_tx(htt, txmode, skb);
3602 		break;
3603 	case ATH10K_MAC_TX_HTT_MGMT:
3604 		ret = ath10k_htt_mgmt_tx(htt, skb);
3605 		break;
3606 	case ATH10K_MAC_TX_WMI_MGMT:
3607 		ret = ath10k_mac_tx_wmi_mgmt(ar, skb);
3608 		break;
3609 	case ATH10K_MAC_TX_UNKNOWN:
3610 		WARN_ON_ONCE(1);
3611 		ret = -EINVAL;
3612 		break;
3613 	}
3614 
3615 	if (ret) {
3616 		ath10k_warn(ar, "failed to transmit packet, dropping: %d\n",
3617 			    ret);
3618 		ieee80211_free_txskb(ar->hw, skb);
3619 	}
3620 
3621 	return ret;
3622 }
3623 
3624 /* This function consumes the sk_buff regardless of return value as far as
3625  * the caller is concerned, so no freeing is necessary afterwards.
3626  */
3627 static int ath10k_mac_tx(struct ath10k *ar,
3628 			 struct ieee80211_vif *vif,
3629 			 enum ath10k_hw_txrx_mode txmode,
3630 			 enum ath10k_mac_tx_path txpath,
3631 			 struct sk_buff *skb)
3632 {
3633 	struct ieee80211_hw *hw = ar->hw;
3634 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
3635 	int ret;
3636 
3637 	/* We should disable CCK RATE due to P2P */
3638 	if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
3639 		ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
3640 
3641 	switch (txmode) {
3642 	case ATH10K_HW_TXRX_MGMT:
3643 	case ATH10K_HW_TXRX_NATIVE_WIFI:
3644 		ath10k_tx_h_nwifi(hw, skb);
3645 		ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb);
3646 		ath10k_tx_h_seq_no(vif, skb);
3647 		break;
3648 	case ATH10K_HW_TXRX_ETHERNET:
3649 		ath10k_tx_h_8023(skb);
3650 		break;
3651 	case ATH10K_HW_TXRX_RAW:
3652 		if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
3653 			WARN_ON_ONCE(1);
3654 			ieee80211_free_txskb(hw, skb);
3655 			return -ENOTSUPP;
3656 		}
3657 	}
3658 
3659 	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
3660 		if (!ath10k_mac_tx_frm_has_freq(ar)) {
3661 			ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %pK\n",
3662 				   skb);
3663 
3664 			skb_queue_tail(&ar->offchan_tx_queue, skb);
3665 			ieee80211_queue_work(hw, &ar->offchan_tx_work);
3666 			return 0;
3667 		}
3668 	}
3669 
3670 	ret = ath10k_mac_tx_submit(ar, txmode, txpath, skb);
3671 	if (ret) {
3672 		ath10k_warn(ar, "failed to submit frame: %d\n", ret);
3673 		return ret;
3674 	}
3675 
3676 	return 0;
3677 }
3678 
3679 void ath10k_offchan_tx_purge(struct ath10k *ar)
3680 {
3681 	struct sk_buff *skb;
3682 
3683 	for (;;) {
3684 		skb = skb_dequeue(&ar->offchan_tx_queue);
3685 		if (!skb)
3686 			break;
3687 
3688 		ieee80211_free_txskb(ar->hw, skb);
3689 	}
3690 }
3691 
3692 void ath10k_offchan_tx_work(struct work_struct *work)
3693 {
3694 	struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work);
3695 	struct ath10k_peer *peer;
3696 	struct ath10k_vif *arvif;
3697 	enum ath10k_hw_txrx_mode txmode;
3698 	enum ath10k_mac_tx_path txpath;
3699 	struct ieee80211_hdr *hdr;
3700 	struct ieee80211_vif *vif;
3701 	struct ieee80211_sta *sta;
3702 	struct sk_buff *skb;
3703 	const u8 *peer_addr;
3704 	int vdev_id;
3705 	int ret;
3706 	unsigned long time_left;
3707 	bool tmp_peer_created = false;
3708 
3709 	/* FW requirement: We must create a peer before FW will send out
3710 	 * an offchannel frame. Otherwise the frame will be stuck and
3711 	 * never transmitted. We delete the peer upon tx completion.
3712 	 * It is unlikely that a peer for offchannel tx will already be
3713 	 * present. However it may exist in some rare cases, so account for that.
3714 	 * Otherwise we might remove a legitimate peer and break stuff.
3715 	 */
3716 
3717 	for (;;) {
3718 		skb = skb_dequeue(&ar->offchan_tx_queue);
3719 		if (!skb)
3720 			break;
3721 
3722 		mutex_lock(&ar->conf_mutex);
3723 
3724 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %pK\n",
3725 			   skb);
3726 
3727 		hdr = (struct ieee80211_hdr *)skb->data;
3728 		peer_addr = ieee80211_get_DA(hdr);
3729 
3730 		spin_lock_bh(&ar->data_lock);
3731 		vdev_id = ar->scan.vdev_id;
3732 		peer = ath10k_peer_find(ar, vdev_id, peer_addr);
3733 		spin_unlock_bh(&ar->data_lock);
3734 
3735 		if (peer)
3736 			/* FIXME: should this use ath10k_warn()? */
3737 			ath10k_dbg(ar, ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n",
3738 				   peer_addr, vdev_id);
3739 
3740 		if (!peer) {
3741 			ret = ath10k_peer_create(ar, NULL, NULL, vdev_id,
3742 						 peer_addr,
3743 						 WMI_PEER_TYPE_DEFAULT);
3744 			if (ret)
3745 				ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n",
3746 					    peer_addr, vdev_id, ret);
3747 			tmp_peer_created = (ret == 0);
3748 		}
3749 
3750 		spin_lock_bh(&ar->data_lock);
3751 		reinit_completion(&ar->offchan_tx_completed);
3752 		ar->offchan_tx_skb = skb;
3753 		spin_unlock_bh(&ar->data_lock);
3754 
3755 		/* It's safe to access vif and sta - conf_mutex guarantees that
3756 		 * sta_state() and remove_interface() are serialized against
3757 		 * this offchannel worker.
3758 		 */
3759 		arvif = ath10k_get_arvif(ar, vdev_id);
3760 		if (arvif) {
3761 			vif = arvif->vif;
3762 			sta = ieee80211_find_sta(vif, peer_addr);
3763 		} else {
3764 			vif = NULL;
3765 			sta = NULL;
3766 		}
3767 
3768 		txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
3769 		txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
3770 
3771 		ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb);
3772 		if (ret) {
3773 			ath10k_warn(ar, "failed to transmit offchannel frame: %d\n",
3774 				    ret);
3775 			/* not serious */
3776 		}
3777 
3778 		time_left =
3779 		wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ);
3780 		if (time_left == 0)
3781 			ath10k_warn(ar, "timed out waiting for offchannel skb %pK\n",
3782 				    skb);
3783 
3784 		if (!peer && tmp_peer_created) {
3785 			ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
3786 			if (ret)
3787 				ath10k_warn(ar, "failed to delete peer %pM on vdev %d: %d\n",
3788 					    peer_addr, vdev_id, ret);
3789 		}
3790 
3791 		mutex_unlock(&ar->conf_mutex);
3792 	}
3793 }
3794 
3795 void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar)
3796 {
3797 	struct sk_buff *skb;
3798 
3799 	for (;;) {
3800 		skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
3801 		if (!skb)
3802 			break;
3803 
3804 		ieee80211_free_txskb(ar->hw, skb);
3805 	}
3806 }
3807 
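/* Worker that drains ar->wmi_mgmt_tx_queue and transmits each management
 * frame over WMI. If the running firmware advertises MGMT_TX_BY_REF the
 * frame is DMA-mapped and passed by reference, otherwise the plain WMI
 * mgmt tx path is used. Frames that cannot be sent are freed back to
 * mac80211.
 */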
3808 void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
3809 {
3810 	struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work);
3811 	struct sk_buff *skb;
3812 	dma_addr_t paddr;
3813 	int ret;
3814 
3815 	for (;;) {
3816 		skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
3817 		if (!skb)
3818 			break;
3819 
3820 		if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF,
3821 			     ar->running_fw->fw_file.fw_features)) {
3822 			paddr = dma_map_single(ar->dev, skb->data,
3823 					       skb->len, DMA_TO_DEVICE);
3824 			if (dma_mapping_error(ar->dev, paddr)) {
3825 				ieee80211_free_txskb(ar->hw, skb);
				continue;
			}
3826 			ret = ath10k_wmi_mgmt_tx_send(ar, skb, paddr);
3827 			if (ret) {
3828 				ath10k_warn(ar, "failed to transmit management frame by ref via WMI: %d\n",
3829 					    ret);
3830 				dma_unmap_single(ar->dev, paddr, skb->len,
3831 						 DMA_TO_DEVICE);
3832 				ieee80211_free_txskb(ar->hw, skb);
3833 			}
3834 		} else {
3835 			ret = ath10k_wmi_mgmt_tx(ar, skb);
3836 			if (ret) {
3837 				ath10k_warn(ar, "failed to transmit management frame via WMI: %d\n",
3838 					    ret);
3839 				ieee80211_free_txskb(ar->hw, skb);
3840 			}
3841 		}
3842 	}
3843 }
3844 
3845 static void ath10k_mac_txq_init(struct ieee80211_txq *txq)
3846 {
3847 	struct ath10k_txq *artxq;
3848 
3849 	if (!txq)
3850 		return;
3851 
3852 	artxq = (void *)txq->drv_priv;
3853 	INIT_LIST_HEAD(&artxq->list);
3854 }
3855 
3856 static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq)
3857 {
3858 	struct ath10k_txq *artxq;
3859 	struct ath10k_skb_cb *cb;
3860 	struct sk_buff *msdu;
3861 	int msdu_id;
3862 
3863 	if (!txq)
3864 		return;
3865 
3866 	artxq = (void *)txq->drv_priv;
3867 	spin_lock_bh(&ar->txqs_lock);
3868 	if (!list_empty(&artxq->list))
3869 		list_del_init(&artxq->list);
3870 	spin_unlock_bh(&ar->txqs_lock);
3871 
3872 	spin_lock_bh(&ar->htt.tx_lock);
3873 	idr_for_each_entry(&ar->htt.pending_tx, msdu, msdu_id) {
3874 		cb = ATH10K_SKB_CB(msdu);
3875 		if (cb->txq == txq)
3876 			cb->txq = NULL;
3877 	}
3878 	spin_unlock_bh(&ar->htt.tx_lock);
3879 }
3880 
3881 struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar,
3882 					    u16 peer_id,
3883 					    u8 tid)
3884 {
3885 	struct ath10k_peer *peer;
3886 
3887 	lockdep_assert_held(&ar->data_lock);
3888 
3889 	peer = ar->peer_map[peer_id];
3890 	if (!peer)
3891 		return NULL;
3892 
3893 	if (peer->removed)
3894 		return NULL;
3895 
3896 	if (peer->sta)
3897 		return peer->sta->txq[tid];
3898 	else if (peer->vif)
3899 		return peer->vif->txq;
3900 	else
3901 		return NULL;
3902 }
3903 
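/* Decide whether another frame may be pushed to firmware for this txq.
 * Always true in HTT_TX_MODE_SWITCH_PUSH mode; otherwise allowed while
 * either the global pending count is below num_push_allowed or the
 * per-txq num_fw_queued is below its own num_push_allowed limit.
 */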
3904 static bool ath10k_mac_tx_can_push(struct ieee80211_hw *hw,
3905 				   struct ieee80211_txq *txq)
3906 {
3907 	struct ath10k *ar = hw->priv;
3908 	struct ath10k_txq *artxq = (void *)txq->drv_priv;
3909 
3910 	/* No need to get locks */
3911 
3912 	if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH)
3913 		return true;
3914 
3915 	if (ar->htt.num_pending_tx < ar->htt.tx_q_state.num_push_allowed)
3916 		return true;
3917 
3918 	if (artxq->num_fw_queued < artxq->num_push_allowed)
3919 		return true;
3920 
3921 	return false;
3922 }
3923 
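/* Dequeue one frame from a mac80211 txq and hand it to the tx path.
 * Returns the pushed frame length on success, -ENOENT when the txq is
 * empty, or a negative error code on failure. Pending-tx counters are
 * rolled back on any error.
 */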
3924 int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
3925 			   struct ieee80211_txq *txq)
3926 {
3927 	struct ath10k *ar = hw->priv;
3928 	struct ath10k_htt *htt = &ar->htt;
3929 	struct ath10k_txq *artxq = (void *)txq->drv_priv;
3930 	struct ieee80211_vif *vif = txq->vif;
3931 	struct ieee80211_sta *sta = txq->sta;
3932 	enum ath10k_hw_txrx_mode txmode;
3933 	enum ath10k_mac_tx_path txpath;
3934 	struct sk_buff *skb;
3935 	struct ieee80211_hdr *hdr;
3936 	size_t skb_len;
3937 	bool is_mgmt, is_presp;
3938 	int ret;
3939 
3940 	spin_lock_bh(&ar->htt.tx_lock);
3941 	ret = ath10k_htt_tx_inc_pending(htt);
3942 	spin_unlock_bh(&ar->htt.tx_lock);
3943 
3944 	if (ret)
3945 		return ret;
3946 
3947 	skb = ieee80211_tx_dequeue(hw, txq);
3948 	if (!skb) {
3949 		spin_lock_bh(&ar->htt.tx_lock);
3950 		ath10k_htt_tx_dec_pending(htt);
3951 		spin_unlock_bh(&ar->htt.tx_lock);
3952 
3953 		return -ENOENT;
3954 	}
3955 
3956 	ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb);
3957 
3958 	skb_len = skb->len;
3959 	txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
3960 	txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
3961 	is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT);
3962 
3963 	if (is_mgmt) {
3964 		hdr = (struct ieee80211_hdr *)skb->data;
3965 		is_presp = ieee80211_is_probe_resp(hdr->frame_control);
3966 
3967 		spin_lock_bh(&ar->htt.tx_lock);
3968 		ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
3969 
3970 		if (ret) {
3971 			ath10k_htt_tx_dec_pending(htt);
3972 			spin_unlock_bh(&ar->htt.tx_lock);
3973 			return ret;
3974 		}
3975 		spin_unlock_bh(&ar->htt.tx_lock);
3976 	}
3977 
3978 	ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb);
3979 	if (unlikely(ret)) {
3980 		ath10k_warn(ar, "failed to push frame: %d\n", ret);
3981 
3982 		spin_lock_bh(&ar->htt.tx_lock);
3983 		ath10k_htt_tx_dec_pending(htt);
3984 		if (is_mgmt)
3985 			ath10k_htt_tx_mgmt_dec_pending(htt);
3986 		spin_unlock_bh(&ar->htt.tx_lock);
3987 
3988 		return ret;
3989 	}
3990 
3991 	spin_lock_bh(&ar->htt.tx_lock);
3992 	artxq->num_fw_queued++;
3993 	spin_unlock_bh(&ar->htt.tx_lock);
3994 
3995 	return skb_len;
3996 }
3997 
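/* Round-robin over all scheduled txqs and push queued frames to firmware.
 * Bails out early when more than half of the HTT tx descriptors are in
 * use. Each txq gets at most 16 frames per pass to prevent a single
 * sta/tid from monopolizing the tx path; txqs that still hold frames are
 * re-queued at the tail.
 */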
3998 void ath10k_mac_tx_push_pending(struct ath10k *ar)
3999 {
4000 	struct ieee80211_hw *hw = ar->hw;
4001 	struct ieee80211_txq *txq;
4002 	struct ath10k_txq *artxq;
4003 	struct ath10k_txq *last;
4004 	int ret;
4005 	int max;
4006 
4007 	if (ar->htt.num_pending_tx >= (ar->htt.max_num_pending_tx / 2))
4008 		return;
4009 
4010 	spin_lock_bh(&ar->txqs_lock);
4011 	rcu_read_lock();
4012 
4013 	last = list_last_entry(&ar->txqs, struct ath10k_txq, list);
4014 	while (!list_empty(&ar->txqs)) {
4015 		artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list);
4016 		txq = container_of((void *)artxq, struct ieee80211_txq,
4017 				   drv_priv);
4018 
4019 		/* Prevent an aggressive sta/tid from taking over the tx queue */
4020 		max = 16;
4021 		ret = 0;
4022 		while (ath10k_mac_tx_can_push(hw, txq) && max--) {
4023 			ret = ath10k_mac_tx_push_txq(hw, txq);
4024 			if (ret < 0)
4025 				break;
4026 		}
4027 
4028 		list_del_init(&artxq->list);
4029 		if (ret != -ENOENT)
4030 			list_add_tail(&artxq->list, &ar->txqs);
4031 
4032 		ath10k_htt_tx_txq_update(hw, txq);
4033 
4034 		if (artxq == last || (ret < 0 && ret != -ENOENT))
4035 			break;
4036 	}
4037 
4038 	rcu_read_unlock();
4039 	spin_unlock_bh(&ar->txqs_lock);
4040 }
4041 
4042 /************/
4043 /* Scanning */
4044 /************/
4045 
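/* Complete a scan or remain-on-channel operation and reset the scan state
 * machine to idle. Notifies mac80211 (scan completed or RoC expired),
 * purges queued offchannel frames and wakes any waiters on
 * ar->scan.completed. Caller must hold ar->data_lock.
 */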
4046 void __ath10k_scan_finish(struct ath10k *ar)
4047 {
4048 	lockdep_assert_held(&ar->data_lock);
4049 
4050 	switch (ar->scan.state) {
4051 	case ATH10K_SCAN_IDLE:
4052 		break;
4053 	case ATH10K_SCAN_RUNNING:
4054 	case ATH10K_SCAN_ABORTING:
4055 		if (!ar->scan.is_roc) {
4056 			struct cfg80211_scan_info info = {
4057 				.aborted = (ar->scan.state ==
4058 					    ATH10K_SCAN_ABORTING),
4059 			};
4060 
4061 			ieee80211_scan_completed(ar->hw, &info);
4062 		} else if (ar->scan.roc_notify) {
4063 			ieee80211_remain_on_channel_expired(ar->hw);
4064 		}
4065 		/* fall through */
4066 	case ATH10K_SCAN_STARTING:
4067 		ar->scan.state = ATH10K_SCAN_IDLE;
4068 		ar->scan_channel = NULL;
4069 		ar->scan.roc_freq = 0;
4070 		ath10k_offchan_tx_purge(ar);
4071 		cancel_delayed_work(&ar->scan.timeout);
4072 		complete(&ar->scan.completed);
4073 		break;
4074 	}
4075 }
4076 
4077 void ath10k_scan_finish(struct ath10k *ar)
4078 {
4079 	spin_lock_bh(&ar->data_lock);
4080 	__ath10k_scan_finish(ar);
4081 	spin_unlock_bh(&ar->data_lock);
4082 }
4083 
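/* Ask firmware to stop the currently running scan and wait up to 3
 * seconds for the completion event. The scan state is cleaned up even if
 * the event never arrives (see the comment at the out label below).
 */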
4084 static int ath10k_scan_stop(struct ath10k *ar)
4085 {
4086 	struct wmi_stop_scan_arg arg = {
4087 		.req_id = 1, /* FIXME */
4088 		.req_type = WMI_SCAN_STOP_ONE,
4089 		.u.scan_id = ATH10K_SCAN_ID,
4090 	};
4091 	int ret;
4092 
4093 	lockdep_assert_held(&ar->conf_mutex);
4094 
4095 	ret = ath10k_wmi_stop_scan(ar, &arg);
4096 	if (ret) {
4097 		ath10k_warn(ar, "failed to stop wmi scan: %d\n", ret);
4098 		goto out;
4099 	}
4100 
4101 	ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ);
4102 	if (ret == 0) {
4103 		ath10k_warn(ar, "failed to receive scan abortion completion: timed out\n");
4104 		ret = -ETIMEDOUT;
4105 	} else if (ret > 0) {
4106 		ret = 0;
4107 	}
4108 
4109 out:
4110 	/* Scan state should be updated upon scan completion but in case
4111 	 * firmware fails to deliver the event (for whatever reason) it is
4112 	 * desired to clean up scan state anyway. Firmware may have just
4113 	 * dropped the scan completion event delivery due to the transport
4114 	 * pipe being overflowed with data and/or it can recover on its own
4115 	 * before the next scan request is submitted.
4116 	 */
4117 	spin_lock_bh(&ar->data_lock);
4118 	if (ar->scan.state != ATH10K_SCAN_IDLE)
4119 		__ath10k_scan_finish(ar);
4120 	spin_unlock_bh(&ar->data_lock);
4121 
4122 	return ret;
4123 }
4124 
4125 static void ath10k_scan_abort(struct ath10k *ar)
4126 {
4127 	int ret;
4128 
4129 	lockdep_assert_held(&ar->conf_mutex);
4130 
4131 	spin_lock_bh(&ar->data_lock);
4132 
4133 	switch (ar->scan.state) {
4134 	case ATH10K_SCAN_IDLE:
4135 		/* This can happen if timeout worker kicked in and called
4136 		 * abortion while scan completion was being processed.
4137 		 */
4138 		break;
4139 	case ATH10K_SCAN_STARTING:
4140 	case ATH10K_SCAN_ABORTING:
4141 		ath10k_warn(ar, "refusing scan abortion due to invalid scan state: %s (%d)\n",
4142 			    ath10k_scan_state_str(ar->scan.state),
4143 			    ar->scan.state);
4144 		break;
4145 	case ATH10K_SCAN_RUNNING:
4146 		ar->scan.state = ATH10K_SCAN_ABORTING;
4147 		spin_unlock_bh(&ar->data_lock);
4148 
4149 		ret = ath10k_scan_stop(ar);
4150 		if (ret)
4151 			ath10k_warn(ar, "failed to abort scan: %d\n", ret);
4152 
4153 		spin_lock_bh(&ar->data_lock);
4154 		break;
4155 	}
4156 
4157 	spin_unlock_bh(&ar->data_lock);
4158 }
4159 
4160 void ath10k_scan_timeout_work(struct work_struct *work)
4161 {
4162 	struct ath10k *ar = container_of(work, struct ath10k,
4163 					 scan.timeout.work);
4164 
4165 	mutex_lock(&ar->conf_mutex);
4166 	ath10k_scan_abort(ar);
4167 	mutex_unlock(&ar->conf_mutex);
4168 }
4169 
4170 static int ath10k_start_scan(struct ath10k *ar,
4171 			     const struct wmi_start_scan_arg *arg)
4172 {
4173 	int ret;
4174 
4175 	lockdep_assert_held(&ar->conf_mutex);
4176 
4177 	ret = ath10k_wmi_start_scan(ar, arg);
4178 	if (ret)
4179 		return ret;
4180 
4181 	ret = wait_for_completion_timeout(&ar->scan.started, 1 * HZ);
4182 	if (ret == 0) {
4183 		ret = ath10k_scan_stop(ar);
4184 		if (ret)
4185 			ath10k_warn(ar, "failed to stop scan: %d\n", ret);
4186 
4187 		return -ETIMEDOUT;
4188 	}
4189 
4190 	/* If we failed to start the scan, return error code at
4191 	 * this point.  This is probably due to some issue in the
4192 	 * firmware, but no need to wedge the driver due to that...
4193 	 */
4194 	spin_lock_bh(&ar->data_lock);
4195 	if (ar->scan.state == ATH10K_SCAN_IDLE) {
4196 		spin_unlock_bh(&ar->data_lock);
4197 		return -EINVAL;
4198 	}
4199 	spin_unlock_bh(&ar->data_lock);
4200 
4201 	return 0;
4202 }
4203 
4204 /**********************/
4205 /* mac80211 callbacks */
4206 /**********************/
4207 
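/* mac80211 .tx callback: classify the frame, reserve HTT tx credits for
 * frames going over HTT and hand the skb to the common tx path. The frame
 * is dropped (freed back to mac80211) if credits cannot be reserved;
 * counters are rolled back if transmission fails.
 */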
4208 static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
4209 			     struct ieee80211_tx_control *control,
4210 			     struct sk_buff *skb)
4211 {
4212 	struct ath10k *ar = hw->priv;
4213 	struct ath10k_htt *htt = &ar->htt;
4214 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
4215 	struct ieee80211_vif *vif = info->control.vif;
4216 	struct ieee80211_sta *sta = control->sta;
4217 	struct ieee80211_txq *txq = NULL;
4218 	struct ieee80211_hdr *hdr = (void *)skb->data;
4219 	enum ath10k_hw_txrx_mode txmode;
4220 	enum ath10k_mac_tx_path txpath;
4221 	bool is_htt;
4222 	bool is_mgmt;
4223 	bool is_presp;
4224 	int ret;
4225 
4226 	ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb);
4227 
4228 	txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
4229 	txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
4230 	is_htt = (txpath == ATH10K_MAC_TX_HTT ||
4231 		  txpath == ATH10K_MAC_TX_HTT_MGMT);
4232 	is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT);
4233 
4234 	if (is_htt) {
4235 		spin_lock_bh(&ar->htt.tx_lock);
4236 		is_presp = ieee80211_is_probe_resp(hdr->frame_control);
4237 
4238 		ret = ath10k_htt_tx_inc_pending(htt);
4239 		if (ret) {
4240 			ath10k_warn(ar, "failed to increase tx pending count: %d, dropping\n",
4241 				    ret);
4242 			spin_unlock_bh(&ar->htt.tx_lock);
4243 			ieee80211_free_txskb(ar->hw, skb);
4244 			return;
4245 		}
4246 
4247 		ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
4248 		if (ret) {
4249 			ath10k_dbg(ar, ATH10K_DBG_MAC, "failed to increase tx mgmt pending count: %d, dropping\n",
4250 				   ret);
4251 			ath10k_htt_tx_dec_pending(htt);
4252 			spin_unlock_bh(&ar->htt.tx_lock);
4253 			ieee80211_free_txskb(ar->hw, skb);
4254 			return;
4255 		}
4256 		spin_unlock_bh(&ar->htt.tx_lock);
4257 	}
4258 
4259 	ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb);
4260 	if (ret) {
4261 		ath10k_warn(ar, "failed to transmit frame: %d\n", ret);
4262 		if (is_htt) {
4263 			spin_lock_bh(&ar->htt.tx_lock);
4264 			ath10k_htt_tx_dec_pending(htt);
4265 			if (is_mgmt)
4266 				ath10k_htt_tx_mgmt_dec_pending(htt);
4267 			spin_unlock_bh(&ar->htt.tx_lock);
4268 		}
4269 		return;
4270 	}
4271 }
4272 
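/* mac80211 .wake_tx_queue callback: schedule the txq on ar->txqs and
 * service the head of the list, pushing up to 16 frames while firmware
 * has room.
 */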
4273 static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
4274 					struct ieee80211_txq *txq)
4275 {
4276 	struct ath10k *ar = hw->priv;
4277 	struct ath10k_txq *artxq = (void *)txq->drv_priv;
4278 	struct ieee80211_txq *f_txq;
4279 	struct ath10k_txq *f_artxq;
4280 	int ret = 0;
4281 	int max = 16;
4282 
4283 	spin_lock_bh(&ar->txqs_lock);
4284 	if (list_empty(&artxq->list))
4285 		list_add_tail(&artxq->list, &ar->txqs);
4286 
4287 	f_artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list);
4288 	f_txq = container_of((void *)f_artxq, struct ieee80211_txq, drv_priv);
4289 	list_del_init(&f_artxq->list);
4290 
4291 	while (ath10k_mac_tx_can_push(hw, f_txq) && max--) {
4292 		ret = ath10k_mac_tx_push_txq(hw, f_txq);
4293 		if (ret)
4294 			break;
4295 	}
4296 	if (ret != -ENOENT)
4297 		list_add_tail(&f_artxq->list, &ar->txqs);
4298 	spin_unlock_bh(&ar->txqs_lock);
4299 
4300 	ath10k_htt_tx_txq_update(hw, f_txq);
4301 	ath10k_htt_tx_txq_update(hw, txq);
4302 }
4303 
4304 /* Must not be called with conf_mutex held as workers can use that also. */
4305 void ath10k_drain_tx(struct ath10k *ar)
4306 {
4307 	/* make sure rcu-protected mac80211 tx path itself is drained */
4308 	synchronize_net();
4309 
4310 	ath10k_offchan_tx_purge(ar);
4311 	ath10k_mgmt_over_wmi_tx_purge(ar);
4312 
4313 	cancel_work_sync(&ar->offchan_tx_work);
4314 	cancel_work_sync(&ar->wmi_mgmt_tx_work);
4315 }
4316 
4317 void ath10k_halt(struct ath10k *ar)
4318 {
4319 	struct ath10k_vif *arvif;
4320 
4321 	lockdep_assert_held(&ar->conf_mutex);
4322 
4323 	clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
4324 	ar->filter_flags = 0;
4325 	ar->monitor = false;
4326 	ar->monitor_arvif = NULL;
4327 
4328 	if (ar->monitor_started)
4329 		ath10k_monitor_stop(ar);
4330 
4331 	ar->monitor_started = false;
4332 	ar->tx_paused = 0;
4333 
4334 	ath10k_scan_finish(ar);
4335 	ath10k_peer_cleanup_all(ar);
4336 	ath10k_core_stop(ar);
4337 	ath10k_hif_power_down(ar);
4338 
4339 	spin_lock_bh(&ar->data_lock);
4340 	list_for_each_entry(arvif, &ar->arvifs, list)
4341 		ath10k_mac_vif_beacon_cleanup(arvif);
4342 	spin_unlock_bh(&ar->data_lock);
4343 }
4344 
4345 static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
4346 {
4347 	struct ath10k *ar = hw->priv;
4348 
4349 	mutex_lock(&ar->conf_mutex);
4350 
4351 	*tx_ant = ar->cfg_tx_chainmask;
4352 	*rx_ant = ar->cfg_rx_chainmask;
4353 
4354 	mutex_unlock(&ar->conf_mutex);
4355 
4356 	return 0;
4357 }
4358 
4359 static void ath10k_check_chain_mask(struct ath10k *ar, u32 cm, const char *dbg)
4360 {
4361 	/* It is not clear that allowing gaps in the chainmask
4362 	 * is helpful.  It probably will not do what the user
4363 	 * is hoping for, so warn in that case.
4364 	 */
4365 	if (cm == 15 || cm == 7 || cm == 3 || cm == 1 || cm == 0)
4366 		return;
4367 
4368 	ath10k_warn(ar, "mac %s antenna chainmask may be invalid: 0x%x.  Suggested values: 15, 7, 3, 1 or 0.\n",
4369 		    dbg, cm);
4370 }
4371 
4372 static int ath10k_mac_get_vht_cap_bf_sts(struct ath10k *ar)
4373 {
4374 	int nsts = ar->vht_cap_info;
4375 
4376 	nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
4377 	nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
4378 
4379 	/* If the firmware does not report the number of supported
4380 	 * space-time streams to the host, assume it supports up to 4 BF STS
4381 	 * and return the VHT CAP encoding (nsts - 1).
4382 	 */
4383 	if (nsts == 0)
4384 		return 3;
4385 
4386 	return nsts;
4387 }
4388 
4389 static int ath10k_mac_get_vht_cap_bf_sound_dim(struct ath10k *ar)
4390 {
4391 	int sound_dim = ar->vht_cap_info;
4392 
4393 	sound_dim &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
4394 	sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
4395 
4396 	/* If the sounding dimension is not advertised by the firmware,
4397 	 * let's use a default value of 1
4398 	 */
4399 	if (sound_dim == 0)
4400 		return 1;
4401 
4402 	return sound_dim;
4403 }
4404 
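/* Build the VHT capabilities advertised to mac80211 from the firmware
 * reported vht_cap_info, the configured tx chainmask and hw_params
 * (beamformee STS, sounding dimensions, 160 MHz support and MCS maps).
 */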
4405 static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
4406 {
4407 	struct ieee80211_sta_vht_cap vht_cap = {0};
4408 	struct ath10k_hw_params *hw = &ar->hw_params;
4409 	u16 mcs_map;
4410 	u32 val;
4411 	int i;
4412 
4413 	vht_cap.vht_supported = 1;
4414 	vht_cap.cap = ar->vht_cap_info;
4415 
4416 	if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
4417 				IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
4418 		val = ath10k_mac_get_vht_cap_bf_sts(ar);
4419 		val <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
4420 		val &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
4421 
4422 		vht_cap.cap |= val;
4423 	}
4424 
4425 	if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
4426 				IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
4427 		val = ath10k_mac_get_vht_cap_bf_sound_dim(ar);
4428 		val <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
4429 		val &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
4430 
4431 		vht_cap.cap |= val;
4432 	}
4433 
4434 	/* Currently the firmware seems to be buggy; don't enable 80+80
4435 	 * mode until that's resolved.
4436 	 */
4437 	if ((ar->vht_cap_info & IEEE80211_VHT_CAP_SHORT_GI_160) &&
4438 	    (ar->vht_cap_info & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) == 0)
4439 		vht_cap.cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
4440 
4441 	mcs_map = 0;
4442 	for (i = 0; i < 8; i++) {
4443 		if ((i < ar->num_rf_chains) && (ar->cfg_tx_chainmask & BIT(i)))
4444 			mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
4445 		else
4446 			mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
4447 	}
4448 
4449 	if (ar->cfg_tx_chainmask <= 1)
4450 		vht_cap.cap &= ~IEEE80211_VHT_CAP_TXSTBC;
4451 
4452 	vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
4453 	vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
4454 
4455 	/* If we support 160 MHz or 80+80, then the NIC may only be able to do
4456 	 * a restricted NSS for 160 or 80+80 vs what it can do for 80 MHz.  Give
4457 	 * user-space a clue if that is the case.
4458 	 */
4459 	if ((vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) &&
4460 	    (hw->vht160_mcs_rx_highest != 0 ||
4461 	     hw->vht160_mcs_tx_highest != 0)) {
4462 		vht_cap.vht_mcs.rx_highest = cpu_to_le16(hw->vht160_mcs_rx_highest);
4463 		vht_cap.vht_mcs.tx_highest = cpu_to_le16(hw->vht160_mcs_tx_highest);
4464 	}
4465 
4466 	return vht_cap;
4467 }
4468 
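/* Build the HT capabilities advertised to mac80211 from the firmware
 * reported ht_cap_info and the configured rx/tx chainmasks.
 */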
4469 static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar)
4470 {
4471 	int i;
4472 	struct ieee80211_sta_ht_cap ht_cap = {0};
4473 
4474 	if (!(ar->ht_cap_info & WMI_HT_CAP_ENABLED))
4475 		return ht_cap;
4476 
4477 	ht_cap.ht_supported = 1;
4478 	ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
4479 	ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
4480 	ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
4481 	ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
4482 	ht_cap.cap |=
4483 		WLAN_HT_CAP_SM_PS_DISABLED << IEEE80211_HT_CAP_SM_PS_SHIFT;
4484 
4485 	if (ar->ht_cap_info & WMI_HT_CAP_HT20_SGI)
4486 		ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
4487 
4488 	if (ar->ht_cap_info & WMI_HT_CAP_HT40_SGI)
4489 		ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
4490 
4491 	if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) {
4492 		u32 smps;
4493 
4494 		smps   = WLAN_HT_CAP_SM_PS_DYNAMIC;
4495 		smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT;
4496 
4497 		ht_cap.cap |= smps;
4498 	}
4499 
4500 	if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC && (ar->cfg_tx_chainmask > 1))
4501 		ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
4502 
4503 	if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) {
4504 		u32 stbc;
4505 
4506 		stbc   = ar->ht_cap_info;
4507 		stbc  &= WMI_HT_CAP_RX_STBC;
4508 		stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT;
4509 		stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT;
4510 		stbc  &= IEEE80211_HT_CAP_RX_STBC;
4511 
4512 		ht_cap.cap |= stbc;
4513 	}
4514 
4515 	if (ar->ht_cap_info & WMI_HT_CAP_LDPC)
4516 		ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
4517 
4518 	if (ar->ht_cap_info & WMI_HT_CAP_L_SIG_TXOP_PROT)
4519 		ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT;
4520 
4521 	/* max AMSDU is implicitly taken from vht_cap_info */
4522 	if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK)
4523 		ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
4524 
4525 	for (i = 0; i < ar->num_rf_chains; i++) {
4526 		if (ar->cfg_rx_chainmask & BIT(i))
4527 			ht_cap.mcs.rx_mask[i] = 0xFF;
4528 	}
4529 
4530 	ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
4531 
4532 	return ht_cap;
4533 }
4534 
4535 static void ath10k_mac_setup_ht_vht_cap(struct ath10k *ar)
4536 {
4537 	struct ieee80211_supported_band *band;
4538 	struct ieee80211_sta_vht_cap vht_cap;
4539 	struct ieee80211_sta_ht_cap ht_cap;
4540 
4541 	ht_cap = ath10k_get_ht_cap(ar);
4542 	vht_cap = ath10k_create_vht_cap(ar);
4543 
4544 	if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
4545 		band = &ar->mac.sbands[NL80211_BAND_2GHZ];
4546 		band->ht_cap = ht_cap;
4547 	}
4548 	if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
4549 		band = &ar->mac.sbands[NL80211_BAND_5GHZ];
4550 		band->ht_cap = ht_cap;
4551 		band->vht_cap = vht_cap;
4552 	}
4553 }
4554 
4555 static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant)
4556 {
4557 	int ret;
4558 
4559 	lockdep_assert_held(&ar->conf_mutex);
4560 
4561 	ath10k_check_chain_mask(ar, tx_ant, "tx");
4562 	ath10k_check_chain_mask(ar, rx_ant, "rx");
4563 
4564 	ar->cfg_tx_chainmask = tx_ant;
4565 	ar->cfg_rx_chainmask = rx_ant;
4566 
4567 	if ((ar->state != ATH10K_STATE_ON) &&
4568 	    (ar->state != ATH10K_STATE_RESTARTED))
4569 		return 0;
4570 
4571 	ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->tx_chain_mask,
4572 					tx_ant);
4573 	if (ret) {
4574 		ath10k_warn(ar, "failed to set tx-chainmask: %d, req 0x%x\n",
4575 			    ret, tx_ant);
4576 		return ret;
4577 	}
4578 
4579 	ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rx_chain_mask,
4580 					rx_ant);
4581 	if (ret) {
4582 		ath10k_warn(ar, "failed to set rx-chainmask: %d, req 0x%x\n",
4583 			    ret, rx_ant);
4584 		return ret;
4585 	}
4586 
4587 	/* Reload HT/VHT capability */
4588 	ath10k_mac_setup_ht_vht_cap(ar);
4589 
4590 	return 0;
4591 }
4592 
4593 static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
4594 {
4595 	struct ath10k *ar = hw->priv;
4596 	int ret;
4597 
4598 	mutex_lock(&ar->conf_mutex);
4599 	ret = __ath10k_set_antenna(ar, tx_ant, rx_ant);
4600 	mutex_unlock(&ar->conf_mutex);
4601 	return ret;
4602 }
4603 
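/* mac80211 .start callback: power up the HIF, start the core and apply
 * the default pdev configuration (PMF QoS, dynamic BW, chainmasks, ARP AC
 * override, ANI, etc.) before normal operation begins.
 */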
4604 static int ath10k_start(struct ieee80211_hw *hw)
4605 {
4606 	struct ath10k *ar = hw->priv;
4607 	u32 param;
4608 	int ret = 0;
4609 
4610 	/*
4611 	 * This makes sense only when restarting hw. It is harmless to call
4612 	 * unconditionally. This is necessary to make sure no HTT/WMI tx
4613 	 * commands will be submitted while restarting.
4614 	 */
4615 	ath10k_drain_tx(ar);
4616 
4617 	mutex_lock(&ar->conf_mutex);
4618 
4619 	switch (ar->state) {
4620 	case ATH10K_STATE_OFF:
4621 		ar->state = ATH10K_STATE_ON;
4622 		break;
4623 	case ATH10K_STATE_RESTARTING:
4624 		ar->state = ATH10K_STATE_RESTARTED;
4625 		break;
4626 	case ATH10K_STATE_ON:
4627 	case ATH10K_STATE_RESTARTED:
4628 	case ATH10K_STATE_WEDGED:
4629 		WARN_ON(1);
4630 		ret = -EINVAL;
4631 		goto err;
4632 	case ATH10K_STATE_UTF:
4633 		ret = -EBUSY;
4634 		goto err;
4635 	}
4636 
4637 	ret = ath10k_hif_power_up(ar);
4638 	if (ret) {
4639 		ath10k_err(ar, "Could not init hif: %d\n", ret);
4640 		goto err_off;
4641 	}
4642 
4643 	ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL,
4644 				&ar->normal_mode_fw);
4645 	if (ret) {
4646 		ath10k_err(ar, "Could not init core: %d\n", ret);
4647 		goto err_power_down;
4648 	}
4649 
4650 	param = ar->wmi.pdev_param->pmf_qos;
4651 	ret = ath10k_wmi_pdev_set_param(ar, param, 1);
4652 	if (ret) {
4653 		ath10k_warn(ar, "failed to enable PMF QOS: %d\n", ret);
4654 		goto err_core_stop;
4655 	}
4656 
4657 	param = ar->wmi.pdev_param->dynamic_bw;
4658 	ret = ath10k_wmi_pdev_set_param(ar, param, 1);
4659 	if (ret) {
4660 		ath10k_warn(ar, "failed to enable dynamic BW: %d\n", ret);
4661 		goto err_core_stop;
4662 	}
4663 
4664 	if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
4665 		ret = ath10k_wmi_adaptive_qcs(ar, true);
4666 		if (ret) {
4667 			ath10k_warn(ar, "failed to enable adaptive qcs: %d\n",
4668 				    ret);
4669 			goto err_core_stop;
4670 		}
4671 	}
4672 
4673 	if (test_bit(WMI_SERVICE_BURST, ar->wmi.svc_map)) {
4674 		param = ar->wmi.pdev_param->burst_enable;
4675 		ret = ath10k_wmi_pdev_set_param(ar, param, 0);
4676 		if (ret) {
4677 			ath10k_warn(ar, "failed to disable burst: %d\n", ret);
4678 			goto err_core_stop;
4679 		}
4680 	}
4681 
4682 	__ath10k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask);
4683 
4684 	/*
4685 	 * By default the FW sets the ARP frame access category to voice (6).
4686 	 * In that case the ARP exchange does not work properly with a UAPSD
4687 	 * enabled AP. ARP requests that arrive with access category 0 are
4688 	 * processed by the network stack and sent back with access category
4689 	 * 0, but the FW changes the access category to 6. Setting the ARP
4690 	 * frame access category to best effort (0) solves this problem.
4691 	 */
4692 
4693 	param = ar->wmi.pdev_param->arp_ac_override;
4694 	ret = ath10k_wmi_pdev_set_param(ar, param, 0);
4695 	if (ret) {
4696 		ath10k_warn(ar, "failed to set arp ac override parameter: %d\n",
4697 			    ret);
4698 		goto err_core_stop;
4699 	}
4700 
4701 	if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA,
4702 		     ar->running_fw->fw_file.fw_features)) {
4703 		ret = ath10k_wmi_pdev_enable_adaptive_cca(ar, 1,
4704 							  WMI_CCA_DETECT_LEVEL_AUTO,
4705 							  WMI_CCA_DETECT_MARGIN_AUTO);
4706 		if (ret) {
4707 			ath10k_warn(ar, "failed to enable adaptive cca: %d\n",
4708 				    ret);
4709 			goto err_core_stop;
4710 		}
4711 	}
4712 
4713 	param = ar->wmi.pdev_param->ani_enable;
4714 	ret = ath10k_wmi_pdev_set_param(ar, param, 1);
4715 	if (ret) {
4716 		ath10k_warn(ar, "failed to enable ani by default: %d\n",
4717 			    ret);
4718 		goto err_core_stop;
4719 	}
4720 
4721 	ar->ani_enabled = true;
4722 
4723 	if (ath10k_peer_stats_enabled(ar)) {
4724 		param = ar->wmi.pdev_param->peer_stats_update_period;
4725 		ret = ath10k_wmi_pdev_set_param(ar, param,
4726 						PEER_DEFAULT_STATS_UPDATE_PERIOD);
4727 		if (ret) {
4728 			ath10k_warn(ar,
4729 				    "failed to set peer stats period : %d\n",
4730 				    ret);
4731 			goto err_core_stop;
4732 		}
4733 	}
4734 
4735 	param = ar->wmi.pdev_param->enable_btcoex;
4736 	if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) &&
4737 	    test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM,
4738 		     ar->running_fw->fw_file.fw_features)) {
4739 		ret = ath10k_wmi_pdev_set_param(ar, param, 0);
4740 		if (ret) {
4741 			ath10k_warn(ar,
4742 				    "failed to set btcoex param: %d\n", ret);
4743 			goto err_core_stop;
4744 		}
4745 		clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags);
4746 	}
4747 
4748 	ar->num_started_vdevs = 0;
4749 	ath10k_regd_update(ar);
4750 
4751 	ath10k_spectral_start(ar);
4752 	ath10k_thermal_set_throttling(ar);
4753 
4754 	mutex_unlock(&ar->conf_mutex);
4755 	return 0;
4756 
4757 err_core_stop:
4758 	ath10k_core_stop(ar);
4759 
4760 err_power_down:
4761 	ath10k_hif_power_down(ar);
4762 
4763 err_off:
4764 	ar->state = ATH10K_STATE_OFF;
4765 
4766 err:
4767 	mutex_unlock(&ar->conf_mutex);
4768 	return ret;
4769 }
4770 
4771 static void ath10k_stop(struct ieee80211_hw *hw)
4772 {
4773 	struct ath10k *ar = hw->priv;
4774 
4775 	ath10k_drain_tx(ar);
4776 
4777 	mutex_lock(&ar->conf_mutex);
4778 	if (ar->state != ATH10K_STATE_OFF) {
4779 		ath10k_halt(ar);
4780 		ar->state = ATH10K_STATE_OFF;
4781 	}
4782 	mutex_unlock(&ar->conf_mutex);
4783 
4784 	cancel_work_sync(&ar->set_coverage_class_work);
4785 	cancel_delayed_work_sync(&ar->scan.timeout);
4786 	cancel_work_sync(&ar->restart_work);
4787 }
4788 
4789 static int ath10k_config_ps(struct ath10k *ar)
4790 {
4791 	struct ath10k_vif *arvif;
4792 	int ret = 0;
4793 
4794 	lockdep_assert_held(&ar->conf_mutex);
4795 
4796 	list_for_each_entry(arvif, &ar->arvifs, list) {
4797 		ret = ath10k_mac_vif_setup_ps(arvif);
4798 		if (ret) {
4799 			ath10k_warn(ar, "failed to setup powersave: %d\n", ret);
4800 			break;
4801 		}
4802 	}
4803 
4804 	return ret;
4805 }
4806 
4807 static int ath10k_mac_txpower_setup(struct ath10k *ar, int txpower)
4808 {
4809 	int ret;
4810 	u32 param;
4811 
4812 	lockdep_assert_held(&ar->conf_mutex);
4813 
4814 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac txpower %d\n", txpower);
4815 
4816 	param = ar->wmi.pdev_param->txpower_limit2g;
4817 	ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2);
4818 	if (ret) {
4819 		ath10k_warn(ar, "failed to set 2g txpower %d: %d\n",
4820 			    txpower, ret);
4821 		return ret;
4822 	}
4823 
4824 	param = ar->wmi.pdev_param->txpower_limit5g;
4825 	ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2);
4826 	if (ret) {
4827 		ath10k_warn(ar, "failed to set 5g txpower %d: %d\n",
4828 			    txpower, ret);
4829 		return ret;
4830 	}
4831 
4832 	return 0;
4833 }
4834 
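/* Recalculate the effective tx power limit as the minimum of all
 * configured (positive) per-vif tx power values and program it into
 * firmware. Nothing is done if no vif has a valid tx power set.
 */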
4835 static int ath10k_mac_txpower_recalc(struct ath10k *ar)
4836 {
4837 	struct ath10k_vif *arvif;
4838 	int ret, txpower = -1;
4839 
4840 	lockdep_assert_held(&ar->conf_mutex);
4841 
4842 	list_for_each_entry(arvif, &ar->arvifs, list) {
4843 		if (arvif->txpower <= 0)
4844 			continue;
4845 
4846 		if (txpower == -1)
4847 			txpower = arvif->txpower;
4848 		else
4849 			txpower = min(txpower, arvif->txpower);
4850 	}
4851 
4852 	if (txpower == -1)
4853 		return 0;
4854 
4855 	ret = ath10k_mac_txpower_setup(ar, txpower);
4856 	if (ret) {
4857 		ath10k_warn(ar, "failed to setup tx power %d: %d\n",
4858 			    txpower, ret);
4859 		return ret;
4860 	}
4861 
4862 	return 0;
4863 }
4864 
4865 static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
4866 {
4867 	struct ath10k *ar = hw->priv;
4868 	struct ieee80211_conf *conf = &hw->conf;
4869 	int ret = 0;
4870 
4871 	mutex_lock(&ar->conf_mutex);
4872 
4873 	if (changed & IEEE80211_CONF_CHANGE_PS)
4874 		ath10k_config_ps(ar);
4875 
4876 	if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
4877 		ar->monitor = conf->flags & IEEE80211_CONF_MONITOR;
4878 		ret = ath10k_monitor_recalc(ar);
4879 		if (ret)
4880 			ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
4881 	}
4882 
4883 	mutex_unlock(&ar->conf_mutex);
4884 	return ret;
4885 }
4886 
4887 static u32 get_nss_from_chainmask(u16 chain_mask)
4888 {
4889 	if ((chain_mask & 0xf) == 0xf)
4890 		return 4;
4891 	else if ((chain_mask & 0x7) == 0x7)
4892 		return 3;
4893 	else if ((chain_mask & 0x3) == 0x3)
4894 		return 2;
4895 	return 1;
4896 }
4897 
4898 static int ath10k_mac_set_txbf_conf(struct ath10k_vif *arvif)
4899 {
4900 	u32 value = 0;
4901 	struct ath10k *ar = arvif->ar;
4902 	int nsts;
4903 	int sound_dim;
4904 
4905 	if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_BEFORE_ASSOC)
4906 		return 0;
4907 
4908 	nsts = ath10k_mac_get_vht_cap_bf_sts(ar);
4909 	if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
4910 				IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE))
4911 		value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET);
4912 
4913 	sound_dim = ath10k_mac_get_vht_cap_bf_sound_dim(ar);
4914 	if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
4915 				IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))
4916 		value |= SM(sound_dim, WMI_BF_SOUND_DIM_OFFSET);
4917 
4918 	if (!value)
4919 		return 0;
4920 
4921 	if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
4922 		value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
4923 
4924 	if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
4925 		value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFER |
4926 			  WMI_VDEV_PARAM_TXBF_SU_TX_BFER);
4927 
4928 	if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
4929 		value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
4930 
4931 	if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
4932 		value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFEE |
4933 			  WMI_VDEV_PARAM_TXBF_SU_TX_BFEE);
4934 
4935 	return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
4936 					 ar->wmi.vdev_param->txbf, value);
4937 }
4938 
4939 /*
4940  * TODO:
4941  * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE,
4942  * because we will send mgmt frames without CCK. This requirement
4943  * for P2P_FIND/GO_NEG should be handled by checking CCK flag
4944  * in the TX packet.
4945  */
4946 static int ath10k_add_interface(struct ieee80211_hw *hw,
4947 				struct ieee80211_vif *vif)
4948 {
4949 	struct ath10k *ar = hw->priv;
4950 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
4951 	struct ath10k_peer *peer;
4952 	enum wmi_sta_powersave_param param;
4953 	int ret = 0;
4954 	u32 value;
4955 	int bit;
4956 	int i;
4957 	u32 vdev_param;
4958 
4959 	vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
4960 
4961 	mutex_lock(&ar->conf_mutex);
4962 
4963 	memset(arvif, 0, sizeof(*arvif));
4964 	ath10k_mac_txq_init(vif->txq);
4965 
4966 	arvif->ar = ar;
4967 	arvif->vif = vif;
4968 
4969 	INIT_LIST_HEAD(&arvif->list);
4970 	INIT_WORK(&arvif->ap_csa_work, ath10k_mac_vif_ap_csa_work);
4971 	INIT_DELAYED_WORK(&arvif->connection_loss_work,
4972 			  ath10k_mac_vif_sta_connection_loss_work);
4973 
4974 	for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
4975 		arvif->bitrate_mask.control[i].legacy = 0xffffffff;
4976 		memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
4977 		       sizeof(arvif->bitrate_mask.control[i].ht_mcs));
4978 		memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
4979 		       sizeof(arvif->bitrate_mask.control[i].vht_mcs));
4980 	}
4981 
4982 	if (ar->num_peers >= ar->max_num_peers) {
4983 		ath10k_warn(ar, "refusing vdev creation due to insufficient peer entry resources in firmware\n");
4984 		ret = -ENOBUFS;
4985 		goto err;
4986 	}
4987 
4988 	if (ar->free_vdev_map == 0) {
4989 		ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n");
4990 		ret = -EBUSY;
4991 		goto err;
4992 	}
4993 	bit = __ffs64(ar->free_vdev_map);
4994 
4995 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac create vdev %i map %llx\n",
4996 		   bit, ar->free_vdev_map);
4997 
4998 	arvif->vdev_id = bit;
4999 	arvif->vdev_subtype =
5000 		ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_NONE);
5001 
5002 	switch (vif->type) {
5003 	case NL80211_IFTYPE_P2P_DEVICE:
5004 		arvif->vdev_type = WMI_VDEV_TYPE_STA;
5005 		arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
5006 					(ar, WMI_VDEV_SUBTYPE_P2P_DEVICE);
5007 		break;
5008 	case NL80211_IFTYPE_UNSPECIFIED:
5009 	case NL80211_IFTYPE_STATION:
5010 		arvif->vdev_type = WMI_VDEV_TYPE_STA;
5011 		if (vif->p2p)
5012 			arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
5013 					(ar, WMI_VDEV_SUBTYPE_P2P_CLIENT);
5014 		break;
5015 	case NL80211_IFTYPE_ADHOC:
5016 		arvif->vdev_type = WMI_VDEV_TYPE_IBSS;
5017 		break;
5018 	case NL80211_IFTYPE_MESH_POINT:
5019 		if (test_bit(WMI_SERVICE_MESH_11S, ar->wmi.svc_map)) {
5020 			arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
5021 						(ar, WMI_VDEV_SUBTYPE_MESH_11S);
5022 		} else if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
5023 			ret = -EINVAL;
5024 			ath10k_warn(ar, "must load driver with rawmode=1 to add mesh interfaces\n");
5025 			goto err;
5026 		}
5027 		arvif->vdev_type = WMI_VDEV_TYPE_AP;
5028 		break;
5029 	case NL80211_IFTYPE_AP:
5030 		arvif->vdev_type = WMI_VDEV_TYPE_AP;
5031 
5032 		if (vif->p2p)
5033 			arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
5034 						(ar, WMI_VDEV_SUBTYPE_P2P_GO);
5035 		break;
5036 	case NL80211_IFTYPE_MONITOR:
5037 		arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
5038 		break;
5039 	default:
5040 		WARN_ON(1);
5041 		break;
5042 	}
5043 
5044 	/* Using vdev_id as queue number will make it very easy to do per-vif
5045 	 * tx queue locking. This shouldn't wrap due to interface combinations
5046 	 * but do a modulo for correctness' sake and to prevent using offchannel
5047 	 * tx queues for regular vif tx.
5048 	 */
5049 	vif->cab_queue = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);
5050 	for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++)
5051 		vif->hw_queue[i] = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);
5052 
5053 	/* Some firmware revisions don't wait for beacon tx completion before
5054 	 * sending another SWBA event. This could lead to hardware using old
5055 	 * (freed) beacon data in some cases, e.g. tx credit starvation
5056 	 * combined with missed TBTT. This is very very rare.
5057 	 *
5058 	 * On non-IOMMU-enabled hosts this could be a possible security issue
5059 	 * because hw could beacon some random data on the air.  On
5060 	 * IOMMU-enabled hosts DMAR faults would occur in most cases and target
5061 	 * device would crash.
5062 	 *
5063 	 * Since no beacon tx completions (neither implicit nor explicit) are
5064 	 * propagated to the host, the only workaround for this is to allocate
5065 	 * a DMA-coherent buffer for the lifetime of a vif and use it for all
5066 	 * beacon tx commands. Worst case for this approach is some beacons may
5067 	 * become corrupted, e.g. have garbled IEs or out-of-date TIM bitmap.
5068 	 */
5069 	if (vif->type == NL80211_IFTYPE_ADHOC ||
5070 	    vif->type == NL80211_IFTYPE_MESH_POINT ||
5071 	    vif->type == NL80211_IFTYPE_AP) {
5072 		arvif->beacon_buf = dma_zalloc_coherent(ar->dev,
5073 							IEEE80211_MAX_FRAME_LEN,
5074 							&arvif->beacon_paddr,
5075 							GFP_ATOMIC);
5076 		if (!arvif->beacon_buf) {
5077 			ret = -ENOMEM;
5078 			ath10k_warn(ar, "failed to allocate beacon buffer: %d\n",
5079 				    ret);
5080 			goto err;
5081 		}
5082 	}
5083 	if (test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags))
5084 		arvif->nohwcrypt = true;
5085 
5086 	if (arvif->nohwcrypt &&
5087 	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
5088 		ath10k_warn(ar, "cryptmode module param needed for sw crypto\n");
5089 		goto err;
5090 	}
5091 
5092 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n",
5093 		   arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype,
5094 		   arvif->beacon_buf ? "single-buf" : "per-skb");
5095 
5096 	ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
5097 				     arvif->vdev_subtype, vif->addr);
5098 	if (ret) {
5099 		ath10k_warn(ar, "failed to create WMI vdev %i: %d\n",
5100 			    arvif->vdev_id, ret);
5101 		goto err;
5102 	}
5103 
5104 	ar->free_vdev_map &= ~(1LL << arvif->vdev_id);
5105 	spin_lock_bh(&ar->data_lock);
5106 	list_add(&arvif->list, &ar->arvifs);
5107 	spin_unlock_bh(&ar->data_lock);
5108 
5109 	/* It makes no sense to have firmware do keepalives. mac80211 already
5110 	 * takes care of this with idle connection polling.
5111 	 */
5112 	ret = ath10k_mac_vif_disable_keepalive(arvif);
5113 	if (ret) {
5114 		ath10k_warn(ar, "failed to disable keepalive on vdev %i: %d\n",
5115 			    arvif->vdev_id, ret);
5116 		goto err_vdev_delete;
5117 	}
5118 
5119 	arvif->def_wep_key_idx = -1;
5120 
5121 	vdev_param = ar->wmi.vdev_param->tx_encap_type;
5122 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5123 					ATH10K_HW_TXRX_NATIVE_WIFI);
5124 	/* 10.X firmware does not support this VDEV parameter. Do not warn */
5125 	if (ret && ret != -EOPNOTSUPP) {
5126 		ath10k_warn(ar, "failed to set vdev %i TX encapsulation: %d\n",
5127 			    arvif->vdev_id, ret);
5128 		goto err_vdev_delete;
5129 	}
5130 
5131 	/* Configuring the number of spatial streams for a monitor interface
5132 	 * causes a target assert in qca9888 and qca6174.
5133 	 */
5134 	if (ar->cfg_tx_chainmask && (vif->type != NL80211_IFTYPE_MONITOR)) {
5135 		u16 nss = get_nss_from_chainmask(ar->cfg_tx_chainmask);
5136 
5137 		vdev_param = ar->wmi.vdev_param->nss;
5138 		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5139 						nss);
5140 		if (ret) {
5141 			ath10k_warn(ar, "failed to set vdev %i chainmask 0x%x, nss %i: %d\n",
5142 				    arvif->vdev_id, ar->cfg_tx_chainmask, nss,
5143 				    ret);
5144 			goto err_vdev_delete;
5145 		}
5146 	}
5147 
5148 	if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
5149 	    arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
5150 		ret = ath10k_peer_create(ar, vif, NULL, arvif->vdev_id,
5151 					 vif->addr, WMI_PEER_TYPE_DEFAULT);
5152 		if (ret) {
5153 			ath10k_warn(ar, "failed to create vdev %i peer for AP/IBSS: %d\n",
5154 				    arvif->vdev_id, ret);
5155 			goto err_vdev_delete;
5156 		}
5157 
5158 		spin_lock_bh(&ar->data_lock);
5159 
5160 		peer = ath10k_peer_find(ar, arvif->vdev_id, vif->addr);
5161 		if (!peer) {
5162 			ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n",
5163 				    vif->addr, arvif->vdev_id);
5164 			spin_unlock_bh(&ar->data_lock);
5165 			ret = -ENOENT;
5166 			goto err_peer_delete;
5167 		}
5168 
5169 		arvif->peer_id = find_first_bit(peer->peer_ids,
5170 						ATH10K_MAX_NUM_PEER_IDS);
5171 
5172 		spin_unlock_bh(&ar->data_lock);
5173 	} else {
5174 		arvif->peer_id = HTT_INVALID_PEERID;
5175 	}
5176 
5177 	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
5178 		ret = ath10k_mac_set_kickout(arvif);
5179 		if (ret) {
5180 			ath10k_warn(ar, "failed to set vdev %i kickout parameters: %d\n",
5181 				    arvif->vdev_id, ret);
5182 			goto err_peer_delete;
5183 		}
5184 	}
5185 
5186 	if (arvif->vdev_type == WMI_VDEV_TYPE_STA) {
5187 		param = WMI_STA_PS_PARAM_RX_WAKE_POLICY;
5188 		value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
5189 		ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
5190 						  param, value);
5191 		if (ret) {
5192 			ath10k_warn(ar, "failed to set vdev %i RX wake policy: %d\n",
5193 				    arvif->vdev_id, ret);
5194 			goto err_peer_delete;
5195 		}
5196 
5197 		ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif);
5198 		if (ret) {
5199 			ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n",
5200 				    arvif->vdev_id, ret);
5201 			goto err_peer_delete;
5202 		}
5203 
5204 		ret = ath10k_mac_vif_recalc_ps_poll_count(arvif);
5205 		if (ret) {
5206 			ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n",
5207 				    arvif->vdev_id, ret);
5208 			goto err_peer_delete;
5209 		}
5210 	}
5211 
5212 	ret = ath10k_mac_set_txbf_conf(arvif);
5213 	if (ret) {
5214 		ath10k_warn(ar, "failed to set txbf for vdev %d: %d\n",
5215 			    arvif->vdev_id, ret);
5216 		goto err_peer_delete;
5217 	}
5218 
5219 	ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
5220 	if (ret) {
5221 		ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
5222 			    arvif->vdev_id, ret);
5223 		goto err_peer_delete;
5224 	}
5225 
5226 	arvif->txpower = vif->bss_conf.txpower;
5227 	ret = ath10k_mac_txpower_recalc(ar);
5228 	if (ret) {
5229 		ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
5230 		goto err_peer_delete;
5231 	}
5232 
5233 	if (vif->type == NL80211_IFTYPE_MONITOR) {
5234 		ar->monitor_arvif = arvif;
5235 		ret = ath10k_monitor_recalc(ar);
5236 		if (ret) {
5237 			ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
5238 			goto err_peer_delete;
5239 		}
5240 	}
5241 
5242 	spin_lock_bh(&ar->htt.tx_lock);
5243 	if (!ar->tx_paused)
5244 		ieee80211_wake_queue(ar->hw, arvif->vdev_id);
5245 	spin_unlock_bh(&ar->htt.tx_lock);
5246 
5247 	mutex_unlock(&ar->conf_mutex);
5248 	return 0;
5249 
5250 err_peer_delete:
5251 	if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
5252 	    arvif->vdev_type == WMI_VDEV_TYPE_IBSS)
5253 		ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr);
5254 
5255 err_vdev_delete:
5256 	ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
5257 	ar->free_vdev_map |= 1LL << arvif->vdev_id;
5258 	spin_lock_bh(&ar->data_lock);
5259 	list_del(&arvif->list);
5260 	spin_unlock_bh(&ar->data_lock);
5261 
5262 err:
5263 	if (arvif->beacon_buf) {
5264 		dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
5265 				  arvif->beacon_buf, arvif->beacon_paddr);
5266 		arvif->beacon_buf = NULL;
5267 	}
5268 
5269 	mutex_unlock(&ar->conf_mutex);
5270 
5271 	return ret;
5272 }
5273 
5274 static void ath10k_mac_vif_tx_unlock_all(struct ath10k_vif *arvif)
5275 {
5276 	int i;
5277 
5278 	for (i = 0; i < BITS_PER_LONG; i++)
5279 		ath10k_mac_vif_tx_unlock(arvif, i);
5280 }
5281 
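/* mac80211 .remove_interface callback: tear down the vdev created in
 * ath10k_add_interface(), delete the AP/IBSS self-peer, drop any stale
 * peer_map references and release the vdev id back to free_vdev_map.
 */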
5282 static void ath10k_remove_interface(struct ieee80211_hw *hw,
5283 				    struct ieee80211_vif *vif)
5284 {
5285 	struct ath10k *ar = hw->priv;
5286 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
5287 	struct ath10k_peer *peer;
5288 	int ret;
5289 	int i;
5290 
5291 	cancel_work_sync(&arvif->ap_csa_work);
5292 	cancel_delayed_work_sync(&arvif->connection_loss_work);
5293 
5294 	mutex_lock(&ar->conf_mutex);
5295 
5296 	spin_lock_bh(&ar->data_lock);
5297 	ath10k_mac_vif_beacon_cleanup(arvif);
5298 	spin_unlock_bh(&ar->data_lock);
5299 
5300 	ret = ath10k_spectral_vif_stop(arvif);
5301 	if (ret)
5302 		ath10k_warn(ar, "failed to stop spectral for vdev %i: %d\n",
5303 			    arvif->vdev_id, ret);
5304 
5305 	ar->free_vdev_map |= 1LL << arvif->vdev_id;
5306 	spin_lock_bh(&ar->data_lock);
5307 	list_del(&arvif->list);
5308 	spin_unlock_bh(&ar->data_lock);
5309 
5310 	if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
5311 	    arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
5312 		ret = ath10k_wmi_peer_delete(arvif->ar, arvif->vdev_id,
5313 					     vif->addr);
5314 		if (ret)
5315 			ath10k_warn(ar, "failed to submit AP/IBSS self-peer removal on vdev %i: %d\n",
5316 				    arvif->vdev_id, ret);
5317 
5318 		kfree(arvif->u.ap.noa_data);
5319 	}
5320 
5321 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n",
5322 		   arvif->vdev_id);
5323 
5324 	ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
5325 	if (ret)
5326 		ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n",
5327 			    arvif->vdev_id, ret);
5328 
5329 	/* Some firmware revisions don't notify host about self-peer removal
5330 	 * until after associated vdev is deleted.
5331 	 */
5332 	if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
5333 	    arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
5334 		ret = ath10k_wait_for_peer_deleted(ar, arvif->vdev_id,
5335 						   vif->addr);
5336 		if (ret)
5337 			ath10k_warn(ar, "failed to remove AP self-peer on vdev %i: %d\n",
5338 				    arvif->vdev_id, ret);
5339 
5340 		spin_lock_bh(&ar->data_lock);
5341 		ar->num_peers--;
5342 		spin_unlock_bh(&ar->data_lock);
5343 	}
5344 
5345 	spin_lock_bh(&ar->data_lock);
5346 	for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
5347 		peer = ar->peer_map[i];
5348 		if (!peer)
5349 			continue;
5350 
5351 		if (peer->vif == vif) {
5352 			ath10k_warn(ar, "found vif peer %pM entry on vdev %i after it was supposedly removed\n",
5353 				    vif->addr, arvif->vdev_id);
5354 			peer->vif = NULL;
5355 		}
5356 	}
5357 	spin_unlock_bh(&ar->data_lock);
5358 
5359 	ath10k_peer_cleanup(ar, arvif->vdev_id);
5360 	ath10k_mac_txq_unref(ar, vif->txq);
5361 
5362 	if (vif->type == NL80211_IFTYPE_MONITOR) {
5363 		ar->monitor_arvif = NULL;
5364 		ret = ath10k_monitor_recalc(ar);
5365 		if (ret)
5366 			ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
5367 	}
5368 
5369 	ret = ath10k_mac_txpower_recalc(ar);
5370 	if (ret)
5371 		ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
5372 
5373 	spin_lock_bh(&ar->htt.tx_lock);
5374 	ath10k_mac_vif_tx_unlock_all(arvif);
5375 	spin_unlock_bh(&ar->htt.tx_lock);
5376 
5377 	ath10k_mac_txq_unref(ar, vif->txq);
5378 
5379 	mutex_unlock(&ar->conf_mutex);
5380 }
5381 
5382 /*
5383  * FIXME: Has to be verified.
5384  */
5385 #define SUPPORTED_FILTERS			\
5386 	(FIF_ALLMULTI |				\
5387 	FIF_CONTROL |				\
5388 	FIF_PSPOLL |				\
5389 	FIF_OTHER_BSS |				\
5390 	FIF_BCN_PRBRESP_PROMISC |		\
5391 	FIF_PROBE_REQ |				\
5392 	FIF_FCSFAIL)
5393 
5394 static void ath10k_configure_filter(struct ieee80211_hw *hw,
5395 				    unsigned int changed_flags,
5396 				    unsigned int *total_flags,
5397 				    u64 multicast)
5398 {
5399 	struct ath10k *ar = hw->priv;
5400 	int ret;
5401 
5402 	mutex_lock(&ar->conf_mutex);
5403 
5404 	changed_flags &= SUPPORTED_FILTERS;
5405 	*total_flags &= SUPPORTED_FILTERS;
5406 	ar->filter_flags = *total_flags;
5407 
5408 	ret = ath10k_monitor_recalc(ar);
5409 	if (ret)
5410 		ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
5411 
5412 	mutex_unlock(&ar->conf_mutex);
5413 }
5414 
5415 static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
5416 				    struct ieee80211_vif *vif,
5417 				    struct ieee80211_bss_conf *info,
5418 				    u32 changed)
5419 {
5420 	struct ath10k *ar = hw->priv;
5421 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
5422 	int ret = 0;
5423 	u32 vdev_param, pdev_param, slottime, preamble;
5424 
5425 	mutex_lock(&ar->conf_mutex);
5426 
5427 	if (changed & BSS_CHANGED_IBSS)
5428 		ath10k_control_ibss(arvif, info, vif->addr);
5429 
5430 	if (changed & BSS_CHANGED_BEACON_INT) {
5431 		arvif->beacon_interval = info->beacon_int;
5432 		vdev_param = ar->wmi.vdev_param->beacon_interval;
5433 		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5434 						arvif->beacon_interval);
5435 		ath10k_dbg(ar, ATH10K_DBG_MAC,
5436 			   "mac vdev %d beacon_interval %d\n",
5437 			   arvif->vdev_id, arvif->beacon_interval);
5438 
5439 		if (ret)
5440 			ath10k_warn(ar, "failed to set beacon interval for vdev %d: %i\n",
5441 				    arvif->vdev_id, ret);
5442 	}
5443 
5444 	if (changed & BSS_CHANGED_BEACON) {
5445 		ath10k_dbg(ar, ATH10K_DBG_MAC,
5446 			   "vdev %d set beacon tx mode to staggered\n",
5447 			   arvif->vdev_id);
5448 
5449 		pdev_param = ar->wmi.pdev_param->beacon_tx_mode;
5450 		ret = ath10k_wmi_pdev_set_param(ar, pdev_param,
5451 						WMI_BEACON_STAGGERED_MODE);
5452 		if (ret)
5453 			ath10k_warn(ar, "failed to set beacon mode for vdev %d: %i\n",
5454 				    arvif->vdev_id, ret);
5455 
5456 		ret = ath10k_mac_setup_bcn_tmpl(arvif);
5457 		if (ret)
5458 			ath10k_warn(ar, "failed to update beacon template: %d\n",
5459 				    ret);
5460 
5461 		if (ieee80211_vif_is_mesh(vif)) {
5462 			/* mesh doesn't use SSID but firmware needs it */
5463 			strncpy(arvif->u.ap.ssid, "mesh",
5464 				sizeof(arvif->u.ap.ssid));
5465 			arvif->u.ap.ssid_len = 4;
5466 		}
5467 	}
5468 
5469 	if (changed & BSS_CHANGED_AP_PROBE_RESP) {
5470 		ret = ath10k_mac_setup_prb_tmpl(arvif);
5471 		if (ret)
5472 			ath10k_warn(ar, "failed to setup probe resp template on vdev %i: %d\n",
5473 				    arvif->vdev_id, ret);
5474 	}
5475 
5476 	if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
5477 		arvif->dtim_period = info->dtim_period;
5478 
5479 		ath10k_dbg(ar, ATH10K_DBG_MAC,
5480 			   "mac vdev %d dtim_period %d\n",
5481 			   arvif->vdev_id, arvif->dtim_period);
5482 
5483 		vdev_param = ar->wmi.vdev_param->dtim_period;
5484 		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5485 						arvif->dtim_period);
5486 		if (ret)
5487 			ath10k_warn(ar, "failed to set dtim period for vdev %d: %i\n",
5488 				    arvif->vdev_id, ret);
5489 	}
5490 
5491 	if (changed & BSS_CHANGED_SSID &&
5492 	    vif->type == NL80211_IFTYPE_AP) {
5493 		arvif->u.ap.ssid_len = info->ssid_len;
5494 		if (info->ssid_len)
5495 			memcpy(arvif->u.ap.ssid, info->ssid, info->ssid_len);
5496 		arvif->u.ap.hidden_ssid = info->hidden_ssid;
5497 	}
5498 
5499 	if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid))
5500 		ether_addr_copy(arvif->bssid, info->bssid);
5501 
5502 	if (changed & BSS_CHANGED_BEACON_ENABLED)
5503 		ath10k_control_beaconing(arvif, info);
5504 
5505 	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
5506 		arvif->use_cts_prot = info->use_cts_prot;
5507 
5508 		ret = ath10k_recalc_rtscts_prot(arvif);
5509 		if (ret)
5510 			ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
5511 				    arvif->vdev_id, ret);
5512 
5513 		if (ath10k_mac_can_set_cts_prot(arvif)) {
5514 			ret = ath10k_mac_set_cts_prot(arvif);
5515 			if (ret)
5516 				ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n",
5517 					    arvif->vdev_id, ret);
5518 		}
5519 	}
5520 
5521 	if (changed & BSS_CHANGED_ERP_SLOT) {
5522 		if (info->use_short_slot)
5523 			slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */
5524 		else
5526 			slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */
5527 
5528 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n",
5529 			   arvif->vdev_id, slottime);
5530 
5531 		vdev_param = ar->wmi.vdev_param->slot_time;
5532 		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5533 						slottime);
5534 		if (ret)
5535 			ath10k_warn(ar, "failed to set erp slot for vdev %d: %i\n",
5536 				    arvif->vdev_id, ret);
5537 	}
5538 
5539 	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
5540 		if (info->use_short_preamble)
5541 			preamble = WMI_VDEV_PREAMBLE_SHORT;
5542 		else
5543 			preamble = WMI_VDEV_PREAMBLE_LONG;
5544 
5545 		ath10k_dbg(ar, ATH10K_DBG_MAC,
5546 			   "mac vdev %d preamble %dn",
5547 			   arvif->vdev_id, preamble);
5548 
5549 		vdev_param = ar->wmi.vdev_param->preamble;
5550 		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5551 						preamble);
5552 		if (ret)
5553 			ath10k_warn(ar, "failed to set preamble for vdev %d: %i\n",
5554 				    arvif->vdev_id, ret);
5555 	}
5556 
5557 	if (changed & BSS_CHANGED_ASSOC) {
5558 		if (info->assoc) {
5559 			/* Workaround: Make sure monitor vdev is not running
5560 			 * when associating to prevent some firmware revisions
5561 			 * (e.g. 10.1 and 10.2) from crashing.
5562 			 */
5563 			if (ar->monitor_started)
5564 				ath10k_monitor_stop(ar);
5565 			ath10k_bss_assoc(hw, vif, info);
5566 			ath10k_monitor_recalc(ar);
5567 		} else {
5568 			ath10k_bss_disassoc(hw, vif);
5569 		}
5570 	}
5571 
5572 	if (changed & BSS_CHANGED_TXPOWER) {
5573 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev_id %i txpower %d\n",
5574 			   arvif->vdev_id, info->txpower);
5575 
5576 		arvif->txpower = info->txpower;
5577 		ret = ath10k_mac_txpower_recalc(ar);
5578 		if (ret)
5579 			ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
5580 	}
5581 
5582 	if (changed & BSS_CHANGED_PS) {
5583 		arvif->ps = vif->bss_conf.ps;
5584 
5585 		ret = ath10k_config_ps(ar);
5586 		if (ret)
5587 			ath10k_warn(ar, "failed to setup ps on vdev %i: %d\n",
5588 				    arvif->vdev_id, ret);
5589 	}
5590 
5591 	mutex_unlock(&ar->conf_mutex);
5592 }
5593 
5594 static void ath10k_mac_op_set_coverage_class(struct ieee80211_hw *hw, s16 value)
5595 {
5596 	struct ath10k *ar = hw->priv;
5597 
5598 	/* This function should never be called if setting the coverage class
5599 	 * is not supported on this hardware.
5600 	 */
5601 	if (!ar->hw_params.hw_ops->set_coverage_class) {
5602 		WARN_ON_ONCE(1);
5603 		return;
5604 	}
5605 	ar->hw_params.hw_ops->set_coverage_class(ar, value);
5606 }
5607 
5608 struct ath10k_mac_tdls_iter_data {
5609 	u32 num_tdls_stations;
5610 	struct ieee80211_vif *curr_vif;
5611 };
5612 
5613 static void ath10k_mac_tdls_vif_stations_count_iter(void *data,
5614 						    struct ieee80211_sta *sta)
5615 {
5616 	struct ath10k_mac_tdls_iter_data *iter_data = data;
5617 	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
5618 	struct ieee80211_vif *sta_vif = arsta->arvif->vif;
5619 
5620 	if (sta->tdls && sta_vif == iter_data->curr_vif)
5621 		iter_data->num_tdls_stations++;
5622 }
5623 
5624 static int ath10k_mac_tdls_vif_stations_count(struct ieee80211_hw *hw,
5625 					      struct ieee80211_vif *vif)
5626 {
5627 	struct ath10k_mac_tdls_iter_data data = {};
5628 
5629 	data.curr_vif = vif;
5630 
5631 	ieee80211_iterate_stations_atomic(hw,
5632 					  ath10k_mac_tdls_vif_stations_count_iter,
5633 					  &data);
5634 	return data.num_tdls_stations;
5635 }
5636 
5637 static void ath10k_mac_tdls_vifs_count_iter(void *data, u8 *mac,
5638 					    struct ieee80211_vif *vif)
5639 {
5640 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
5641 	int *num_tdls_vifs = data;
5642 
5643 	if (vif->type != NL80211_IFTYPE_STATION)
5644 		return;
5645 
5646 	if (ath10k_mac_tdls_vif_stations_count(arvif->ar->hw, vif) > 0)
5647 		(*num_tdls_vifs)++;
5648 }
5649 
5650 static int ath10k_mac_tdls_vifs_count(struct ieee80211_hw *hw)
5651 {
5652 	int num_tdls_vifs = 0;
5653 
5654 	ieee80211_iterate_active_interfaces_atomic(hw,
5655 						   IEEE80211_IFACE_ITER_NORMAL,
5656 						   ath10k_mac_tdls_vifs_count_iter,
5657 						   &num_tdls_vifs);
5658 	return num_tdls_vifs;
5659 }
5660 
5661 static int ath10k_hw_scan(struct ieee80211_hw *hw,
5662 			  struct ieee80211_vif *vif,
5663 			  struct ieee80211_scan_request *hw_req)
5664 {
5665 	struct ath10k *ar = hw->priv;
5666 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
5667 	struct cfg80211_scan_request *req = &hw_req->req;
5668 	struct wmi_start_scan_arg arg;
5669 	int ret = 0;
5670 	int i;
5671 
5672 	mutex_lock(&ar->conf_mutex);
5673 
5674 	if (ath10k_mac_tdls_vif_stations_count(hw, vif) > 0) {
5675 		ret = -EBUSY;
5676 		goto exit;
5677 	}
5678 
5679 	spin_lock_bh(&ar->data_lock);
5680 	switch (ar->scan.state) {
5681 	case ATH10K_SCAN_IDLE:
5682 		reinit_completion(&ar->scan.started);
5683 		reinit_completion(&ar->scan.completed);
5684 		ar->scan.state = ATH10K_SCAN_STARTING;
5685 		ar->scan.is_roc = false;
5686 		ar->scan.vdev_id = arvif->vdev_id;
5687 		ret = 0;
5688 		break;
5689 	case ATH10K_SCAN_STARTING:
5690 	case ATH10K_SCAN_RUNNING:
5691 	case ATH10K_SCAN_ABORTING:
5692 		ret = -EBUSY;
5693 		break;
5694 	}
5695 	spin_unlock_bh(&ar->data_lock);
5696 
5697 	if (ret)
5698 		goto exit;
5699 
5700 	memset(&arg, 0, sizeof(arg));
5701 	ath10k_wmi_start_scan_init(ar, &arg);
5702 	arg.vdev_id = arvif->vdev_id;
5703 	arg.scan_id = ATH10K_SCAN_ID;
5704 
5705 	if (req->ie_len) {
5706 		arg.ie_len = req->ie_len;
5707 		memcpy(arg.ie, req->ie, arg.ie_len);
5708 	}
5709 
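	/* Copy the requested SSIDs so firmware can probe for them actively;
	 * with no SSIDs the scan is marked passive below.
	 */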
5710 	if (req->n_ssids) {
5711 		arg.n_ssids = req->n_ssids;
5712 		for (i = 0; i < arg.n_ssids; i++) {
5713 			arg.ssids[i].len  = req->ssids[i].ssid_len;
5714 			arg.ssids[i].ssid = req->ssids[i].ssid;
5715 		}
5716 	} else {
5717 		arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
5718 	}
5719 
5720 	if (req->n_channels) {
5721 		arg.n_channels = req->n_channels;
5722 		for (i = 0; i < arg.n_channels; i++)
5723 			arg.channels[i] = req->channels[i]->center_freq;
5724 	}
5725 
5726 	ret = ath10k_start_scan(ar, &arg);
5727 	if (ret) {
5728 		ath10k_warn(ar, "failed to start hw scan: %d\n", ret);
5729 		spin_lock_bh(&ar->data_lock);
5730 		ar->scan.state = ATH10K_SCAN_IDLE;
5731 		spin_unlock_bh(&ar->data_lock);
5732 	}
5733 
5734 	/* Add a 200ms margin to account for event/command processing */
5735 	ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
5736 				     msecs_to_jiffies(arg.max_scan_time +
5737 						      200));
5738 
5739 exit:
5740 	mutex_unlock(&ar->conf_mutex);
5741 	return ret;
5742 }
5743 
5744 static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw,
5745 				  struct ieee80211_vif *vif)
5746 {
5747 	struct ath10k *ar = hw->priv;
5748 
5749 	mutex_lock(&ar->conf_mutex);
5750 	ath10k_scan_abort(ar);
5751 	mutex_unlock(&ar->conf_mutex);
5752 
5753 	cancel_delayed_work_sync(&ar->scan.timeout);
5754 }
5755 
5756 static void ath10k_set_key_h_def_keyidx(struct ath10k *ar,
5757 					struct ath10k_vif *arvif,
5758 					enum set_key_cmd cmd,
5759 					struct ieee80211_key_conf *key)
5760 {
5761 	u32 vdev_param = arvif->ar->wmi.vdev_param->def_keyid;
5762 	int ret;
5763 
5764 	/* The 10.1 firmware branch requires the default key index to be set
5765 	 * to the group key index after installing it. Otherwise the FW/HW
5766 	 * transmits corrupted frames with multi-vif APs. This is not required
5767 	 * for the main firmware branch (e.g. 636).
5768 	 *
5769 	 * This is also needed for 636 fw for IBSS-RSN to work more reliably.
5770 	 *
5771 	 * FIXME: It remains unknown if this is required for multi-vif STA
5772 	 * interfaces on 10.1.
5773 	 */
5774 
5775 	if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
5776 	    arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
5777 		return;
5778 
5779 	if (key->cipher == WLAN_CIPHER_SUITE_WEP40)
5780 		return;
5781 
5782 	if (key->cipher == WLAN_CIPHER_SUITE_WEP104)
5783 		return;
5784 
5785 	if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
5786 		return;
5787 
5788 	if (cmd != SET_KEY)
5789 		return;
5790 
5791 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5792 					key->keyidx);
5793 	if (ret)
5794 		ath10k_warn(ar, "failed to set vdev %i group key as default key: %d\n",
5795 			    arvif->vdev_id, ret);
5796 }
5797 
5798 static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
5799 			  struct ieee80211_vif *vif, struct ieee80211_sta *sta,
5800 			  struct ieee80211_key_conf *key)
5801 {
5802 	struct ath10k *ar = hw->priv;
5803 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
5804 	struct ath10k_peer *peer;
5805 	const u8 *peer_addr;
5806 	bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
5807 		      key->cipher == WLAN_CIPHER_SUITE_WEP104;
5808 	int ret = 0;
5809 	int ret2;
5810 	u32 flags = 0;
5811 	u32 flags2;
5812 
5813 	/* this one needs to be done in software */
5814 	if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
5815 	    key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
5816 	    key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256 ||
5817 	    key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256)
5818 		return 1;
5819 
5820 	if (arvif->nohwcrypt)
5821 		return 1;
5822 
5823 	if (key->keyidx > WMI_MAX_KEY_INDEX)
5824 		return -ENOSPC;
5825 
5826 	mutex_lock(&ar->conf_mutex);
5827 
5828 	if (sta)
5829 		peer_addr = sta->addr;
5830 	else if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
5831 		peer_addr = vif->bss_conf.bssid;
5832 	else
5833 		peer_addr = vif->addr;
5834 
5835 	key->hw_key_idx = key->keyidx;
5836 
5837 	if (is_wep) {
5838 		if (cmd == SET_KEY)
5839 			arvif->wep_keys[key->keyidx] = key;
5840 		else
5841 			arvif->wep_keys[key->keyidx] = NULL;
5842 	}
5843 
5844 	/* The peer should not disappear midway (unless FW goes awry) since we
5845 	 * already hold conf_mutex. We just make sure it's there now.
5846 	 */
5847 	spin_lock_bh(&ar->data_lock);
5848 	peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
5849 	spin_unlock_bh(&ar->data_lock);
5850 
5851 	if (!peer) {
5852 		if (cmd == SET_KEY) {
5853 			ath10k_warn(ar, "failed to install key for non-existent peer %pM\n",
5854 				    peer_addr);
5855 			ret = -EOPNOTSUPP;
5856 			goto exit;
5857 		} else {
5858 			/* if the peer doesn't exist there is no key to disable anymore */
5859 			goto exit;
5860 		}
5861 	}
5862 
5863 	if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
5864 		flags |= WMI_KEY_PAIRWISE;
5865 	else
5866 		flags |= WMI_KEY_GROUP;
5867 
5868 	if (is_wep) {
5869 		if (cmd == DISABLE_KEY)
5870 			ath10k_clear_vdev_key(arvif, key);
5871 
5872 		/* When WEP keys are uploaded it's possible that there are
5873 		 * stations associated already (e.g. when merging) without any
5874 		 * keys. Static WEP needs an explicit per-peer key upload.
5875 		 */
5876 		if (vif->type == NL80211_IFTYPE_ADHOC &&
5877 		    cmd == SET_KEY)
5878 			ath10k_mac_vif_update_wep_key(arvif, key);
5879 
5880 		/* 802.1x never sets the def_wep_key_idx so each set_key()
5881 		 * call changes the default tx key.
5882 		 *
5883 		 * Static WEP sets def_wep_key_idx via .set_default_unicast_key
5884 		 * after first set_key().
5885 		 */
5886 		if (cmd == SET_KEY && arvif->def_wep_key_idx == -1)
5887 			flags |= WMI_KEY_TX_USAGE;
5888 	}
5889 
5890 	ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags);
5891 	if (ret) {
5892 		WARN_ON(ret > 0);
5893 		ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n",
5894 			    arvif->vdev_id, peer_addr, ret);
5895 		goto exit;
5896 	}
5897 
5898 	/* mac80211 sets static WEP keys as groupwise while firmware requires
5899 	 * them to be installed twice as both pairwise and groupwise.
5900 	 */
5901 	if (is_wep && !sta && vif->type == NL80211_IFTYPE_STATION) {
5902 		flags2 = flags;
5903 		flags2 &= ~WMI_KEY_GROUP;
5904 		flags2 |= WMI_KEY_PAIRWISE;
5905 
5906 		ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags2);
5907 		if (ret) {
5908 			WARN_ON(ret > 0);
5909 			ath10k_warn(ar, "failed to install (ucast) key for vdev %i peer %pM: %d\n",
5910 				    arvif->vdev_id, peer_addr, ret);
5911 			ret2 = ath10k_install_key(arvif, key, DISABLE_KEY,
5912 						  peer_addr, flags);
5913 			if (ret2) {
5914 				WARN_ON(ret2 > 0);
5915 				ath10k_warn(ar, "failed to disable (mcast) key for vdev %i peer %pM: %d\n",
5916 					    arvif->vdev_id, peer_addr, ret2);
5917 			}
5918 			goto exit;
5919 		}
5920 	}
5921 
5922 	ath10k_set_key_h_def_keyidx(ar, arvif, cmd, key);
5923 
5924 	spin_lock_bh(&ar->data_lock);
5925 	peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
5926 	if (peer && cmd == SET_KEY)
5927 		peer->keys[key->keyidx] = key;
5928 	else if (peer && cmd == DISABLE_KEY)
5929 		peer->keys[key->keyidx] = NULL;
5930 	else if (peer == NULL)
5931 		/* impossible unless FW goes crazy */
5932 		ath10k_warn(ar, "Peer %pM disappeared!\n", peer_addr);
5933 	spin_unlock_bh(&ar->data_lock);
5934 
5935 	if (sta && sta->tdls)
5936 		ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
5937 					  WMI_PEER_AUTHORIZE, 1);
5938 
5939 exit:
5940 	mutex_unlock(&ar->conf_mutex);
5941 	return ret;
5942 }
5943 
5944 static void ath10k_set_default_unicast_key(struct ieee80211_hw *hw,
5945 					   struct ieee80211_vif *vif,
5946 					   int keyidx)
5947 {
5948 	struct ath10k *ar = hw->priv;
5949 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
5950 	int ret;
5951 
5952 	mutex_lock(&arvif->ar->conf_mutex);
5953 
5954 	if (arvif->ar->state != ATH10K_STATE_ON)
5955 		goto unlock;
5956 
5957 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n",
5958 		   arvif->vdev_id, keyidx);
5959 
5960 	ret = ath10k_wmi_vdev_set_param(arvif->ar,
5961 					arvif->vdev_id,
5962 					arvif->ar->wmi.vdev_param->def_keyid,
5963 					keyidx);
5964 
5965 	if (ret) {
5966 		ath10k_warn(ar, "failed to update wep key index for vdev %d: %d\n",
5967 			    arvif->vdev_id,
5968 			    ret);
5969 		goto unlock;
5970 	}
5971 
5972 	arvif->def_wep_key_idx = keyidx;
5973 
5974 unlock:
5975 	mutex_unlock(&arvif->ar->conf_mutex);
5976 }
5977 
5978 static void ath10k_sta_rc_update_wk(struct work_struct *wk)
5979 {
5980 	struct ath10k *ar;
5981 	struct ath10k_vif *arvif;
5982 	struct ath10k_sta *arsta;
5983 	struct ieee80211_sta *sta;
5984 	struct cfg80211_chan_def def;
5985 	enum nl80211_band band;
5986 	const u8 *ht_mcs_mask;
5987 	const u16 *vht_mcs_mask;
5988 	u32 changed, bw, nss, smps;
5989 	int err;
5990 
5991 	arsta = container_of(wk, struct ath10k_sta, update_wk);
5992 	sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
5993 	arvif = arsta->arvif;
5994 	ar = arvif->ar;
5995 
5996 	if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
5997 		return;
5998 
5999 	band = def.chan->band;
6000 	ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
6001 	vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
6002 
6003 	spin_lock_bh(&ar->data_lock);
6004 
6005 	changed = arsta->changed;
6006 	arsta->changed = 0;
6007 
6008 	bw = arsta->bw;
6009 	nss = arsta->nss;
6010 	smps = arsta->smps;
6011 
6012 	spin_unlock_bh(&ar->data_lock);
6013 
6014 	mutex_lock(&ar->conf_mutex);
6015 
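	/* Clamp the reported nss to at least one spatial stream and to no
	 * more than the configured HT/VHT MCS masks allow.
	 */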
6016 	nss = max_t(u32, 1, nss);
6017 	nss = min(nss, max(ath10k_mac_max_ht_nss(ht_mcs_mask),
6018 			   ath10k_mac_max_vht_nss(vht_mcs_mask)));
6019 
6020 	if (changed & IEEE80211_RC_BW_CHANGED) {
6021 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n",
6022 			   sta->addr, bw);
6023 
6024 		err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
6025 						WMI_PEER_CHAN_WIDTH, bw);
6026 		if (err)
6027 			ath10k_warn(ar, "failed to update STA %pM peer bw %d: %d\n",
6028 				    sta->addr, bw, err);
6029 	}
6030 
6031 	if (changed & IEEE80211_RC_NSS_CHANGED) {
6032 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM nss %d\n",
6033 			   sta->addr, nss);
6034 
6035 		err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
6036 						WMI_PEER_NSS, nss);
6037 		if (err)
6038 			ath10k_warn(ar, "failed to update STA %pM nss %d: %d\n",
6039 				    sta->addr, nss, err);
6040 	}
6041 
6042 	if (changed & IEEE80211_RC_SMPS_CHANGED) {
6043 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM smps %d\n",
6044 			   sta->addr, smps);
6045 
6046 		err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
6047 						WMI_PEER_SMPS_STATE, smps);
6048 		if (err)
6049 			ath10k_warn(ar, "failed to update STA %pM smps %d: %d\n",
6050 				    sta->addr, smps, err);
6051 	}
6052 
6053 	if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
6054 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates\n",
6055 			   sta->addr);
6056 
6057 		err = ath10k_station_assoc(ar, arvif->vif, sta, true);
6058 		if (err)
6059 			ath10k_warn(ar, "failed to reassociate station: %pM\n",
6060 				    sta->addr);
6061 	}
6062 
6063 	mutex_unlock(&ar->conf_mutex);
6064 }
6065 
6066 static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif,
6067 				       struct ieee80211_sta *sta)
6068 {
6069 	struct ath10k *ar = arvif->ar;
6070 
6071 	lockdep_assert_held(&ar->conf_mutex);
6072 
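	/* On a STA vdev only TDLS peers are counted against the station
	 * limit; the regular (AP) peer is not.
	 */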
6073 	if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
6074 		return 0;
6075 
6076 	if (ar->num_stations >= ar->max_num_stations)
6077 		return -ENOBUFS;
6078 
6079 	ar->num_stations++;
6080 
6081 	return 0;
6082 }
6083 
6084 static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif,
6085 					struct ieee80211_sta *sta)
6086 {
6087 	struct ath10k *ar = arvif->ar;
6088 
6089 	lockdep_assert_held(&ar->conf_mutex);
6090 
6091 	if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
6092 		return;
6093 
6094 	ar->num_stations--;
6095 }
6096 
6097 static int ath10k_sta_state(struct ieee80211_hw *hw,
6098 			    struct ieee80211_vif *vif,
6099 			    struct ieee80211_sta *sta,
6100 			    enum ieee80211_sta_state old_state,
6101 			    enum ieee80211_sta_state new_state)
6102 {
6103 	struct ath10k *ar = hw->priv;
6104 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
6105 	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
6106 	struct ath10k_peer *peer;
6107 	int ret = 0;
6108 	int i;
6109 
6110 	if (old_state == IEEE80211_STA_NOTEXIST &&
6111 	    new_state == IEEE80211_STA_NONE) {
6112 		memset(arsta, 0, sizeof(*arsta));
6113 		arsta->arvif = arvif;
6114 		INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk);
6115 
6116 		for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
6117 			ath10k_mac_txq_init(sta->txq[i]);
6118 	}
6119 
6120 	/* cancel must be done outside the mutex to avoid deadlock */
6121 	if ((old_state == IEEE80211_STA_NONE &&
6122 	     new_state == IEEE80211_STA_NOTEXIST))
6123 		cancel_work_sync(&arsta->update_wk);
6124 
6125 	mutex_lock(&ar->conf_mutex);
6126 
6127 	if (old_state == IEEE80211_STA_NOTEXIST &&
6128 	    new_state == IEEE80211_STA_NONE) {
6129 		/*
6130 		 * New station addition.
6131 		 */
6132 		enum wmi_peer_type peer_type = WMI_PEER_TYPE_DEFAULT;
6133 		u32 num_tdls_stations;
6134 		u32 num_tdls_vifs;
6135 
6136 		ath10k_dbg(ar, ATH10K_DBG_MAC,
6137 			   "mac vdev %d peer create %pM (new sta) sta %d / %d peer %d / %d\n",
6138 			   arvif->vdev_id, sta->addr,
6139 			   ar->num_stations + 1, ar->max_num_stations,
6140 			   ar->num_peers + 1, ar->max_num_peers);
6141 
6142 		num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif);
6143 		num_tdls_vifs = ath10k_mac_tdls_vifs_count(hw);
6144 
6145 		if (sta->tdls) {
6146 			if (num_tdls_stations >= ar->max_num_tdls_vdevs) {
6147 				ath10k_warn(ar, "vdev %i exceeded maximum number of tdls vdevs %i\n",
6148 					    arvif->vdev_id,
6149 					    ar->max_num_tdls_vdevs);
6150 				ret = -ELNRNG;
6151 				goto exit;
6152 			}
6153 			peer_type = WMI_PEER_TYPE_TDLS;
6154 		}
6155 
6156 		ret = ath10k_mac_inc_num_stations(arvif, sta);
6157 		if (ret) {
6158 			ath10k_warn(ar, "refusing to associate station: too many connected already (%d)\n",
6159 				    ar->max_num_stations);
6160 			goto exit;
6161 		}
6162 
6163 		ret = ath10k_peer_create(ar, vif, sta, arvif->vdev_id,
6164 					 sta->addr, peer_type);
6165 		if (ret) {
6166 			ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n",
6167 				    sta->addr, arvif->vdev_id, ret);
6168 			ath10k_mac_dec_num_stations(arvif, sta);
6169 			goto exit;
6170 		}
6171 
6172 		spin_lock_bh(&ar->data_lock);
6173 
6174 		peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr);
6175 		if (!peer) {
6176 			ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n",
6177 				    vif->addr, arvif->vdev_id);
6178 			spin_unlock_bh(&ar->data_lock);
6179 			ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
6180 			ath10k_mac_dec_num_stations(arvif, sta);
6181 			ret = -ENOENT;
6182 			goto exit;
6183 		}
6184 
6185 		arsta->peer_id = find_first_bit(peer->peer_ids,
6186 						ATH10K_MAX_NUM_PEER_IDS);
6187 
6188 		spin_unlock_bh(&ar->data_lock);
6189 
6190 		if (!sta->tdls)
6191 			goto exit;
6192 
6193 		ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
6194 						      WMI_TDLS_ENABLE_ACTIVE);
6195 		if (ret) {
6196 			ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
6197 				    arvif->vdev_id, ret);
6198 			ath10k_peer_delete(ar, arvif->vdev_id,
6199 					   sta->addr);
6200 			ath10k_mac_dec_num_stations(arvif, sta);
6201 			goto exit;
6202 		}
6203 
6204 		ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta,
6205 						  WMI_TDLS_PEER_STATE_PEERING);
6206 		if (ret) {
6207 			ath10k_warn(ar,
6208 				    "failed to update tdls peer %pM for vdev %d when adding a new sta: %i\n",
6209 				    sta->addr, arvif->vdev_id, ret);
6210 			ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
6211 			ath10k_mac_dec_num_stations(arvif, sta);
6212 
6213 			if (num_tdls_stations != 0)
6214 				goto exit;
6215 			ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
6216 							WMI_TDLS_DISABLE);
6217 		}
6218 	} else if ((old_state == IEEE80211_STA_NONE &&
6219 		    new_state == IEEE80211_STA_NOTEXIST)) {
6220 		/*
6221 		 * Existing station deletion.
6222 		 */
6223 		ath10k_dbg(ar, ATH10K_DBG_MAC,
6224 			   "mac vdev %d peer delete %pM sta %pK (sta gone)\n",
6225 			   arvif->vdev_id, sta->addr, sta);
6226 
6227 		if (sta->tdls) {
6228 			ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id,
6229 							  sta,
6230 							  WMI_TDLS_PEER_STATE_TEARDOWN);
6231 			if (ret)
6232 				ath10k_warn(ar, "failed to update tdls peer state for %pM state %d: %i\n",
6233 					    sta->addr,
6234 					    WMI_TDLS_PEER_STATE_TEARDOWN, ret);
6235 		}
6236 
6237 		ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
6238 		if (ret)
6239 			ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n",
6240 				    sta->addr, arvif->vdev_id, ret);
6241 
6242 		ath10k_mac_dec_num_stations(arvif, sta);
6243 
6244 		spin_lock_bh(&ar->data_lock);
6245 		for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
6246 			peer = ar->peer_map[i];
6247 			if (!peer)
6248 				continue;
6249 
6250 			if (peer->sta == sta) {
6251 				ath10k_warn(ar, "found sta peer %pM (ptr %pK id %d) entry on vdev %i after it was supposedly removed\n",
6252 					    sta->addr, peer, i, arvif->vdev_id);
6253 				peer->sta = NULL;
6254 
6255 				/* Clean up the peer object as well since we
6256 				 * must have failed to do this above.
6257 				 */
6258 				list_del(&peer->list);
6259 				ar->peer_map[i] = NULL;
6260 				kfree(peer);
6261 				ar->num_peers--;
6262 			}
6263 		}
6264 		spin_unlock_bh(&ar->data_lock);
6265 
6266 		for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
6267 			ath10k_mac_txq_unref(ar, sta->txq[i]);
6268 
6269 		if (!sta->tdls)
6270 			goto exit;
6271 
6272 		if (ath10k_mac_tdls_vif_stations_count(hw, vif))
6273 			goto exit;
6274 
6275 		/* This was the last tdls peer in the current vif */
6276 		ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
6277 						      WMI_TDLS_DISABLE);
6278 		if (ret) {
6279 			ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
6280 				    arvif->vdev_id, ret);
6281 		}
6282 	} else if (old_state == IEEE80211_STA_AUTH &&
6283 		   new_state == IEEE80211_STA_ASSOC &&
6284 		   (vif->type == NL80211_IFTYPE_AP ||
6285 		    vif->type == NL80211_IFTYPE_MESH_POINT ||
6286 		    vif->type == NL80211_IFTYPE_ADHOC)) {
6287 		/*
6288 		 * New association.
6289 		 */
6290 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM associated\n",
6291 			   sta->addr);
6292 
6293 		ret = ath10k_station_assoc(ar, vif, sta, false);
6294 		if (ret)
6295 			ath10k_warn(ar, "failed to associate station %pM for vdev %i: %i\n",
6296 				    sta->addr, arvif->vdev_id, ret);
6297 	} else if (old_state == IEEE80211_STA_ASSOC &&
6298 		   new_state == IEEE80211_STA_AUTHORIZED &&
6299 		   sta->tdls) {
6300 		/*
6301 		 * TDLS station authorized.
6302 		 */
6303 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac tdls sta %pM authorized\n",
6304 			   sta->addr);
6305 
6306 		ret = ath10k_station_assoc(ar, vif, sta, false);
6307 		if (ret) {
6308 			ath10k_warn(ar, "failed to associate tdls station %pM for vdev %i: %i\n",
6309 				    sta->addr, arvif->vdev_id, ret);
6310 			goto exit;
6311 		}
6312 
6313 		ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta,
6314 						  WMI_TDLS_PEER_STATE_CONNECTED);
6315 		if (ret)
6316 			ath10k_warn(ar, "failed to update tdls peer %pM for vdev %i: %i\n",
6317 				    sta->addr, arvif->vdev_id, ret);
6318 	} else if (old_state == IEEE80211_STA_ASSOC &&
6319 		    new_state == IEEE80211_STA_AUTH &&
6320 		    (vif->type == NL80211_IFTYPE_AP ||
6321 		     vif->type == NL80211_IFTYPE_MESH_POINT ||
6322 		     vif->type == NL80211_IFTYPE_ADHOC)) {
6323 		/*
6324 		 * Disassociation.
6325 		 */
6326 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM disassociated\n",
6327 			   sta->addr);
6328 
6329 		ret = ath10k_station_disassoc(ar, vif, sta);
6330 		if (ret)
6331 			ath10k_warn(ar, "failed to disassociate station: %pM vdev %i: %i\n",
6332 				    sta->addr, arvif->vdev_id, ret);
6333 	}
6334 exit:
6335 	mutex_unlock(&ar->conf_mutex);
6336 	return ret;
6337 }
6338 
6339 static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
6340 				u16 ac, bool enable)
6341 {
6342 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
6343 	struct wmi_sta_uapsd_auto_trig_arg arg = {};
6344 	u32 prio = 0, acc = 0;
6345 	u32 value = 0;
6346 	int ret = 0;
6347 
6348 	lockdep_assert_held(&ar->conf_mutex);
6349 
6350 	if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
6351 		return 0;
6352 
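	/* Map the mac80211 AC to the firmware's per-AC delivery/trigger
	 * enable bits, along with the user priority and AC index used for
	 * the auto-trigger argument further below.
	 */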
6353 	switch (ac) {
6354 	case IEEE80211_AC_VO:
6355 		value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN |
6356 			WMI_STA_PS_UAPSD_AC3_TRIGGER_EN;
6357 		prio = 7;
6358 		acc = 3;
6359 		break;
6360 	case IEEE80211_AC_VI:
6361 		value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN |
6362 			WMI_STA_PS_UAPSD_AC2_TRIGGER_EN;
6363 		prio = 5;
6364 		acc = 2;
6365 		break;
6366 	case IEEE80211_AC_BE:
6367 		value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN |
6368 			WMI_STA_PS_UAPSD_AC1_TRIGGER_EN;
6369 		prio = 2;
6370 		acc = 1;
6371 		break;
6372 	case IEEE80211_AC_BK:
6373 		value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN |
6374 			WMI_STA_PS_UAPSD_AC0_TRIGGER_EN;
6375 		prio = 0;
6376 		acc = 0;
6377 		break;
6378 	}
6379 
6380 	if (enable)
6381 		arvif->u.sta.uapsd |= value;
6382 	else
6383 		arvif->u.sta.uapsd &= ~value;
6384 
6385 	ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
6386 					  WMI_STA_PS_PARAM_UAPSD,
6387 					  arvif->u.sta.uapsd);
6388 	if (ret) {
6389 		ath10k_warn(ar, "failed to set uapsd params: %d\n", ret);
6390 		goto exit;
6391 	}
6392 
6393 	if (arvif->u.sta.uapsd)
6394 		value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD;
6395 	else
6396 		value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
6397 
6398 	ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
6399 					  WMI_STA_PS_PARAM_RX_WAKE_POLICY,
6400 					  value);
6401 	if (ret)
6402 		ath10k_warn(ar, "failed to set rx wake param: %d\n", ret);
6403 
6404 	ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif);
6405 	if (ret) {
6406 		ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n",
6407 			    arvif->vdev_id, ret);
6408 		return ret;
6409 	}
6410 
6411 	ret = ath10k_mac_vif_recalc_ps_poll_count(arvif);
6412 	if (ret) {
6413 		ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n",
6414 			    arvif->vdev_id, ret);
6415 		return ret;
6416 	}
6417 
6418 	if (test_bit(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, ar->wmi.svc_map) ||
6419 	    test_bit(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, ar->wmi.svc_map)) {
6420 		/* Only userspace can make an educated decision when to send
6421 		 * a trigger frame. The following effectively disables U-APSD
6422 		 * autotrigger in firmware (which is enabled by default
6423 		 * provided the autotrigger service is available).
6424 		 */
6425 
6426 		arg.wmm_ac = acc;
6427 		arg.user_priority = prio;
6428 		arg.service_interval = 0;
6429 		arg.suspend_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC;
6430 		arg.delay_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC;
6431 
6432 		ret = ath10k_wmi_vdev_sta_uapsd(ar, arvif->vdev_id,
6433 						arvif->bssid, &arg, 1);
6434 		if (ret) {
6435 			ath10k_warn(ar, "failed to set uapsd auto trigger %d\n",
6436 				    ret);
6437 			return ret;
6438 		}
6439 	}
6440 
6441 exit:
6442 	return ret;
6443 }
6444 
6445 static int ath10k_conf_tx(struct ieee80211_hw *hw,
6446 			  struct ieee80211_vif *vif, u16 ac,
6447 			  const struct ieee80211_tx_queue_params *params)
6448 {
6449 	struct ath10k *ar = hw->priv;
6450 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
6451 	struct wmi_wmm_params_arg *p = NULL;
6452 	int ret;
6453 
6454 	mutex_lock(&ar->conf_mutex);
6455 
6456 	switch (ac) {
6457 	case IEEE80211_AC_VO:
6458 		p = &arvif->wmm_params.ac_vo;
6459 		break;
6460 	case IEEE80211_AC_VI:
6461 		p = &arvif->wmm_params.ac_vi;
6462 		break;
6463 	case IEEE80211_AC_BE:
6464 		p = &arvif->wmm_params.ac_be;
6465 		break;
6466 	case IEEE80211_AC_BK:
6467 		p = &arvif->wmm_params.ac_bk;
6468 		break;
6469 	}
6470 
6471 	if (WARN_ON(!p)) {
6472 		ret = -EINVAL;
6473 		goto exit;
6474 	}
6475 
6476 	p->cwmin = params->cw_min;
6477 	p->cwmax = params->cw_max;
6478 	p->aifs = params->aifs;
6479 
6480 	/*
6481 	 * The channel time duration programmed in the HW is in absolute
6482 	 * microseconds, while mac80211 gives the txop in units of
6483 	 * 32 microseconds.
6484 	 */
6485 	p->txop = params->txop * 32;
6486 
6487 	if (ar->wmi.ops->gen_vdev_wmm_conf) {
6488 		ret = ath10k_wmi_vdev_wmm_conf(ar, arvif->vdev_id,
6489 					       &arvif->wmm_params);
6490 		if (ret) {
6491 			ath10k_warn(ar, "failed to set vdev wmm params on vdev %i: %d\n",
6492 				    arvif->vdev_id, ret);
6493 			goto exit;
6494 		}
6495 	} else {
6496 		/* This won't work well with multi-interface cases but it's
6497 		 * better than nothing.
6498 		 */
6499 		ret = ath10k_wmi_pdev_set_wmm_params(ar, &arvif->wmm_params);
6500 		if (ret) {
6501 			ath10k_warn(ar, "failed to set wmm params: %d\n", ret);
6502 			goto exit;
6503 		}
6504 	}
6505 
6506 	ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
6507 	if (ret)
6508 		ath10k_warn(ar, "failed to set sta uapsd: %d\n", ret);
6509 
6510 exit:
6511 	mutex_unlock(&ar->conf_mutex);
6512 	return ret;
6513 }
6514 
6515 #define ATH10K_ROC_TIMEOUT_HZ (2 * HZ)
6516 
6517 static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
6518 				    struct ieee80211_vif *vif,
6519 				    struct ieee80211_channel *chan,
6520 				    int duration,
6521 				    enum ieee80211_roc_type type)
6522 {
6523 	struct ath10k *ar = hw->priv;
6524 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
6525 	struct wmi_start_scan_arg arg;
6526 	int ret = 0;
6527 	u32 scan_time_msec;
6528 
6529 	mutex_lock(&ar->conf_mutex);
6530 
6531 	if (ath10k_mac_tdls_vif_stations_count(hw, vif) > 0) {
6532 		ret = -EBUSY;
6533 		goto exit;
6534 	}
6535 
6536 	spin_lock_bh(&ar->data_lock);
6537 	switch (ar->scan.state) {
6538 	case ATH10K_SCAN_IDLE:
6539 		reinit_completion(&ar->scan.started);
6540 		reinit_completion(&ar->scan.completed);
6541 		reinit_completion(&ar->scan.on_channel);
6542 		ar->scan.state = ATH10K_SCAN_STARTING;
6543 		ar->scan.is_roc = true;
6544 		ar->scan.vdev_id = arvif->vdev_id;
6545 		ar->scan.roc_freq = chan->center_freq;
6546 		ar->scan.roc_notify = true;
6547 		ret = 0;
6548 		break;
6549 	case ATH10K_SCAN_STARTING:
6550 	case ATH10K_SCAN_RUNNING:
6551 	case ATH10K_SCAN_ABORTING:
6552 		ret = -EBUSY;
6553 		break;
6554 	}
6555 	spin_unlock_bh(&ar->data_lock);
6556 
6557 	if (ret)
6558 		goto exit;
6559 
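	/* Use a dwell/scan time well beyond any allowed remain-on-channel
	 * duration; the on-channel period itself is bounded by the delayed
	 * timeout work queued further below.
	 */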
6560 	scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2;
6561 
6562 	memset(&arg, 0, sizeof(arg));
6563 	ath10k_wmi_start_scan_init(ar, &arg);
6564 	arg.vdev_id = arvif->vdev_id;
6565 	arg.scan_id = ATH10K_SCAN_ID;
6566 	arg.n_channels = 1;
6567 	arg.channels[0] = chan->center_freq;
6568 	arg.dwell_time_active = scan_time_msec;
6569 	arg.dwell_time_passive = scan_time_msec;
6570 	arg.max_scan_time = scan_time_msec;
6571 	arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
6572 	arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ;
6573 	arg.burst_duration_ms = duration;
6574 
6575 	ret = ath10k_start_scan(ar, &arg);
6576 	if (ret) {
6577 		ath10k_warn(ar, "failed to start roc scan: %d\n", ret);
6578 		spin_lock_bh(&ar->data_lock);
6579 		ar->scan.state = ATH10K_SCAN_IDLE;
6580 		spin_unlock_bh(&ar->data_lock);
6581 		goto exit;
6582 	}
6583 
6584 	ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ);
6585 	if (ret == 0) {
6586 		ath10k_warn(ar, "failed to switch to channel for roc scan\n");
6587 
6588 		ret = ath10k_scan_stop(ar);
6589 		if (ret)
6590 			ath10k_warn(ar, "failed to stop scan: %d\n", ret);
6591 
6592 		ret = -ETIMEDOUT;
6593 		goto exit;
6594 	}
6595 
6596 	ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
6597 				     msecs_to_jiffies(duration));
6598 
6599 	ret = 0;
6600 exit:
6601 	mutex_unlock(&ar->conf_mutex);
6602 	return ret;
6603 }
6604 
6605 static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw)
6606 {
6607 	struct ath10k *ar = hw->priv;
6608 
6609 	mutex_lock(&ar->conf_mutex);
6610 
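	/* Clearing roc_notify before aborting is intended to keep the scan
	 * completion path from reporting a remain-on-channel expiry for an
	 * operation mac80211 is already cancelling.
	 */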
6611 	spin_lock_bh(&ar->data_lock);
6612 	ar->scan.roc_notify = false;
6613 	spin_unlock_bh(&ar->data_lock);
6614 
6615 	ath10k_scan_abort(ar);
6616 
6617 	mutex_unlock(&ar->conf_mutex);
6618 
6619 	cancel_delayed_work_sync(&ar->scan.timeout);
6620 
6621 	return 0;
6622 }
6623 
6624 /*
6625  * Both RTS and Fragmentation threshold are interface-specific
6626  * in ath10k, but device-specific in mac80211.
6627  */
6628 
6629 static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
6630 {
6631 	struct ath10k *ar = hw->priv;
6632 	struct ath10k_vif *arvif;
6633 	int ret = 0;
6634 
6635 	mutex_lock(&ar->conf_mutex);
6636 	list_for_each_entry(arvif, &ar->arvifs, list) {
6637 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n",
6638 			   arvif->vdev_id, value);
6639 
6640 		ret = ath10k_mac_set_rts(arvif, value);
6641 		if (ret) {
6642 			ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
6643 				    arvif->vdev_id, ret);
6644 			break;
6645 		}
6646 	}
6647 	mutex_unlock(&ar->conf_mutex);
6648 
6649 	return ret;
6650 }
6651 
6652 static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
6653 {
6654 	/* Even though there's a WMI enum for fragmentation threshold no known
6655 	 * firmware actually implements it. Moreover it is not possible to
6656 	 * delegate frame fragmentation to mac80211 because firmware clears the
6657 	 * "more fragments" bit in frame control, making it impossible for remote
6658 	 * devices to reassemble frames.
6659 	 *
6660 	 * Hence implement a dummy callback just to say fragmentation isn't
6661 	 * supported. This effectively prevents mac80211 from doing frame
6662 	 * fragmentation in software.
6663 	 */
6664 	return -EOPNOTSUPP;
6665 }
6666 
6667 static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
6668 			 u32 queues, bool drop)
6669 {
6670 	struct ath10k *ar = hw->priv;
6671 	bool skip;
6672 	long time_left;
6673 
6674 	/* mac80211 doesn't care whether we really transmit the queued frames;
6675 	 * we'll collect those frames either way if we stop/delete vdevs.
6676 	 */
6677 	if (drop)
6678 		return;
6679 
6680 	mutex_lock(&ar->conf_mutex);
6681 
6682 	if (ar->state == ATH10K_STATE_WEDGED)
6683 		goto skip;
6684 
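	/* Wait until all pending HTT tx frames have completed, or bail out
	 * early if the device wedges or a crash flush is flagged.
	 */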
6685 	time_left = wait_event_timeout(ar->htt.empty_tx_wq, ({
6686 			bool empty;
6687 
6688 			spin_lock_bh(&ar->htt.tx_lock);
6689 			empty = (ar->htt.num_pending_tx == 0);
6690 			spin_unlock_bh(&ar->htt.tx_lock);
6691 
6692 			skip = (ar->state == ATH10K_STATE_WEDGED) ||
6693 			       test_bit(ATH10K_FLAG_CRASH_FLUSH,
6694 					&ar->dev_flags);
6695 
6696 			(empty || skip);
6697 		}), ATH10K_FLUSH_TIMEOUT_HZ);
6698 
6699 	if (time_left == 0 || skip)
6700 		ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %ld\n",
6701 			    skip, ar->state, time_left);
6702 
6703 skip:
6704 	mutex_unlock(&ar->conf_mutex);
6705 }
6706 
6707 /* TODO: Implement this function properly
6708  * For now it is needed to reply to Probe Requests in IBSS mode.
6709  * Probably we need this information from FW.
6710  */
6711 static int ath10k_tx_last_beacon(struct ieee80211_hw *hw)
6712 {
6713 	return 1;
6714 }
6715 
6716 static void ath10k_reconfig_complete(struct ieee80211_hw *hw,
6717 				     enum ieee80211_reconfig_type reconfig_type)
6718 {
6719 	struct ath10k *ar = hw->priv;
6720 
6721 	if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
6722 		return;
6723 
6724 	mutex_lock(&ar->conf_mutex);
6725 
6726 	/* If device failed to restart it will be in a different state, e.g.
6727 	 * ATH10K_STATE_WEDGED
6728 	 */
6729 	if (ar->state == ATH10K_STATE_RESTARTED) {
6730 		ath10k_info(ar, "device successfully recovered\n");
6731 		ar->state = ATH10K_STATE_ON;
6732 		ieee80211_wake_queues(ar->hw);
6733 	}
6734 
6735 	mutex_unlock(&ar->conf_mutex);
6736 }
6737 
6738 static void
6739 ath10k_mac_update_bss_chan_survey(struct ath10k *ar,
6740 				  struct ieee80211_channel *channel)
6741 {
6742 	int ret;
6743 	enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR;
6744 
6745 	lockdep_assert_held(&ar->conf_mutex);
6746 
6747 	if (!test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map) ||
6748 	    (ar->rx_channel != channel))
6749 		return;
6750 
6751 	if (ar->scan.state != ATH10K_SCAN_IDLE) {
6752 		ath10k_dbg(ar, ATH10K_DBG_MAC, "ignoring bss chan info request while scanning..\n");
6753 		return;
6754 	}
6755 
6756 	reinit_completion(&ar->bss_survey_done);
6757 
6758 	ret = ath10k_wmi_pdev_bss_chan_info_request(ar, type);
6759 	if (ret) {
6760 		ath10k_warn(ar, "failed to send pdev bss chan info request\n");
6761 		return;
6762 	}
6763 
6764 	ret = wait_for_completion_timeout(&ar->bss_survey_done, 3 * HZ);
6765 	if (!ret) {
6766 		ath10k_warn(ar, "bss channel survey timed out\n");
6767 		return;
6768 	}
6769 }
6770 
6771 static int ath10k_get_survey(struct ieee80211_hw *hw, int idx,
6772 			     struct survey_info *survey)
6773 {
6774 	struct ath10k *ar = hw->priv;
6775 	struct ieee80211_supported_band *sband;
6776 	struct survey_info *ar_survey = &ar->survey[idx];
6777 	int ret = 0;
6778 
6779 	mutex_lock(&ar->conf_mutex);
6780 
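	/* The survey index is global: 2 GHz channels come first, followed by
	 * the 5 GHz channels.
	 */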
6781 	sband = hw->wiphy->bands[NL80211_BAND_2GHZ];
6782 	if (sband && idx >= sband->n_channels) {
6783 		idx -= sband->n_channels;
6784 		sband = NULL;
6785 	}
6786 
6787 	if (!sband)
6788 		sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
6789 
6790 	if (!sband || idx >= sband->n_channels) {
6791 		ret = -ENOENT;
6792 		goto exit;
6793 	}
6794 
6795 	ath10k_mac_update_bss_chan_survey(ar, &sband->channels[idx]);
6796 
6797 	spin_lock_bh(&ar->data_lock);
6798 	memcpy(survey, ar_survey, sizeof(*survey));
6799 	spin_unlock_bh(&ar->data_lock);
6800 
6801 	survey->channel = &sband->channels[idx];
6802 
6803 	if (ar->rx_channel == survey->channel)
6804 		survey->filled |= SURVEY_INFO_IN_USE;
6805 
6806 exit:
6807 	mutex_unlock(&ar->conf_mutex);
6808 	return ret;
6809 }
6810 
6811 static bool
6812 ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar,
6813 					enum nl80211_band band,
6814 					const struct cfg80211_bitrate_mask *mask)
6815 {
6816 	int num_rates = 0;
6817 	int i;
6818 
6819 	num_rates += hweight32(mask->control[band].legacy);
6820 
6821 	for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++)
6822 		num_rates += hweight8(mask->control[band].ht_mcs[i]);
6823 
6824 	for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++)
6825 		num_rates += hweight16(mask->control[band].vht_mcs[i]);
6826 
6827 	return num_rates == 1;
6828 }
6829 
6830 static bool
6831 ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar,
6832 				       enum nl80211_band band,
6833 				       const struct cfg80211_bitrate_mask *mask,
6834 				       int *nss)
6835 {
6836 	struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
6837 	u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
6838 	u8 ht_nss_mask = 0;
6839 	u8 vht_nss_mask = 0;
6840 	int i;
6841 
6842 	if (mask->control[band].legacy)
6843 		return false;
6844 
6845 	for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
6846 		if (mask->control[band].ht_mcs[i] == 0)
6847 			continue;
6848 		else if (mask->control[band].ht_mcs[i] ==
6849 			 sband->ht_cap.mcs.rx_mask[i])
6850 			ht_nss_mask |= BIT(i);
6851 		else
6852 			return false;
6853 	}
6854 
6855 	for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
6856 		if (mask->control[band].vht_mcs[i] == 0)
6857 			continue;
6858 		else if (mask->control[band].vht_mcs[i] ==
6859 			 ath10k_mac_get_max_vht_mcs_map(vht_mcs_map, i))
6860 			vht_nss_mask |= BIT(i);
6861 		else
6862 			return false;
6863 	}
6864 
6865 	if (ht_nss_mask != vht_nss_mask)
6866 		return false;
6867 
6868 	if (ht_nss_mask == 0)
6869 		return false;
6870 
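	/* Accept only nss masks that are contiguous from the first spatial
	 * stream (0x1, 0x3, 0x7, ...) since a single nss value is all that
	 * can be conveyed to the firmware.
	 */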
6871 	if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask)
6872 		return false;
6873 
6874 	*nss = fls(ht_nss_mask);
6875 
6876 	return true;
6877 }
6878 
6879 static int
6880 ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar,
6881 					enum nl80211_band band,
6882 					const struct cfg80211_bitrate_mask *mask,
6883 					u8 *rate, u8 *nss)
6884 {
6885 	struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
6886 	int rate_idx;
6887 	int i;
6888 	u16 bitrate;
6889 	u8 preamble;
6890 	u8 hw_rate;
6891 
6892 	if (hweight32(mask->control[band].legacy) == 1) {
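	/* The rate code built below packs the preamble type into bits 7:6,
	 * (nss - 1) into bits 5:4 and the hardware rate or MCS index into
	 * bits 3:0.
	 */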
6893 		rate_idx = ffs(mask->control[band].legacy) - 1;
6894 
6895 		hw_rate = sband->bitrates[rate_idx].hw_value;
6896 		bitrate = sband->bitrates[rate_idx].bitrate;
6897 
6898 		if (ath10k_mac_bitrate_is_cck(bitrate))
6899 			preamble = WMI_RATE_PREAMBLE_CCK;
6900 		else
6901 			preamble = WMI_RATE_PREAMBLE_OFDM;
6902 
6903 		*nss = 1;
6904 		*rate = preamble << 6 |
6905 			(*nss - 1) << 4 |
6906 			hw_rate << 0;
6907 
6908 		return 0;
6909 	}
6910 
6911 	for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
6912 		if (hweight8(mask->control[band].ht_mcs[i]) == 1) {
6913 			*nss = i + 1;
6914 			*rate = WMI_RATE_PREAMBLE_HT << 6 |
6915 				(*nss - 1) << 4 |
6916 				(ffs(mask->control[band].ht_mcs[i]) - 1);
6917 
6918 			return 0;
6919 		}
6920 	}
6921 
6922 	for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
6923 		if (hweight16(mask->control[band].vht_mcs[i]) == 1) {
6924 			*nss = i + 1;
6925 			*rate = WMI_RATE_PREAMBLE_VHT << 6 |
6926 				(*nss - 1) << 4 |
6927 				(ffs(mask->control[band].vht_mcs[i]) - 1);
6928 
6929 			return 0;
6930 		}
6931 	}
6932 
6933 	return -EINVAL;
6934 }
6935 
6936 static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif,
6937 					    u8 rate, u8 nss, u8 sgi, u8 ldpc)
6938 {
6939 	struct ath10k *ar = arvif->ar;
6940 	u32 vdev_param;
6941 	int ret;
6942 
6943 	lockdep_assert_held(&ar->conf_mutex);
6944 
6945 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02hhx nss %hhu sgi %hhu\n",
6946 		   arvif->vdev_id, rate, nss, sgi);
6947 
6948 	vdev_param = ar->wmi.vdev_param->fixed_rate;
6949 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, rate);
6950 	if (ret) {
6951 		ath10k_warn(ar, "failed to set fixed rate param 0x%02x: %d\n",
6952 			    rate, ret);
6953 		return ret;
6954 	}
6955 
6956 	vdev_param = ar->wmi.vdev_param->nss;
6957 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, nss);
6958 	if (ret) {
6959 		ath10k_warn(ar, "failed to set nss param %d: %d\n", nss, ret);
6960 		return ret;
6961 	}
6962 
6963 	vdev_param = ar->wmi.vdev_param->sgi;
6964 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, sgi);
6965 	if (ret) {
6966 		ath10k_warn(ar, "failed to set sgi param %d: %d\n", sgi, ret);
6967 		return ret;
6968 	}
6969 
6970 	vdev_param = ar->wmi.vdev_param->ldpc;
6971 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, ldpc);
6972 	if (ret) {
6973 		ath10k_warn(ar, "failed to set ldpc param %d: %d\n", ldpc, ret);
6974 		return ret;
6975 	}
6976 
6977 	return 0;
6978 }
6979 
6980 static bool
6981 ath10k_mac_can_set_bitrate_mask(struct ath10k *ar,
6982 				enum nl80211_band band,
6983 				const struct cfg80211_bitrate_mask *mask)
6984 {
6985 	int i;
6986 	u16 vht_mcs;
6987 
6988 	/* Due to firmware limitation in WMI_PEER_ASSOC_CMDID it is impossible
6989 	 * to express all VHT MCS rate masks. Effectively only the following
6990 	 * ranges can be used: none, 0-7, 0-8 and 0-9.
6991 	 */
6992 	for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
6993 		vht_mcs = mask->control[band].vht_mcs[i];
6994 
6995 		switch (vht_mcs) {
6996 		case 0:
6997 		case BIT(8) - 1:
6998 		case BIT(9) - 1:
6999 		case BIT(10) - 1:
7000 			break;
7001 		default:
7002 			ath10k_warn(ar, "refusing bitrate mask with missing 0-7 VHT MCS rates\n");
7003 			return false;
7004 		}
7005 	}
7006 
7007 	return true;
7008 }
7009 
7010 static void ath10k_mac_set_bitrate_mask_iter(void *data,
7011 					     struct ieee80211_sta *sta)
7012 {
7013 	struct ath10k_vif *arvif = data;
7014 	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
7015 	struct ath10k *ar = arvif->ar;
7016 
7017 	if (arsta->arvif != arvif)
7018 		return;
7019 
7020 	spin_lock_bh(&ar->data_lock);
7021 	arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
7022 	spin_unlock_bh(&ar->data_lock);
7023 
7024 	ieee80211_queue_work(ar->hw, &arsta->update_wk);
7025 }
7026 
7027 static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
7028 					  struct ieee80211_vif *vif,
7029 					  const struct cfg80211_bitrate_mask *mask)
7030 {
7031 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
7032 	struct cfg80211_chan_def def;
7033 	struct ath10k *ar = arvif->ar;
7034 	enum nl80211_band band;
7035 	const u8 *ht_mcs_mask;
7036 	const u16 *vht_mcs_mask;
7037 	u8 rate;
7038 	u8 nss;
7039 	u8 sgi;
7040 	u8 ldpc;
7041 	int single_nss;
7042 	int ret;
7043 
7044 	if (ath10k_mac_vif_chan(vif, &def))
7045 		return -EPERM;
7046 
7047 	band = def.chan->band;
7048 	ht_mcs_mask = mask->control[band].ht_mcs;
7049 	vht_mcs_mask = mask->control[band].vht_mcs;
7050 	ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC);
7051 
7052 	sgi = mask->control[band].gi;
7053 	if (sgi == NL80211_TXRATE_FORCE_LGI)
7054 		return -EINVAL;
7055 
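	/* Three cases are handled: a mask selecting exactly one rate is
	 * programmed as a fixed rate, a mask that only pins the number of
	 * spatial streams fixes nss alone, and any other mask is stored and
	 * applied per peer via the reassociation work.
	 */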
7056 	if (ath10k_mac_bitrate_mask_has_single_rate(ar, band, mask)) {
7057 		ret = ath10k_mac_bitrate_mask_get_single_rate(ar, band, mask,
7058 							      &rate, &nss);
7059 		if (ret) {
7060 			ath10k_warn(ar, "failed to get single rate for vdev %i: %d\n",
7061 				    arvif->vdev_id, ret);
7062 			return ret;
7063 		}
7064 	} else if (ath10k_mac_bitrate_mask_get_single_nss(ar, band, mask,
7065 							  &single_nss)) {
7066 		rate = WMI_FIXED_RATE_NONE;
7067 		nss = single_nss;
7068 	} else {
7069 		rate = WMI_FIXED_RATE_NONE;
7070 		nss = min(ar->num_rf_chains,
7071 			  max(ath10k_mac_max_ht_nss(ht_mcs_mask),
7072 			      ath10k_mac_max_vht_nss(vht_mcs_mask)));
7073 
7074 		if (!ath10k_mac_can_set_bitrate_mask(ar, band, mask))
7075 			return -EINVAL;
7076 
7077 		mutex_lock(&ar->conf_mutex);
7078 
7079 		arvif->bitrate_mask = *mask;
7080 		ieee80211_iterate_stations_atomic(ar->hw,
7081 						  ath10k_mac_set_bitrate_mask_iter,
7082 						  arvif);
7083 
7084 		mutex_unlock(&ar->conf_mutex);
7085 	}
7086 
7087 	mutex_lock(&ar->conf_mutex);
7088 
7089 	ret = ath10k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc);
7090 	if (ret) {
7091 		ath10k_warn(ar, "failed to set fixed rate params on vdev %i: %d\n",
7092 			    arvif->vdev_id, ret);
7093 		goto exit;
7094 	}
7095 
7096 exit:
7097 	mutex_unlock(&ar->conf_mutex);
7098 
7099 	return ret;
7100 }
7101 
7102 static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
7103 				 struct ieee80211_vif *vif,
7104 				 struct ieee80211_sta *sta,
7105 				 u32 changed)
7106 {
7107 	struct ath10k *ar = hw->priv;
7108 	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
7109 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
7110 	struct ath10k_peer *peer;
7111 	u32 bw, smps;
7112 
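	/* Only record what changed under data_lock; the actual WMI updates
	 * are issued from ath10k_sta_rc_update_wk, which can take conf_mutex
	 * and sleep.
	 */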
7113 	spin_lock_bh(&ar->data_lock);
7114 
7115 	peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr);
7116 	if (!peer) {
7117 		spin_unlock_bh(&ar->data_lock);
7118 		ath10k_warn(ar, "mac sta rc update failed to find peer %pM on vdev %i\n",
7119 			    sta->addr, arvif->vdev_id);
7120 		return;
7121 	}
7122 
7123 	ath10k_dbg(ar, ATH10K_DBG_MAC,
7124 		   "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
7125 		   sta->addr, changed, sta->bandwidth, sta->rx_nss,
7126 		   sta->smps_mode);
7127 
7128 	if (changed & IEEE80211_RC_BW_CHANGED) {
7129 		bw = WMI_PEER_CHWIDTH_20MHZ;
7130 
7131 		switch (sta->bandwidth) {
7132 		case IEEE80211_STA_RX_BW_20:
7133 			bw = WMI_PEER_CHWIDTH_20MHZ;
7134 			break;
7135 		case IEEE80211_STA_RX_BW_40:
7136 			bw = WMI_PEER_CHWIDTH_40MHZ;
7137 			break;
7138 		case IEEE80211_STA_RX_BW_80:
7139 			bw = WMI_PEER_CHWIDTH_80MHZ;
7140 			break;
7141 		case IEEE80211_STA_RX_BW_160:
7142 			bw = WMI_PEER_CHWIDTH_160MHZ;
7143 			break;
7144 		default:
7145 			ath10k_warn(ar, "Invalid bandwidth %d in rc update for %pM\n",
7146 				    sta->bandwidth, sta->addr);
7147 			bw = WMI_PEER_CHWIDTH_20MHZ;
7148 			break;
7149 		}
7150 
7151 		arsta->bw = bw;
7152 	}
7153 
7154 	if (changed & IEEE80211_RC_NSS_CHANGED)
7155 		arsta->nss = sta->rx_nss;
7156 
7157 	if (changed & IEEE80211_RC_SMPS_CHANGED) {
7158 		smps = WMI_PEER_SMPS_PS_NONE;
7159 
7160 		switch (sta->smps_mode) {
7161 		case IEEE80211_SMPS_AUTOMATIC:
7162 		case IEEE80211_SMPS_OFF:
7163 			smps = WMI_PEER_SMPS_PS_NONE;
7164 			break;
7165 		case IEEE80211_SMPS_STATIC:
7166 			smps = WMI_PEER_SMPS_STATIC;
7167 			break;
7168 		case IEEE80211_SMPS_DYNAMIC:
7169 			smps = WMI_PEER_SMPS_DYNAMIC;
7170 			break;
7171 		case IEEE80211_SMPS_NUM_MODES:
7172 			ath10k_warn(ar, "Invalid smps %d in sta rc update for %pM\n",
7173 				    sta->smps_mode, sta->addr);
7174 			smps = WMI_PEER_SMPS_PS_NONE;
7175 			break;
7176 		}
7177 
7178 		arsta->smps = smps;
7179 	}
7180 
7181 	arsta->changed |= changed;
7182 
7183 	spin_unlock_bh(&ar->data_lock);
7184 
7185 	ieee80211_queue_work(hw, &arsta->update_wk);
7186 }
7187 
7188 static void ath10k_offset_tsf(struct ieee80211_hw *hw,
7189 			      struct ieee80211_vif *vif, s64 tsf_offset)
7190 {
7191 	struct ath10k *ar = hw->priv;
7192 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
7193 	u32 offset, vdev_param;
7194 	int ret;
7195 
7196 	if (tsf_offset < 0) {
7197 		vdev_param = ar->wmi.vdev_param->dec_tsf;
7198 		offset = -tsf_offset;
7199 	} else {
7200 		vdev_param = ar->wmi.vdev_param->inc_tsf;
7201 		offset = tsf_offset;
7202 	}
7203 
7204 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
7205 					vdev_param, offset);
7206 
7207 	if (ret && ret != -EOPNOTSUPP)
7208 		ath10k_warn(ar, "failed to set tsf offset %d cmd %d: %d\n",
7209 			    offset, vdev_param, ret);
7210 }
7211 
7212 static int ath10k_ampdu_action(struct ieee80211_hw *hw,
7213 			       struct ieee80211_vif *vif,
7214 			       struct ieee80211_ampdu_params *params)
7215 {
7216 	struct ath10k *ar = hw->priv;
7217 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
7218 	struct ieee80211_sta *sta = params->sta;
7219 	enum ieee80211_ampdu_mlme_action action = params->action;
7220 	u16 tid = params->tid;
7221 
7222 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %hu action %d\n",
7223 		   arvif->vdev_id, sta->addr, tid, action);
7224 
7225 	switch (action) {
7226 	case IEEE80211_AMPDU_RX_START:
7227 	case IEEE80211_AMPDU_RX_STOP:
7228 		/* HTT AddBa/DelBa events trigger mac80211 Rx BA session
7229 		 * creation/removal. Do we need to verify this?
7230 		 */
7231 		return 0;
7232 	case IEEE80211_AMPDU_TX_START:
7233 	case IEEE80211_AMPDU_TX_STOP_CONT:
7234 	case IEEE80211_AMPDU_TX_STOP_FLUSH:
7235 	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
7236 	case IEEE80211_AMPDU_TX_OPERATIONAL:
7237 		/* Firmware offloads Tx aggregation entirely so deny mac80211
7238 		 * Tx aggregation requests.
7239 		 */
7240 		return -EOPNOTSUPP;
7241 	}
7242 
7243 	return -EINVAL;
7244 }
7245 
7246 static void
7247 ath10k_mac_update_rx_channel(struct ath10k *ar,
7248 			     struct ieee80211_chanctx_conf *ctx,
7249 			     struct ieee80211_vif_chanctx_switch *vifs,
7250 			     int n_vifs)
7251 {
7252 	struct cfg80211_chan_def *def = NULL;
7253 
7254 	/* Both locks are required because ar->rx_channel is modified. This
7255 	 * allows readers to hold either lock.
7256 	 */
7257 	lockdep_assert_held(&ar->conf_mutex);
7258 	lockdep_assert_held(&ar->data_lock);
7259 
7260 	WARN_ON(ctx && vifs);
7261 	WARN_ON(vifs && !n_vifs);
7262 
7263 	/* FIXME: Sort of an optimization and a workaround. Peers and vifs are
7264 	 * on a linked list now. Doing a lookup peer -> vif -> chanctx for each
7265 	 * ppdu on Rx may reduce performance on low-end systems. It should be
7266 	 * possible to build tables/hashmaps to speed the lookup up (be wary of
7267 	 * CPU data cache line sizes though), but to keep the initial
7268 	 * implementation simple and less intrusive fall back to the slow lookup
7269 	 * only for multi-channel cases. Single-channel cases keep using the
7270 	 * old channel derivation and thus performance should not be
7271 	 * affected much.
7272 	 */
7273 	rcu_read_lock();
7274 	if (!ctx && ath10k_mac_num_chanctxs(ar) == 1) {
7275 		ieee80211_iter_chan_contexts_atomic(ar->hw,
7276 						    ath10k_mac_get_any_chandef_iter,
7277 						    &def);
7278 
7279 		if (vifs)
7280 			def = &vifs[0].new_ctx->def;
7281 
7282 		ar->rx_channel = def->chan;
7283 	} else if ((ctx && ath10k_mac_num_chanctxs(ar) == 0) ||
7284 		   (ctx && (ar->state == ATH10K_STATE_RESTARTED))) {
7285 		/* During a driver restart due to a firmware assert, mac80211
7286 		 * already has a valid channel context for the given radio, so
7287 		 * channel context iteration returns num_chanctx > 0. Fix up
7288 		 * rx_channel while the restart is in progress.
7289 		 */
7290 		ar->rx_channel = ctx->def.chan;
7291 	} else {
7292 		ar->rx_channel = NULL;
7293 	}
7294 	rcu_read_unlock();
7295 }
7296 
7297 static void
7298 ath10k_mac_update_vif_chan(struct ath10k *ar,
7299 			   struct ieee80211_vif_chanctx_switch *vifs,
7300 			   int n_vifs)
7301 {
7302 	struct ath10k_vif *arvif;
7303 	int ret;
7304 	int i;
7305 
7306 	lockdep_assert_held(&ar->conf_mutex);
7307 
7308 	/* First stop the monitor interface. Some FW versions crash if there's
7309 	 * a lone monitor interface.
7310 	 */
7311 	if (ar->monitor_started)
7312 		ath10k_monitor_stop(ar);
7313 
7314 	for (i = 0; i < n_vifs; i++) {
7315 		arvif = (void *)vifs[i].vif->drv_priv;
7316 
7317 		ath10k_dbg(ar, ATH10K_DBG_MAC,
7318 			   "mac chanctx switch vdev_id %i freq %hu->%hu width %d->%d\n",
7319 			   arvif->vdev_id,
7320 			   vifs[i].old_ctx->def.chan->center_freq,
7321 			   vifs[i].new_ctx->def.chan->center_freq,
7322 			   vifs[i].old_ctx->def.width,
7323 			   vifs[i].new_ctx->def.width);
7324 
7325 		if (WARN_ON(!arvif->is_started))
7326 			continue;
7327 
7328 		if (WARN_ON(!arvif->is_up))
7329 			continue;
7330 
7331 		ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
7332 		if (ret) {
7333 			ath10k_warn(ar, "failed to down vdev %d: %d\n",
7334 				    arvif->vdev_id, ret);
7335 			continue;
7336 		}
7337 	}
7338 
7339 	/* All relevant vdevs are downed and associated channel resources
7340 	 * should be available for the channel switch now.
7341 	 */
7342 
7343 	spin_lock_bh(&ar->data_lock);
7344 	ath10k_mac_update_rx_channel(ar, NULL, vifs, n_vifs);
7345 	spin_unlock_bh(&ar->data_lock);
7346 
7347 	for (i = 0; i < n_vifs; i++) {
7348 		arvif = (void *)vifs[i].vif->drv_priv;
7349 
7350 		if (WARN_ON(!arvif->is_started))
7351 			continue;
7352 
7353 		if (WARN_ON(!arvif->is_up))
7354 			continue;
7355 
7356 		ret = ath10k_mac_setup_bcn_tmpl(arvif);
7357 		if (ret)
7358 			ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
7359 				    ret);
7360 
7361 		ret = ath10k_mac_setup_prb_tmpl(arvif);
7362 		if (ret)
7363 			ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
7364 				    ret);
7365 
7366 		ret = ath10k_vdev_restart(arvif, &vifs[i].new_ctx->def);
7367 		if (ret) {
7368 			ath10k_warn(ar, "failed to restart vdev %d: %d\n",
7369 				    arvif->vdev_id, ret);
7370 			continue;
7371 		}
7372 
7373 		ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
7374 					 arvif->bssid);
7375 		if (ret) {
7376 			ath10k_warn(ar, "failed to bring vdev up %d: %d\n",
7377 				    arvif->vdev_id, ret);
7378 			continue;
7379 		}
7380 	}
7381 
7382 	ath10k_monitor_recalc(ar);
7383 }
7384 
7385 static int
7386 ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw,
7387 			  struct ieee80211_chanctx_conf *ctx)
7388 {
7389 	struct ath10k *ar = hw->priv;
7390 
7391 	ath10k_dbg(ar, ATH10K_DBG_MAC,
7392 		   "mac chanctx add freq %hu width %d ptr %pK\n",
7393 		   ctx->def.chan->center_freq, ctx->def.width, ctx);
7394 
7395 	mutex_lock(&ar->conf_mutex);
7396 
7397 	spin_lock_bh(&ar->data_lock);
7398 	ath10k_mac_update_rx_channel(ar, ctx, NULL, 0);
7399 	spin_unlock_bh(&ar->data_lock);
7400 
7401 	ath10k_recalc_radar_detection(ar);
7402 	ath10k_monitor_recalc(ar);
7403 
7404 	mutex_unlock(&ar->conf_mutex);
7405 
7406 	return 0;
7407 }
7408 
7409 static void
7410 ath10k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
7411 			     struct ieee80211_chanctx_conf *ctx)
7412 {
7413 	struct ath10k *ar = hw->priv;
7414 
7415 	ath10k_dbg(ar, ATH10K_DBG_MAC,
7416 		   "mac chanctx remove freq %hu width %d ptr %pK\n",
7417 		   ctx->def.chan->center_freq, ctx->def.width, ctx);
7418 
7419 	mutex_lock(&ar->conf_mutex);
7420 
7421 	spin_lock_bh(&ar->data_lock);
7422 	ath10k_mac_update_rx_channel(ar, NULL, NULL, 0);
7423 	spin_unlock_bh(&ar->data_lock);
7424 
7425 	ath10k_recalc_radar_detection(ar);
7426 	ath10k_monitor_recalc(ar);
7427 
7428 	mutex_unlock(&ar->conf_mutex);
7429 }
7430 
7431 struct ath10k_mac_change_chanctx_arg {
7432 	struct ieee80211_chanctx_conf *ctx;
7433 	struct ieee80211_vif_chanctx_switch *vifs;
7434 	int n_vifs;
7435 	int next_vif;
7436 };
7437 
7438 static void
7439 ath10k_mac_change_chanctx_cnt_iter(void *data, u8 *mac,
7440 				   struct ieee80211_vif *vif)
7441 {
7442 	struct ath10k_mac_change_chanctx_arg *arg = data;
7443 
7444 	if (rcu_access_pointer(vif->chanctx_conf) != arg->ctx)
7445 		return;
7446 
7447 	arg->n_vifs++;
7448 }
7449 
7450 static void
7451 ath10k_mac_change_chanctx_fill_iter(void *data, u8 *mac,
7452 				    struct ieee80211_vif *vif)
7453 {
7454 	struct ath10k_mac_change_chanctx_arg *arg = data;
7455 	struct ieee80211_chanctx_conf *ctx;
7456 
7457 	ctx = rcu_access_pointer(vif->chanctx_conf);
7458 	if (ctx != arg->ctx)
7459 		return;
7460 
7461 	if (WARN_ON(arg->next_vif == arg->n_vifs))
7462 		return;
7463 
7464 	arg->vifs[arg->next_vif].vif = vif;
7465 	arg->vifs[arg->next_vif].old_ctx = ctx;
7466 	arg->vifs[arg->next_vif].new_ctx = ctx;
7467 	arg->next_vif++;
7468 }
7469 
7470 static void
7471 ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw,
7472 			     struct ieee80211_chanctx_conf *ctx,
7473 			     u32 changed)
7474 {
7475 	struct ath10k *ar = hw->priv;
7476 	struct ath10k_mac_change_chanctx_arg arg = { .ctx = ctx };
7477 
7478 	mutex_lock(&ar->conf_mutex);
7479 
7480 	ath10k_dbg(ar, ATH10K_DBG_MAC,
7481 		   "mac chanctx change freq %hu width %d ptr %pK changed %x\n",
7482 		   ctx->def.chan->center_freq, ctx->def.width, ctx, changed);
7483 
7484 	/* This shouldn't really happen because channel switching should use
7485 	 * switch_vif_chanctx().
7486 	 */
7487 	if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL))
7488 		goto unlock;
7489 
7490 	if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH) {
7491 		ieee80211_iterate_active_interfaces_atomic(
7492 					hw,
7493 					IEEE80211_IFACE_ITER_NORMAL,
7494 					ath10k_mac_change_chanctx_cnt_iter,
7495 					&arg);
7496 		if (arg.n_vifs == 0)
7497 			goto radar;
7498 
7499 		arg.vifs = kcalloc(arg.n_vifs, sizeof(arg.vifs[0]),
7500 				   GFP_KERNEL);
7501 		if (!arg.vifs)
7502 			goto radar;
7503 
7504 		ieee80211_iterate_active_interfaces_atomic(
7505 					hw,
7506 					IEEE80211_IFACE_ITER_NORMAL,
7507 					ath10k_mac_change_chanctx_fill_iter,
7508 					&arg);
7509 		ath10k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs);
7510 		kfree(arg.vifs);
7511 	}
7512 
7513 radar:
7514 	ath10k_recalc_radar_detection(ar);
7515 
7516 	/* FIXME: How to configure Rx chains properly? */
7517 
7518 	/* No other actions are actually necessary. Firmware maintains channel
7519 	 * definitions per vdev internally and there's no host-side channel
7520 	 * context abstraction to configure, e.g. channel width.
7521 	 */
7522 
7523 unlock:
7524 	mutex_unlock(&ar->conf_mutex);
7525 }
7526 
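/* Bind a vif to a channel context: start the vdev on the context's channel
 * definition, refresh powersave settings, bring monitor vdevs up right away
 * and enable CTS protection and peer-stats pktlog where applicable.
 */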
7527 static int
7528 ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
7529 				 struct ieee80211_vif *vif,
7530 				 struct ieee80211_chanctx_conf *ctx)
7531 {
7532 	struct ath10k *ar = hw->priv;
7533 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
7534 	int ret;
7535 
7536 	mutex_lock(&ar->conf_mutex);
7537 
7538 	ath10k_dbg(ar, ATH10K_DBG_MAC,
7539 		   "mac chanctx assign ptr %pK vdev_id %i\n",
7540 		   ctx, arvif->vdev_id);
7541 
7542 	if (WARN_ON(arvif->is_started)) {
7543 		mutex_unlock(&ar->conf_mutex);
7544 		return -EBUSY;
7545 	}
7546 
7547 	ret = ath10k_vdev_start(arvif, &ctx->def);
7548 	if (ret) {
7549 		ath10k_warn(ar, "failed to start vdev %i addr %pM on freq %d: %d\n",
7550 			    arvif->vdev_id, vif->addr,
7551 			    ctx->def.chan->center_freq, ret);
7552 		goto err;
7553 	}
7554 
7555 	arvif->is_started = true;
7556 
7557 	ret = ath10k_mac_vif_setup_ps(arvif);
7558 	if (ret) {
7559 		ath10k_warn(ar, "failed to update vdev %i ps: %d\n",
7560 			    arvif->vdev_id, ret);
7561 		goto err_stop;
7562 	}
7563 
7564 	if (vif->type == NL80211_IFTYPE_MONITOR) {
7565 		ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, 0, vif->addr);
7566 		if (ret) {
7567 			ath10k_warn(ar, "failed to up monitor vdev %i: %d\n",
7568 				    arvif->vdev_id, ret);
7569 			goto err_stop;
7570 		}
7571 
7572 		arvif->is_up = true;
7573 	}
7574 
7575 	if (ath10k_mac_can_set_cts_prot(arvif)) {
7576 		ret = ath10k_mac_set_cts_prot(arvif);
7577 		if (ret)
7578 			ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n",
7579 				    arvif->vdev_id, ret);
7580 	}
7581 
7582 	if (ath10k_peer_stats_enabled(ar)) {
7583 		ar->pktlog_filter |= ATH10K_PKTLOG_PEER_STATS;
7584 		ret = ath10k_wmi_pdev_pktlog_enable(ar,
7585 						    ar->pktlog_filter);
7586 		if (ret) {
7587 			ath10k_warn(ar, "failed to enable pktlog %d\n", ret);
7588 			goto err_stop;
7589 		}
7590 	}
7591 
7592 	mutex_unlock(&ar->conf_mutex);
7593 	return 0;
7594 
7595 err_stop:
7596 	ath10k_vdev_stop(arvif);
7597 	arvif->is_started = false;
7598 	ath10k_mac_vif_setup_ps(arvif);
7599 
7600 err:
7601 	mutex_unlock(&ar->conf_mutex);
7602 	return ret;
7603 }
7604 
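/* Undo ath10k_mac_op_assign_vif_chanctx(): bring monitor vdevs down and stop
 * the vdev so it no longer occupies the channel.
 */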
7605 static void
7606 ath10k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
7607 				   struct ieee80211_vif *vif,
7608 				   struct ieee80211_chanctx_conf *ctx)
7609 {
7610 	struct ath10k *ar = hw->priv;
7611 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
7612 	int ret;
7613 
7614 	mutex_lock(&ar->conf_mutex);
7615 
7616 	ath10k_dbg(ar, ATH10K_DBG_MAC,
7617 		   "mac chanctx unassign ptr %pK vdev_id %i\n",
7618 		   ctx, arvif->vdev_id);
7619 
7620 	WARN_ON(!arvif->is_started);
7621 
7622 	if (vif->type == NL80211_IFTYPE_MONITOR) {
7623 		WARN_ON(!arvif->is_up);
7624 
7625 		ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
7626 		if (ret)
7627 			ath10k_warn(ar, "failed to down monitor vdev %i: %d\n",
7628 				    arvif->vdev_id, ret);
7629 
7630 		arvif->is_up = false;
7631 	}
7632 
7633 	ret = ath10k_vdev_stop(arvif);
7634 	if (ret)
7635 		ath10k_warn(ar, "failed to stop vdev %i: %d\n",
7636 			    arvif->vdev_id, ret);
7637 
7638 	arvif->is_started = false;
7639 
7640 	mutex_unlock(&ar->conf_mutex);
7641 }
7642 
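/* mac80211 hands over the list of vifs to move between contexts;
 * ath10k_mac_update_vif_chan() restarts and re-ups each affected vdev.
 */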
7643 static int
7644 ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
7645 				 struct ieee80211_vif_chanctx_switch *vifs,
7646 				 int n_vifs,
7647 				 enum ieee80211_chanctx_switch_mode mode)
7648 {
7649 	struct ath10k *ar = hw->priv;
7650 
7651 	mutex_lock(&ar->conf_mutex);
7652 
7653 	ath10k_dbg(ar, ATH10K_DBG_MAC,
7654 		   "mac chanctx switch n_vifs %d mode %d\n",
7655 		   n_vifs, mode);
7656 	ath10k_mac_update_vif_chan(ar, vifs, n_vifs);
7657 
7658 	mutex_unlock(&ar->conf_mutex);
7659 	return 0;
7660 }
7661 
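/* Called before mac80211 removes the station under RCU. Matching peers are
 * flagged as removed so that their stale sta pointer is not used afterwards.
 */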
7662 static void ath10k_mac_op_sta_pre_rcu_remove(struct ieee80211_hw *hw,
7663 					     struct ieee80211_vif *vif,
7664 					     struct ieee80211_sta *sta)
7665 {
7666 	struct ath10k *ar;
7667 	struct ath10k_peer *peer;
7668 
7669 	ar = hw->priv;
7670 
7671 	list_for_each_entry(peer, &ar->peers, list)
7672 		if (peer->sta == sta)
7673 			peer->removed = true;
7674 }
7675 
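/* Fill per-station rx duration and last tx rate for cfg80211. Only populated
 * when firmware peer stats are enabled.
 */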
7676 static void ath10k_sta_statistics(struct ieee80211_hw *hw,
7677 				  struct ieee80211_vif *vif,
7678 				  struct ieee80211_sta *sta,
7679 				  struct station_info *sinfo)
7680 {
7681 	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
7682 	struct ath10k *ar = arsta->arvif->ar;
7683 
7684 	if (!ath10k_peer_stats_enabled(ar))
7685 		return;
7686 
7687 	sinfo->rx_duration = arsta->rx_duration;
7688 	sinfo->filled |= 1ULL << NL80211_STA_INFO_RX_DURATION;
7689 
7690 	if (!arsta->txrate.legacy && !arsta->txrate.nss)
7691 		return;
7692 
7693 	if (arsta->txrate.legacy) {
7694 		sinfo->txrate.legacy = arsta->txrate.legacy;
7695 	} else {
7696 		sinfo->txrate.mcs = arsta->txrate.mcs;
7697 		sinfo->txrate.nss = arsta->txrate.nss;
7698 		sinfo->txrate.bw = arsta->txrate.bw;
7699 	}
7700 	sinfo->txrate.flags = arsta->txrate.flags;
7701 	sinfo->filled |= 1ULL << NL80211_STA_INFO_TX_BITRATE;
7702 }
7703 
7704 static const struct ieee80211_ops ath10k_ops = {
7705 	.tx				= ath10k_mac_op_tx,
7706 	.wake_tx_queue			= ath10k_mac_op_wake_tx_queue,
7707 	.start				= ath10k_start,
7708 	.stop				= ath10k_stop,
7709 	.config				= ath10k_config,
7710 	.add_interface			= ath10k_add_interface,
7711 	.remove_interface		= ath10k_remove_interface,
7712 	.configure_filter		= ath10k_configure_filter,
7713 	.bss_info_changed		= ath10k_bss_info_changed,
7714 	.set_coverage_class		= ath10k_mac_op_set_coverage_class,
7715 	.hw_scan			= ath10k_hw_scan,
7716 	.cancel_hw_scan			= ath10k_cancel_hw_scan,
7717 	.set_key			= ath10k_set_key,
7718 	.set_default_unicast_key        = ath10k_set_default_unicast_key,
7719 	.sta_state			= ath10k_sta_state,
7720 	.conf_tx			= ath10k_conf_tx,
7721 	.remain_on_channel		= ath10k_remain_on_channel,
7722 	.cancel_remain_on_channel	= ath10k_cancel_remain_on_channel,
7723 	.set_rts_threshold		= ath10k_set_rts_threshold,
7724 	.set_frag_threshold		= ath10k_mac_op_set_frag_threshold,
7725 	.flush				= ath10k_flush,
7726 	.tx_last_beacon			= ath10k_tx_last_beacon,
7727 	.set_antenna			= ath10k_set_antenna,
7728 	.get_antenna			= ath10k_get_antenna,
7729 	.reconfig_complete		= ath10k_reconfig_complete,
7730 	.get_survey			= ath10k_get_survey,
7731 	.set_bitrate_mask		= ath10k_mac_op_set_bitrate_mask,
7732 	.sta_rc_update			= ath10k_sta_rc_update,
7733 	.offset_tsf			= ath10k_offset_tsf,
7734 	.ampdu_action			= ath10k_ampdu_action,
7735 	.get_et_sset_count		= ath10k_debug_get_et_sset_count,
7736 	.get_et_stats			= ath10k_debug_get_et_stats,
7737 	.get_et_strings			= ath10k_debug_get_et_strings,
7738 	.add_chanctx			= ath10k_mac_op_add_chanctx,
7739 	.remove_chanctx			= ath10k_mac_op_remove_chanctx,
7740 	.change_chanctx			= ath10k_mac_op_change_chanctx,
7741 	.assign_vif_chanctx		= ath10k_mac_op_assign_vif_chanctx,
7742 	.unassign_vif_chanctx		= ath10k_mac_op_unassign_vif_chanctx,
7743 	.switch_vif_chanctx		= ath10k_mac_op_switch_vif_chanctx,
7744 	.sta_pre_rcu_remove		= ath10k_mac_op_sta_pre_rcu_remove,
7745 	.sta_statistics			= ath10k_sta_statistics,
7746 
7747 	CFG80211_TESTMODE_CMD(ath10k_tm_cmd)
7748 
7749 #ifdef CONFIG_PM
7750 	.suspend			= ath10k_wow_op_suspend,
7751 	.resume				= ath10k_wow_op_resume,
7752 	.set_wakeup			= ath10k_wow_op_set_wakeup,
7753 #endif
7754 #ifdef CONFIG_MAC80211_DEBUGFS
7755 	.sta_add_debugfs		= ath10k_sta_add_debugfs,
7756 #endif
7757 };
7758 
7759 #define CHAN2G(_channel, _freq, _flags) { \
7760 	.band			= NL80211_BAND_2GHZ, \
7761 	.hw_value		= (_channel), \
7762 	.center_freq		= (_freq), \
7763 	.flags			= (_flags), \
7764 	.max_antenna_gain	= 0, \
7765 	.max_power		= 30, \
7766 }
7767 
7768 #define CHAN5G(_channel, _freq, _flags) { \
7769 	.band			= NL80211_BAND_5GHZ, \
7770 	.hw_value		= (_channel), \
7771 	.center_freq		= (_freq), \
7772 	.flags			= (_flags), \
7773 	.max_antenna_gain	= 0, \
7774 	.max_power		= 30, \
7775 }
7776 
7777 static const struct ieee80211_channel ath10k_2ghz_channels[] = {
7778 	CHAN2G(1, 2412, 0),
7779 	CHAN2G(2, 2417, 0),
7780 	CHAN2G(3, 2422, 0),
7781 	CHAN2G(4, 2427, 0),
7782 	CHAN2G(5, 2432, 0),
7783 	CHAN2G(6, 2437, 0),
7784 	CHAN2G(7, 2442, 0),
7785 	CHAN2G(8, 2447, 0),
7786 	CHAN2G(9, 2452, 0),
7787 	CHAN2G(10, 2457, 0),
7788 	CHAN2G(11, 2462, 0),
7789 	CHAN2G(12, 2467, 0),
7790 	CHAN2G(13, 2472, 0),
7791 	CHAN2G(14, 2484, 0),
7792 };
7793 
7794 static const struct ieee80211_channel ath10k_5ghz_channels[] = {
7795 	CHAN5G(36, 5180, 0),
7796 	CHAN5G(40, 5200, 0),
7797 	CHAN5G(44, 5220, 0),
7798 	CHAN5G(48, 5240, 0),
7799 	CHAN5G(52, 5260, 0),
7800 	CHAN5G(56, 5280, 0),
7801 	CHAN5G(60, 5300, 0),
7802 	CHAN5G(64, 5320, 0),
7803 	CHAN5G(100, 5500, 0),
7804 	CHAN5G(104, 5520, 0),
7805 	CHAN5G(108, 5540, 0),
7806 	CHAN5G(112, 5560, 0),
7807 	CHAN5G(116, 5580, 0),
7808 	CHAN5G(120, 5600, 0),
7809 	CHAN5G(124, 5620, 0),
7810 	CHAN5G(128, 5640, 0),
7811 	CHAN5G(132, 5660, 0),
7812 	CHAN5G(136, 5680, 0),
7813 	CHAN5G(140, 5700, 0),
7814 	CHAN5G(144, 5720, 0),
7815 	CHAN5G(149, 5745, 0),
7816 	CHAN5G(153, 5765, 0),
7817 	CHAN5G(157, 5785, 0),
7818 	CHAN5G(161, 5805, 0),
7819 	CHAN5G(165, 5825, 0),
7820 	CHAN5G(169, 5845, 0),
7821 };
7822 
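/* Allocate the ieee80211_hw along with the ath10k core plus priv_size bytes
 * of bus-private data. The ops table is duplicated per device so that
 * callbacks the hardware cannot support (e.g. set_coverage_class) can be
 * cleared later in ath10k_mac_register().
 */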
7823 struct ath10k *ath10k_mac_create(size_t priv_size)
7824 {
7825 	struct ieee80211_hw *hw;
7826 	struct ieee80211_ops *ops;
7827 	struct ath10k *ar;
7828 
7829 	ops = kmemdup(&ath10k_ops, sizeof(ath10k_ops), GFP_KERNEL);
7830 	if (!ops)
7831 		return NULL;
7832 
7833 	hw = ieee80211_alloc_hw(sizeof(struct ath10k) + priv_size, ops);
7834 	if (!hw) {
7835 		kfree(ops);
7836 		return NULL;
7837 	}
7838 
7839 	ar = hw->priv;
7840 	ar->hw = hw;
7841 	ar->ops = ops;
7842 
7843 	return ar;
7844 }
7845 
7846 void ath10k_mac_destroy(struct ath10k *ar)
7847 {
7848 	struct ieee80211_ops *ops = ar->ops;
7849 
7850 	ieee80211_free_hw(ar->hw);
7851 	kfree(ops);
7852 }
7853 
7854 static const struct ieee80211_iface_limit ath10k_if_limits[] = {
7855 	{
7856 		.max	= 8,
7857 		.types	= BIT(NL80211_IFTYPE_STATION)
7858 			| BIT(NL80211_IFTYPE_P2P_CLIENT)
7859 	},
7860 	{
7861 		.max	= 3,
7862 		.types	= BIT(NL80211_IFTYPE_P2P_GO)
7863 	},
7864 	{
7865 		.max	= 1,
7866 		.types	= BIT(NL80211_IFTYPE_P2P_DEVICE)
7867 	},
7868 	{
7869 		.max	= 7,
7870 		.types	= BIT(NL80211_IFTYPE_AP)
7871 #ifdef CONFIG_MAC80211_MESH
7872 			| BIT(NL80211_IFTYPE_MESH_POINT)
7873 #endif
7874 	},
7875 };
7876 
7877 static const struct ieee80211_iface_limit ath10k_10x_if_limits[] = {
7878 	{
7879 		.max	= 8,
7880 		.types	= BIT(NL80211_IFTYPE_AP)
7881 #ifdef CONFIG_MAC80211_MESH
7882 			| BIT(NL80211_IFTYPE_MESH_POINT)
7883 #endif
7884 	},
7885 	{
7886 		.max	= 1,
7887 		.types	= BIT(NL80211_IFTYPE_STATION)
7888 	},
7889 };
7890 
7891 static const struct ieee80211_iface_combination ath10k_if_comb[] = {
7892 	{
7893 		.limits = ath10k_if_limits,
7894 		.n_limits = ARRAY_SIZE(ath10k_if_limits),
7895 		.max_interfaces = 8,
7896 		.num_different_channels = 1,
7897 		.beacon_int_infra_match = true,
7898 	},
7899 };
7900 
7901 static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = {
7902 	{
7903 		.limits = ath10k_10x_if_limits,
7904 		.n_limits = ARRAY_SIZE(ath10k_10x_if_limits),
7905 		.max_interfaces = 8,
7906 		.num_different_channels = 1,
7907 		.beacon_int_infra_match = true,
7908 		.beacon_int_min_gcd = 1,
7909 #ifdef CONFIG_ATH10K_DFS_CERTIFIED
7910 		.radar_detect_widths =	BIT(NL80211_CHAN_WIDTH_20_NOHT) |
7911 					BIT(NL80211_CHAN_WIDTH_20) |
7912 					BIT(NL80211_CHAN_WIDTH_40) |
7913 					BIT(NL80211_CHAN_WIDTH_80),
7914 #endif
7915 	},
7916 };
7917 
7918 static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = {
7919 	{
7920 		.max = 2,
7921 		.types = BIT(NL80211_IFTYPE_STATION),
7922 	},
7923 	{
7924 		.max = 2,
7925 		.types = BIT(NL80211_IFTYPE_AP) |
7926 #ifdef CONFIG_MAC80211_MESH
7927 			 BIT(NL80211_IFTYPE_MESH_POINT) |
7928 #endif
7929 			 BIT(NL80211_IFTYPE_P2P_CLIENT) |
7930 			 BIT(NL80211_IFTYPE_P2P_GO),
7931 	},
7932 	{
7933 		.max = 1,
7934 		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
7935 	},
7936 };
7937 
7938 static const struct ieee80211_iface_limit ath10k_tlv_qcs_if_limit[] = {
7939 	{
7940 		.max = 2,
7941 		.types = BIT(NL80211_IFTYPE_STATION),
7942 	},
7943 	{
7944 		.max = 2,
7945 		.types = BIT(NL80211_IFTYPE_P2P_CLIENT),
7946 	},
7947 	{
7948 		.max = 1,
7949 		.types = BIT(NL80211_IFTYPE_AP) |
7950 #ifdef CONFIG_MAC80211_MESH
7951 			 BIT(NL80211_IFTYPE_MESH_POINT) |
7952 #endif
7953 			 BIT(NL80211_IFTYPE_P2P_GO),
7954 	},
7955 	{
7956 		.max = 1,
7957 		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
7958 	},
7959 };
7960 
7961 static const struct ieee80211_iface_limit ath10k_tlv_if_limit_ibss[] = {
7962 	{
7963 		.max = 1,
7964 		.types = BIT(NL80211_IFTYPE_STATION),
7965 	},
7966 	{
7967 		.max = 1,
7968 		.types = BIT(NL80211_IFTYPE_ADHOC),
7969 	},
7970 };
7971 
7972 /* FIXME: This is not thoroughly tested. These combinations may over- or
7973  * underestimate hw/fw capabilities.
7974  */
7975 static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = {
7976 	{
7977 		.limits = ath10k_tlv_if_limit,
7978 		.num_different_channels = 1,
7979 		.max_interfaces = 4,
7980 		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
7981 	},
7982 	{
7983 		.limits = ath10k_tlv_if_limit_ibss,
7984 		.num_different_channels = 1,
7985 		.max_interfaces = 2,
7986 		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
7987 	},
7988 };
7989 
7990 static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = {
7991 	{
7992 		.limits = ath10k_tlv_if_limit,
7993 		.num_different_channels = 1,
7994 		.max_interfaces = 4,
7995 		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
7996 	},
7997 	{
7998 		.limits = ath10k_tlv_qcs_if_limit,
7999 		.num_different_channels = 2,
8000 		.max_interfaces = 4,
8001 		.n_limits = ARRAY_SIZE(ath10k_tlv_qcs_if_limit),
8002 	},
8003 	{
8004 		.limits = ath10k_tlv_if_limit_ibss,
8005 		.num_different_channels = 1,
8006 		.max_interfaces = 2,
8007 		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
8008 	},
8009 };
8010 
8011 static const struct ieee80211_iface_limit ath10k_10_4_if_limits[] = {
8012 	{
8013 		.max = 1,
8014 		.types = BIT(NL80211_IFTYPE_STATION),
8015 	},
8016 	{
8017 		.max	= 16,
8018 		.types	= BIT(NL80211_IFTYPE_AP)
8019 #ifdef CONFIG_MAC80211_MESH
8020 			| BIT(NL80211_IFTYPE_MESH_POINT)
8021 #endif
8022 	},
8023 };
8024 
8025 static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = {
8026 	{
8027 		.limits = ath10k_10_4_if_limits,
8028 		.n_limits = ARRAY_SIZE(ath10k_10_4_if_limits),
8029 		.max_interfaces = 16,
8030 		.num_different_channels = 1,
8031 		.beacon_int_infra_match = true,
8032 		.beacon_int_min_gcd = 1,
8033 #ifdef CONFIG_ATH10K_DFS_CERTIFIED
8034 		.radar_detect_widths =	BIT(NL80211_CHAN_WIDTH_20_NOHT) |
8035 					BIT(NL80211_CHAN_WIDTH_20) |
8036 					BIT(NL80211_CHAN_WIDTH_40) |
8037 					BIT(NL80211_CHAN_WIDTH_80),
8038 #endif
8039 	},
8040 };
8041 
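/* ath10k_get_arvif() maps a firmware vdev_id back to its ath10k_vif by
 * walking the active mac80211 interfaces with the iterator below.
 */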
8042 static void ath10k_get_arvif_iter(void *data, u8 *mac,
8043 				  struct ieee80211_vif *vif)
8044 {
8045 	struct ath10k_vif_iter *arvif_iter = data;
8046 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
8047 
8048 	if (arvif->vdev_id == arvif_iter->vdev_id)
8049 		arvif_iter->arvif = arvif;
8050 }
8051 
8052 struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id)
8053 {
8054 	struct ath10k_vif_iter arvif_iter;
8055 	u32 flags;
8056 
8057 	memset(&arvif_iter, 0, sizeof(struct ath10k_vif_iter));
8058 	arvif_iter.vdev_id = vdev_id;
8059 
8060 	flags = IEEE80211_IFACE_ITER_RESUME_ALL;
8061 	ieee80211_iterate_active_interfaces_atomic(ar->hw,
8062 						   flags,
8063 						   ath10k_get_arvif_iter,
8064 						   &arvif_iter);
8065 	if (!arvif_iter.arvif) {
8066 		ath10k_warn(ar, "No VIF found for vdev %d\n", vdev_id);
8067 		return NULL;
8068 	}
8069 
8070 	return arvif_iter.arvif;
8071 }
8072 
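/* Regulatory hint from ACPI: the WRDD method, if present, reports the
 * platform country code for the Wi-Fi domain, which is preferred over the
 * EEPROM regdomain in ath10k_mac_init_rd().
 */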
8073 #define WRD_METHOD "WRDD"
8074 #define WRDD_WIFI  (0x07)
8075 
8076 static u32 ath10k_mac_wrdd_get_mcc(struct ath10k *ar, union acpi_object *wrdd)
8077 {
8078 	union acpi_object *mcc_pkg;
8079 	union acpi_object *domain_type;
8080 	union acpi_object *mcc_value;
8081 	u32 i;
8082 
8083 	if (wrdd->type != ACPI_TYPE_PACKAGE ||
8084 	    wrdd->package.count < 2 ||
8085 	    wrdd->package.elements[0].type != ACPI_TYPE_INTEGER ||
8086 	    wrdd->package.elements[0].integer.value != 0) {
8087 		ath10k_warn(ar, "ignoring malformed/unsupported wrdd structure\n");
8088 		return 0;
8089 	}
8090 
8091 	for (i = 1; i < wrdd->package.count; ++i) {
8092 		mcc_pkg = &wrdd->package.elements[i];
8093 
8094 		if (mcc_pkg->type != ACPI_TYPE_PACKAGE)
8095 			continue;
8096 		if (mcc_pkg->package.count < 2)
8097 			continue;
8098 		if (mcc_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
8099 		    mcc_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
8100 			continue;
8101 
8102 		domain_type = &mcc_pkg->package.elements[0];
8103 		if (domain_type->integer.value != WRDD_WIFI)
8104 			continue;
8105 
8106 		mcc_value = &mcc_pkg->package.elements[1];
8107 		return mcc_value->integer.value;
8108 	}
8109 	return 0;
8110 }
8111 
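/* Evaluate the ACPI WRDD method and translate the returned country code into
 * an ath regdomain value with COUNTRY_ERD_FLAG set.
 */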
8112 static int ath10k_mac_get_wrdd_regulatory(struct ath10k *ar, u16 *rd)
8113 {
8114 	struct pci_dev __maybe_unused *pdev = to_pci_dev(ar->dev);
8115 	acpi_handle root_handle;
8116 	acpi_handle handle;
8117 	struct acpi_buffer wrdd = {ACPI_ALLOCATE_BUFFER, NULL};
8118 	acpi_status status;
8119 	u32 alpha2_code;
8120 	char alpha2[3];
8121 
8122 	root_handle = ACPI_HANDLE(&pdev->dev);
8123 	if (!root_handle)
8124 		return -EOPNOTSUPP;
8125 
8126 	status = acpi_get_handle(root_handle, (acpi_string)WRD_METHOD, &handle);
8127 	if (ACPI_FAILURE(status)) {
8128 		ath10k_dbg(ar, ATH10K_DBG_BOOT,
8129 			   "failed to get wrd method %d\n", status);
8130 		return -EIO;
8131 	}
8132 
8133 	status = acpi_evaluate_object(handle, NULL, NULL, &wrdd);
8134 	if (ACPI_FAILURE(status)) {
8135 		ath10k_dbg(ar, ATH10K_DBG_BOOT,
8136 			   "failed to call wrdd %d\n", status);
8137 		return -EIO;
8138 	}
8139 
8140 	alpha2_code = ath10k_mac_wrdd_get_mcc(ar, wrdd.pointer);
8141 	kfree(wrdd.pointer);
8142 	if (!alpha2_code)
8143 		return -EIO;
8144 
8145 	alpha2[0] = (alpha2_code >> 8) & 0xff;
8146 	alpha2[1] = (alpha2_code >> 0) & 0xff;
8147 	alpha2[2] = '\0';
8148 
8149 	ath10k_dbg(ar, ATH10K_DBG_BOOT,
8150 		   "regulatory hint from WRDD (alpha2-code): %s\n", alpha2);
8151 
8152 	*rd = ath_regd_find_country_by_name(alpha2);
8153 	if (*rd == 0xffff)
8154 		return -EIO;
8155 
8156 	*rd |= COUNTRY_ERD_FLAG;
8157 	return 0;
8158 }
8159 
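/* Choose the initial regulatory domain: prefer the ACPI WRDD hint and fall
 * back to the regdomain programmed in the EEPROM.
 */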
8160 static int ath10k_mac_init_rd(struct ath10k *ar)
8161 {
8162 	int ret;
8163 	u16 rd;
8164 
8165 	ret = ath10k_mac_get_wrdd_regulatory(ar, &rd);
8166 	if (ret) {
8167 		ath10k_dbg(ar, ATH10K_DBG_BOOT,
8168 			   "fallback to eeprom programmed regulatory settings\n");
8169 		rd = ar->hw_eeprom_rd;
8170 	}
8171 
8172 	ar->ath_common.regulatory.current_rd = rd;
8173 	return 0;
8174 }
8175 
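/* Populate wiphy and ieee80211_hw capabilities from firmware services and
 * hw_params, then register the device with mac80211.
 */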
8176 int ath10k_mac_register(struct ath10k *ar)
8177 {
8178 	static const u32 cipher_suites[] = {
8179 		WLAN_CIPHER_SUITE_WEP40,
8180 		WLAN_CIPHER_SUITE_WEP104,
8181 		WLAN_CIPHER_SUITE_TKIP,
8182 		WLAN_CIPHER_SUITE_CCMP,
8183 
8184 		/* Do not add hardware supported ciphers before this line.
8185 		 * Allow software encryption for all chips. Don't forget to
8186 		 * update n_cipher_suites below.
8187 		 */
8188 		WLAN_CIPHER_SUITE_AES_CMAC,
8189 		WLAN_CIPHER_SUITE_BIP_CMAC_256,
8190 		WLAN_CIPHER_SUITE_BIP_GMAC_128,
8191 		WLAN_CIPHER_SUITE_BIP_GMAC_256,
8192 
8193 		/* Only QCA99x0 and QCA4019 variants support GCMP-128, GCMP-256
8194 		 * and CCMP-256 in hardware.
8195 		 */
8196 		WLAN_CIPHER_SUITE_GCMP,
8197 		WLAN_CIPHER_SUITE_GCMP_256,
8198 		WLAN_CIPHER_SUITE_CCMP_256,
8199 	};
8200 	struct ieee80211_supported_band *band;
8201 	void *channels;
8202 	int ret;
8203 
8204 	SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);
8205 
8206 	SET_IEEE80211_DEV(ar->hw, ar->dev);
8207 
8208 	BUILD_BUG_ON((ARRAY_SIZE(ath10k_2ghz_channels) +
8209 		      ARRAY_SIZE(ath10k_5ghz_channels)) !=
8210 		     ATH10K_NUM_CHANS);
8211 
8212 	if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
8213 		channels = kmemdup(ath10k_2ghz_channels,
8214 				   sizeof(ath10k_2ghz_channels),
8215 				   GFP_KERNEL);
8216 		if (!channels) {
8217 			ret = -ENOMEM;
8218 			goto err_free;
8219 		}
8220 
8221 		band = &ar->mac.sbands[NL80211_BAND_2GHZ];
8222 		band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels);
8223 		band->channels = channels;
8224 
8225 		if (ar->hw_params.cck_rate_map_rev2) {
8226 			band->n_bitrates = ath10k_g_rates_rev2_size;
8227 			band->bitrates = ath10k_g_rates_rev2;
8228 		} else {
8229 			band->n_bitrates = ath10k_g_rates_size;
8230 			band->bitrates = ath10k_g_rates;
8231 		}
8232 
8233 		ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
8234 	}
8235 
8236 	if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
8237 		channels = kmemdup(ath10k_5ghz_channels,
8238 				   sizeof(ath10k_5ghz_channels),
8239 				   GFP_KERNEL);
8240 		if (!channels) {
8241 			ret = -ENOMEM;
8242 			goto err_free;
8243 		}
8244 
8245 		band = &ar->mac.sbands[NL80211_BAND_5GHZ];
8246 		band->n_channels = ARRAY_SIZE(ath10k_5ghz_channels);
8247 		band->channels = channels;
8248 		band->n_bitrates = ath10k_a_rates_size;
8249 		band->bitrates = ath10k_a_rates;
8250 		ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
8251 	}
8252 
8253 	ath10k_mac_setup_ht_vht_cap(ar);
8254 
8255 	ar->hw->wiphy->interface_modes =
8256 		BIT(NL80211_IFTYPE_STATION) |
8257 		BIT(NL80211_IFTYPE_AP) |
8258 		BIT(NL80211_IFTYPE_MESH_POINT);
8259 
8260 	ar->hw->wiphy->available_antennas_rx = ar->cfg_rx_chainmask;
8261 	ar->hw->wiphy->available_antennas_tx = ar->cfg_tx_chainmask;
8262 
8263 	if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->normal_mode_fw.fw_file.fw_features))
8264 		ar->hw->wiphy->interface_modes |=
8265 			BIT(NL80211_IFTYPE_P2P_DEVICE) |
8266 			BIT(NL80211_IFTYPE_P2P_CLIENT) |
8267 			BIT(NL80211_IFTYPE_P2P_GO);
8268 
8269 	ieee80211_hw_set(ar->hw, SIGNAL_DBM);
8270 
8271 	if (!test_bit(ATH10K_FW_FEATURE_NO_PS,
8272 		      ar->running_fw->fw_file.fw_features)) {
8273 		ieee80211_hw_set(ar->hw, SUPPORTS_PS);
8274 		ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
8275 	}
8276 
8277 	ieee80211_hw_set(ar->hw, MFP_CAPABLE);
8278 	ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS);
8279 	ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
8280 	ieee80211_hw_set(ar->hw, AP_LINK_PS);
8281 	ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
8282 	ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
8283 	ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
8284 	ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
8285 	ieee80211_hw_set(ar->hw, WANT_MONITOR_VIF);
8286 	ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
8287 	ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
8288 	ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG);
8289 	ieee80211_hw_set(ar->hw, REPORTS_LOW_ACK);
8290 
8291 	if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
8292 		ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
8293 
8294 	ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
8295 	ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
8296 
8297 	if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
8298 		ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
8299 
8300 	if (ar->ht_cap_info & WMI_HT_CAP_ENABLED) {
8301 		ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION);
8302 		ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW);
8303 	}
8304 
8305 	ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
8306 	ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
8307 
8308 	ar->hw->vif_data_size = sizeof(struct ath10k_vif);
8309 	ar->hw->sta_data_size = sizeof(struct ath10k_sta);
8310 	ar->hw->txq_data_size = sizeof(struct ath10k_txq);
8311 
8312 	ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;
8313 
8314 	if (test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) {
8315 		ar->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
8316 
8317 		/* Firmware delivers WPS/P2P Probe Request frames to the driver so
8318 		 * that userspace (e.g. wpa_supplicant/hostapd) can generate
8319 		 * correct Probe Responses. This is more of a hack advertisement.
8320 		 */
8321 		ar->hw->wiphy->probe_resp_offload |=
8322 			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
8323 			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
8324 			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
8325 	}
8326 
8327 	if (test_bit(WMI_SERVICE_TDLS, ar->wmi.svc_map) ||
8328 	    test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, ar->wmi.svc_map)) {
8329 		ar->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
8330 		if (test_bit(WMI_SERVICE_TDLS_WIDER_BANDWIDTH, ar->wmi.svc_map))
8331 			ieee80211_hw_set(ar->hw, TDLS_WIDER_BW);
8332 	}
8333 
8334 	if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, ar->wmi.svc_map))
8335 		ieee80211_hw_set(ar->hw, SUPPORTS_TDLS_BUFFER_STA);
8336 
8337 	ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
8338 	ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
8339 	ar->hw->wiphy->max_remain_on_channel_duration = 5000;
8340 
8341 	ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
8342 	ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
8343 				   NL80211_FEATURE_AP_SCAN;
8344 
8345 	ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;
8346 
8347 	ret = ath10k_wow_init(ar);
8348 	if (ret) {
8349 		ath10k_warn(ar, "failed to init wow: %d\n", ret);
8350 		goto err_free;
8351 	}
8352 
8353 	wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
8354 
8355 	/*
8356 	 * On LL hardware the queues are managed entirely by the firmware,
8357 	 * so we only advertise the maximum number of queues to mac80211.
8358 	 */
8359 	ar->hw->queues = IEEE80211_MAX_QUEUES;
8360 
8361 	/* vdev_ids are used as hw queue numbers. Make sure offchan tx queue is
8362 	 * something that vdev_ids can't reach so that we don't stop the queue
8363 	 * accidentally.
8364 	 */
8365 	ar->hw->offchannel_tx_hw_queue = IEEE80211_MAX_QUEUES - 1;
8366 
8367 	switch (ar->running_fw->fw_file.wmi_op_version) {
8368 	case ATH10K_FW_WMI_OP_VERSION_MAIN:
8369 		ar->hw->wiphy->iface_combinations = ath10k_if_comb;
8370 		ar->hw->wiphy->n_iface_combinations =
8371 			ARRAY_SIZE(ath10k_if_comb);
8372 		ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
8373 		break;
8374 	case ATH10K_FW_WMI_OP_VERSION_TLV:
8375 		if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
8376 			ar->hw->wiphy->iface_combinations =
8377 				ath10k_tlv_qcs_if_comb;
8378 			ar->hw->wiphy->n_iface_combinations =
8379 				ARRAY_SIZE(ath10k_tlv_qcs_if_comb);
8380 		} else {
8381 			ar->hw->wiphy->iface_combinations = ath10k_tlv_if_comb;
8382 			ar->hw->wiphy->n_iface_combinations =
8383 				ARRAY_SIZE(ath10k_tlv_if_comb);
8384 		}
8385 		ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
8386 		break;
8387 	case ATH10K_FW_WMI_OP_VERSION_10_1:
8388 	case ATH10K_FW_WMI_OP_VERSION_10_2:
8389 	case ATH10K_FW_WMI_OP_VERSION_10_2_4:
8390 		ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb;
8391 		ar->hw->wiphy->n_iface_combinations =
8392 			ARRAY_SIZE(ath10k_10x_if_comb);
8393 		break;
8394 	case ATH10K_FW_WMI_OP_VERSION_10_4:
8395 		ar->hw->wiphy->iface_combinations = ath10k_10_4_if_comb;
8396 		ar->hw->wiphy->n_iface_combinations =
8397 			ARRAY_SIZE(ath10k_10_4_if_comb);
8398 		break;
8399 	case ATH10K_FW_WMI_OP_VERSION_UNSET:
8400 	case ATH10K_FW_WMI_OP_VERSION_MAX:
8401 		WARN_ON(1);
8402 		ret = -EINVAL;
8403 		goto err_free;
8404 	}
8405 
8406 	if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
8407 		ar->hw->netdev_features = NETIF_F_HW_CSUM;
8408 
8409 	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) {
8410 		/* Init ath dfs pattern detector */
8411 		ar->ath_common.debug_mask = ATH_DBG_DFS;
8412 		ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common,
8413 							     NL80211_DFS_UNSET);
8414 
8415 		if (!ar->dfs_detector)
8416 			ath10k_warn(ar, "failed to initialise DFS pattern detector\n");
8417 	}
8418 
8419 	ret = ath10k_mac_init_rd(ar);
8420 	if (ret) {
8421 		ath10k_err(ar, "failed to derive regdom: %d\n", ret);
8422 		goto err_dfs_detector_exit;
8423 	}
8424 
8425 	/* Disable set_coverage_class for chipsets that do not support it. */
8426 	if (!ar->hw_params.hw_ops->set_coverage_class)
8427 		ar->ops->set_coverage_class = NULL;
8428 
8429 	ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
8430 			    ath10k_reg_notifier);
8431 	if (ret) {
8432 		ath10k_err(ar, "failed to initialise regulatory: %i\n", ret);
8433 		goto err_dfs_detector_exit;
8434 	}
8435 
8436 	ar->hw->wiphy->cipher_suites = cipher_suites;
8437 
8438 	/* QCA988x and QCA6174 family chips do not support CCMP-256, GCMP-128
8439 	 * and GCMP-256 ciphers in hardware. Fetch number of ciphers supported
8440 	 * from chip specific hw_param table.
8441 	 */
8442 	if (!ar->hw_params.n_cipher_suites ||
8443 	    ar->hw_params.n_cipher_suites > ARRAY_SIZE(cipher_suites)) {
8444 		ath10k_err(ar, "invalid hw_params.n_cipher_suites %d\n",
8445 			   ar->hw_params.n_cipher_suites);
8446 		ar->hw_params.n_cipher_suites = 8;
8447 	}
8448 	ar->hw->wiphy->n_cipher_suites = ar->hw_params.n_cipher_suites;
8449 
8450 	wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
8451 
8452 	ret = ieee80211_register_hw(ar->hw);
8453 	if (ret) {
8454 		ath10k_err(ar, "failed to register ieee80211: %d\n", ret);
8455 		goto err_dfs_detector_exit;
8456 	}
8457 
8458 	if (!ath_is_world_regd(&ar->ath_common.regulatory)) {
8459 		ret = regulatory_hint(ar->hw->wiphy,
8460 				      ar->ath_common.regulatory.alpha2);
8461 		if (ret)
8462 			goto err_unregister;
8463 	}
8464 
8465 	return 0;
8466 
8467 err_unregister:
8468 	ieee80211_unregister_hw(ar->hw);
8469 
8470 err_dfs_detector_exit:
8471 	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
8472 		ar->dfs_detector->exit(ar->dfs_detector);
8473 
8474 err_free:
8475 	kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
8476 	kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
8477 
8478 	SET_IEEE80211_DEV(ar->hw, NULL);
8479 	return ret;
8480 }
8481 
8482 void ath10k_mac_unregister(struct ath10k *ar)
8483 {
8484 	ieee80211_unregister_hw(ar->hw);
8485 
8486 	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
8487 		ar->dfs_detector->exit(ar->dfs_detector);
8488 
8489 	kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
8490 	kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
8491 
8492 	SET_IEEE80211_DEV(ar->hw, NULL);
8493 }
8494