xref: /openbmc/linux/drivers/net/wireless/ath/ath10k/mac.c (revision 995c6a7f)
1 /*
2  * Copyright (c) 2005-2011 Atheros Communications Inc.
3  * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include "mac.h"
19 
20 #include <net/mac80211.h>
21 #include <linux/etherdevice.h>
22 
23 #include "hif.h"
24 #include "core.h"
25 #include "debug.h"
26 #include "wmi.h"
27 #include "htt.h"
28 #include "txrx.h"
29 #include "testmode.h"
30 #include "wmi.h"
31 #include "wmi-tlv.h"
32 #include "wmi-ops.h"
33 #include "wow.h"
34 
35 /*********/
36 /* Rates */
37 /*********/
38 
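/* Bitrates are given in units of 100 kbps, as mac80211 expects.
 * hw_value/hw_value_short carry the firmware rate codes for the long- and
 * short-preamble variants. The first four entries are CCK, the remainder
 * OFDM; the _rev2 table below appears to differ only in its CCK rate codes.
 */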
39 static struct ieee80211_rate ath10k_rates[] = {
40 	{ .bitrate = 10,
41 	  .hw_value = ATH10K_HW_RATE_CCK_LP_1M },
42 	{ .bitrate = 20,
43 	  .hw_value = ATH10K_HW_RATE_CCK_LP_2M,
44 	  .hw_value_short = ATH10K_HW_RATE_CCK_SP_2M,
45 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
46 	{ .bitrate = 55,
47 	  .hw_value = ATH10K_HW_RATE_CCK_LP_5_5M,
48 	  .hw_value_short = ATH10K_HW_RATE_CCK_SP_5_5M,
49 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
50 	{ .bitrate = 110,
51 	  .hw_value = ATH10K_HW_RATE_CCK_LP_11M,
52 	  .hw_value_short = ATH10K_HW_RATE_CCK_SP_11M,
53 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
54 
55 	{ .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
56 	{ .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
57 	{ .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
58 	{ .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
59 	{ .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
60 	{ .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
61 	{ .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
62 	{ .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
63 };
64 
65 static struct ieee80211_rate ath10k_rates_rev2[] = {
66 	{ .bitrate = 10,
67 	  .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_1M },
68 	{ .bitrate = 20,
69 	  .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_2M,
70 	  .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_2M,
71 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
72 	{ .bitrate = 55,
73 	  .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_5_5M,
74 	  .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_5_5M,
75 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
76 	{ .bitrate = 110,
77 	  .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_11M,
78 	  .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_11M,
79 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
80 
81 	{ .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
82 	{ .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
83 	{ .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
84 	{ .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
85 	{ .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
86 	{ .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
87 	{ .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
88 	{ .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
89 };
90 
91 #define ATH10K_MAC_FIRST_OFDM_RATE_IDX 4
92 
93 #define ath10k_a_rates (ath10k_rates + ATH10K_MAC_FIRST_OFDM_RATE_IDX)
94 #define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - \
95 			     ATH10K_MAC_FIRST_OFDM_RATE_IDX)
96 #define ath10k_g_rates (ath10k_rates + 0)
97 #define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))
98 
99 #define ath10k_g_rates_rev2 (ath10k_rates_rev2 + 0)
100 #define ath10k_g_rates_rev2_size (ARRAY_SIZE(ath10k_rates_rev2))
101 
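/* The 5 GHz rate table reuses ath10k_rates but skips the four CCK entries,
 * hence ATH10K_MAC_FIRST_OFDM_RATE_IDX above. The helpers below translate
 * between mac80211 bitrates, hardware rate codes and rate-table indices;
 * ath10k_mac_bitrate_to_rate() encodes the rate in 500 kbps units with
 * BIT(7) marking CCK rates.
 */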
102 static bool ath10k_mac_bitrate_is_cck(int bitrate)
103 {
104 	switch (bitrate) {
105 	case 10:
106 	case 20:
107 	case 55:
108 	case 110:
109 		return true;
110 	}
111 
112 	return false;
113 }
114 
115 static u8 ath10k_mac_bitrate_to_rate(int bitrate)
116 {
117 	return DIV_ROUND_UP(bitrate, 5) |
118 	       (ath10k_mac_bitrate_is_cck(bitrate) ? BIT(7) : 0);
119 }
120 
121 u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
122 			     u8 hw_rate, bool cck)
123 {
124 	const struct ieee80211_rate *rate;
125 	int i;
126 
127 	for (i = 0; i < sband->n_bitrates; i++) {
128 		rate = &sband->bitrates[i];
129 
130 		if (ath10k_mac_bitrate_is_cck(rate->bitrate) != cck)
131 			continue;
132 
133 		if (rate->hw_value == hw_rate)
134 			return i;
135 		else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE &&
136 			 rate->hw_value_short == hw_rate)
137 			return i;
138 	}
139 
140 	return 0;
141 }
142 
143 u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
144 			     u32 bitrate)
145 {
146 	int i;
147 
148 	for (i = 0; i < sband->n_bitrates; i++)
149 		if (sband->bitrates[i].bitrate == bitrate)
150 			return i;
151 
152 	return 0;
153 }
154 
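/* Pull the 2-bit VHT MCS support value for the given spatial stream out of
 * an MCS map and expand it into a bitmask of usable MCS indices (0..7, 0..8
 * or 0..9); returns 0 if the stream is not supported.
 */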
155 static int ath10k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss)
156 {
157 	switch ((mcs_map >> (2 * nss)) & 0x3) {
158 	case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1;
159 	case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1;
160 	case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1;
161 	}
162 	return 0;
163 }
164 
165 static u32
166 ath10k_mac_max_ht_nss(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
167 {
168 	int nss;
169 
170 	for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--)
171 		if (ht_mcs_mask[nss])
172 			return nss + 1;
173 
174 	return 1;
175 }
176 
177 static u32
178 ath10k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
179 {
180 	int nss;
181 
182 	for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--)
183 		if (vht_mcs_mask[nss])
184 			return nss + 1;
185 
186 	return 1;
187 }
188 
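/* Tell the firmware which host platform profile to assume: LOW_PERF when
 * dynamic TX mode switching is advertised in the service map, HIGH_PERF
 * otherwise. Firmware that does not implement ext resource config
 * (-EOPNOTSUPP) is tolerated silently.
 */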
189 int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val)
190 {
191 	enum wmi_host_platform_type platform_type;
192 	int ret;
193 
194 	if (test_bit(WMI_SERVICE_TX_MODE_DYNAMIC, ar->wmi.svc_map))
195 		platform_type = WMI_HOST_PLATFORM_LOW_PERF;
196 	else
197 		platform_type = WMI_HOST_PLATFORM_HIGH_PERF;
198 
199 	ret = ath10k_wmi_ext_resource_config(ar, platform_type, val);
200 
201 	if (ret && ret != -EOPNOTSUPP) {
202 		ath10k_warn(ar, "failed to configure ext resource: %d\n", ret);
203 		return ret;
204 	}
205 
206 	return 0;
207 }
208 
209 /**********/
210 /* Crypto */
211 /**********/
212 
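/* Translate a mac80211 key into a wmi_vdev_install_key_arg and hand it to
 * the firmware. The supported ciphers are mapped to their WMI equivalents;
 * AES-CMAC (the IGTK) is rejected here and is presumably handled in software
 * by mac80211. DISABLE_KEY is sent as WMI_CIPHER_NONE with no key material.
 */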
213 static int ath10k_send_key(struct ath10k_vif *arvif,
214 			   struct ieee80211_key_conf *key,
215 			   enum set_key_cmd cmd,
216 			   const u8 *macaddr, u32 flags)
217 {
218 	struct ath10k *ar = arvif->ar;
219 	struct wmi_vdev_install_key_arg arg = {
220 		.vdev_id = arvif->vdev_id,
221 		.key_idx = key->keyidx,
222 		.key_len = key->keylen,
223 		.key_data = key->key,
224 		.key_flags = flags,
225 		.macaddr = macaddr,
226 	};
227 
228 	lockdep_assert_held(&arvif->ar->conf_mutex);
229 
230 	switch (key->cipher) {
231 	case WLAN_CIPHER_SUITE_CCMP:
232 		arg.key_cipher = WMI_CIPHER_AES_CCM;
233 		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
234 		break;
235 	case WLAN_CIPHER_SUITE_TKIP:
236 		arg.key_cipher = WMI_CIPHER_TKIP;
237 		arg.key_txmic_len = 8;
238 		arg.key_rxmic_len = 8;
239 		break;
240 	case WLAN_CIPHER_SUITE_WEP40:
241 	case WLAN_CIPHER_SUITE_WEP104:
242 		arg.key_cipher = WMI_CIPHER_WEP;
243 		break;
244 	case WLAN_CIPHER_SUITE_AES_CMAC:
245 		WARN_ON(1);
246 		return -EINVAL;
247 	default:
248 		ath10k_warn(ar, "cipher %d is not supported\n", key->cipher);
249 		return -EOPNOTSUPP;
250 	}
251 
252 	if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
253 		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
254 
255 	if (cmd == DISABLE_KEY) {
256 		arg.key_cipher = WMI_CIPHER_NONE;
257 		arg.key_data = NULL;
258 	}
259 
260 	return ath10k_wmi_vdev_install_key(arvif->ar, &arg);
261 }
262 
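/* Synchronous wrapper around ath10k_send_key(): issue the WMI install and
 * wait up to 3 seconds for the firmware's install_key_done completion.
 * Returns 1 without touching the firmware when hardware crypto is disabled
 * for this vif.
 */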
263 static int ath10k_install_key(struct ath10k_vif *arvif,
264 			      struct ieee80211_key_conf *key,
265 			      enum set_key_cmd cmd,
266 			      const u8 *macaddr, u32 flags)
267 {
268 	struct ath10k *ar = arvif->ar;
269 	int ret;
270 	unsigned long time_left;
271 
272 	lockdep_assert_held(&ar->conf_mutex);
273 
274 	reinit_completion(&ar->install_key_done);
275 
276 	if (arvif->nohwcrypt)
277 		return 1;
278 
279 	ret = ath10k_send_key(arvif, key, cmd, macaddr, flags);
280 	if (ret)
281 		return ret;
282 
283 	time_left = wait_for_completion_timeout(&ar->install_key_done, 3 * HZ);
284 	if (time_left == 0)
285 		return -ETIMEDOUT;
286 
287 	return 0;
288 }
289 
290 static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
291 					const u8 *addr)
292 {
293 	struct ath10k *ar = arvif->ar;
294 	struct ath10k_peer *peer;
295 	int ret;
296 	int i;
297 	u32 flags;
298 
299 	lockdep_assert_held(&ar->conf_mutex);
300 
301 	if (WARN_ON(arvif->vif->type != NL80211_IFTYPE_AP &&
302 		    arvif->vif->type != NL80211_IFTYPE_ADHOC &&
303 		    arvif->vif->type != NL80211_IFTYPE_MESH_POINT))
304 		return -EINVAL;
305 
306 	spin_lock_bh(&ar->data_lock);
307 	peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
308 	spin_unlock_bh(&ar->data_lock);
309 
310 	if (!peer)
311 		return -ENOENT;
312 
313 	for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) {
314 		if (arvif->wep_keys[i] == NULL)
315 			continue;
316 
317 		switch (arvif->vif->type) {
318 		case NL80211_IFTYPE_AP:
319 			flags = WMI_KEY_PAIRWISE;
320 
321 			if (arvif->def_wep_key_idx == i)
322 				flags |= WMI_KEY_TX_USAGE;
323 
324 			ret = ath10k_install_key(arvif, arvif->wep_keys[i],
325 						 SET_KEY, addr, flags);
326 			if (ret < 0)
327 				return ret;
328 			break;
329 		case NL80211_IFTYPE_ADHOC:
330 			ret = ath10k_install_key(arvif, arvif->wep_keys[i],
331 						 SET_KEY, addr,
332 						 WMI_KEY_PAIRWISE);
333 			if (ret < 0)
334 				return ret;
335 
336 			ret = ath10k_install_key(arvif, arvif->wep_keys[i],
337 						 SET_KEY, addr, WMI_KEY_GROUP);
338 			if (ret < 0)
339 				return ret;
340 			break;
341 		default:
342 			WARN_ON(1);
343 			return -EINVAL;
344 		}
345 
346 		spin_lock_bh(&ar->data_lock);
347 		peer->keys[i] = arvif->wep_keys[i];
348 		spin_unlock_bh(&ar->data_lock);
349 	}
350 
351 	/* In some cases (notably with static WEP IBSS with multiple keys)
352 	 * multicast Tx becomes broken. Both pairwise and groupwise keys are
353 	 * installed already. Using WMI_KEY_TX_USAGE in different combinations
354 	 * didn't seem to help. Using the def_keyid vdev parameter seems to be
355 	 * effective so use that.
356 	 *
357 	 * FIXME: Revisit. Perhaps this can be done in a less hacky way.
358 	 */
359 	if (arvif->vif->type != NL80211_IFTYPE_ADHOC)
360 		return 0;
361 
362 	if (arvif->def_wep_key_idx == -1)
363 		return 0;
364 
365 	ret = ath10k_wmi_vdev_set_param(arvif->ar,
366 					arvif->vdev_id,
367 					arvif->ar->wmi.vdev_param->def_keyid,
368 					arvif->def_wep_key_idx);
369 	if (ret) {
370 		ath10k_warn(ar, "failed to re-set def wep key idx on vdev %i: %d\n",
371 			    arvif->vdev_id, ret);
372 		return ret;
373 	}
374 
375 	return 0;
376 }
377 
378 static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
379 				  const u8 *addr)
380 {
381 	struct ath10k *ar = arvif->ar;
382 	struct ath10k_peer *peer;
383 	int first_errno = 0;
384 	int ret;
385 	int i;
386 	u32 flags = 0;
387 
388 	lockdep_assert_held(&ar->conf_mutex);
389 
390 	spin_lock_bh(&ar->data_lock);
391 	peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
392 	spin_unlock_bh(&ar->data_lock);
393 
394 	if (!peer)
395 		return -ENOENT;
396 
397 	for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
398 		if (peer->keys[i] == NULL)
399 			continue;
400 
401 		/* key flags are not required to delete the key */
402 		ret = ath10k_install_key(arvif, peer->keys[i],
403 					 DISABLE_KEY, addr, flags);
404 		if (ret < 0 && first_errno == 0)
405 			first_errno = ret;
406 
407 		if (ret < 0)
408 			ath10k_warn(ar, "failed to remove peer wep key %d: %d\n",
409 				    i, ret);
410 
411 		spin_lock_bh(&ar->data_lock);
412 		peer->keys[i] = NULL;
413 		spin_unlock_bh(&ar->data_lock);
414 	}
415 
416 	return first_errno;
417 }
418 
419 bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr,
420 				    u8 keyidx)
421 {
422 	struct ath10k_peer *peer;
423 	int i;
424 
425 	lockdep_assert_held(&ar->data_lock);
426 
427 	/* We don't know which vdev this peer belongs to,
428 	 * since WMI doesn't give us that information.
429 	 *
430 	 * FIXME: multi-bss needs to be handled.
431 	 */
432 	peer = ath10k_peer_find(ar, 0, addr);
433 	if (!peer)
434 		return false;
435 
436 	for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
437 		if (peer->keys[i] && peer->keys[i]->keyidx == keyidx)
438 			return true;
439 	}
440 
441 	return false;
442 }
443 
444 static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
445 				 struct ieee80211_key_conf *key)
446 {
447 	struct ath10k *ar = arvif->ar;
448 	struct ath10k_peer *peer;
449 	u8 addr[ETH_ALEN];
450 	int first_errno = 0;
451 	int ret;
452 	int i;
453 	u32 flags = 0;
454 
455 	lockdep_assert_held(&ar->conf_mutex);
456 
457 	for (;;) {
458 		/* ath10k_install_key() sleeps while waiting for the firmware, so
459 		 * we can't hold data_lock all the time; remove the keys incrementally */
460 		spin_lock_bh(&ar->data_lock);
461 		i = 0;
462 		list_for_each_entry(peer, &ar->peers, list) {
463 			for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
464 				if (peer->keys[i] == key) {
465 					ether_addr_copy(addr, peer->addr);
466 					peer->keys[i] = NULL;
467 					break;
468 				}
469 			}
470 
471 			if (i < ARRAY_SIZE(peer->keys))
472 				break;
473 		}
474 		spin_unlock_bh(&ar->data_lock);
475 
476 		if (i == ARRAY_SIZE(peer->keys))
477 			break;
478 		/* key flags are not required to delete the key */
479 		ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, flags);
480 		if (ret < 0 && first_errno == 0)
481 			first_errno = ret;
482 
483 		if (ret)
484 			ath10k_warn(ar, "failed to remove key for %pM: %d\n",
485 				    addr, ret);
486 	}
487 
488 	return first_errno;
489 }
490 
491 static int ath10k_mac_vif_update_wep_key(struct ath10k_vif *arvif,
492 					 struct ieee80211_key_conf *key)
493 {
494 	struct ath10k *ar = arvif->ar;
495 	struct ath10k_peer *peer;
496 	int ret;
497 
498 	lockdep_assert_held(&ar->conf_mutex);
499 
500 	list_for_each_entry(peer, &ar->peers, list) {
501 		if (ether_addr_equal(peer->addr, arvif->vif->addr))
502 			continue;
503 
504 		if (ether_addr_equal(peer->addr, arvif->bssid))
505 			continue;
506 
507 		if (peer->keys[key->keyidx] == key)
508 			continue;
509 
510 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vif vdev %i key %i needs update\n",
511 			   arvif->vdev_id, key->keyidx);
512 
513 		ret = ath10k_install_peer_wep_keys(arvif, peer->addr);
514 		if (ret) {
515 			ath10k_warn(ar, "failed to update wep keys on vdev %i for peer %pM: %d\n",
516 				    arvif->vdev_id, peer->addr, ret);
517 			return ret;
518 		}
519 	}
520 
521 	return 0;
522 }
523 
524 /*********************/
525 /* General utilities */
526 /*********************/
527 
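/* Map a cfg80211 channel definition onto the WMI phy mode expected by the
 * firmware when starting a vdev. Band/width combinations the firmware does
 * not handle here (5/10 MHz, and 80+80/160 MHz on this code path) fall
 * through to MODE_UNKNOWN and trigger a warning.
 */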
528 static inline enum wmi_phy_mode
529 chan_to_phymode(const struct cfg80211_chan_def *chandef)
530 {
531 	enum wmi_phy_mode phymode = MODE_UNKNOWN;
532 
533 	switch (chandef->chan->band) {
534 	case NL80211_BAND_2GHZ:
535 		switch (chandef->width) {
536 		case NL80211_CHAN_WIDTH_20_NOHT:
537 			if (chandef->chan->flags & IEEE80211_CHAN_NO_OFDM)
538 				phymode = MODE_11B;
539 			else
540 				phymode = MODE_11G;
541 			break;
542 		case NL80211_CHAN_WIDTH_20:
543 			phymode = MODE_11NG_HT20;
544 			break;
545 		case NL80211_CHAN_WIDTH_40:
546 			phymode = MODE_11NG_HT40;
547 			break;
548 		case NL80211_CHAN_WIDTH_5:
549 		case NL80211_CHAN_WIDTH_10:
550 		case NL80211_CHAN_WIDTH_80:
551 		case NL80211_CHAN_WIDTH_80P80:
552 		case NL80211_CHAN_WIDTH_160:
553 			phymode = MODE_UNKNOWN;
554 			break;
555 		}
556 		break;
557 	case NL80211_BAND_5GHZ:
558 		switch (chandef->width) {
559 		case NL80211_CHAN_WIDTH_20_NOHT:
560 			phymode = MODE_11A;
561 			break;
562 		case NL80211_CHAN_WIDTH_20:
563 			phymode = MODE_11NA_HT20;
564 			break;
565 		case NL80211_CHAN_WIDTH_40:
566 			phymode = MODE_11NA_HT40;
567 			break;
568 		case NL80211_CHAN_WIDTH_80:
569 			phymode = MODE_11AC_VHT80;
570 			break;
571 		case NL80211_CHAN_WIDTH_5:
572 		case NL80211_CHAN_WIDTH_10:
573 		case NL80211_CHAN_WIDTH_80P80:
574 		case NL80211_CHAN_WIDTH_160:
575 			phymode = MODE_UNKNOWN;
576 			break;
577 		}
578 		break;
579 	default:
580 		break;
581 	}
582 
583 	WARN_ON(phymode == MODE_UNKNOWN);
584 	return phymode;
585 }
586 
587 static u8 ath10k_parse_mpdudensity(u8 mpdudensity)
588 {
589 /*
590  * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
591  *   0 for no restriction
592  *   1 for 1/4 us
593  *   2 for 1/2 us
594  *   3 for 1 us
595  *   4 for 2 us
596  *   5 for 4 us
597  *   6 for 8 us
598  *   7 for 16 us
599  */
600 	switch (mpdudensity) {
601 	case 0:
602 		return 0;
603 	case 1:
604 	case 2:
605 	case 3:
606 	/* Our lower layer calculations limit our precision to
607 	 * 1 microsecond */
608 		return 1;
609 	case 4:
610 		return 2;
611 	case 5:
612 		return 4;
613 	case 6:
614 		return 8;
615 	case 7:
616 		return 16;
617 	default:
618 		return 0;
619 	}
620 }
621 
622 int ath10k_mac_vif_chan(struct ieee80211_vif *vif,
623 			struct cfg80211_chan_def *def)
624 {
625 	struct ieee80211_chanctx_conf *conf;
626 
627 	rcu_read_lock();
628 	conf = rcu_dereference(vif->chanctx_conf);
629 	if (!conf) {
630 		rcu_read_unlock();
631 		return -ENOENT;
632 	}
633 
634 	*def = conf->def;
635 	rcu_read_unlock();
636 
637 	return 0;
638 }
639 
640 static void ath10k_mac_num_chanctxs_iter(struct ieee80211_hw *hw,
641 					 struct ieee80211_chanctx_conf *conf,
642 					 void *data)
643 {
644 	int *num = data;
645 
646 	(*num)++;
647 }
648 
649 static int ath10k_mac_num_chanctxs(struct ath10k *ar)
650 {
651 	int num = 0;
652 
653 	ieee80211_iter_chan_contexts_atomic(ar->hw,
654 					    ath10k_mac_num_chanctxs_iter,
655 					    &num);
656 
657 	return num;
658 }
659 
660 static void
661 ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
662 				struct ieee80211_chanctx_conf *conf,
663 				void *data)
664 {
665 	struct cfg80211_chan_def **def = data;
666 
667 	*def = &conf->def;
668 }
669 
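/* Create a firmware peer entry for the given address. Each vdev implicitly
 * consumes one peer slot as well, which is why the check below adds the vif
 * count to ar->num_peers before comparing against the firmware limit. Peer
 * creation is asynchronous: the WMI command is followed by a wait for the
 * peer-created event and a lookup of the resulting ath10k_peer.
 */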
670 static int ath10k_peer_create(struct ath10k *ar,
671 			      struct ieee80211_vif *vif,
672 			      struct ieee80211_sta *sta,
673 			      u32 vdev_id,
674 			      const u8 *addr,
675 			      enum wmi_peer_type peer_type)
676 {
677 	struct ath10k_vif *arvif;
678 	struct ath10k_peer *peer;
679 	int num_peers = 0;
680 	int ret;
681 
682 	lockdep_assert_held(&ar->conf_mutex);
683 
684 	num_peers = ar->num_peers;
685 
686 	/* Each vdev consumes a peer entry as well */
687 	list_for_each_entry(arvif, &ar->arvifs, list)
688 		num_peers++;
689 
690 	if (num_peers >= ar->max_num_peers)
691 		return -ENOBUFS;
692 
693 	ret = ath10k_wmi_peer_create(ar, vdev_id, addr, peer_type);
694 	if (ret) {
695 		ath10k_warn(ar, "failed to create wmi peer %pM on vdev %i: %i\n",
696 			    addr, vdev_id, ret);
697 		return ret;
698 	}
699 
700 	ret = ath10k_wait_for_peer_created(ar, vdev_id, addr);
701 	if (ret) {
702 		ath10k_warn(ar, "failed to wait for created wmi peer %pM on vdev %i: %i\n",
703 			    addr, vdev_id, ret);
704 		return ret;
705 	}
706 
707 	spin_lock_bh(&ar->data_lock);
708 
709 	peer = ath10k_peer_find(ar, vdev_id, addr);
710 	if (!peer) {
711 		spin_unlock_bh(&ar->data_lock);
712 		ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n",
713 			    addr, vdev_id);
714 		ath10k_wmi_peer_delete(ar, vdev_id, addr);
715 		return -ENOENT;
716 	}
717 
718 	peer->vif = vif;
719 	peer->sta = sta;
720 
721 	spin_unlock_bh(&ar->data_lock);
722 
723 	ar->num_peers++;
724 
725 	return 0;
726 }
727 
728 static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
729 {
730 	struct ath10k *ar = arvif->ar;
731 	u32 param;
732 	int ret;
733 
734 	param = ar->wmi.pdev_param->sta_kickout_th;
735 	ret = ath10k_wmi_pdev_set_param(ar, param,
736 					ATH10K_KICKOUT_THRESHOLD);
737 	if (ret) {
738 		ath10k_warn(ar, "failed to set kickout threshold on vdev %i: %d\n",
739 			    arvif->vdev_id, ret);
740 		return ret;
741 	}
742 
743 	param = ar->wmi.vdev_param->ap_keepalive_min_idle_inactive_time_secs;
744 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
745 					ATH10K_KEEPALIVE_MIN_IDLE);
746 	if (ret) {
747 		ath10k_warn(ar, "failed to set keepalive minimum idle time on vdev %i: %d\n",
748 			    arvif->vdev_id, ret);
749 		return ret;
750 	}
751 
752 	param = ar->wmi.vdev_param->ap_keepalive_max_idle_inactive_time_secs;
753 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
754 					ATH10K_KEEPALIVE_MAX_IDLE);
755 	if (ret) {
756 		ath10k_warn(ar, "failed to set keepalive maximum idle time on vdev %i: %d\n",
757 			    arvif->vdev_id, ret);
758 		return ret;
759 	}
760 
761 	param = ar->wmi.vdev_param->ap_keepalive_max_unresponsive_time_secs;
762 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
763 					ATH10K_KEEPALIVE_MAX_UNRESPONSIVE);
764 	if (ret) {
765 		ath10k_warn(ar, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
766 			    arvif->vdev_id, ret);
767 		return ret;
768 	}
769 
770 	return 0;
771 }
772 
773 static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
774 {
775 	struct ath10k *ar = arvif->ar;
776 	u32 vdev_param;
777 
778 	vdev_param = ar->wmi.vdev_param->rts_threshold;
779 	return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
780 }
781 
782 static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
783 {
784 	int ret;
785 
786 	lockdep_assert_held(&ar->conf_mutex);
787 
788 	ret = ath10k_wmi_peer_delete(ar, vdev_id, addr);
789 	if (ret)
790 		return ret;
791 
792 	ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr);
793 	if (ret)
794 		return ret;
795 
796 	ar->num_peers--;
797 
798 	return 0;
799 }
800 
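/* Remove every peer still associated with the given vdev. This should only
 * find leftovers (hence the "stale peer" warnings), e.g. after a firmware
 * crash, and it also scrubs any dangling ar->peer_map references to the
 * peers being freed.
 */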
801 static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
802 {
803 	struct ath10k_peer *peer, *tmp;
804 	int peer_id;
805 	int i;
806 
807 	lockdep_assert_held(&ar->conf_mutex);
808 
809 	spin_lock_bh(&ar->data_lock);
810 	list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
811 		if (peer->vdev_id != vdev_id)
812 			continue;
813 
814 		ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n",
815 			    peer->addr, vdev_id);
816 
817 		for_each_set_bit(peer_id, peer->peer_ids,
818 				 ATH10K_MAX_NUM_PEER_IDS) {
819 			ar->peer_map[peer_id] = NULL;
820 		}
821 
822 		/* Double check that peer is properly un-referenced from
823 		 * the peer_map
824 		 */
825 		for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
826 			if (ar->peer_map[i] == peer) {
827 				ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %pK idx %d)\n",
828 					    peer->addr, peer, i);
829 				ar->peer_map[i] = NULL;
830 			}
831 		}
832 
833 		list_del(&peer->list);
834 		kfree(peer);
835 		ar->num_peers--;
836 	}
837 	spin_unlock_bh(&ar->data_lock);
838 }
839 
840 static void ath10k_peer_cleanup_all(struct ath10k *ar)
841 {
842 	struct ath10k_peer *peer, *tmp;
843 	int i;
844 
845 	lockdep_assert_held(&ar->conf_mutex);
846 
847 	spin_lock_bh(&ar->data_lock);
848 	list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
849 		list_del(&peer->list);
850 		kfree(peer);
851 	}
852 
853 	for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++)
854 		ar->peer_map[i] = NULL;
855 
856 	spin_unlock_bh(&ar->data_lock);
857 
858 	ar->num_peers = 0;
859 	ar->num_stations = 0;
860 }
861 
862 static int ath10k_mac_tdls_peer_update(struct ath10k *ar, u32 vdev_id,
863 				       struct ieee80211_sta *sta,
864 				       enum wmi_tdls_peer_state state)
865 {
866 	int ret;
867 	struct wmi_tdls_peer_update_cmd_arg arg = {};
868 	struct wmi_tdls_peer_capab_arg cap = {};
869 	struct wmi_channel_arg chan_arg = {};
870 
871 	lockdep_assert_held(&ar->conf_mutex);
872 
873 	arg.vdev_id = vdev_id;
874 	arg.peer_state = state;
875 	ether_addr_copy(arg.addr, sta->addr);
876 
877 	cap.peer_max_sp = sta->max_sp;
878 	cap.peer_uapsd_queues = sta->uapsd_queues;
879 
880 	if (state == WMI_TDLS_PEER_STATE_CONNECTED &&
881 	    !sta->tdls_initiator)
882 		cap.is_peer_responder = 1;
883 
884 	ret = ath10k_wmi_tdls_peer_update(ar, &arg, &cap, &chan_arg);
885 	if (ret) {
886 		ath10k_warn(ar, "failed to update tdls peer %pM on vdev %i: %i\n",
887 			    arg.addr, vdev_id, ret);
888 		return ret;
889 	}
890 
891 	return 0;
892 }
893 
894 /************************/
895 /* Interface management */
896 /************************/
897 
898 void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif)
899 {
900 	struct ath10k *ar = arvif->ar;
901 
902 	lockdep_assert_held(&ar->data_lock);
903 
904 	if (!arvif->beacon)
905 		return;
906 
907 	if (!arvif->beacon_buf)
908 		dma_unmap_single(ar->dev, ATH10K_SKB_CB(arvif->beacon)->paddr,
909 				 arvif->beacon->len, DMA_TO_DEVICE);
910 
911 	if (WARN_ON(arvif->beacon_state != ATH10K_BEACON_SCHEDULED &&
912 		    arvif->beacon_state != ATH10K_BEACON_SENT))
913 		return;
914 
915 	dev_kfree_skb_any(arvif->beacon);
916 
917 	arvif->beacon = NULL;
918 	arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
919 }
920 
921 static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif)
922 {
923 	struct ath10k *ar = arvif->ar;
924 
925 	lockdep_assert_held(&ar->data_lock);
926 
927 	ath10k_mac_vif_beacon_free(arvif);
928 
929 	if (arvif->beacon_buf) {
930 		dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
931 				  arvif->beacon_buf, arvif->beacon_paddr);
932 		arvif->beacon_buf = NULL;
933 	}
934 }
935 
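/* vdev start/restart/stop are asynchronous WMI operations; the firmware
 * response completes ar->vdev_setup_done. Wait for that completion, bounded
 * by ATH10K_VDEV_SETUP_TIMEOUT_HZ, and bail out early if a crash-flush is in
 * progress.
 */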
936 static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
937 {
938 	unsigned long time_left;
939 
940 	lockdep_assert_held(&ar->conf_mutex);
941 
942 	if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
943 		return -ESHUTDOWN;
944 
945 	time_left = wait_for_completion_timeout(&ar->vdev_setup_done,
946 						ATH10K_VDEV_SETUP_TIMEOUT_HZ);
947 	if (time_left == 0)
948 		return -ETIMEDOUT;
949 
950 	return 0;
951 }
952 
953 static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
954 {
955 	struct cfg80211_chan_def *chandef = NULL;
956 	struct ieee80211_channel *channel = NULL;
957 	struct wmi_vdev_start_request_arg arg = {};
958 	int ret = 0;
959 
960 	lockdep_assert_held(&ar->conf_mutex);
961 
962 	ieee80211_iter_chan_contexts_atomic(ar->hw,
963 					    ath10k_mac_get_any_chandef_iter,
964 					    &chandef);
965 	if (WARN_ON_ONCE(!chandef))
966 		return -ENOENT;
967 
968 	channel = chandef->chan;
969 
970 	arg.vdev_id = vdev_id;
971 	arg.channel.freq = channel->center_freq;
972 	arg.channel.band_center_freq1 = chandef->center_freq1;
973 
974 	/* TODO: set this up dynamically; what if we
975 	 * don't have any vifs? */
976 	arg.channel.mode = chan_to_phymode(chandef);
977 	arg.channel.chan_radar =
978 			!!(channel->flags & IEEE80211_CHAN_RADAR);
979 
980 	arg.channel.min_power = 0;
981 	arg.channel.max_power = channel->max_power * 2;
982 	arg.channel.max_reg_power = channel->max_reg_power * 2;
983 	arg.channel.max_antenna_gain = channel->max_antenna_gain * 2;
984 
985 	reinit_completion(&ar->vdev_setup_done);
986 
987 	ret = ath10k_wmi_vdev_start(ar, &arg);
988 	if (ret) {
989 		ath10k_warn(ar, "failed to request monitor vdev %i start: %d\n",
990 			    vdev_id, ret);
991 		return ret;
992 	}
993 
994 	ret = ath10k_vdev_setup_sync(ar);
995 	if (ret) {
996 		ath10k_warn(ar, "failed to synchronize setup for monitor vdev %i start: %d\n",
997 			    vdev_id, ret);
998 		return ret;
999 	}
1000 
1001 	ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
1002 	if (ret) {
1003 		ath10k_warn(ar, "failed to put up monitor vdev %i: %d\n",
1004 			    vdev_id, ret);
1005 		goto vdev_stop;
1006 	}
1007 
1008 	ar->monitor_vdev_id = vdev_id;
1009 
1010 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i started\n",
1011 		   ar->monitor_vdev_id);
1012 	return 0;
1013 
1014 vdev_stop:
1015 	ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
1016 	if (ret)
1017 		ath10k_warn(ar, "failed to stop monitor vdev %i after start failure: %d\n",
1018 			    ar->monitor_vdev_id, ret);
1019 
1020 	return ret;
1021 }
1022 
1023 static int ath10k_monitor_vdev_stop(struct ath10k *ar)
1024 {
1025 	int ret = 0;
1026 
1027 	lockdep_assert_held(&ar->conf_mutex);
1028 
1029 	ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
1030 	if (ret)
1031 		ath10k_warn(ar, "failed to put down monitor vdev %i: %d\n",
1032 			    ar->monitor_vdev_id, ret);
1033 
1034 	reinit_completion(&ar->vdev_setup_done);
1035 
1036 	ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
1037 	if (ret)
1038 		ath10k_warn(ar, "failed to request monitor vdev %i stop: %d\n",
1039 			    ar->monitor_vdev_id, ret);
1040 
1041 	ret = ath10k_vdev_setup_sync(ar);
1042 	if (ret)
1043 		ath10k_warn(ar, "failed to synchronize monitor vdev %i stop: %d\n",
1044 			    ar->monitor_vdev_id, ret);
1045 
1046 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n",
1047 		   ar->monitor_vdev_id);
1048 	return ret;
1049 }
1050 
1051 static int ath10k_monitor_vdev_create(struct ath10k *ar)
1052 {
1053 	int bit, ret = 0;
1054 
1055 	lockdep_assert_held(&ar->conf_mutex);
1056 
1057 	if (ar->free_vdev_map == 0) {
1058 		ath10k_warn(ar, "failed to find free vdev id for monitor vdev\n");
1059 		return -ENOMEM;
1060 	}
1061 
1062 	bit = __ffs64(ar->free_vdev_map);
1063 
1064 	ar->monitor_vdev_id = bit;
1065 
1066 	ret = ath10k_wmi_vdev_create(ar, ar->monitor_vdev_id,
1067 				     WMI_VDEV_TYPE_MONITOR,
1068 				     0, ar->mac_addr);
1069 	if (ret) {
1070 		ath10k_warn(ar, "failed to request monitor vdev %i creation: %d\n",
1071 			    ar->monitor_vdev_id, ret);
1072 		return ret;
1073 	}
1074 
1075 	ar->free_vdev_map &= ~(1LL << ar->monitor_vdev_id);
1076 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
1077 		   ar->monitor_vdev_id);
1078 
1079 	return 0;
1080 }
1081 
1082 static int ath10k_monitor_vdev_delete(struct ath10k *ar)
1083 {
1084 	int ret = 0;
1085 
1086 	lockdep_assert_held(&ar->conf_mutex);
1087 
1088 	ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
1089 	if (ret) {
1090 		ath10k_warn(ar, "failed to request wmi monitor vdev %i removal: %d\n",
1091 			    ar->monitor_vdev_id, ret);
1092 		return ret;
1093 	}
1094 
1095 	ar->free_vdev_map |= 1LL << ar->monitor_vdev_id;
1096 
1097 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
1098 		   ar->monitor_vdev_id);
1099 	return ret;
1100 }
1101 
1102 static int ath10k_monitor_start(struct ath10k *ar)
1103 {
1104 	int ret;
1105 
1106 	lockdep_assert_held(&ar->conf_mutex);
1107 
1108 	ret = ath10k_monitor_vdev_create(ar);
1109 	if (ret) {
1110 		ath10k_warn(ar, "failed to create monitor vdev: %d\n", ret);
1111 		return ret;
1112 	}
1113 
1114 	ret = ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id);
1115 	if (ret) {
1116 		ath10k_warn(ar, "failed to start monitor vdev: %d\n", ret);
1117 		ath10k_monitor_vdev_delete(ar);
1118 		return ret;
1119 	}
1120 
1121 	ar->monitor_started = true;
1122 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor started\n");
1123 
1124 	return 0;
1125 }
1126 
1127 static int ath10k_monitor_stop(struct ath10k *ar)
1128 {
1129 	int ret;
1130 
1131 	lockdep_assert_held(&ar->conf_mutex);
1132 
1133 	ret = ath10k_monitor_vdev_stop(ar);
1134 	if (ret) {
1135 		ath10k_warn(ar, "failed to stop monitor vdev: %d\n", ret);
1136 		return ret;
1137 	}
1138 
1139 	ret = ath10k_monitor_vdev_delete(ar);
1140 	if (ret) {
1141 		ath10k_warn(ar, "failed to delete monitor vdev: %d\n", ret);
1142 		return ret;
1143 	}
1144 
1145 	ar->monitor_started = false;
1146 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopped\n");
1147 
1148 	return 0;
1149 }
1150 
1151 static bool ath10k_mac_monitor_vdev_is_needed(struct ath10k *ar)
1152 {
1153 	int num_ctx;
1154 
1155 	/* At least one chanctx is required to derive a channel to start
1156 	 * monitor vdev on.
1157 	 */
1158 	num_ctx = ath10k_mac_num_chanctxs(ar);
1159 	if (num_ctx == 0)
1160 		return false;
1161 
1162 	/* If there's already an existing special monitor interface then don't
1163 	 * bother creating another monitor vdev.
1164 	 */
1165 	if (ar->monitor_arvif)
1166 		return false;
1167 
1168 	return ar->monitor ||
1169 	       ar->filter_flags & FIF_OTHER_BSS ||
1170 	       test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
1171 }
1172 
1173 static bool ath10k_mac_monitor_vdev_is_allowed(struct ath10k *ar)
1174 {
1175 	int num_ctx;
1176 
1177 	num_ctx = ath10k_mac_num_chanctxs(ar);
1178 
1179 	/* FIXME: Current interface combinations and cfg80211/mac80211 code
1180 	 * shouldn't allow this but make sure to prevent handling the following
1181 	 * case anyway since multi-channel DFS hasn't been tested at all.
1182 	 */
1183 	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags) && num_ctx > 1)
1184 		return false;
1185 
1186 	return true;
1187 }
1188 
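/* Reconcile the desired monitor state with what is currently running. A
 * monitor vdev is needed when monitor mode, FIF_OTHER_BSS filtering or CAC
 * is active and at least one channel context exists to run it on, but it is
 * not allowed during multi-channel CAC. Start or stop the monitor vdev
 * accordingly.
 */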
1189 static int ath10k_monitor_recalc(struct ath10k *ar)
1190 {
1191 	bool needed;
1192 	bool allowed;
1193 	int ret;
1194 
1195 	lockdep_assert_held(&ar->conf_mutex);
1196 
1197 	needed = ath10k_mac_monitor_vdev_is_needed(ar);
1198 	allowed = ath10k_mac_monitor_vdev_is_allowed(ar);
1199 
1200 	ath10k_dbg(ar, ATH10K_DBG_MAC,
1201 		   "mac monitor recalc started? %d needed? %d allowed? %d\n",
1202 		   ar->monitor_started, needed, allowed);
1203 
1204 	if (WARN_ON(needed && !allowed)) {
1205 		if (ar->monitor_started) {
1206 			ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopping disallowed monitor\n");
1207 
1208 			ret = ath10k_monitor_stop(ar);
1209 			if (ret)
1210 				ath10k_warn(ar, "failed to stop disallowed monitor: %d\n",
1211 					    ret);
1212 				/* not serious */
1213 		}
1214 
1215 		return -EPERM;
1216 	}
1217 
1218 	if (needed == ar->monitor_started)
1219 		return 0;
1220 
1221 	if (needed)
1222 		return ath10k_monitor_start(ar);
1223 	else
1224 		return ath10k_monitor_stop(ar);
1225 }
1226 
1227 static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
1228 {
1229 	struct ath10k *ar = arvif->ar;
1230 	u32 vdev_param, rts_cts = 0;
1231 
1232 	lockdep_assert_held(&ar->conf_mutex);
1233 
1234 	vdev_param = ar->wmi.vdev_param->enable_rtscts;
1235 
1236 	rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET);
1237 
1238 	if (arvif->num_legacy_stations > 0)
1239 		rts_cts |= SM(WMI_RTSCTS_ACROSS_SW_RETRIES,
1240 			      WMI_RTSCTS_PROFILE);
1241 	else
1242 		rts_cts |= SM(WMI_RTSCTS_FOR_SECOND_RATESERIES,
1243 			      WMI_RTSCTS_PROFILE);
1244 
1245 	return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
1246 					 rts_cts);
1247 }
1248 
1249 static int ath10k_start_cac(struct ath10k *ar)
1250 {
1251 	int ret;
1252 
1253 	lockdep_assert_held(&ar->conf_mutex);
1254 
1255 	set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
1256 
1257 	ret = ath10k_monitor_recalc(ar);
1258 	if (ret) {
1259 		ath10k_warn(ar, "failed to start monitor (cac): %d\n", ret);
1260 		clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
1261 		return ret;
1262 	}
1263 
1264 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n",
1265 		   ar->monitor_vdev_id);
1266 
1267 	return 0;
1268 }
1269 
1270 static int ath10k_stop_cac(struct ath10k *ar)
1271 {
1272 	lockdep_assert_held(&ar->conf_mutex);
1273 
1274 	/* CAC is not running - do nothing */
1275 	if (!test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags))
1276 		return 0;
1277 
1278 	clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
1279 	ath10k_monitor_stop(ar);
1280 
1281 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac finished\n");
1282 
1283 	return 0;
1284 }
1285 
1286 static void ath10k_mac_has_radar_iter(struct ieee80211_hw *hw,
1287 				      struct ieee80211_chanctx_conf *conf,
1288 				      void *data)
1289 {
1290 	bool *ret = data;
1291 
1292 	if (!*ret && conf->radar_enabled)
1293 		*ret = true;
1294 }
1295 
1296 static bool ath10k_mac_has_radar_enabled(struct ath10k *ar)
1297 {
1298 	bool has_radar = false;
1299 
1300 	ieee80211_iter_chan_contexts_atomic(ar->hw,
1301 					    ath10k_mac_has_radar_iter,
1302 					    &has_radar);
1303 
1304 	return has_radar;
1305 }
1306 
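/* Re-evaluate whether a CAC monitor vdev should be running: restart CAC only
 * when radar detection is enabled on some channel context and no regular
 * vdev has been started yet. If CAC cannot be started, report radar so the
 * channel is marked unavailable instead of being used without CAC.
 */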
1307 static void ath10k_recalc_radar_detection(struct ath10k *ar)
1308 {
1309 	int ret;
1310 
1311 	lockdep_assert_held(&ar->conf_mutex);
1312 
1313 	ath10k_stop_cac(ar);
1314 
1315 	if (!ath10k_mac_has_radar_enabled(ar))
1316 		return;
1317 
1318 	if (ar->num_started_vdevs > 0)
1319 		return;
1320 
1321 	ret = ath10k_start_cac(ar);
1322 	if (ret) {
1323 		/*
1324 		 * Not possible to start CAC on current channel so starting
1325 		 * radiation is not allowed, make this channel DFS_UNAVAILABLE
1326 		 * by indicating that radar was detected.
1327 		 */
1328 		ath10k_warn(ar, "failed to start CAC: %d\n", ret);
1329 		ieee80211_radar_detected(ar->hw);
1330 	}
1331 }
1332 
1333 static int ath10k_vdev_stop(struct ath10k_vif *arvif)
1334 {
1335 	struct ath10k *ar = arvif->ar;
1336 	int ret;
1337 
1338 	lockdep_assert_held(&ar->conf_mutex);
1339 
1340 	reinit_completion(&ar->vdev_setup_done);
1341 
1342 	ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
1343 	if (ret) {
1344 		ath10k_warn(ar, "failed to stop WMI vdev %i: %d\n",
1345 			    arvif->vdev_id, ret);
1346 		return ret;
1347 	}
1348 
1349 	ret = ath10k_vdev_setup_sync(ar);
1350 	if (ret) {
1351 		ath10k_warn(ar, "failed to synchronize setup for vdev %i: %d\n",
1352 			    arvif->vdev_id, ret);
1353 		return ret;
1354 	}
1355 
1356 	WARN_ON(ar->num_started_vdevs == 0);
1357 
1358 	if (ar->num_started_vdevs != 0) {
1359 		ar->num_started_vdevs--;
1360 		ath10k_recalc_radar_detection(ar);
1361 	}
1362 
1363 	return ret;
1364 }
1365 
1366 static int ath10k_vdev_start_restart(struct ath10k_vif *arvif,
1367 				     const struct cfg80211_chan_def *chandef,
1368 				     bool restart)
1369 {
1370 	struct ath10k *ar = arvif->ar;
1371 	struct wmi_vdev_start_request_arg arg = {};
1372 	int ret = 0;
1373 
1374 	lockdep_assert_held(&ar->conf_mutex);
1375 
1376 	reinit_completion(&ar->vdev_setup_done);
1377 
1378 	arg.vdev_id = arvif->vdev_id;
1379 	arg.dtim_period = arvif->dtim_period;
1380 	arg.bcn_intval = arvif->beacon_interval;
1381 
1382 	arg.channel.freq = chandef->chan->center_freq;
1383 	arg.channel.band_center_freq1 = chandef->center_freq1;
1384 	arg.channel.mode = chan_to_phymode(chandef);
1385 
1386 	arg.channel.min_power = 0;
1387 	arg.channel.max_power = chandef->chan->max_power * 2;
1388 	arg.channel.max_reg_power = chandef->chan->max_reg_power * 2;
1389 	arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
1390 
1391 	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
1392 		arg.ssid = arvif->u.ap.ssid;
1393 		arg.ssid_len = arvif->u.ap.ssid_len;
1394 		arg.hidden_ssid = arvif->u.ap.hidden_ssid;
1395 
1396 		/* For now allow DFS for AP mode */
1397 		arg.channel.chan_radar =
1398 			!!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
1399 	} else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
1400 		arg.ssid = arvif->vif->bss_conf.ssid;
1401 		arg.ssid_len = arvif->vif->bss_conf.ssid_len;
1402 	}
1403 
1404 	ath10k_dbg(ar, ATH10K_DBG_MAC,
1405 		   "mac vdev %d start center_freq %d phymode %s\n",
1406 		   arg.vdev_id, arg.channel.freq,
1407 		   ath10k_wmi_phymode_str(arg.channel.mode));
1408 
1409 	if (restart)
1410 		ret = ath10k_wmi_vdev_restart(ar, &arg);
1411 	else
1412 		ret = ath10k_wmi_vdev_start(ar, &arg);
1413 
1414 	if (ret) {
1415 		ath10k_warn(ar, "failed to start WMI vdev %i: %d\n",
1416 			    arg.vdev_id, ret);
1417 		return ret;
1418 	}
1419 
1420 	ret = ath10k_vdev_setup_sync(ar);
1421 	if (ret) {
1422 		ath10k_warn(ar,
1423 			    "failed to synchronize setup for vdev %i restart %d: %d\n",
1424 			    arg.vdev_id, restart, ret);
1425 		return ret;
1426 	}
1427 
1428 	ar->num_started_vdevs++;
1429 	ath10k_recalc_radar_detection(ar);
1430 
1431 	return ret;
1432 }
1433 
1434 static int ath10k_vdev_start(struct ath10k_vif *arvif,
1435 			     const struct cfg80211_chan_def *def)
1436 {
1437 	return ath10k_vdev_start_restart(arvif, def, false);
1438 }
1439 
1440 static int ath10k_vdev_restart(struct ath10k_vif *arvif,
1441 			       const struct cfg80211_chan_def *def)
1442 {
1443 	return ath10k_vdev_start_restart(arvif, def, true);
1444 }
1445 
1446 static int ath10k_mac_setup_bcn_p2p_ie(struct ath10k_vif *arvif,
1447 				       struct sk_buff *bcn)
1448 {
1449 	struct ath10k *ar = arvif->ar;
1450 	struct ieee80211_mgmt *mgmt;
1451 	const u8 *p2p_ie;
1452 	int ret;
1453 
1454 	if (arvif->vif->type != NL80211_IFTYPE_AP || !arvif->vif->p2p)
1455 		return 0;
1456 
1457 	mgmt = (void *)bcn->data;
1458 	p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
1459 					 mgmt->u.beacon.variable,
1460 					 bcn->len - (mgmt->u.beacon.variable -
1461 						     bcn->data));
1462 	if (!p2p_ie)
1463 		return -ENOENT;
1464 
1465 	ret = ath10k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie);
1466 	if (ret) {
1467 		ath10k_warn(ar, "failed to submit p2p go bcn ie for vdev %i: %d\n",
1468 			    arvif->vdev_id, ret);
1469 		return ret;
1470 	}
1471 
1472 	return 0;
1473 }
1474 
1475 static int ath10k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui,
1476 				       u8 oui_type, size_t ie_offset)
1477 {
1478 	size_t len;
1479 	const u8 *next;
1480 	const u8 *end;
1481 	u8 *ie;
1482 
1483 	if (WARN_ON(skb->len < ie_offset))
1484 		return -EINVAL;
1485 
1486 	ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
1487 					   skb->data + ie_offset,
1488 					   skb->len - ie_offset);
1489 	if (!ie)
1490 		return -ENOENT;
1491 
1492 	len = ie[1] + 2;
1493 	end = skb->data + skb->len;
1494 	next = ie + len;
1495 
1496 	if (WARN_ON(next > end))
1497 		return -EINVAL;
1498 
1499 	memmove(ie, next, end - next);
1500 	skb_trim(skb, skb->len - len);
1501 
1502 	return 0;
1503 }
1504 
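/* With beacon offload (WMI_SERVICE_BEACON_OFFLOAD) the firmware generates
 * beacons from a template. Fetch the template from mac80211, hand the P2P IE
 * to the firmware separately, strip it from the template so it is not sent
 * twice, and install the result with the beacon template WMI command.
 */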
1505 static int ath10k_mac_setup_bcn_tmpl(struct ath10k_vif *arvif)
1506 {
1507 	struct ath10k *ar = arvif->ar;
1508 	struct ieee80211_hw *hw = ar->hw;
1509 	struct ieee80211_vif *vif = arvif->vif;
1510 	struct ieee80211_mutable_offsets offs = {};
1511 	struct sk_buff *bcn;
1512 	int ret;
1513 
1514 	if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
1515 		return 0;
1516 
1517 	if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
1518 	    arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
1519 		return 0;
1520 
1521 	bcn = ieee80211_beacon_get_template(hw, vif, &offs);
1522 	if (!bcn) {
1523 		ath10k_warn(ar, "failed to get beacon template from mac80211\n");
1524 		return -EPERM;
1525 	}
1526 
1527 	ret = ath10k_mac_setup_bcn_p2p_ie(arvif, bcn);
1528 	if (ret) {
1529 		ath10k_warn(ar, "failed to setup p2p go bcn ie: %d\n", ret);
1530 		kfree_skb(bcn);
1531 		return ret;
1532 	}
1533 
1534 	/* P2P IE is inserted by firmware automatically (as configured above)
1535 	 * so remove it from the base beacon template to avoid duplicate P2P
1536 	 * IEs in beacon frames.
1537 	 */
1538 	ath10k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
1539 				    offsetof(struct ieee80211_mgmt,
1540 					     u.beacon.variable));
1541 
1542 	ret = ath10k_wmi_bcn_tmpl(ar, arvif->vdev_id, offs.tim_offset, bcn, 0,
1543 				  0, NULL, 0);
1544 	kfree_skb(bcn);
1545 
1546 	if (ret) {
1547 		ath10k_warn(ar, "failed to submit beacon template command: %d\n",
1548 			    ret);
1549 		return ret;
1550 	}
1551 
1552 	return 0;
1553 }
1554 
1555 static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif)
1556 {
1557 	struct ath10k *ar = arvif->ar;
1558 	struct ieee80211_hw *hw = ar->hw;
1559 	struct ieee80211_vif *vif = arvif->vif;
1560 	struct sk_buff *prb;
1561 	int ret;
1562 
1563 	if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
1564 		return 0;
1565 
1566 	if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
1567 		return 0;
1568 
1569 	prb = ieee80211_proberesp_get(hw, vif);
1570 	if (!prb) {
1571 		ath10k_warn(ar, "failed to get probe resp template from mac80211\n");
1572 		return -EPERM;
1573 	}
1574 
1575 	ret = ath10k_wmi_prb_tmpl(ar, arvif->vdev_id, prb);
1576 	kfree_skb(prb);
1577 
1578 	if (ret) {
1579 		ath10k_warn(ar, "failed to submit probe resp template command: %d\n",
1580 			    ret);
1581 		return ret;
1582 	}
1583 
1584 	return 0;
1585 }
1586 
1587 static int ath10k_mac_vif_fix_hidden_ssid(struct ath10k_vif *arvif)
1588 {
1589 	struct ath10k *ar = arvif->ar;
1590 	struct cfg80211_chan_def def;
1591 	int ret;
1592 
1593 	/* When the vdev is originally started during assign_vif_chanctx() some
1594 	 * information is missing, notably SSID. Firmware revisions with beacon
1595 	 * offloading require the SSID to be provided during vdev (re)start to
1596 	 * handle hidden SSID properly.
1597 	 *
1598 	 * Vdev restart must be done after vdev has been both started and
1599 	 * upped. Otherwise some firmware revisions (at least 10.2) fail to
1600 	 * deliver vdev restart response event causing timeouts during vdev
1601 	 * syncing in ath10k.
1602 	 *
1603 	 * Note: The vdev down/up and template reinstallation could be skipped
1604 	 * since only wmi-tlv firmware is known to have beacon offload and
1605 	 * wmi-tlv doesn't seem to misbehave like 10.2 wrt vdev restart
1606 	 * response delivery. It's probably more robust to keep it as is.
1607 	 */
1608 	if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
1609 		return 0;
1610 
1611 	if (WARN_ON(!arvif->is_started))
1612 		return -EINVAL;
1613 
1614 	if (WARN_ON(!arvif->is_up))
1615 		return -EINVAL;
1616 
1617 	if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
1618 		return -EINVAL;
1619 
1620 	ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
1621 	if (ret) {
1622 		ath10k_warn(ar, "failed to bring down ap vdev %i: %d\n",
1623 			    arvif->vdev_id, ret);
1624 		return ret;
1625 	}
1626 
1627 	/* Vdev down resets beacon & presp templates. Reinstall them. Otherwise
1628 	 * firmware will crash upon vdev up.
1629 	 */
1630 
1631 	ret = ath10k_mac_setup_bcn_tmpl(arvif);
1632 	if (ret) {
1633 		ath10k_warn(ar, "failed to update beacon template: %d\n", ret);
1634 		return ret;
1635 	}
1636 
1637 	ret = ath10k_mac_setup_prb_tmpl(arvif);
1638 	if (ret) {
1639 		ath10k_warn(ar, "failed to update presp template: %d\n", ret);
1640 		return ret;
1641 	}
1642 
1643 	ret = ath10k_vdev_restart(arvif, &def);
1644 	if (ret) {
1645 		ath10k_warn(ar, "failed to restart ap vdev %i: %d\n",
1646 			    arvif->vdev_id, ret);
1647 		return ret;
1648 	}
1649 
1650 	ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
1651 				 arvif->bssid);
1652 	if (ret) {
1653 		ath10k_warn(ar, "failed to bring up ap vdev %i: %d\n",
1654 			    arvif->vdev_id, ret);
1655 		return ret;
1656 	}
1657 
1658 	return 0;
1659 }
1660 
1661 static void ath10k_control_beaconing(struct ath10k_vif *arvif,
1662 				     struct ieee80211_bss_conf *info)
1663 {
1664 	struct ath10k *ar = arvif->ar;
1665 	int ret = 0;
1666 
1667 	lockdep_assert_held(&arvif->ar->conf_mutex);
1668 
1669 	if (!info->enable_beacon) {
1670 		ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
1671 		if (ret)
1672 			ath10k_warn(ar, "failed to down vdev_id %i: %d\n",
1673 				    arvif->vdev_id, ret);
1674 
1675 		arvif->is_up = false;
1676 
1677 		spin_lock_bh(&arvif->ar->data_lock);
1678 		ath10k_mac_vif_beacon_free(arvif);
1679 		spin_unlock_bh(&arvif->ar->data_lock);
1680 
1681 		return;
1682 	}
1683 
1684 	arvif->tx_seq_no = 0x1000;
1685 
1686 	arvif->aid = 0;
1687 	ether_addr_copy(arvif->bssid, info->bssid);
1688 
1689 	ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
1690 				 arvif->bssid);
1691 	if (ret) {
1692 		ath10k_warn(ar, "failed to bring up vdev %d: %i\n",
1693 			    arvif->vdev_id, ret);
1694 		return;
1695 	}
1696 
1697 	arvif->is_up = true;
1698 
1699 	ret = ath10k_mac_vif_fix_hidden_ssid(arvif);
1700 	if (ret) {
1701 		ath10k_warn(ar, "failed to fix hidden ssid for vdev %i, expect trouble: %d\n",
1702 			    arvif->vdev_id, ret);
1703 		return;
1704 	}
1705 
1706 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
1707 }
1708 
1709 static void ath10k_control_ibss(struct ath10k_vif *arvif,
1710 				struct ieee80211_bss_conf *info,
1711 				const u8 self_peer[ETH_ALEN])
1712 {
1713 	struct ath10k *ar = arvif->ar;
1714 	u32 vdev_param;
1715 	int ret = 0;
1716 
1717 	lockdep_assert_held(&arvif->ar->conf_mutex);
1718 
1719 	if (!info->ibss_joined) {
1720 		if (is_zero_ether_addr(arvif->bssid))
1721 			return;
1722 
1723 		eth_zero_addr(arvif->bssid);
1724 
1725 		return;
1726 	}
1727 
1728 	vdev_param = arvif->ar->wmi.vdev_param->atim_window;
1729 	ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param,
1730 					ATH10K_DEFAULT_ATIM);
1731 	if (ret)
1732 		ath10k_warn(ar, "failed to set IBSS ATIM for vdev %d: %d\n",
1733 			    arvif->vdev_id, ret);
1734 }
1735 
1736 static int ath10k_mac_vif_recalc_ps_wake_threshold(struct ath10k_vif *arvif)
1737 {
1738 	struct ath10k *ar = arvif->ar;
1739 	u32 param;
1740 	u32 value;
1741 	int ret;
1742 
1743 	lockdep_assert_held(&arvif->ar->conf_mutex);
1744 
1745 	if (arvif->u.sta.uapsd)
1746 		value = WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER;
1747 	else
1748 		value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
1749 
1750 	param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
1751 	ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, value);
1752 	if (ret) {
1753 		ath10k_warn(ar, "failed to submit ps wake threshold %u on vdev %i: %d\n",
1754 			    value, arvif->vdev_id, ret);
1755 		return ret;
1756 	}
1757 
1758 	return 0;
1759 }
1760 
1761 static int ath10k_mac_vif_recalc_ps_poll_count(struct ath10k_vif *arvif)
1762 {
1763 	struct ath10k *ar = arvif->ar;
1764 	u32 param;
1765 	u32 value;
1766 	int ret;
1767 
1768 	lockdep_assert_held(&arvif->ar->conf_mutex);
1769 
1770 	if (arvif->u.sta.uapsd)
1771 		value = WMI_STA_PS_PSPOLL_COUNT_UAPSD;
1772 	else
1773 		value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
1774 
1775 	param = WMI_STA_PS_PARAM_PSPOLL_COUNT;
1776 	ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
1777 					  param, value);
1778 	if (ret) {
1779 		ath10k_warn(ar, "failed to submit ps poll count %u on vdev %i: %d\n",
1780 			    value, arvif->vdev_id, ret);
1781 		return ret;
1782 	}
1783 
1784 	return 0;
1785 }
1786 
1787 static int ath10k_mac_num_vifs_started(struct ath10k *ar)
1788 {
1789 	struct ath10k_vif *arvif;
1790 	int num = 0;
1791 
1792 	lockdep_assert_held(&ar->conf_mutex);
1793 
1794 	list_for_each_entry(arvif, &ar->arvifs, list)
1795 		if (arvif->is_started)
1796 			num++;
1797 
1798 	return num;
1799 }
1800 
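/* Program station powersave for a STA vif. PS is force-enabled for vdevs
 * that are not started (firmware reportedly consumes more power otherwise)
 * and refused when several vdevs are started on firmware without
 * ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT; otherwise mac80211's dynamic PS
 * timeout (or the beacon interval, if that is zero) becomes the firmware
 * inactivity time.
 */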
1801 static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
1802 {
1803 	struct ath10k *ar = arvif->ar;
1804 	struct ieee80211_vif *vif = arvif->vif;
1805 	struct ieee80211_conf *conf = &ar->hw->conf;
1806 	enum wmi_sta_powersave_param param;
1807 	enum wmi_sta_ps_mode psmode;
1808 	int ret;
1809 	int ps_timeout;
1810 	bool enable_ps;
1811 
1812 	lockdep_assert_held(&arvif->ar->conf_mutex);
1813 
1814 	if (arvif->vif->type != NL80211_IFTYPE_STATION)
1815 		return 0;
1816 
1817 	enable_ps = arvif->ps;
1818 
1819 	if (enable_ps && ath10k_mac_num_vifs_started(ar) > 1 &&
1820 	    !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT,
1821 		      ar->running_fw->fw_file.fw_features)) {
1822 		ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n",
1823 			    arvif->vdev_id);
1824 		enable_ps = false;
1825 	}
1826 
1827 	if (!arvif->is_started) {
1828 		/* mac80211 can update vif powersave state while disconnected.
1829 		 * Firmware doesn't behave nicely and consumes more power than
1830 		 * necessary if PS is disabled on a non-started vdev. Hence
1831 		 * force-enable PS for non-running vdevs.
1832 		 */
1833 		psmode = WMI_STA_PS_MODE_ENABLED;
1834 	} else if (enable_ps) {
1835 		psmode = WMI_STA_PS_MODE_ENABLED;
1836 		param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
1837 
1838 		ps_timeout = conf->dynamic_ps_timeout;
1839 		if (ps_timeout == 0) {
1840 			/* Firmware doesn't like 0 */
1841 			ps_timeout = ieee80211_tu_to_usec(
1842 				vif->bss_conf.beacon_int) / 1000;
1843 		}
1844 
1845 		ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
1846 						  ps_timeout);
1847 		if (ret) {
1848 			ath10k_warn(ar, "failed to set inactivity time for vdev %d: %i\n",
1849 				    arvif->vdev_id, ret);
1850 			return ret;
1851 		}
1852 	} else {
1853 		psmode = WMI_STA_PS_MODE_DISABLED;
1854 	}
1855 
1856 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d psmode %s\n",
1857 		   arvif->vdev_id, psmode ? "enable" : "disable");
1858 
1859 	ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode);
1860 	if (ret) {
1861 		ath10k_warn(ar, "failed to set PS Mode %d for vdev %d: %d\n",
1862 			    psmode, arvif->vdev_id, ret);
1863 		return ret;
1864 	}
1865 
1866 	return 0;
1867 }
1868 
1869 static int ath10k_mac_vif_disable_keepalive(struct ath10k_vif *arvif)
1870 {
1871 	struct ath10k *ar = arvif->ar;
1872 	struct wmi_sta_keepalive_arg arg = {};
1873 	int ret;
1874 
1875 	lockdep_assert_held(&arvif->ar->conf_mutex);
1876 
1877 	if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
1878 		return 0;
1879 
1880 	if (!test_bit(WMI_SERVICE_STA_KEEP_ALIVE, ar->wmi.svc_map))
1881 		return 0;
1882 
1883 	/* Some firmware revisions have a bug and ignore the `enabled` field.
1884 	 * Instead use the interval to disable the keepalive.
1885 	 */
1886 	arg.vdev_id = arvif->vdev_id;
1887 	arg.enabled = 1;
1888 	arg.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME;
1889 	arg.interval = WMI_STA_KEEPALIVE_INTERVAL_DISABLE;
1890 
1891 	ret = ath10k_wmi_sta_keepalive(ar, &arg);
1892 	if (ret) {
1893 		ath10k_warn(ar, "failed to submit keepalive on vdev %i: %d\n",
1894 			    arvif->vdev_id, ret);
1895 		return ret;
1896 	}
1897 
1898 	return 0;
1899 }
1900 
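/* Channel switch handling for AP vdevs with offloaded beacons: step the CSA
 * counter and reinstall the beacon/probe-response templates so the firmware
 * advertises the updated count, or finalize the switch once the countdown
 * has completed.
 */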
1901 static void ath10k_mac_vif_ap_csa_count_down(struct ath10k_vif *arvif)
1902 {
1903 	struct ath10k *ar = arvif->ar;
1904 	struct ieee80211_vif *vif = arvif->vif;
1905 	int ret;
1906 
1907 	lockdep_assert_held(&arvif->ar->conf_mutex);
1908 
1909 	if (WARN_ON(!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)))
1910 		return;
1911 
1912 	if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
1913 		return;
1914 
1915 	if (!vif->csa_active)
1916 		return;
1917 
1918 	if (!arvif->is_up)
1919 		return;
1920 
1921 	if (!ieee80211_csa_is_complete(vif)) {
1922 		ieee80211_csa_update_counter(vif);
1923 
1924 		ret = ath10k_mac_setup_bcn_tmpl(arvif);
1925 		if (ret)
1926 			ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
1927 				    ret);
1928 
1929 		ret = ath10k_mac_setup_prb_tmpl(arvif);
1930 		if (ret)
1931 			ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
1932 				    ret);
1933 	} else {
1934 		ieee80211_csa_finish(vif);
1935 	}
1936 }
1937 
1938 static void ath10k_mac_vif_ap_csa_work(struct work_struct *work)
1939 {
1940 	struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
1941 						ap_csa_work);
1942 	struct ath10k *ar = arvif->ar;
1943 
1944 	mutex_lock(&ar->conf_mutex);
1945 	ath10k_mac_vif_ap_csa_count_down(arvif);
1946 	mutex_unlock(&ar->conf_mutex);
1947 }
1948 
1949 static void ath10k_mac_handle_beacon_iter(void *data, u8 *mac,
1950 					  struct ieee80211_vif *vif)
1951 {
1952 	struct sk_buff *skb = data;
1953 	struct ieee80211_mgmt *mgmt = (void *)skb->data;
1954 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
1955 
1956 	if (vif->type != NL80211_IFTYPE_STATION)
1957 		return;
1958 
1959 	if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid))
1960 		return;
1961 
1962 	cancel_delayed_work(&arvif->connection_loss_work);
1963 }
1964 
1965 void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb)
1966 {
1967 	ieee80211_iterate_active_interfaces_atomic(ar->hw,
1968 						   IEEE80211_IFACE_ITER_NORMAL,
1969 						   ath10k_mac_handle_beacon_iter,
1970 						   skb);
1971 }
1972 
1973 static void ath10k_mac_handle_beacon_miss_iter(void *data, u8 *mac,
1974 					       struct ieee80211_vif *vif)
1975 {
1976 	u32 *vdev_id = data;
1977 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
1978 	struct ath10k *ar = arvif->ar;
1979 	struct ieee80211_hw *hw = ar->hw;
1980 
1981 	if (arvif->vdev_id != *vdev_id)
1982 		return;
1983 
1984 	if (!arvif->is_up)
1985 		return;
1986 
1987 	ieee80211_beacon_loss(vif);
1988 
1989 	/* Firmware doesn't report beacon loss events repeatedly. If AP probe
1990 	 * (done by mac80211) succeeds but beacons do not resume then it
1991 	 * doesn't make sense to continue operation. Queue connection loss work
1992 	 * which can be cancelled when beacon is received.
1993 	 */
1994 	ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work,
1995 				     ATH10K_CONNECTION_LOSS_HZ);
1996 }
1997 
1998 void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id)
1999 {
2000 	ieee80211_iterate_active_interfaces_atomic(ar->hw,
2001 						   IEEE80211_IFACE_ITER_NORMAL,
2002 						   ath10k_mac_handle_beacon_miss_iter,
2003 						   &vdev_id);
2004 }
2005 
2006 static void ath10k_mac_vif_sta_connection_loss_work(struct work_struct *work)
2007 {
2008 	struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
2009 						connection_loss_work.work);
2010 	struct ieee80211_vif *vif = arvif->vif;
2011 
2012 	if (!arvif->is_up)
2013 		return;
2014 
2015 	ieee80211_connection_loss(vif);
2016 }
2017 
2018 /**********************/
2019 /* Station management */
2020 /**********************/
2021 
2022 static u32 ath10k_peer_assoc_h_listen_intval(struct ath10k *ar,
2023 					     struct ieee80211_vif *vif)
2024 {
2025 	/* Some firmware revisions have unstable STA powersave when the listen
2026 	 * interval is set too high (e.g. 5). The symptom is that the firmware
2027 	 * doesn't generate NullFunc frames properly even if buffered frames
2028 	 * have been indicated in the Beacon TIM, and it seldom wakes up to pull
2029 	 * them. Often, pinging the device from the AP simply fails.
2030 	 *
2031 	 * As a workaround set it to 1.
2032 	 */
2033 	if (vif->type == NL80211_IFTYPE_STATION)
2034 		return 1;
2035 
2036 	return ar->hw->conf.listen_interval;
2037 }
2038 
2039 static void ath10k_peer_assoc_h_basic(struct ath10k *ar,
2040 				      struct ieee80211_vif *vif,
2041 				      struct ieee80211_sta *sta,
2042 				      struct wmi_peer_assoc_complete_arg *arg)
2043 {
2044 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2045 	u32 aid;
2046 
2047 	lockdep_assert_held(&ar->conf_mutex);
2048 
2049 	if (vif->type == NL80211_IFTYPE_STATION)
2050 		aid = vif->bss_conf.aid;
2051 	else
2052 		aid = sta->aid;
2053 
2054 	ether_addr_copy(arg->addr, sta->addr);
2055 	arg->vdev_id = arvif->vdev_id;
2056 	arg->peer_aid = aid;
2057 	arg->peer_flags |= arvif->ar->wmi.peer_flags->auth;
2058 	arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif);
2059 	arg->peer_num_spatial_streams = 1;
2060 	arg->peer_caps = vif->bss_conf.assoc_capability;
2061 }
2062 
2063 static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
2064 				       struct ieee80211_vif *vif,
2065 				       struct ieee80211_sta *sta,
2066 				       struct wmi_peer_assoc_complete_arg *arg)
2067 {
2068 	struct ieee80211_bss_conf *info = &vif->bss_conf;
2069 	struct cfg80211_chan_def def;
2070 	struct cfg80211_bss *bss;
2071 	const u8 *rsnie = NULL;
2072 	const u8 *wpaie = NULL;
2073 
2074 	lockdep_assert_held(&ar->conf_mutex);
2075 
2076 	if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2077 		return;
2078 
2079 	bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0,
2080 			       IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
2081 	if (bss) {
2082 		const struct cfg80211_bss_ies *ies;
2083 
2084 		rcu_read_lock();
2085 		rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN);
2086 
2087 		ies = rcu_dereference(bss->ies);
2088 
2089 		wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
2090 						WLAN_OUI_TYPE_MICROSOFT_WPA,
2091 						ies->data,
2092 						ies->len);
2093 		rcu_read_unlock();
2094 		cfg80211_put_bss(ar->hw->wiphy, bss);
2095 	}
2096 
2097 	/* FIXME: is keying this off the RSN IE / WPA IE the correct approach? */
2098 	if (rsnie || wpaie) {
2099 		ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__);
2100 		arg->peer_flags |= ar->wmi.peer_flags->need_ptk_4_way;
2101 	}
2102 
2103 	if (wpaie) {
2104 		ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__);
2105 		arg->peer_flags |= ar->wmi.peer_flags->need_gtk_2_way;
2106 	}
2107 
2108 	if (sta->mfp &&
2109 	    test_bit(ATH10K_FW_FEATURE_MFP_SUPPORT,
2110 		     ar->running_fw->fw_file.fw_features)) {
2111 		arg->peer_flags |= ar->wmi.peer_flags->pmf;
2112 	}
2113 }
2114 
2115 static void ath10k_peer_assoc_h_rates(struct ath10k *ar,
2116 				      struct ieee80211_vif *vif,
2117 				      struct ieee80211_sta *sta,
2118 				      struct wmi_peer_assoc_complete_arg *arg)
2119 {
2120 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2121 	struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
2122 	struct cfg80211_chan_def def;
2123 	const struct ieee80211_supported_band *sband;
2124 	const struct ieee80211_rate *rates;
2125 	enum nl80211_band band;
2126 	u32 ratemask;
2127 	u8 rate;
2128 	int i;
2129 
2130 	lockdep_assert_held(&ar->conf_mutex);
2131 
2132 	if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2133 		return;
2134 
2135 	band = def.chan->band;
2136 	sband = ar->hw->wiphy->bands[band];
2137 	ratemask = sta->supp_rates[band];
2138 	ratemask &= arvif->bitrate_mask.control[band].legacy;
2139 	rates = sband->bitrates;
2140 
2141 	rateset->num_rates = 0;
2142 
2143 	for (i = 0; i < 32; i++, ratemask >>= 1, rates++) {
2144 		if (!(ratemask & 1))
2145 			continue;
2146 
2147 		rate = ath10k_mac_bitrate_to_rate(rates->bitrate);
2148 		rateset->rates[rateset->num_rates] = rate;
2149 		rateset->num_rates++;
2150 	}
2151 }
2152 
2153 static bool
2154 ath10k_peer_assoc_h_ht_masked(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
2155 {
2156 	int nss;
2157 
2158 	for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++)
2159 		if (ht_mcs_mask[nss])
2160 			return false;
2161 
2162 	return true;
2163 }
2164 
2165 static bool
2166 ath10k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
2167 {
2168 	int nss;
2169 
2170 	for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++)
2171 		if (vht_mcs_mask[nss])
2172 			return false;
2173 
2174 	return true;
2175 }
2176 
2177 static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
2178 				   struct ieee80211_vif *vif,
2179 				   struct ieee80211_sta *sta,
2180 				   struct wmi_peer_assoc_complete_arg *arg)
2181 {
2182 	const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2183 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2184 	struct cfg80211_chan_def def;
2185 	enum nl80211_band band;
2186 	const u8 *ht_mcs_mask;
2187 	const u16 *vht_mcs_mask;
2188 	int i, n;
2189 	u8 max_nss;
2190 	u32 stbc;
2191 
2192 	lockdep_assert_held(&ar->conf_mutex);
2193 
2194 	if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2195 		return;
2196 
2197 	if (!ht_cap->ht_supported)
2198 		return;
2199 
2200 	band = def.chan->band;
2201 	ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
2202 	vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
2203 
2204 	if (ath10k_peer_assoc_h_ht_masked(ht_mcs_mask) &&
2205 	    ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
2206 		return;
2207 
2208 	arg->peer_flags |= ar->wmi.peer_flags->ht;
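	/* IEEE80211_HT_MAX_AMPDU_FACTOR is 13, so the maximum A-MPDU length
	 * below is 2^(13 + ampdu_factor) - 1 bytes; e.g. an ampdu_factor of 3
	 * yields (1 << 16) - 1 = 65535 bytes.
	 */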
2209 	arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
2210 				    ht_cap->ampdu_factor)) - 1;
2211 
2212 	arg->peer_mpdu_density =
2213 		ath10k_parse_mpdudensity(ht_cap->ampdu_density);
2214 
2215 	arg->peer_ht_caps = ht_cap->cap;
2216 	arg->peer_rate_caps |= WMI_RC_HT_FLAG;
2217 
2218 	if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)
2219 		arg->peer_flags |= ar->wmi.peer_flags->ldbc;
2220 
2221 	if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
2222 		arg->peer_flags |= ar->wmi.peer_flags->bw40;
2223 		arg->peer_rate_caps |= WMI_RC_CW40_FLAG;
2224 	}
2225 
2226 	if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) {
2227 		if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
2228 			arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
2229 
2230 		if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
2231 			arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
2232 	}
2233 
2234 	if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) {
2235 		arg->peer_rate_caps |= WMI_RC_TX_STBC_FLAG;
2236 		arg->peer_flags |= ar->wmi.peer_flags->stbc;
2237 	}
2238 
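	/* The RX STBC subfield of the HT capability info is a 2-bit count of
	 * spatial streams the peer can receive with STBC (0 means none). Below
	 * it is extracted and shifted into the corresponding WMI rate-caps
	 * field position.
	 */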
2239 	if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) {
2240 		stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC;
2241 		stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT;
2242 		stbc = stbc << WMI_RC_RX_STBC_FLAG_S;
2243 		arg->peer_rate_caps |= stbc;
2244 		arg->peer_flags |= ar->wmi.peer_flags->stbc;
2245 	}
2246 
2247 	if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2])
2248 		arg->peer_rate_caps |= WMI_RC_TS_FLAG;
2249 	else if (ht_cap->mcs.rx_mask[1])
2250 		arg->peer_rate_caps |= WMI_RC_DS_FLAG;
2251 
2252 	for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++)
2253 		if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) &&
2254 		    (ht_mcs_mask[i / 8] & BIT(i % 8))) {
2255 			max_nss = (i / 8) + 1;
2256 			arg->peer_ht_rates.rates[n++] = i;
2257 		}
2258 
2259 	/*
2260 	 * This is a workaround for HT-enabled STAs which break the spec
2261 	 * and have no HT capabilities RX mask (no HT RX MCS map).
2262 	 *
2263 	 * As per spec, in section 20.3.5 Modulation and coding scheme (MCS),
2264 	 * MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs.
2265 	 *
2266 	 * Firmware asserts if such a situation occurs.
2267 	 */
2268 	if (n == 0) {
2269 		arg->peer_ht_rates.num_rates = 8;
2270 		for (i = 0; i < arg->peer_ht_rates.num_rates; i++)
2271 			arg->peer_ht_rates.rates[i] = i;
2272 	} else {
2273 		arg->peer_ht_rates.num_rates = n;
2274 		arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss);
2275 	}
2276 
2277 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
2278 		   arg->addr,
2279 		   arg->peer_ht_rates.num_rates,
2280 		   arg->peer_num_spatial_streams);
2281 }
2282 
2283 static int ath10k_peer_assoc_qos_ap(struct ath10k *ar,
2284 				    struct ath10k_vif *arvif,
2285 				    struct ieee80211_sta *sta)
2286 {
2287 	u32 uapsd = 0;
2288 	u32 max_sp = 0;
2289 	int ret = 0;
2290 
2291 	lockdep_assert_held(&ar->conf_mutex);
2292 
2293 	if (sta->wme && sta->uapsd_queues) {
2294 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",
2295 			   sta->uapsd_queues, sta->max_sp);
2296 
2297 		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
2298 			uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
2299 				 WMI_AP_PS_UAPSD_AC3_TRIGGER_EN;
2300 		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
2301 			uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN |
2302 				 WMI_AP_PS_UAPSD_AC2_TRIGGER_EN;
2303 		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
2304 			uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN |
2305 				 WMI_AP_PS_UAPSD_AC1_TRIGGER_EN;
2306 		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
2307 			uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN |
2308 				 WMI_AP_PS_UAPSD_AC0_TRIGGER_EN;
2309 
2310 		if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP)
2311 			max_sp = sta->max_sp;
2312 
2313 		ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
2314 						 sta->addr,
2315 						 WMI_AP_PS_PEER_PARAM_UAPSD,
2316 						 uapsd);
2317 		if (ret) {
2318 			ath10k_warn(ar, "failed to set ap ps peer param uapsd for vdev %i: %d\n",
2319 				    arvif->vdev_id, ret);
2320 			return ret;
2321 		}
2322 
2323 		ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
2324 						 sta->addr,
2325 						 WMI_AP_PS_PEER_PARAM_MAX_SP,
2326 						 max_sp);
2327 		if (ret) {
2328 			ath10k_warn(ar, "failed to set ap ps peer param max sp for vdev %i: %d\n",
2329 				    arvif->vdev_id, ret);
2330 			return ret;
2331 		}
2332 
2333 		/* TODO: set this up based on the STA listen interval and the
2334 		 * beacon interval. We currently don't know sta->listen_interval
2335 		 * (a mac80211 patch is required), so use 10 seconds for now.
2336 		 */
2337 		ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr,
2338 						 WMI_AP_PS_PEER_PARAM_AGEOUT_TIME,
2339 						 10);
2340 		if (ret) {
2341 			ath10k_warn(ar, "failed to set ap ps peer param ageout time for vdev %i: %d\n",
2342 				    arvif->vdev_id, ret);
2343 			return ret;
2344 		}
2345 	}
2346 
2347 	return 0;
2348 }
2349 
2350 static u16
2351 ath10k_peer_assoc_h_vht_limit(u16 tx_mcs_set,
2352 			      const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX])
2353 {
2354 	int idx_limit;
2355 	int nss;
2356 	u16 mcs_map;
2357 	u16 mcs;
2358 
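	/* The VHT MCS map encodes two bits per spatial stream: 0 = MCS 0-7,
	 * 1 = MCS 0-8, 2 = MCS 0-9, 3 = not supported. For example, a map of
	 * 0xfffa advertises MCS 0-9 on two streams and nothing on the rest.
	 * Clamp each per-NSS value of the tx MCS set to the configured limit.
	 */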
2359 	for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) {
2360 		mcs_map = ath10k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) &
2361 			  vht_mcs_limit[nss];
2362 
2363 		if (mcs_map)
2364 			idx_limit = fls(mcs_map) - 1;
2365 		else
2366 			idx_limit = -1;
2367 
2368 		switch (idx_limit) {
2369 		case 0: /* fall through */
2370 		case 1: /* fall through */
2371 		case 2: /* fall through */
2372 		case 3: /* fall through */
2373 		case 4: /* fall through */
2374 		case 5: /* fall through */
2375 		case 6: /* fall through */
2376 		default:
2377 			/* see ath10k_mac_can_set_bitrate_mask() */
2378 			WARN_ON(1);
2379 			/* fall through */
2380 		case -1:
2381 			mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED;
2382 			break;
2383 		case 7:
2384 			mcs = IEEE80211_VHT_MCS_SUPPORT_0_7;
2385 			break;
2386 		case 8:
2387 			mcs = IEEE80211_VHT_MCS_SUPPORT_0_8;
2388 			break;
2389 		case 9:
2390 			mcs = IEEE80211_VHT_MCS_SUPPORT_0_9;
2391 			break;
2392 		}
2393 
2394 		tx_mcs_set &= ~(0x3 << (nss * 2));
2395 		tx_mcs_set |= mcs << (nss * 2);
2396 	}
2397 
2398 	return tx_mcs_set;
2399 }
2400 
2401 static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
2402 				    struct ieee80211_vif *vif,
2403 				    struct ieee80211_sta *sta,
2404 				    struct wmi_peer_assoc_complete_arg *arg)
2405 {
2406 	const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
2407 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2408 	struct cfg80211_chan_def def;
2409 	enum nl80211_band band;
2410 	const u16 *vht_mcs_mask;
2411 	u8 ampdu_factor;
2412 
2413 	if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2414 		return;
2415 
2416 	if (!vht_cap->vht_supported)
2417 		return;
2418 
2419 	band = def.chan->band;
2420 	vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
2421 
2422 	if (ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
2423 		return;
2424 
2425 	arg->peer_flags |= ar->wmi.peer_flags->vht;
2426 
2427 	if (def.chan->band == NL80211_BAND_2GHZ)
2428 		arg->peer_flags |= ar->wmi.peer_flags->vht_2g;
2429 
2430 	arg->peer_vht_caps = vht_cap->cap;
2431 
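	/* The VHT maximum A-MPDU length exponent is a 3-bit field (0-7) and,
	 * like HT, is applied on top of the base factor of 13, i.e. the limit
	 * is 2^(13 + exponent) - 1 bytes (up to 1048575 bytes for exponent 7).
	 */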
2432 	ampdu_factor = (vht_cap->cap &
2433 			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >>
2434 		       IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
2435 
2436 	/* Workaround: Some Netgear/Linksys 11ac APs set the Rx A-MPDU factor
2437 	 * to zero in the VHT IE. Using it would result in degraded throughput.
2438 	 * At this point arg->peer_max_mpdu contains the HT max_mpdu, so keep
2439 	 * it if the VHT max_mpdu is smaller. */
2440 	arg->peer_max_mpdu = max(arg->peer_max_mpdu,
2441 				 (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
2442 					ampdu_factor)) - 1);
2443 
2444 	if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
2445 		arg->peer_flags |= ar->wmi.peer_flags->bw80;
2446 
2447 	arg->peer_vht_rates.rx_max_rate =
2448 		__le16_to_cpu(vht_cap->vht_mcs.rx_highest);
2449 	arg->peer_vht_rates.rx_mcs_set =
2450 		__le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
2451 	arg->peer_vht_rates.tx_max_rate =
2452 		__le16_to_cpu(vht_cap->vht_mcs.tx_highest);
2453 	arg->peer_vht_rates.tx_mcs_set = ath10k_peer_assoc_h_vht_limit(
2454 		__le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask);
2455 
2456 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
2457 		   sta->addr, arg->peer_max_mpdu, arg->peer_flags);
2458 }
2459 
2460 static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
2461 				    struct ieee80211_vif *vif,
2462 				    struct ieee80211_sta *sta,
2463 				    struct wmi_peer_assoc_complete_arg *arg)
2464 {
2465 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2466 
2467 	switch (arvif->vdev_type) {
2468 	case WMI_VDEV_TYPE_AP:
2469 		if (sta->wme)
2470 			arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
2471 
2472 		if (sta->wme && sta->uapsd_queues) {
2473 			arg->peer_flags |= arvif->ar->wmi.peer_flags->apsd;
2474 			arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG;
2475 		}
2476 		break;
2477 	case WMI_VDEV_TYPE_STA:
2478 		if (vif->bss_conf.qos)
2479 			arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
2480 		break;
2481 	case WMI_VDEV_TYPE_IBSS:
2482 		if (sta->wme)
2483 			arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
2484 		break;
2485 	default:
2486 		break;
2487 	}
2488 
2489 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM qos %d\n",
2490 		   sta->addr, !!(arg->peer_flags &
2491 		   arvif->ar->wmi.peer_flags->qos));
2492 }
2493 
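/* The CCK entries come first in the 2 GHz bitrate table, so shifting the
 * supported-rates bitmap by the first OFDM index leaves a non-zero value
 * only if the station supports at least one OFDM rate.
 */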
2494 static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
2495 {
2496 	return sta->supp_rates[NL80211_BAND_2GHZ] >>
2497 	       ATH10K_MAC_FIRST_OFDM_RATE_IDX;
2498 }
2499 
2500 static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
2501 					struct ieee80211_vif *vif,
2502 					struct ieee80211_sta *sta,
2503 					struct wmi_peer_assoc_complete_arg *arg)
2504 {
2505 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2506 	struct cfg80211_chan_def def;
2507 	enum nl80211_band band;
2508 	const u8 *ht_mcs_mask;
2509 	const u16 *vht_mcs_mask;
2510 	enum wmi_phy_mode phymode = MODE_UNKNOWN;
2511 
2512 	if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2513 		return;
2514 
2515 	band = def.chan->band;
2516 	ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
2517 	vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
2518 
2519 	switch (band) {
2520 	case NL80211_BAND_2GHZ:
2521 		if (sta->vht_cap.vht_supported &&
2522 		    !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
2523 			if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
2524 				phymode = MODE_11AC_VHT40;
2525 			else
2526 				phymode = MODE_11AC_VHT20;
2527 		} else if (sta->ht_cap.ht_supported &&
2528 			   !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
2529 			if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
2530 				phymode = MODE_11NG_HT40;
2531 			else
2532 				phymode = MODE_11NG_HT20;
2533 		} else if (ath10k_mac_sta_has_ofdm_only(sta)) {
2534 			phymode = MODE_11G;
2535 		} else {
2536 			phymode = MODE_11B;
2537 		}
2538 
2539 		break;
2540 	case NL80211_BAND_5GHZ:
2541 		/*
2542 		 * Check VHT first.
2543 		 */
2544 		if (sta->vht_cap.vht_supported &&
2545 		    !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
2546 			if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
2547 				phymode = MODE_11AC_VHT80;
2548 			else if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
2549 				phymode = MODE_11AC_VHT40;
2550 			else if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
2551 				phymode = MODE_11AC_VHT20;
2552 		} else if (sta->ht_cap.ht_supported &&
2553 			   !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
2554 			if (sta->bandwidth >= IEEE80211_STA_RX_BW_40)
2555 				phymode = MODE_11NA_HT40;
2556 			else
2557 				phymode = MODE_11NA_HT20;
2558 		} else {
2559 			phymode = MODE_11A;
2560 		}
2561 
2562 		break;
2563 	default:
2564 		break;
2565 	}
2566 
2567 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM phymode %s\n",
2568 		   sta->addr, ath10k_wmi_phymode_str(phymode));
2569 
2570 	arg->peer_phymode = phymode;
2571 	WARN_ON(phymode == MODE_UNKNOWN);
2572 }
2573 
2574 static int ath10k_peer_assoc_prepare(struct ath10k *ar,
2575 				     struct ieee80211_vif *vif,
2576 				     struct ieee80211_sta *sta,
2577 				     struct wmi_peer_assoc_complete_arg *arg)
2578 {
2579 	lockdep_assert_held(&ar->conf_mutex);
2580 
2581 	memset(arg, 0, sizeof(*arg));
2582 
2583 	ath10k_peer_assoc_h_basic(ar, vif, sta, arg);
2584 	ath10k_peer_assoc_h_crypto(ar, vif, sta, arg);
2585 	ath10k_peer_assoc_h_rates(ar, vif, sta, arg);
2586 	ath10k_peer_assoc_h_ht(ar, vif, sta, arg);
2587 	ath10k_peer_assoc_h_vht(ar, vif, sta, arg);
2588 	ath10k_peer_assoc_h_qos(ar, vif, sta, arg);
2589 	ath10k_peer_assoc_h_phymode(ar, vif, sta, arg);
2590 
2591 	return 0;
2592 }
2593 
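/* The map below is indexed by the 2-bit SM Power Save subfield of the HT
 * capability info (WLAN_HT_CAP_SM_PS_*): 0 = static, 1 = dynamic,
 * 2 = reserved/invalid, 3 = disabled.
 */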
2594 static const u32 ath10k_smps_map[] = {
2595 	[WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC,
2596 	[WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC,
2597 	[WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE,
2598 	[WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE,
2599 };
2600 
2601 static int ath10k_setup_peer_smps(struct ath10k *ar, struct ath10k_vif *arvif,
2602 				  const u8 *addr,
2603 				  const struct ieee80211_sta_ht_cap *ht_cap)
2604 {
2605 	int smps;
2606 
2607 	if (!ht_cap->ht_supported)
2608 		return 0;
2609 
2610 	smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
2611 	smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
2612 
2613 	if (smps >= ARRAY_SIZE(ath10k_smps_map))
2614 		return -EINVAL;
2615 
2616 	return ath10k_wmi_peer_set_param(ar, arvif->vdev_id, addr,
2617 					 WMI_PEER_SMPS_STATE,
2618 					 ath10k_smps_map[smps]);
2619 }
2620 
2621 static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar,
2622 				      struct ieee80211_vif *vif,
2623 				      struct ieee80211_sta_vht_cap vht_cap)
2624 {
2625 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2626 	int ret;
2627 	u32 param;
2628 	u32 value;
2629 
2630 	if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_AFTER_ASSOC)
2631 		return 0;
2632 
2633 	if (!(ar->vht_cap_info &
2634 	      (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
2635 	       IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
2636 	       IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
2637 	       IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)))
2638 		return 0;
2639 
2640 	param = ar->wmi.vdev_param->txbf;
2641 	value = 0;
2642 
2643 	if (WARN_ON(param == WMI_VDEV_PARAM_UNSUPPORTED))
2644 		return 0;
2645 
2646 	/* Note: this mapping is intentional. If a remote STA advertises support
2647 	 * for acting as a beamformer, we should enable ourselves as a beamformee.
2648 	 */
2649 
2650 	if (ar->vht_cap_info &
2651 	    (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
2652 	     IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
2653 		if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
2654 			value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
2655 
2656 		if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
2657 			value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE;
2658 	}
2659 
2660 	if (ar->vht_cap_info &
2661 	    (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
2662 	     IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
2663 		if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
2664 			value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
2665 
2666 		if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
2667 			value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER;
2668 	}
2669 
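	/* MU beamforming presumably builds on the corresponding SU capability,
	 * so make sure the SU flags accompany the MU ones.
	 */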
2670 	if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFEE)
2671 		value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
2672 
2673 	if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFER)
2674 		value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
2675 
2676 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, value);
2677 	if (ret) {
2678 		ath10k_warn(ar, "failed to submit vdev param txbf 0x%x: %d\n",
2679 			    value, ret);
2680 		return ret;
2681 	}
2682 
2683 	return 0;
2684 }
2685 
2686 /* can be called only in mac80211 callbacks due to `key_count` usage */
2687 static void ath10k_bss_assoc(struct ieee80211_hw *hw,
2688 			     struct ieee80211_vif *vif,
2689 			     struct ieee80211_bss_conf *bss_conf)
2690 {
2691 	struct ath10k *ar = hw->priv;
2692 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2693 	struct ieee80211_sta_ht_cap ht_cap;
2694 	struct ieee80211_sta_vht_cap vht_cap;
2695 	struct wmi_peer_assoc_complete_arg peer_arg;
2696 	struct ieee80211_sta *ap_sta;
2697 	int ret;
2698 
2699 	lockdep_assert_held(&ar->conf_mutex);
2700 
2701 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n",
2702 		   arvif->vdev_id, arvif->bssid, arvif->aid);
2703 
2704 	rcu_read_lock();
2705 
2706 	ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
2707 	if (!ap_sta) {
2708 		ath10k_warn(ar, "failed to find station entry for bss %pM vdev %i\n",
2709 			    bss_conf->bssid, arvif->vdev_id);
2710 		rcu_read_unlock();
2711 		return;
2712 	}
2713 
2714 	/* ap_sta must be accessed only within the RCU section, which must be
2715 	 * left before calling ath10k_setup_peer_smps() as it might sleep. */
2716 	ht_cap = ap_sta->ht_cap;
2717 	vht_cap = ap_sta->vht_cap;
2718 
2719 	ret = ath10k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg);
2720 	if (ret) {
2721 		ath10k_warn(ar, "failed to prepare peer assoc for %pM vdev %i: %d\n",
2722 			    bss_conf->bssid, arvif->vdev_id, ret);
2723 		rcu_read_unlock();
2724 		return;
2725 	}
2726 
2727 	rcu_read_unlock();
2728 
2729 	ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
2730 	if (ret) {
2731 		ath10k_warn(ar, "failed to run peer assoc for %pM vdev %i: %d\n",
2732 			    bss_conf->bssid, arvif->vdev_id, ret);
2733 		return;
2734 	}
2735 
2736 	ret = ath10k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ht_cap);
2737 	if (ret) {
2738 		ath10k_warn(ar, "failed to setup peer SMPS for vdev %i: %d\n",
2739 			    arvif->vdev_id, ret);
2740 		return;
2741 	}
2742 
2743 	ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
2744 	if (ret) {
2745 		ath10k_warn(ar, "failed to recalc txbf for vdev %i on bss %pM: %d\n",
2746 			    arvif->vdev_id, bss_conf->bssid, ret);
2747 		return;
2748 	}
2749 
2750 	ath10k_dbg(ar, ATH10K_DBG_MAC,
2751 		   "mac vdev %d up (associated) bssid %pM aid %d\n",
2752 		   arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
2753 
2754 	WARN_ON(arvif->is_up);
2755 
2756 	arvif->aid = bss_conf->aid;
2757 	ether_addr_copy(arvif->bssid, bss_conf->bssid);
2758 
2759 	ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
2760 	if (ret) {
2761 		ath10k_warn(ar, "failed to set vdev %d up: %d\n",
2762 			    arvif->vdev_id, ret);
2763 		return;
2764 	}
2765 
2766 	arvif->is_up = true;
2767 
2768 	/* Workaround: Some firmware revisions (tested with qca6174
2769 	 * WLAN.RM.2.0-00073) have a buggy powersave state machine and must be
2770 	 * poked with a peer param command.
2771 	 */
2772 	ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, arvif->bssid,
2773 					WMI_PEER_DUMMY_VAR, 1);
2774 	if (ret) {
2775 		ath10k_warn(ar, "failed to poke peer %pM param for ps workaround on vdev %i: %d\n",
2776 			    arvif->bssid, arvif->vdev_id, ret);
2777 		return;
2778 	}
2779 }
2780 
2781 static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
2782 				struct ieee80211_vif *vif)
2783 {
2784 	struct ath10k *ar = hw->priv;
2785 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2786 	struct ieee80211_sta_vht_cap vht_cap = {};
2787 	int ret;
2788 
2789 	lockdep_assert_held(&ar->conf_mutex);
2790 
2791 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n",
2792 		   arvif->vdev_id, arvif->bssid);
2793 
2794 	ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
2795 	if (ret)
2796 		ath10k_warn(ar, "failed to down vdev %i: %d\n",
2797 			    arvif->vdev_id, ret);
2798 
2799 	arvif->def_wep_key_idx = -1;
2800 
2801 	ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
2802 	if (ret) {
2803 		ath10k_warn(ar, "failed to recalc txbf for vdev %i: %d\n",
2804 			    arvif->vdev_id, ret);
2805 		return;
2806 	}
2807 
2808 	arvif->is_up = false;
2809 
2810 	cancel_delayed_work_sync(&arvif->connection_loss_work);
2811 }
2812 
2813 static int ath10k_station_assoc(struct ath10k *ar,
2814 				struct ieee80211_vif *vif,
2815 				struct ieee80211_sta *sta,
2816 				bool reassoc)
2817 {
2818 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2819 	struct wmi_peer_assoc_complete_arg peer_arg;
2820 	int ret = 0;
2821 
2822 	lockdep_assert_held(&ar->conf_mutex);
2823 
2824 	ret = ath10k_peer_assoc_prepare(ar, vif, sta, &peer_arg);
2825 	if (ret) {
2826 		ath10k_warn(ar, "failed to prepare WMI peer assoc for %pM vdev %i: %i\n",
2827 			    sta->addr, arvif->vdev_id, ret);
2828 		return ret;
2829 	}
2830 
2831 	ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
2832 	if (ret) {
2833 		ath10k_warn(ar, "failed to run peer assoc for STA %pM vdev %i: %d\n",
2834 			    sta->addr, arvif->vdev_id, ret);
2835 		return ret;
2836 	}
2837 
2838 	/* Re-assoc is run only to update the supported rates for a given
2839 	 * station. It doesn't make much sense to reconfigure the peer completely.
2840 	 */
2841 	if (!reassoc) {
2842 		ret = ath10k_setup_peer_smps(ar, arvif, sta->addr,
2843 					     &sta->ht_cap);
2844 		if (ret) {
2845 			ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n",
2846 				    arvif->vdev_id, ret);
2847 			return ret;
2848 		}
2849 
2850 		ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta);
2851 		if (ret) {
2852 			ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n",
2853 				    sta->addr, arvif->vdev_id, ret);
2854 			return ret;
2855 		}
2856 
2857 		if (!sta->wme) {
2858 			arvif->num_legacy_stations++;
2859 			ret  = ath10k_recalc_rtscts_prot(arvif);
2860 			if (ret) {
2861 				ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
2862 					    arvif->vdev_id, ret);
2863 				return ret;
2864 			}
2865 		}
2866 
2867 		/* Plumb cached keys only for static WEP */
2868 		if (arvif->def_wep_key_idx != -1) {
2869 			ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
2870 			if (ret) {
2871 				ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n",
2872 					    arvif->vdev_id, ret);
2873 				return ret;
2874 			}
2875 		}
2876 	}
2877 
2878 	return ret;
2879 }
2880 
2881 static int ath10k_station_disassoc(struct ath10k *ar,
2882 				   struct ieee80211_vif *vif,
2883 				   struct ieee80211_sta *sta)
2884 {
2885 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2886 	int ret = 0;
2887 
2888 	lockdep_assert_held(&ar->conf_mutex);
2889 
2890 	if (!sta->wme) {
2891 		arvif->num_legacy_stations--;
2892 		ret = ath10k_recalc_rtscts_prot(arvif);
2893 		if (ret) {
2894 			ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
2895 				    arvif->vdev_id, ret);
2896 			return ret;
2897 		}
2898 	}
2899 
2900 	ret = ath10k_clear_peer_keys(arvif, sta->addr);
2901 	if (ret) {
2902 		ath10k_warn(ar, "failed to clear all peer wep keys for vdev %i: %d\n",
2903 			    arvif->vdev_id, ret);
2904 		return ret;
2905 	}
2906 
2907 	return ret;
2908 }
2909 
2910 /**************/
2911 /* Regulatory */
2912 /**************/
2913 
2914 static int ath10k_update_channel_list(struct ath10k *ar)
2915 {
2916 	struct ieee80211_hw *hw = ar->hw;
2917 	struct ieee80211_supported_band **bands;
2918 	enum nl80211_band band;
2919 	struct ieee80211_channel *channel;
2920 	struct wmi_scan_chan_list_arg arg = {0};
2921 	struct wmi_channel_arg *ch;
2922 	bool passive;
2923 	int len;
2924 	int ret;
2925 	int i;
2926 
2927 	lockdep_assert_held(&ar->conf_mutex);
2928 
2929 	bands = hw->wiphy->bands;
2930 	for (band = 0; band < NUM_NL80211_BANDS; band++) {
2931 		if (!bands[band])
2932 			continue;
2933 
2934 		for (i = 0; i < bands[band]->n_channels; i++) {
2935 			if (bands[band]->channels[i].flags &
2936 			    IEEE80211_CHAN_DISABLED)
2937 				continue;
2938 
2939 			arg.n_channels++;
2940 		}
2941 	}
2942 
2943 	len = sizeof(struct wmi_channel_arg) * arg.n_channels;
2944 	arg.channels = kzalloc(len, GFP_KERNEL);
2945 	if (!arg.channels)
2946 		return -ENOMEM;
2947 
2948 	ch = arg.channels;
2949 	for (band = 0; band < NUM_NL80211_BANDS; band++) {
2950 		if (!bands[band])
2951 			continue;
2952 
2953 		for (i = 0; i < bands[band]->n_channels; i++) {
2954 			channel = &bands[band]->channels[i];
2955 
2956 			if (channel->flags & IEEE80211_CHAN_DISABLED)
2957 				continue;
2958 
2959 			ch->allow_ht = true;
2960 
2961 			/* FIXME: when should we really allow VHT? */
2962 			ch->allow_vht = true;
2963 
2964 			ch->allow_ibss =
2965 				!(channel->flags & IEEE80211_CHAN_NO_IR);
2966 
2967 			ch->ht40plus =
2968 				!(channel->flags & IEEE80211_CHAN_NO_HT40PLUS);
2969 
2970 			ch->chan_radar =
2971 				!!(channel->flags & IEEE80211_CHAN_RADAR);
2972 
2973 			passive = channel->flags & IEEE80211_CHAN_NO_IR;
2974 			ch->passive = passive;
2975 
2976 			ch->freq = channel->center_freq;
2977 			ch->band_center_freq1 = channel->center_freq;
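			/* mac80211 power values are in dBm while the WMI
			 * channel arguments appear to be in 0.5 dBm units,
			 * hence the doubling below.
			 */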
2978 			ch->min_power = 0;
2979 			ch->max_power = channel->max_power * 2;
2980 			ch->max_reg_power = channel->max_reg_power * 2;
2981 			ch->max_antenna_gain = channel->max_antenna_gain * 2;
2982 			ch->reg_class_id = 0; /* FIXME */
2983 
2984 			/* FIXME: why use only legacy modes, why not any
2985 			 * HT/VHT modes? Would that even make any
2986 			 * difference? */
2987 			if (channel->band == NL80211_BAND_2GHZ)
2988 				ch->mode = MODE_11G;
2989 			else
2990 				ch->mode = MODE_11A;
2991 
2992 			if (WARN_ON_ONCE(ch->mode == MODE_UNKNOWN))
2993 				continue;
2994 
2995 			ath10k_dbg(ar, ATH10K_DBG_WMI,
2996 				   "mac channel [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
2997 				    ch - arg.channels, arg.n_channels,
2998 				   ch->freq, ch->max_power, ch->max_reg_power,
2999 				   ch->max_antenna_gain, ch->mode);
3000 
3001 			ch++;
3002 		}
3003 	}
3004 
3005 	ret = ath10k_wmi_scan_chan_list(ar, &arg);
3006 	kfree(arg.channels);
3007 
3008 	return ret;
3009 }
3010 
3011 static enum wmi_dfs_region
3012 ath10k_mac_get_dfs_region(enum nl80211_dfs_regions dfs_region)
3013 {
3014 	switch (dfs_region) {
3015 	case NL80211_DFS_UNSET:
3016 		return WMI_UNINIT_DFS_DOMAIN;
3017 	case NL80211_DFS_FCC:
3018 		return WMI_FCC_DFS_DOMAIN;
3019 	case NL80211_DFS_ETSI:
3020 		return WMI_ETSI_DFS_DOMAIN;
3021 	case NL80211_DFS_JP:
3022 		return WMI_MKK4_DFS_DOMAIN;
3023 	}
3024 	return WMI_UNINIT_DFS_DOMAIN;
3025 }
3026 
3027 static void ath10k_regd_update(struct ath10k *ar)
3028 {
3029 	struct reg_dmn_pair_mapping *regpair;
3030 	int ret;
3031 	enum wmi_dfs_region wmi_dfs_reg;
3032 	enum nl80211_dfs_regions nl_dfs_reg;
3033 
3034 	lockdep_assert_held(&ar->conf_mutex);
3035 
3036 	ret = ath10k_update_channel_list(ar);
3037 	if (ret)
3038 		ath10k_warn(ar, "failed to update channel list: %d\n", ret);
3039 
3040 	regpair = ar->ath_common.regulatory.regpair;
3041 
3042 	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
3043 		nl_dfs_reg = ar->dfs_detector->region;
3044 		wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg);
3045 	} else {
3046 		wmi_dfs_reg = WMI_UNINIT_DFS_DOMAIN;
3047 	}
3048 
3049 	/* The target allows setting up a per-band regdomain but ath_common
3050 	 * provides only a combined one. */
3051 	ret = ath10k_wmi_pdev_set_regdomain(ar,
3052 					    regpair->reg_domain,
3053 					    regpair->reg_domain, /* 2ghz */
3054 					    regpair->reg_domain, /* 5ghz */
3055 					    regpair->reg_2ghz_ctl,
3056 					    regpair->reg_5ghz_ctl,
3057 					    wmi_dfs_reg);
3058 	if (ret)
3059 		ath10k_warn(ar, "failed to set pdev regdomain: %d\n", ret);
3060 }
3061 
3062 static void ath10k_reg_notifier(struct wiphy *wiphy,
3063 				struct regulatory_request *request)
3064 {
3065 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
3066 	struct ath10k *ar = hw->priv;
3067 	bool result;
3068 
3069 	ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory);
3070 
3071 	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
3072 		ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs region 0x%x\n",
3073 			   request->dfs_region);
3074 		result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector,
3075 							  request->dfs_region);
3076 		if (!result)
3077 			ath10k_warn(ar, "DFS region 0x%X not supported, will trigger radar for every pulse\n",
3078 				    request->dfs_region);
3079 	}
3080 
3081 	mutex_lock(&ar->conf_mutex);
3082 	if (ar->state == ATH10K_STATE_ON)
3083 		ath10k_regd_update(ar);
3084 	mutex_unlock(&ar->conf_mutex);
3085 }
3086 
3087 /***************/
3088 /* TX handlers */
3089 /***************/
3090 
3091 enum ath10k_mac_tx_path {
3092 	ATH10K_MAC_TX_HTT,
3093 	ATH10K_MAC_TX_HTT_MGMT,
3094 	ATH10K_MAC_TX_WMI_MGMT,
3095 	ATH10K_MAC_TX_UNKNOWN,
3096 };
3097 
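/* ar->tx_paused and arvif->tx_paused are bitmaps of pause reasons; queues are
 * woken only once every reason bit has been cleared again.
 */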
3098 void ath10k_mac_tx_lock(struct ath10k *ar, int reason)
3099 {
3100 	lockdep_assert_held(&ar->htt.tx_lock);
3101 
3102 	WARN_ON(reason >= ATH10K_TX_PAUSE_MAX);
3103 	ar->tx_paused |= BIT(reason);
3104 	ieee80211_stop_queues(ar->hw);
3105 }
3106 
3107 static void ath10k_mac_tx_unlock_iter(void *data, u8 *mac,
3108 				      struct ieee80211_vif *vif)
3109 {
3110 	struct ath10k *ar = data;
3111 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
3112 
3113 	if (arvif->tx_paused)
3114 		return;
3115 
3116 	ieee80211_wake_queue(ar->hw, arvif->vdev_id);
3117 }
3118 
3119 void ath10k_mac_tx_unlock(struct ath10k *ar, int reason)
3120 {
3121 	lockdep_assert_held(&ar->htt.tx_lock);
3122 
3123 	WARN_ON(reason >= ATH10K_TX_PAUSE_MAX);
3124 	ar->tx_paused &= ~BIT(reason);
3125 
3126 	if (ar->tx_paused)
3127 		return;
3128 
3129 	ieee80211_iterate_active_interfaces_atomic(ar->hw,
3130 						   IEEE80211_IFACE_ITER_RESUME_ALL,
3131 						   ath10k_mac_tx_unlock_iter,
3132 						   ar);
3133 
3134 	ieee80211_wake_queue(ar->hw, ar->hw->offchannel_tx_hw_queue);
3135 }
3136 
3137 void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason)
3138 {
3139 	struct ath10k *ar = arvif->ar;
3140 
3141 	lockdep_assert_held(&ar->htt.tx_lock);
3142 
3143 	WARN_ON(reason >= BITS_PER_LONG);
3144 	arvif->tx_paused |= BIT(reason);
3145 	ieee80211_stop_queue(ar->hw, arvif->vdev_id);
3146 }
3147 
3148 void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason)
3149 {
3150 	struct ath10k *ar = arvif->ar;
3151 
3152 	lockdep_assert_held(&ar->htt.tx_lock);
3153 
3154 	WARN_ON(reason >= BITS_PER_LONG);
3155 	arvif->tx_paused &= ~BIT(reason);
3156 
3157 	if (ar->tx_paused)
3158 		return;
3159 
3160 	if (arvif->tx_paused)
3161 		return;
3162 
3163 	ieee80211_wake_queue(ar->hw, arvif->vdev_id);
3164 }
3165 
3166 static void ath10k_mac_vif_handle_tx_pause(struct ath10k_vif *arvif,
3167 					   enum wmi_tlv_tx_pause_id pause_id,
3168 					   enum wmi_tlv_tx_pause_action action)
3169 {
3170 	struct ath10k *ar = arvif->ar;
3171 
3172 	lockdep_assert_held(&ar->htt.tx_lock);
3173 
3174 	switch (action) {
3175 	case WMI_TLV_TX_PAUSE_ACTION_STOP:
3176 		ath10k_mac_vif_tx_lock(arvif, pause_id);
3177 		break;
3178 	case WMI_TLV_TX_PAUSE_ACTION_WAKE:
3179 		ath10k_mac_vif_tx_unlock(arvif, pause_id);
3180 		break;
3181 	default:
3182 		ath10k_warn(ar, "received unknown tx pause action %d on vdev %i, ignoring\n",
3183 			    action, arvif->vdev_id);
3184 		break;
3185 	}
3186 }
3187 
3188 struct ath10k_mac_tx_pause {
3189 	u32 vdev_id;
3190 	enum wmi_tlv_tx_pause_id pause_id;
3191 	enum wmi_tlv_tx_pause_action action;
3192 };
3193 
3194 static void ath10k_mac_handle_tx_pause_iter(void *data, u8 *mac,
3195 					    struct ieee80211_vif *vif)
3196 {
3197 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
3198 	struct ath10k_mac_tx_pause *arg = data;
3199 
3200 	if (arvif->vdev_id != arg->vdev_id)
3201 		return;
3202 
3203 	ath10k_mac_vif_handle_tx_pause(arvif, arg->pause_id, arg->action);
3204 }
3205 
3206 void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id,
3207 				     enum wmi_tlv_tx_pause_id pause_id,
3208 				     enum wmi_tlv_tx_pause_action action)
3209 {
3210 	struct ath10k_mac_tx_pause arg = {
3211 		.vdev_id = vdev_id,
3212 		.pause_id = pause_id,
3213 		.action = action,
3214 	};
3215 
3216 	spin_lock_bh(&ar->htt.tx_lock);
3217 	ieee80211_iterate_active_interfaces_atomic(ar->hw,
3218 						   IEEE80211_IFACE_ITER_RESUME_ALL,
3219 						   ath10k_mac_handle_tx_pause_iter,
3220 						   &arg);
3221 	spin_unlock_bh(&ar->htt.tx_lock);
3222 }
3223 
3224 static enum ath10k_hw_txrx_mode
3225 ath10k_mac_tx_h_get_txmode(struct ath10k *ar,
3226 			   struct ieee80211_vif *vif,
3227 			   struct ieee80211_sta *sta,
3228 			   struct sk_buff *skb)
3229 {
3230 	const struct ieee80211_hdr *hdr = (void *)skb->data;
3231 	__le16 fc = hdr->frame_control;
3232 
3233 	if (!vif || vif->type == NL80211_IFTYPE_MONITOR)
3234 		return ATH10K_HW_TXRX_RAW;
3235 
3236 	if (ieee80211_is_mgmt(fc))
3237 		return ATH10K_HW_TXRX_MGMT;
3238 
3239 	/* Workaround:
3240 	 *
3241 	 * NullFunc frames are mostly used to check whether a client or AP is
3242 	 * still reachable and responsive. This implies tx status reports must
3243 	 * be accurate - otherwise either mac80211 or userspace (e.g. hostapd)
3244 	 * may conclude that the other end disappeared and tear down the BSS
3245 	 * connection, or it may never disconnect from the BSS/client (which is
3246 	 * what happens in practice).
3247 	 *
3248 	 * Firmware with HTT older than 3.0 delivers incorrect tx status for
3249 	 * NullFunc frames to the driver. However, there's an HTT Mgmt Tx
3250 	 * command which seems to deliver correct tx reports for NullFunc
3251 	 * frames. The downside of using it is that it ignores client powersave
3252 	 * state, so it can end up disconnecting sleeping clients in AP mode.
3253 	 * It should fix STA mode though, because APs don't sleep.
3254 	 */
3255 	if (ar->htt.target_version_major < 3 &&
3256 	    (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) &&
3257 	    !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
3258 		      ar->running_fw->fw_file.fw_features) &&
3259 	    !test_bit(ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR,
3260 		      ar->running_fw->fw_file.fw_features))
3261 		return ATH10K_HW_TXRX_MGMT;
3262 
3263 	/* Workaround:
3264 	 *
3265 	 * Some wmi-tlv firmwares for qca6174 have broken Tx key selection for
3266 	 * NativeWifi txmode - the firmware selects the AP key instead of the
3267 	 * peer key. Key selection seems to work with Ethernet txmode, so use it.
3268 	 *
3269 	 * FIXME: Check if raw mode works with TDLS.
3270 	 */
3271 	if (ieee80211_is_data_present(fc) && sta && sta->tdls)
3272 		return ATH10K_HW_TXRX_ETHERNET;
3273 
3274 	if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
3275 		return ATH10K_HW_TXRX_RAW;
3276 
3277 	return ATH10K_HW_TXRX_NATIVE_WIFI;
3278 }
3279 
3280 static bool ath10k_tx_h_use_hwcrypto(struct ieee80211_vif *vif,
3281 				     struct sk_buff *skb)
3282 {
3283 	const struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
3284 	const struct ieee80211_hdr *hdr = (void *)skb->data;
3285 	const u32 mask = IEEE80211_TX_INTFL_DONT_ENCRYPT |
3286 			 IEEE80211_TX_CTL_INJECTED;
3287 
3288 	if (!ieee80211_has_protected(hdr->frame_control))
3289 		return false;
3290 
3291 	if ((info->flags & mask) == mask)
3292 		return false;
3293 
3294 	if (vif)
3295 		return !ath10k_vif_to_arvif(vif)->nohwcrypt;
3296 
3297 	return true;
3298 }
3299 
3300 /* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS
3301  * Control in the header.
3302  */
3303 static void ath10k_tx_h_nwifi(struct ieee80211_hw *hw, struct sk_buff *skb)
3304 {
3305 	struct ieee80211_hdr *hdr = (void *)skb->data;
3306 	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
3307 	u8 *qos_ctl;
3308 
3309 	if (!ieee80211_is_data_qos(hdr->frame_control))
3310 		return;
3311 
3312 	qos_ctl = ieee80211_get_qos_ctl(hdr);
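	/* Shift everything up to (but not including) the QoS Control field
	 * forward by IEEE80211_QOS_CTL_LEN bytes and then pull the skb, which
	 * effectively removes QoS Control from the 802.11 header.
	 */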
3313 	memmove(skb->data + IEEE80211_QOS_CTL_LEN,
3314 		skb->data, (void *)qos_ctl - (void *)skb->data);
3315 	skb_pull(skb, IEEE80211_QOS_CTL_LEN);
3316 
3317 	/* Some firmware revisions don't handle sending QoS NullFunc well.
3318 	 * These frames are mainly used for CQM purposes so it doesn't really
3319 	 * matter whether a QoS NullFunc or a plain NullFunc is sent.
3320 	 */
3321 	hdr = (void *)skb->data;
3322 	if (ieee80211_is_qos_nullfunc(hdr->frame_control))
3323 		cb->flags &= ~ATH10K_SKB_F_QOS;
3324 
3325 	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
3326 }
3327 
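/* Convert an 802.11 data frame with an RFC 1042/SNAP header into an 802.3
 * frame: strip the 802.11 and SNAP headers and prepend an Ethernet header,
 * preserving DA, SA and the encapsulated ethertype.
 */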
3328 static void ath10k_tx_h_8023(struct sk_buff *skb)
3329 {
3330 	struct ieee80211_hdr *hdr;
3331 	struct rfc1042_hdr *rfc1042;
3332 	struct ethhdr *eth;
3333 	size_t hdrlen;
3334 	u8 da[ETH_ALEN];
3335 	u8 sa[ETH_ALEN];
3336 	__be16 type;
3337 
3338 	hdr = (void *)skb->data;
3339 	hdrlen = ieee80211_hdrlen(hdr->frame_control);
3340 	rfc1042 = (void *)skb->data + hdrlen;
3341 
3342 	ether_addr_copy(da, ieee80211_get_DA(hdr));
3343 	ether_addr_copy(sa, ieee80211_get_SA(hdr));
3344 	type = rfc1042->snap_type;
3345 
3346 	skb_pull(skb, hdrlen + sizeof(*rfc1042));
3347 	skb_push(skb, sizeof(*eth));
3348 
3349 	eth = (void *)skb->data;
3350 	ether_addr_copy(eth->h_dest, da);
3351 	ether_addr_copy(eth->h_source, sa);
3352 	eth->h_proto = type;
3353 }
3354 
3355 static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar,
3356 				       struct ieee80211_vif *vif,
3357 				       struct sk_buff *skb)
3358 {
3359 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
3360 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
3361 
3362 	/* This case applies only to P2P_GO */
3363 	if (vif->type != NL80211_IFTYPE_AP || !vif->p2p)
3364 		return;
3365 
3366 	if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) {
3367 		spin_lock_bh(&ar->data_lock);
3368 		if (arvif->u.ap.noa_data)
3369 			if (!pskb_expand_head(skb, 0, arvif->u.ap.noa_len,
3370 					      GFP_ATOMIC))
3371 				memcpy(skb_put(skb, arvif->u.ap.noa_len),
3372 				       arvif->u.ap.noa_data,
3373 				       arvif->u.ap.noa_len);
3374 		spin_unlock_bh(&ar->data_lock);
3375 	}
3376 }
3377 
3378 static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar,
3379 				    struct ieee80211_vif *vif,
3380 				    struct ieee80211_txq *txq,
3381 				    struct sk_buff *skb)
3382 {
3383 	struct ieee80211_hdr *hdr = (void *)skb->data;
3384 	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
3385 
3386 	cb->flags = 0;
3387 	if (!ath10k_tx_h_use_hwcrypto(vif, skb))
3388 		cb->flags |= ATH10K_SKB_F_NO_HWCRYPT;
3389 
3390 	if (ieee80211_is_mgmt(hdr->frame_control))
3391 		cb->flags |= ATH10K_SKB_F_MGMT;
3392 
3393 	if (ieee80211_is_data_qos(hdr->frame_control))
3394 		cb->flags |= ATH10K_SKB_F_QOS;
3395 
3396 	cb->vif = vif;
3397 	cb->txq = txq;
3398 }
3399 
3400 bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar)
3401 {
3402 	/* FIXME: Not really sure since when the behaviour changed. At some
3403 	 * point new firmware stopped requiring creation of peer entries for
3404 	 * offchannel tx (and actually creating them causes issues with wmi-htc
3405 	 * tx credit replenishment and reliability). Assume it's at least 3.4
3406 	 * since that's when the `freq` field was added to the TX_FRM HTT command.
3407 	 */
3408 	return (ar->htt.target_version_major >= 3 &&
3409 		ar->htt.target_version_minor >= 4 &&
3410 		ar->running_fw->fw_file.htt_op_version == ATH10K_FW_HTT_OP_VERSION_TLV);
3411 }
3412 
3413 static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb)
3414 {
3415 	struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
3416 	int ret = 0;
3417 
3418 	spin_lock_bh(&ar->data_lock);
3419 
3420 	if (skb_queue_len(q) == ATH10K_MAX_NUM_MGMT_PENDING) {
3421 		ath10k_warn(ar, "wmi mgmt tx queue is full\n");
3422 		ret = -ENOSPC;
3423 		goto unlock;
3424 	}
3425 
3426 	__skb_queue_tail(q, skb);
3427 	ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
3428 
3429 unlock:
3430 	spin_unlock_bh(&ar->data_lock);
3431 
3432 	return ret;
3433 }
3434 
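/* Data frames always go over HTT. Management frames go over WMI when the
 * firmware advertises WMI mgmt tx, over plain HTT tx on HTT >= 3.0, and over
 * the dedicated HTT mgmt tx command otherwise.
 */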
3435 static enum ath10k_mac_tx_path
3436 ath10k_mac_tx_h_get_txpath(struct ath10k *ar,
3437 			   struct sk_buff *skb,
3438 			   enum ath10k_hw_txrx_mode txmode)
3439 {
3440 	switch (txmode) {
3441 	case ATH10K_HW_TXRX_RAW:
3442 	case ATH10K_HW_TXRX_NATIVE_WIFI:
3443 	case ATH10K_HW_TXRX_ETHERNET:
3444 		return ATH10K_MAC_TX_HTT;
3445 	case ATH10K_HW_TXRX_MGMT:
3446 		if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
3447 			     ar->running_fw->fw_file.fw_features))
3448 			return ATH10K_MAC_TX_WMI_MGMT;
3449 		else if (ar->htt.target_version_major >= 3)
3450 			return ATH10K_MAC_TX_HTT;
3451 		else
3452 			return ATH10K_MAC_TX_HTT_MGMT;
3453 	}
3454 
3455 	return ATH10K_MAC_TX_UNKNOWN;
3456 }
3457 
3458 static int ath10k_mac_tx_submit(struct ath10k *ar,
3459 				enum ath10k_hw_txrx_mode txmode,
3460 				enum ath10k_mac_tx_path txpath,
3461 				struct sk_buff *skb)
3462 {
3463 	struct ath10k_htt *htt = &ar->htt;
3464 	int ret = -EINVAL;
3465 
3466 	switch (txpath) {
3467 	case ATH10K_MAC_TX_HTT:
3468 		ret = ath10k_htt_tx(htt, txmode, skb);
3469 		break;
3470 	case ATH10K_MAC_TX_HTT_MGMT:
3471 		ret = ath10k_htt_mgmt_tx(htt, skb);
3472 		break;
3473 	case ATH10K_MAC_TX_WMI_MGMT:
3474 		ret = ath10k_mac_tx_wmi_mgmt(ar, skb);
3475 		break;
3476 	case ATH10K_MAC_TX_UNKNOWN:
3477 		WARN_ON_ONCE(1);
3478 		ret = -EINVAL;
3479 		break;
3480 	}
3481 
3482 	if (ret) {
3483 		ath10k_warn(ar, "failed to transmit packet, dropping: %d\n",
3484 			    ret);
3485 		ieee80211_free_txskb(ar->hw, skb);
3486 	}
3487 
3488 	return ret;
3489 }
3490 
3491 /* This function consumes the sk_buff regardless of return value as far as
3492  * caller is concerned so no freeing is necessary afterwards.
3493  */
3494 static int ath10k_mac_tx(struct ath10k *ar,
3495 			 struct ieee80211_vif *vif,
3496 			 struct ieee80211_sta *sta,
3497 			 enum ath10k_hw_txrx_mode txmode,
3498 			 enum ath10k_mac_tx_path txpath,
3499 			 struct sk_buff *skb)
3500 {
3501 	struct ieee80211_hw *hw = ar->hw;
3502 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
3503 	int ret;
3504 
3505 	/* We should disable CCK RATE due to P2P */
3506 	if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
3507 		ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
3508 
3509 	switch (txmode) {
3510 	case ATH10K_HW_TXRX_MGMT:
3511 	case ATH10K_HW_TXRX_NATIVE_WIFI:
3512 		ath10k_tx_h_nwifi(hw, skb);
3513 		ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb);
3514 		ath10k_tx_h_seq_no(vif, skb);
3515 		break;
3516 	case ATH10K_HW_TXRX_ETHERNET:
3517 		ath10k_tx_h_8023(skb);
3518 		break;
3519 	case ATH10K_HW_TXRX_RAW:
3520 		if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
3521 			WARN_ON_ONCE(1);
3522 			ieee80211_free_txskb(hw, skb);
3523 			return -ENOTSUPP;
3524 		}
3525 	}
3526 
3527 	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
3528 		if (!ath10k_mac_tx_frm_has_freq(ar)) {
3529 			ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %pK\n",
3530 				   skb);
3531 
3532 			skb_queue_tail(&ar->offchan_tx_queue, skb);
3533 			ieee80211_queue_work(hw, &ar->offchan_tx_work);
3534 			return 0;
3535 		}
3536 	}
3537 
3538 	ret = ath10k_mac_tx_submit(ar, txmode, txpath, skb);
3539 	if (ret) {
3540 		ath10k_warn(ar, "failed to submit frame: %d\n", ret);
3541 		return ret;
3542 	}
3543 
3544 	return 0;
3545 }
3546 
3547 void ath10k_offchan_tx_purge(struct ath10k *ar)
3548 {
3549 	struct sk_buff *skb;
3550 
3551 	for (;;) {
3552 		skb = skb_dequeue(&ar->offchan_tx_queue);
3553 		if (!skb)
3554 			break;
3555 
3556 		ieee80211_free_txskb(ar->hw, skb);
3557 	}
3558 }
3559 
3560 void ath10k_offchan_tx_work(struct work_struct *work)
3561 {
3562 	struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work);
3563 	struct ath10k_peer *peer;
3564 	struct ath10k_vif *arvif;
3565 	enum ath10k_hw_txrx_mode txmode;
3566 	enum ath10k_mac_tx_path txpath;
3567 	struct ieee80211_hdr *hdr;
3568 	struct ieee80211_vif *vif;
3569 	struct ieee80211_sta *sta;
3570 	struct sk_buff *skb;
3571 	const u8 *peer_addr;
3572 	int vdev_id;
3573 	int ret;
3574 	unsigned long time_left;
3575 	bool tmp_peer_created = false;
3576 
3577 	/* FW requirement: We must create a peer before FW will send out
3578 	 * an offchannel frame. Otherwise the frame will be stuck and
3579 	 * never transmitted. We delete the peer upon tx completion.
3580 	 * It is unlikely that a peer for offchannel tx will already be
3581 	 * present. However it may exist in some rare cases, so account for that.
3582 	 * Otherwise we might remove a legitimate peer and break stuff. */
3583 
3584 	for (;;) {
3585 		skb = skb_dequeue(&ar->offchan_tx_queue);
3586 		if (!skb)
3587 			break;
3588 
3589 		mutex_lock(&ar->conf_mutex);
3590 
3591 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %pK\n",
3592 			   skb);
3593 
3594 		hdr = (struct ieee80211_hdr *)skb->data;
3595 		peer_addr = ieee80211_get_DA(hdr);
3596 
3597 		spin_lock_bh(&ar->data_lock);
3598 		vdev_id = ar->scan.vdev_id;
3599 		peer = ath10k_peer_find(ar, vdev_id, peer_addr);
3600 		spin_unlock_bh(&ar->data_lock);
3601 
3602 		if (peer)
3603 			/* FIXME: should this use ath10k_warn()? */
3604 			ath10k_dbg(ar, ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n",
3605 				   peer_addr, vdev_id);
3606 
3607 		if (!peer) {
3608 			ret = ath10k_peer_create(ar, NULL, NULL, vdev_id,
3609 						 peer_addr,
3610 						 WMI_PEER_TYPE_DEFAULT);
3611 			if (ret)
3612 				ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n",
3613 					    peer_addr, vdev_id, ret);
3614 			tmp_peer_created = (ret == 0);
3615 		}
3616 
3617 		spin_lock_bh(&ar->data_lock);
3618 		reinit_completion(&ar->offchan_tx_completed);
3619 		ar->offchan_tx_skb = skb;
3620 		spin_unlock_bh(&ar->data_lock);
3621 
3622 		/* It's safe to access vif and sta - conf_mutex guarantees that
3623 		 * sta_state() and remove_interface() are locked exclusively
3624 		 * out wrt to this offchannel worker.
3625 		 */
3626 		arvif = ath10k_get_arvif(ar, vdev_id);
3627 		if (arvif) {
3628 			vif = arvif->vif;
3629 			sta = ieee80211_find_sta(vif, peer_addr);
3630 		} else {
3631 			vif = NULL;
3632 			sta = NULL;
3633 		}
3634 
3635 		txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
3636 		txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
3637 
3638 		ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
3639 		if (ret) {
3640 			ath10k_warn(ar, "failed to transmit offchannel frame: %d\n",
3641 				    ret);
3642 			/* not serious */
3643 		}
3644 
3645 		time_left = wait_for_completion_timeout(&ar->offchan_tx_completed,
3646 							3 * HZ);
3647 		if (time_left == 0)
3648 			ath10k_warn(ar, "timed out waiting for offchannel skb %pK\n",
3649 				    skb);
3650 
3651 		if (!peer && tmp_peer_created) {
3652 			ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
3653 			if (ret)
3654 				ath10k_warn(ar, "failed to delete peer %pM on vdev %d: %d\n",
3655 					    peer_addr, vdev_id, ret);
3656 		}
3657 
3658 		mutex_unlock(&ar->conf_mutex);
3659 	}
3660 }
3661 
3662 void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar)
3663 {
3664 	struct sk_buff *skb;
3665 
3666 	for (;;) {
3667 		skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
3668 		if (!skb)
3669 			break;
3670 
3671 		ieee80211_free_txskb(ar->hw, skb);
3672 	}
3673 }
3674 
3675 void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
3676 {
3677 	struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work);
3678 	struct sk_buff *skb;
3679 	int ret;
3680 
3681 	for (;;) {
3682 		skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
3683 		if (!skb)
3684 			break;
3685 
3686 		ret = ath10k_wmi_mgmt_tx(ar, skb);
3687 		if (ret) {
3688 			ath10k_warn(ar, "failed to transmit management frame via WMI: %d\n",
3689 				    ret);
3690 			ieee80211_free_txskb(ar->hw, skb);
3691 		}
3692 	}
3693 }
3694 
3695 static void ath10k_mac_txq_init(struct ieee80211_txq *txq)
3696 {
3697 	struct ath10k_txq *artxq;
3698 
3699 	if (!txq)
3700 		return;
3701 
3702 	artxq = (void *)txq->drv_priv;
3703 	INIT_LIST_HEAD(&artxq->list);
3704 }
3705 
3706 static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq)
3707 {
3708 	struct ath10k_txq *artxq;
3709 	struct ath10k_skb_cb *cb;
3710 	struct sk_buff *msdu;
3711 	int msdu_id;
3712 
3713 	if (!txq)
3714 		return;
3715 
3716 	artxq = (void *)txq->drv_priv;
3717 	spin_lock_bh(&ar->txqs_lock);
3718 	if (!list_empty(&artxq->list))
3719 		list_del_init(&artxq->list);
3720 	spin_unlock_bh(&ar->txqs_lock);
3721 
3722 	spin_lock_bh(&ar->htt.tx_lock);
3723 	idr_for_each_entry(&ar->htt.pending_tx, msdu, msdu_id) {
3724 		cb = ATH10K_SKB_CB(msdu);
3725 		if (cb->txq == txq)
3726 			cb->txq = NULL;
3727 	}
3728 	spin_unlock_bh(&ar->htt.tx_lock);
3729 }
3730 
3731 struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar,
3732 					    u16 peer_id,
3733 					    u8 tid)
3734 {
3735 	struct ath10k_peer *peer;
3736 
3737 	lockdep_assert_held(&ar->data_lock);
3738 
3739 	peer = ar->peer_map[peer_id];
3740 	if (!peer)
3741 		return NULL;
3742 
3743 	if (peer->sta)
3744 		return peer->sta->txq[tid];
3745 	else if (peer->vif)
3746 		return peer->vif->txq;
3747 	else
3748 		return NULL;
3749 }
3750 
3751 static bool ath10k_mac_tx_can_push(struct ieee80211_hw *hw,
3752 				   struct ieee80211_txq *txq)
3753 {
3754 	struct ath10k *ar = hw->priv;
3755 	struct ath10k_txq *artxq = (void *)txq->drv_priv;
3756 
3757 	/* No need to get locks */
3758 
3759 	if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH)
3760 		return true;
3761 
3762 	if (ar->htt.num_pending_tx < ar->htt.tx_q_state.num_push_allowed)
3763 		return true;
3764 
3765 	if (artxq->num_fw_queued < artxq->num_push_allowed)
3766 		return true;
3767 
3768 	return false;
3769 }
3770 
3771 int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
3772 			   struct ieee80211_txq *txq)
3773 {
3774 	struct ath10k *ar = hw->priv;
3775 	struct ath10k_htt *htt = &ar->htt;
3776 	struct ath10k_txq *artxq = (void *)txq->drv_priv;
3777 	struct ieee80211_vif *vif = txq->vif;
3778 	struct ieee80211_sta *sta = txq->sta;
3779 	enum ath10k_hw_txrx_mode txmode;
3780 	enum ath10k_mac_tx_path txpath;
3781 	struct sk_buff *skb;
3782 	struct ieee80211_hdr *hdr;
3783 	size_t skb_len;
3784 	bool is_mgmt, is_presp;
3785 	int ret;
3786 
3787 	spin_lock_bh(&ar->htt.tx_lock);
3788 	ret = ath10k_htt_tx_inc_pending(htt);
3789 	spin_unlock_bh(&ar->htt.tx_lock);
3790 
3791 	if (ret)
3792 		return ret;
3793 
3794 	skb = ieee80211_tx_dequeue(hw, txq);
3795 	if (!skb) {
3796 		spin_lock_bh(&ar->htt.tx_lock);
3797 		ath10k_htt_tx_dec_pending(htt);
3798 		spin_unlock_bh(&ar->htt.tx_lock);
3799 
3800 		return -ENOENT;
3801 	}
3802 
3803 	ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb);
3804 
3805 	skb_len = skb->len;
3806 	txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
3807 	txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
3808 	is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT);
3809 
3810 	if (is_mgmt) {
3811 		hdr = (struct ieee80211_hdr *)skb->data;
3812 		is_presp = ieee80211_is_probe_resp(hdr->frame_control);
3813 
3814 		spin_lock_bh(&ar->htt.tx_lock);
3815 		ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
3816 
3817 		if (ret) {
3818 			ath10k_htt_tx_dec_pending(htt);
3819 			spin_unlock_bh(&ar->htt.tx_lock);
3820 			return ret;
3821 		}
3822 		spin_unlock_bh(&ar->htt.tx_lock);
3823 	}
3824 
3825 	ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
3826 	if (unlikely(ret)) {
3827 		ath10k_warn(ar, "failed to push frame: %d\n", ret);
3828 
3829 		spin_lock_bh(&ar->htt.tx_lock);
3830 		ath10k_htt_tx_dec_pending(htt);
3831 		if (is_mgmt)
3832 			ath10k_htt_tx_mgmt_dec_pending(htt);
3833 		spin_unlock_bh(&ar->htt.tx_lock);
3834 
3835 		return ret;
3836 	}
3837 
3838 	spin_lock_bh(&ar->htt.tx_lock);
3839 	artxq->num_fw_queued++;
3840 	spin_unlock_bh(&ar->htt.tx_lock);
3841 
3842 	return skb_len;
3843 }
3844 
3845 void ath10k_mac_tx_push_pending(struct ath10k *ar)
3846 {
3847 	struct ieee80211_hw *hw = ar->hw;
3848 	struct ieee80211_txq *txq;
3849 	struct ath10k_txq *artxq;
3850 	struct ath10k_txq *last;
3851 	int ret;
3852 	int max;
3853 
3854 	if (ar->htt.num_pending_tx >= (ar->htt.max_num_pending_tx / 2))
3855 		return;
3856 
3857 	spin_lock_bh(&ar->txqs_lock);
3858 	rcu_read_lock();
3859 
3860 	last = list_last_entry(&ar->txqs, struct ath10k_txq, list);
3861 	while (!list_empty(&ar->txqs)) {
3862 		artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list);
3863 		txq = container_of((void *)artxq, struct ieee80211_txq,
3864 				   drv_priv);
3865 
3866 		/* Prevent aggressive sta/tid taking over tx queue */
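		/* (Each pass pushes at most 16 frames from this txq; the queue
		 * is then rotated to the tail below so other stations/TIDs
		 * get a turn.)
		 */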
3867 		max = 16;
3868 		ret = 0;
3869 		while (ath10k_mac_tx_can_push(hw, txq) && max--) {
3870 			ret = ath10k_mac_tx_push_txq(hw, txq);
3871 			if (ret < 0)
3872 				break;
3873 		}
3874 
3875 		list_del_init(&artxq->list);
3876 		if (ret != -ENOENT)
3877 			list_add_tail(&artxq->list, &ar->txqs);
3878 
3879 		ath10k_htt_tx_txq_update(hw, txq);
3880 
3881 		if (artxq == last || (ret < 0 && ret != -ENOENT))
3882 			break;
3883 	}
3884 
3885 	rcu_read_unlock();
3886 	spin_unlock_bh(&ar->txqs_lock);
3887 }
3888 
3889 /************/
3890 /* Scanning */
3891 /************/
3892 
3893 void __ath10k_scan_finish(struct ath10k *ar)
3894 {
3895 	lockdep_assert_held(&ar->data_lock);
3896 
3897 	switch (ar->scan.state) {
3898 	case ATH10K_SCAN_IDLE:
3899 		break;
3900 	case ATH10K_SCAN_RUNNING:
3901 	case ATH10K_SCAN_ABORTING:
3902 		if (!ar->scan.is_roc) {
3903 			struct cfg80211_scan_info info = {
3904 				.aborted = (ar->scan.state ==
3905 					    ATH10K_SCAN_ABORTING),
3906 			};
3907 
3908 			ieee80211_scan_completed(ar->hw, &info);
3909 		} else if (ar->scan.roc_notify) {
3910 			ieee80211_remain_on_channel_expired(ar->hw);
3911 		}
3912 		/* fall through */
3913 	case ATH10K_SCAN_STARTING:
3914 		ar->scan.state = ATH10K_SCAN_IDLE;
3915 		ar->scan_channel = NULL;
3916 		ar->scan.roc_freq = 0;
3917 		ath10k_offchan_tx_purge(ar);
3918 		cancel_delayed_work(&ar->scan.timeout);
3919 		complete(&ar->scan.completed);
3920 		break;
3921 	}
3922 }
3923 
3924 void ath10k_scan_finish(struct ath10k *ar)
3925 {
3926 	spin_lock_bh(&ar->data_lock);
3927 	__ath10k_scan_finish(ar);
3928 	spin_unlock_bh(&ar->data_lock);
3929 }
3930 
3931 static int ath10k_scan_stop(struct ath10k *ar)
3932 {
3933 	struct wmi_stop_scan_arg arg = {
3934 		.req_id = 1, /* FIXME */
3935 		.req_type = WMI_SCAN_STOP_ONE,
3936 		.u.scan_id = ATH10K_SCAN_ID,
3937 	};
3938 	int ret;
3939 
3940 	lockdep_assert_held(&ar->conf_mutex);
3941 
3942 	ret = ath10k_wmi_stop_scan(ar, &arg);
3943 	if (ret) {
3944 		ath10k_warn(ar, "failed to stop wmi scan: %d\n", ret);
3945 		goto out;
3946 	}
3947 
3948 	ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ);
3949 	if (ret == 0) {
3950 		ath10k_warn(ar, "failed to receive scan abortion completion: timed out\n");
3951 		ret = -ETIMEDOUT;
3952 	} else if (ret > 0) {
3953 		ret = 0;
3954 	}
3955 
3956 out:
3957 	/* Scan state should be updated upon scan completion, but in case
3958 	 * firmware fails to deliver the event (for whatever reason) it is
3959 	 * desirable to clean up the scan state anyway. Firmware may have
3960 	 * simply dropped the scan completion event because the transport
3961 	 * pipe was overflowing with data, and/or it may recover on its own
3962 	 * before the next scan request is submitted.
3963 	 */
3964 	spin_lock_bh(&ar->data_lock);
3965 	if (ar->scan.state != ATH10K_SCAN_IDLE)
3966 		__ath10k_scan_finish(ar);
3967 	spin_unlock_bh(&ar->data_lock);
3968 
3969 	return ret;
3970 }
3971 
3972 static void ath10k_scan_abort(struct ath10k *ar)
3973 {
3974 	int ret;
3975 
3976 	lockdep_assert_held(&ar->conf_mutex);
3977 
3978 	spin_lock_bh(&ar->data_lock);
3979 
3980 	switch (ar->scan.state) {
3981 	case ATH10K_SCAN_IDLE:
3982 		/* This can happen if timeout worker kicked in and called
3983 		 * abortion while scan completion was being processed.
3984 		 */
3985 		break;
3986 	case ATH10K_SCAN_STARTING:
3987 	case ATH10K_SCAN_ABORTING:
3988 		ath10k_warn(ar, "refusing scan abortion due to invalid scan state: %s (%d)\n",
3989 			    ath10k_scan_state_str(ar->scan.state),
3990 			    ar->scan.state);
3991 		break;
3992 	case ATH10K_SCAN_RUNNING:
3993 		ar->scan.state = ATH10K_SCAN_ABORTING;
3994 		spin_unlock_bh(&ar->data_lock);
3995 
3996 		ret = ath10k_scan_stop(ar);
3997 		if (ret)
3998 			ath10k_warn(ar, "failed to abort scan: %d\n", ret);
3999 
4000 		spin_lock_bh(&ar->data_lock);
4001 		break;
4002 	}
4003 
4004 	spin_unlock_bh(&ar->data_lock);
4005 }
4006 
4007 void ath10k_scan_timeout_work(struct work_struct *work)
4008 {
4009 	struct ath10k *ar = container_of(work, struct ath10k,
4010 					 scan.timeout.work);
4011 
4012 	mutex_lock(&ar->conf_mutex);
4013 	ath10k_scan_abort(ar);
4014 	mutex_unlock(&ar->conf_mutex);
4015 }
4016 
4017 static int ath10k_start_scan(struct ath10k *ar,
4018 			     const struct wmi_start_scan_arg *arg)
4019 {
4020 	int ret;
4021 
4022 	lockdep_assert_held(&ar->conf_mutex);
4023 
4024 	ret = ath10k_wmi_start_scan(ar, arg);
4025 	if (ret)
4026 		return ret;
4027 
4028 	ret = wait_for_completion_timeout(&ar->scan.started, 1 * HZ);
4029 	if (ret == 0) {
4030 		ret = ath10k_scan_stop(ar);
4031 		if (ret)
4032 			ath10k_warn(ar, "failed to stop scan: %d\n", ret);
4033 
4034 		return -ETIMEDOUT;
4035 	}
4036 
4037 	/* If we failed to start the scan, return error code at
4038 	 * this point.  This is probably due to some issue in the
4039 	 * firmware, but no need to wedge the driver due to that...
4040 	 */
4041 	spin_lock_bh(&ar->data_lock);
4042 	if (ar->scan.state == ATH10K_SCAN_IDLE) {
4043 		spin_unlock_bh(&ar->data_lock);
4044 		return -EINVAL;
4045 	}
4046 	spin_unlock_bh(&ar->data_lock);
4047 
4048 	return 0;
4049 }
4050 
4051 /**********************/
4052 /* mac80211 callbacks */
4053 /**********************/
4054 
4055 static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
4056 			     struct ieee80211_tx_control *control,
4057 			     struct sk_buff *skb)
4058 {
4059 	struct ath10k *ar = hw->priv;
4060 	struct ath10k_htt *htt = &ar->htt;
4061 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
4062 	struct ieee80211_vif *vif = info->control.vif;
4063 	struct ieee80211_sta *sta = control->sta;
4064 	struct ieee80211_txq *txq = NULL;
4065 	struct ieee80211_hdr *hdr = (void *)skb->data;
4066 	enum ath10k_hw_txrx_mode txmode;
4067 	enum ath10k_mac_tx_path txpath;
4068 	bool is_htt;
4069 	bool is_mgmt;
4070 	bool is_presp;
4071 	int ret;
4072 
4073 	ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb);
4074 
4075 	txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
4076 	txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
4077 	is_htt = (txpath == ATH10K_MAC_TX_HTT ||
4078 		  txpath == ATH10K_MAC_TX_HTT_MGMT);
4079 	is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT);
4080 
4081 	if (is_htt) {
4082 		spin_lock_bh(&ar->htt.tx_lock);
4083 		is_presp = ieee80211_is_probe_resp(hdr->frame_control);
4084 
4085 		ret = ath10k_htt_tx_inc_pending(htt);
4086 		if (ret) {
4087 			ath10k_warn(ar, "failed to increase tx pending count: %d, dropping\n",
4088 				    ret);
4089 			spin_unlock_bh(&ar->htt.tx_lock);
4090 			ieee80211_free_txskb(ar->hw, skb);
4091 			return;
4092 		}
4093 
4094 		ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
4095 		if (ret) {
4096 			ath10k_dbg(ar, ATH10K_DBG_MAC, "failed to increase tx mgmt pending count: %d, dropping\n",
4097 				   ret);
4098 			ath10k_htt_tx_dec_pending(htt);
4099 			spin_unlock_bh(&ar->htt.tx_lock);
4100 			ieee80211_free_txskb(ar->hw, skb);
4101 			return;
4102 		}
4103 		spin_unlock_bh(&ar->htt.tx_lock);
4104 	}
4105 
4106 	ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
4107 	if (ret) {
4108 		ath10k_warn(ar, "failed to transmit frame: %d\n", ret);
4109 		if (is_htt) {
4110 			spin_lock_bh(&ar->htt.tx_lock);
4111 			ath10k_htt_tx_dec_pending(htt);
4112 			if (is_mgmt)
4113 				ath10k_htt_tx_mgmt_dec_pending(htt);
4114 			spin_unlock_bh(&ar->htt.tx_lock);
4115 		}
4116 		return;
4117 	}
4118 }
4119 
4120 static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
4121 					struct ieee80211_txq *txq)
4122 {
4123 	struct ath10k *ar = hw->priv;
4124 	struct ath10k_txq *artxq = (void *)txq->drv_priv;
4125 	struct ieee80211_txq *f_txq;
4126 	struct ath10k_txq *f_artxq;
4127 	int ret = 0;
4128 	int max = 16;
4129 
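	/* (Note: the txq serviced below is the head of ar->txqs, not
	 * necessarily the txq that woke up; queues are thus drained
	 * round-robin, at most 16 frames per invocation.)
	 */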
4130 	spin_lock_bh(&ar->txqs_lock);
4131 	if (list_empty(&artxq->list))
4132 		list_add_tail(&artxq->list, &ar->txqs);
4133 
4134 	f_artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list);
4135 	f_txq = container_of((void *)f_artxq, struct ieee80211_txq, drv_priv);
4136 	list_del_init(&f_artxq->list);
4137 
4138 	while (ath10k_mac_tx_can_push(hw, f_txq) && max--) {
4139 		ret = ath10k_mac_tx_push_txq(hw, f_txq);
4140 		if (ret)
4141 			break;
4142 	}
4143 	if (ret != -ENOENT)
4144 		list_add_tail(&f_artxq->list, &ar->txqs);
4145 	spin_unlock_bh(&ar->txqs_lock);
4146 
4147 	ath10k_htt_tx_txq_update(hw, f_txq);
4148 	ath10k_htt_tx_txq_update(hw, txq);
4149 }
4150 
4151 /* Must not be called with conf_mutex held as workers can use that also. */
4152 void ath10k_drain_tx(struct ath10k *ar)
4153 {
4154 	/* make sure rcu-protected mac80211 tx path itself is drained */
4155 	synchronize_net();
4156 
4157 	ath10k_offchan_tx_purge(ar);
4158 	ath10k_mgmt_over_wmi_tx_purge(ar);
4159 
4160 	cancel_work_sync(&ar->offchan_tx_work);
4161 	cancel_work_sync(&ar->wmi_mgmt_tx_work);
4162 }
4163 
4164 void ath10k_halt(struct ath10k *ar)
4165 {
4166 	struct ath10k_vif *arvif;
4167 
4168 	lockdep_assert_held(&ar->conf_mutex);
4169 
4170 	clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
4171 	ar->filter_flags = 0;
4172 	ar->monitor = false;
4173 	ar->monitor_arvif = NULL;
4174 
4175 	if (ar->monitor_started)
4176 		ath10k_monitor_stop(ar);
4177 
4178 	ar->monitor_started = false;
4179 	ar->tx_paused = 0;
4180 
4181 	ath10k_scan_finish(ar);
4182 	ath10k_peer_cleanup_all(ar);
4183 	ath10k_core_stop(ar);
4184 	ath10k_hif_power_down(ar);
4185 
4186 	spin_lock_bh(&ar->data_lock);
4187 	list_for_each_entry(arvif, &ar->arvifs, list)
4188 		ath10k_mac_vif_beacon_cleanup(arvif);
4189 	spin_unlock_bh(&ar->data_lock);
4190 }
4191 
4192 static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
4193 {
4194 	struct ath10k *ar = hw->priv;
4195 
4196 	mutex_lock(&ar->conf_mutex);
4197 
4198 	*tx_ant = ar->cfg_tx_chainmask;
4199 	*rx_ant = ar->cfg_rx_chainmask;
4200 
4201 	mutex_unlock(&ar->conf_mutex);
4202 
4203 	return 0;
4204 }
4205 
4206 static void ath10k_check_chain_mask(struct ath10k *ar, u32 cm, const char *dbg)
4207 {
4208 	/* It is not clear that allowing gaps in the chainmask
4209 	 * is helpful.  It probably will not do what the user
4210 	 * is hoping for, so warn in that case.
4211 	 */
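	/* For example, cm == 0x5 (chains 0 and 2 with a gap at chain 1) is
	 * not in the list below and triggers the warning.
	 */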
4212 	if (cm == 15 || cm == 7 || cm == 3 || cm == 1 || cm == 0)
4213 		return;
4214 
4215 	ath10k_warn(ar, "mac %s antenna chainmask may be invalid: 0x%x.  Suggested values: 15, 7, 3, 1 or 0.\n",
4216 		    dbg, cm);
4217 }
4218 
4219 static int ath10k_mac_get_vht_cap_bf_sts(struct ath10k *ar)
4220 {
4221 	int nsts = ar->vht_cap_info;
4222 
4223 	nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
4224 	nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
4225 
4226 	/* If firmware does not report to the host the number of space-time
4227 	 * streams it supports, assume it supports up to 4 BF STS and return
4228 	 * the value expected in the VHT CAP field (nsts - 1).
4229 	 */
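	/* (E.g. the default of 3 below advertises support for 4 beamformee
	 * space-time streams, per the field's "count minus one" encoding.)
	 */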
4230 	if (nsts == 0)
4231 		return 3;
4232 
4233 	return nsts;
4234 }
4235 
4236 static int ath10k_mac_get_vht_cap_bf_sound_dim(struct ath10k *ar)
4237 {
4238 	int sound_dim = ar->vht_cap_info;
4239 
4240 	sound_dim &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
4241 	sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
4242 
4243 	/* If the sounding dimension is not advertised by the firmware,
4244 	 * let's use a default value of 1
4245 	 */
4246 	if (sound_dim == 0)
4247 		return 1;
4248 
4249 	return sound_dim;
4250 }
4251 
4252 static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
4253 {
4254 	struct ieee80211_sta_vht_cap vht_cap = {0};
4255 	u16 mcs_map;
4256 	u32 val;
4257 	int i;
4258 
4259 	vht_cap.vht_supported = 1;
4260 	vht_cap.cap = ar->vht_cap_info;
4261 
4262 	if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
4263 				IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
4264 		val = ath10k_mac_get_vht_cap_bf_sts(ar);
4265 		val <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
4266 		val &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
4267 
4268 		vht_cap.cap |= val;
4269 	}
4270 
4271 	if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
4272 				IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
4273 		val = ath10k_mac_get_vht_cap_bf_sound_dim(ar);
4274 		val <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
4275 		val &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
4276 
4277 		vht_cap.cap |= val;
4278 	}
4279 
4280 	mcs_map = 0;
4281 	for (i = 0; i < 8; i++) {
4282 		if ((i < ar->num_rf_chains) && (ar->cfg_tx_chainmask & BIT(i)))
4283 			mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
4284 		else
4285 			mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
4286 	}
4287 
4288 	if (ar->cfg_tx_chainmask <= 1)
4289 		vht_cap.cap &= ~IEEE80211_VHT_CAP_TXSTBC;
4290 
4291 	vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
4292 	vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
4293 
4294 	return vht_cap;
4295 }
4296 
4297 static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar)
4298 {
4299 	int i;
4300 	struct ieee80211_sta_ht_cap ht_cap = {0};
4301 
4302 	if (!(ar->ht_cap_info & WMI_HT_CAP_ENABLED))
4303 		return ht_cap;
4304 
4305 	ht_cap.ht_supported = 1;
4306 	ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
4307 	ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
4308 	ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
4309 	ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
4310 	ht_cap.cap |=
4311 		WLAN_HT_CAP_SM_PS_DISABLED << IEEE80211_HT_CAP_SM_PS_SHIFT;
4312 
4313 	if (ar->ht_cap_info & WMI_HT_CAP_HT20_SGI)
4314 		ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
4315 
4316 	if (ar->ht_cap_info & WMI_HT_CAP_HT40_SGI)
4317 		ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
4318 
4319 	if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) {
4320 		u32 smps;
4321 
4322 		smps   = WLAN_HT_CAP_SM_PS_DYNAMIC;
4323 		smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT;
4324 
4325 		ht_cap.cap |= smps;
4326 	}
4327 
4328 	if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC && (ar->cfg_tx_chainmask > 1))
4329 		ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
4330 
4331 	if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) {
4332 		u32 stbc;
4333 
4334 		stbc   = ar->ht_cap_info;
4335 		stbc  &= WMI_HT_CAP_RX_STBC;
4336 		stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT;
4337 		stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT;
4338 		stbc  &= IEEE80211_HT_CAP_RX_STBC;
4339 
4340 		ht_cap.cap |= stbc;
4341 	}
4342 
4343 	if (ar->ht_cap_info & WMI_HT_CAP_LDPC)
4344 		ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
4345 
4346 	if (ar->ht_cap_info & WMI_HT_CAP_L_SIG_TXOP_PROT)
4347 		ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT;
4348 
4349 	/* max AMSDU is implicitly taken from vht_cap_info */
4350 	if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK)
4351 		ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
4352 
4353 	for (i = 0; i < ar->num_rf_chains; i++) {
4354 		if (ar->cfg_rx_chainmask & BIT(i))
4355 			ht_cap.mcs.rx_mask[i] = 0xFF;
4356 	}
4357 
4358 	ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
4359 
4360 	return ht_cap;
4361 }
4362 
4363 static void ath10k_mac_setup_ht_vht_cap(struct ath10k *ar)
4364 {
4365 	struct ieee80211_supported_band *band;
4366 	struct ieee80211_sta_vht_cap vht_cap;
4367 	struct ieee80211_sta_ht_cap ht_cap;
4368 
4369 	ht_cap = ath10k_get_ht_cap(ar);
4370 	vht_cap = ath10k_create_vht_cap(ar);
4371 
4372 	if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
4373 		band = &ar->mac.sbands[NL80211_BAND_2GHZ];
4374 		band->ht_cap = ht_cap;
4375 	}
4376 	if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
4377 		band = &ar->mac.sbands[NL80211_BAND_5GHZ];
4378 		band->ht_cap = ht_cap;
4379 		band->vht_cap = vht_cap;
4380 	}
4381 }
4382 
4383 static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant)
4384 {
4385 	int ret;
4386 
4387 	lockdep_assert_held(&ar->conf_mutex);
4388 
4389 	ath10k_check_chain_mask(ar, tx_ant, "tx");
4390 	ath10k_check_chain_mask(ar, rx_ant, "rx");
4391 
4392 	ar->cfg_tx_chainmask = tx_ant;
4393 	ar->cfg_rx_chainmask = rx_ant;
4394 
4395 	if ((ar->state != ATH10K_STATE_ON) &&
4396 	    (ar->state != ATH10K_STATE_RESTARTED))
4397 		return 0;
4398 
4399 	ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->tx_chain_mask,
4400 					tx_ant);
4401 	if (ret) {
4402 		ath10k_warn(ar, "failed to set tx-chainmask: %d, req 0x%x\n",
4403 			    ret, tx_ant);
4404 		return ret;
4405 	}
4406 
4407 	ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rx_chain_mask,
4408 					rx_ant);
4409 	if (ret) {
4410 		ath10k_warn(ar, "failed to set rx-chainmask: %d, req 0x%x\n",
4411 			    ret, rx_ant);
4412 		return ret;
4413 	}
4414 
4415 	/* Reload HT/VHT capability */
4416 	ath10k_mac_setup_ht_vht_cap(ar);
4417 
4418 	return 0;
4419 }
4420 
4421 static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
4422 {
4423 	struct ath10k *ar = hw->priv;
4424 	int ret;
4425 
4426 	mutex_lock(&ar->conf_mutex);
4427 	ret = __ath10k_set_antenna(ar, tx_ant, rx_ant);
4428 	mutex_unlock(&ar->conf_mutex);
4429 	return ret;
4430 }
4431 
4432 static int ath10k_start(struct ieee80211_hw *hw)
4433 {
4434 	struct ath10k *ar = hw->priv;
4435 	u32 param;
4436 	int ret = 0;
4437 
4438 	/*
4439 	 * This makes sense only when restarting hw. It is harmless to call
4440 	 * unconditionally. This is necessary to make sure no HTT/WMI tx
4441 	 * commands will be submitted while restarting.
4442 	 */
4443 	ath10k_drain_tx(ar);
4444 
4445 	mutex_lock(&ar->conf_mutex);
4446 
4447 	switch (ar->state) {
4448 	case ATH10K_STATE_OFF:
4449 		ar->state = ATH10K_STATE_ON;
4450 		break;
4451 	case ATH10K_STATE_RESTARTING:
4452 		ath10k_halt(ar);
4453 		ar->state = ATH10K_STATE_RESTARTED;
4454 		break;
4455 	case ATH10K_STATE_ON:
4456 	case ATH10K_STATE_RESTARTED:
4457 	case ATH10K_STATE_WEDGED:
4458 		WARN_ON(1);
4459 		ret = -EINVAL;
4460 		goto err;
4461 	case ATH10K_STATE_UTF:
4462 		ret = -EBUSY;
4463 		goto err;
4464 	}
4465 
4466 	ret = ath10k_hif_power_up(ar);
4467 	if (ret) {
4468 		ath10k_err(ar, "Could not init hif: %d\n", ret);
4469 		goto err_off;
4470 	}
4471 
4472 	ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL,
4473 				&ar->normal_mode_fw);
4474 	if (ret) {
4475 		ath10k_err(ar, "Could not init core: %d\n", ret);
4476 		goto err_power_down;
4477 	}
4478 
4479 	param = ar->wmi.pdev_param->pmf_qos;
4480 	ret = ath10k_wmi_pdev_set_param(ar, param, 1);
4481 	if (ret) {
4482 		ath10k_warn(ar, "failed to enable PMF QOS: %d\n", ret);
4483 		goto err_core_stop;
4484 	}
4485 
4486 	param = ar->wmi.pdev_param->dynamic_bw;
4487 	ret = ath10k_wmi_pdev_set_param(ar, param, 1);
4488 	if (ret) {
4489 		ath10k_warn(ar, "failed to enable dynamic BW: %d\n", ret);
4490 		goto err_core_stop;
4491 	}
4492 
4493 	if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
4494 		ret = ath10k_wmi_adaptive_qcs(ar, true);
4495 		if (ret) {
4496 			ath10k_warn(ar, "failed to enable adaptive qcs: %d\n",
4497 				    ret);
4498 			goto err_core_stop;
4499 		}
4500 	}
4501 
4502 	if (test_bit(WMI_SERVICE_BURST, ar->wmi.svc_map)) {
4503 		param = ar->wmi.pdev_param->burst_enable;
4504 		ret = ath10k_wmi_pdev_set_param(ar, param, 0);
4505 		if (ret) {
4506 			ath10k_warn(ar, "failed to disable burst: %d\n", ret);
4507 			goto err_core_stop;
4508 		}
4509 	}
4510 
4511 	__ath10k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask);
4512 
4513 	/*
4514 	 * By default the FW sets the ARP frames' AC to voice (6). In that
4515 	 * case the ARP exchange does not work properly with UAPSD-enabled
4516 	 * APs: ARP requests which arrive with access category 0 are processed
4517 	 * by the network stack and sent back with access category 0, but the
4518 	 * FW changes the access category to 6. Setting the ARP frames' access
4519 	 * category to best effort (0) solves this problem.
4520 	 */
4521 
4522 	param = ar->wmi.pdev_param->arp_ac_override;
4523 	ret = ath10k_wmi_pdev_set_param(ar, param, 0);
4524 	if (ret) {
4525 		ath10k_warn(ar, "failed to set arp ac override parameter: %d\n",
4526 			    ret);
4527 		goto err_core_stop;
4528 	}
4529 
4530 	if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA,
4531 		     ar->running_fw->fw_file.fw_features)) {
4532 		ret = ath10k_wmi_pdev_enable_adaptive_cca(ar, 1,
4533 							  WMI_CCA_DETECT_LEVEL_AUTO,
4534 							  WMI_CCA_DETECT_MARGIN_AUTO);
4535 		if (ret) {
4536 			ath10k_warn(ar, "failed to enable adaptive cca: %d\n",
4537 				    ret);
4538 			goto err_core_stop;
4539 		}
4540 	}
4541 
4542 	param = ar->wmi.pdev_param->ani_enable;
4543 	ret = ath10k_wmi_pdev_set_param(ar, param, 1);
4544 	if (ret) {
4545 		ath10k_warn(ar, "failed to enable ani by default: %d\n",
4546 			    ret);
4547 		goto err_core_stop;
4548 	}
4549 
4550 	ar->ani_enabled = true;
4551 
4552 	if (ath10k_peer_stats_enabled(ar)) {
4553 		param = ar->wmi.pdev_param->peer_stats_update_period;
4554 		ret = ath10k_wmi_pdev_set_param(ar, param,
4555 						PEER_DEFAULT_STATS_UPDATE_PERIOD);
4556 		if (ret) {
4557 			ath10k_warn(ar,
4558 				    "failed to set peer stats period : %d\n",
4559 				    ret);
4560 			goto err_core_stop;
4561 		}
4562 	}
4563 
4564 	param = ar->wmi.pdev_param->enable_btcoex;
4565 	if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) &&
4566 	    test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM,
4567 		     ar->running_fw->fw_file.fw_features)) {
4568 		ret = ath10k_wmi_pdev_set_param(ar, param, 0);
4569 		if (ret) {
4570 			ath10k_warn(ar,
4571 				    "failed to set btcoex param: %d\n", ret);
4572 			goto err_core_stop;
4573 		}
4574 		clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags);
4575 	}
4576 
4577 	ar->num_started_vdevs = 0;
4578 	ath10k_regd_update(ar);
4579 
4580 	ath10k_spectral_start(ar);
4581 	ath10k_thermal_set_throttling(ar);
4582 
4583 	mutex_unlock(&ar->conf_mutex);
4584 	return 0;
4585 
4586 err_core_stop:
4587 	ath10k_core_stop(ar);
4588 
4589 err_power_down:
4590 	ath10k_hif_power_down(ar);
4591 
4592 err_off:
4593 	ar->state = ATH10K_STATE_OFF;
4594 
4595 err:
4596 	mutex_unlock(&ar->conf_mutex);
4597 	return ret;
4598 }
4599 
4600 static void ath10k_stop(struct ieee80211_hw *hw)
4601 {
4602 	struct ath10k *ar = hw->priv;
4603 
4604 	ath10k_drain_tx(ar);
4605 
4606 	mutex_lock(&ar->conf_mutex);
4607 	if (ar->state != ATH10K_STATE_OFF) {
4608 		ath10k_halt(ar);
4609 		ar->state = ATH10K_STATE_OFF;
4610 	}
4611 	mutex_unlock(&ar->conf_mutex);
4612 
4613 	cancel_delayed_work_sync(&ar->scan.timeout);
4614 	cancel_work_sync(&ar->restart_work);
4615 }
4616 
4617 static int ath10k_config_ps(struct ath10k *ar)
4618 {
4619 	struct ath10k_vif *arvif;
4620 	int ret = 0;
4621 
4622 	lockdep_assert_held(&ar->conf_mutex);
4623 
4624 	list_for_each_entry(arvif, &ar->arvifs, list) {
4625 		ret = ath10k_mac_vif_setup_ps(arvif);
4626 		if (ret) {
4627 			ath10k_warn(ar, "failed to setup powersave: %d\n", ret);
4628 			break;
4629 		}
4630 	}
4631 
4632 	return ret;
4633 }
4634 
4635 static int ath10k_mac_txpower_setup(struct ath10k *ar, int txpower)
4636 {
4637 	int ret;
4638 	u32 param;
4639 
4640 	lockdep_assert_held(&ar->conf_mutex);
4641 
4642 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac txpower %d\n", txpower);
4643 
4644 	param = ar->wmi.pdev_param->txpower_limit2g;
4645 	ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2);
4646 	if (ret) {
4647 		ath10k_warn(ar, "failed to set 2g txpower %d: %d\n",
4648 			    txpower, ret);
4649 		return ret;
4650 	}
4651 
4652 	param = ar->wmi.pdev_param->txpower_limit5g;
4653 	ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2);
4654 	if (ret) {
4655 		ath10k_warn(ar, "failed to set 5g txpower %d: %d\n",
4656 			    txpower, ret);
4657 		return ret;
4658 	}
4659 
4660 	return 0;
4661 }
4662 
4663 static int ath10k_mac_txpower_recalc(struct ath10k *ar)
4664 {
4665 	struct ath10k_vif *arvif;
4666 	int ret, txpower = -1;
4667 
4668 	lockdep_assert_held(&ar->conf_mutex);
4669 
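	/* The txpower limit WMI parameters are per-pdev, so the lowest
	 * txpower requested by any vif is the one that gets applied.
	 */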
4670 	list_for_each_entry(arvif, &ar->arvifs, list) {
4671 		WARN_ON(arvif->txpower < 0);
4672 
4673 		if (txpower == -1)
4674 			txpower = arvif->txpower;
4675 		else
4676 			txpower = min(txpower, arvif->txpower);
4677 	}
4678 
4679 	if (WARN_ON(txpower == -1))
4680 		return -EINVAL;
4681 
4682 	ret = ath10k_mac_txpower_setup(ar, txpower);
4683 	if (ret) {
4684 		ath10k_warn(ar, "failed to setup tx power %d: %d\n",
4685 			    txpower, ret);
4686 		return ret;
4687 	}
4688 
4689 	return 0;
4690 }
4691 
4692 static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
4693 {
4694 	struct ath10k *ar = hw->priv;
4695 	struct ieee80211_conf *conf = &hw->conf;
4696 	int ret = 0;
4697 
4698 	mutex_lock(&ar->conf_mutex);
4699 
4700 	if (changed & IEEE80211_CONF_CHANGE_PS)
4701 		ath10k_config_ps(ar);
4702 
4703 	if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
4704 		ar->monitor = conf->flags & IEEE80211_CONF_MONITOR;
4705 		ret = ath10k_monitor_recalc(ar);
4706 		if (ret)
4707 			ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
4708 	}
4709 
4710 	mutex_unlock(&ar->conf_mutex);
4711 	return ret;
4712 }
4713 
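/* Derive the number of spatial streams from a chainmask: 0xf -> 4,
 * 0x7 -> 3, 0x3 -> 2, anything else (including sparse masks such as
 * 0x5) falls back to 1.
 */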
4714 static u32 get_nss_from_chainmask(u16 chain_mask)
4715 {
4716 	if ((chain_mask & 0xf) == 0xf)
4717 		return 4;
4718 	else if ((chain_mask & 0x7) == 0x7)
4719 		return 3;
4720 	else if ((chain_mask & 0x3) == 0x3)
4721 		return 2;
4722 	return 1;
4723 }
4724 
4725 static int ath10k_mac_set_txbf_conf(struct ath10k_vif *arvif)
4726 {
4727 	u32 value = 0;
4728 	struct ath10k *ar = arvif->ar;
4729 	int nsts;
4730 	int sound_dim;
4731 
4732 	if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_BEFORE_ASSOC)
4733 		return 0;
4734 
4735 	nsts = ath10k_mac_get_vht_cap_bf_sts(ar);
4736 	if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
4737 				IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE))
4738 		value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET);
4739 
4740 	sound_dim = ath10k_mac_get_vht_cap_bf_sound_dim(ar);
4741 	if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
4742 				IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))
4743 		value |= SM(sound_dim, WMI_BF_SOUND_DIM_OFFSET);
4744 
4745 	if (!value)
4746 		return 0;
4747 
4748 	if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
4749 		value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
4750 
4751 	if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
4752 		value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFER |
4753 			  WMI_VDEV_PARAM_TXBF_SU_TX_BFER);
4754 
4755 	if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
4756 		value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
4757 
4758 	if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
4759 		value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFEE |
4760 			  WMI_VDEV_PARAM_TXBF_SU_TX_BFEE);
4761 
4762 	return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
4763 					 ar->wmi.vdev_param->txbf, value);
4764 }
4765 
4766 /*
4767  * TODO:
4768  * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE,
4769  * because we will send mgmt frames without CCK. This requirement
4770  * for P2P_FIND/GO_NEG should be handled by checking CCK flag
4771  * in the TX packet.
4772  */
4773 static int ath10k_add_interface(struct ieee80211_hw *hw,
4774 				struct ieee80211_vif *vif)
4775 {
4776 	struct ath10k *ar = hw->priv;
4777 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
4778 	struct ath10k_peer *peer;
4779 	enum wmi_sta_powersave_param param;
4780 	int ret = 0;
4781 	u32 value;
4782 	int bit;
4783 	int i;
4784 	u32 vdev_param;
4785 
4786 	vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
4787 
4788 	mutex_lock(&ar->conf_mutex);
4789 
4790 	memset(arvif, 0, sizeof(*arvif));
4791 	ath10k_mac_txq_init(vif->txq);
4792 
4793 	arvif->ar = ar;
4794 	arvif->vif = vif;
4795 
4796 	INIT_LIST_HEAD(&arvif->list);
4797 	INIT_WORK(&arvif->ap_csa_work, ath10k_mac_vif_ap_csa_work);
4798 	INIT_DELAYED_WORK(&arvif->connection_loss_work,
4799 			  ath10k_mac_vif_sta_connection_loss_work);
4800 
4801 	for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
4802 		arvif->bitrate_mask.control[i].legacy = 0xffffffff;
4803 		memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
4804 		       sizeof(arvif->bitrate_mask.control[i].ht_mcs));
4805 		memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
4806 		       sizeof(arvif->bitrate_mask.control[i].vht_mcs));
4807 	}
4808 
4809 	if (ar->num_peers >= ar->max_num_peers) {
4810 		ath10k_warn(ar, "refusing vdev creation due to insufficient peer entry resources in firmware\n");
4811 		ret = -ENOBUFS;
4812 		goto err;
4813 	}
4814 
4815 	if (ar->free_vdev_map == 0) {
4816 		ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n");
4817 		ret = -EBUSY;
4818 		goto err;
4819 	}
4820 	bit = __ffs64(ar->free_vdev_map);
4821 
4822 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac create vdev %i map %llx\n",
4823 		   bit, ar->free_vdev_map);
4824 
4825 	arvif->vdev_id = bit;
4826 	arvif->vdev_subtype =
4827 		ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_NONE);
4828 
4829 	switch (vif->type) {
4830 	case NL80211_IFTYPE_P2P_DEVICE:
4831 		arvif->vdev_type = WMI_VDEV_TYPE_STA;
4832 		arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
4833 					(ar, WMI_VDEV_SUBTYPE_P2P_DEVICE);
4834 		break;
4835 	case NL80211_IFTYPE_UNSPECIFIED:
4836 	case NL80211_IFTYPE_STATION:
4837 		arvif->vdev_type = WMI_VDEV_TYPE_STA;
4838 		if (vif->p2p)
4839 			arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
4840 					(ar, WMI_VDEV_SUBTYPE_P2P_CLIENT);
4841 		break;
4842 	case NL80211_IFTYPE_ADHOC:
4843 		arvif->vdev_type = WMI_VDEV_TYPE_IBSS;
4844 		break;
4845 	case NL80211_IFTYPE_MESH_POINT:
4846 		if (test_bit(WMI_SERVICE_MESH_11S, ar->wmi.svc_map)) {
4847 			arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
4848 						(ar, WMI_VDEV_SUBTYPE_MESH_11S);
4849 		} else if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
4850 			ret = -EINVAL;
4851 			ath10k_warn(ar, "must load driver with rawmode=1 to add mesh interfaces\n");
4852 			goto err;
4853 		}
4854 		arvif->vdev_type = WMI_VDEV_TYPE_AP;
4855 		break;
4856 	case NL80211_IFTYPE_AP:
4857 		arvif->vdev_type = WMI_VDEV_TYPE_AP;
4858 
4859 		if (vif->p2p)
4860 			arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
4861 						(ar, WMI_VDEV_SUBTYPE_P2P_GO);
4862 		break;
4863 	case NL80211_IFTYPE_MONITOR:
4864 		arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
4865 		break;
4866 	default:
4867 		WARN_ON(1);
4868 		break;
4869 	}
4870 
4871 	/* Using vdev_id as queue number will make it very easy to do per-vif
4872 	 * tx queue locking. This shouldn't wrap due to interface combinations
4873 	 * but do a modulo for correctness' sake and to prevent using offchannel tx
4874 	 * queues for regular vif tx.
4875 	 */
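	/* (The modulo keeps regular vifs off the last hw queue, which is
	 * thereby left free for offchannel tx.)
	 */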
4876 	vif->cab_queue = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);
4877 	for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++)
4878 		vif->hw_queue[i] = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);
4879 
4880 	/* Some firmware revisions don't wait for beacon tx completion before
4881 	 * sending another SWBA event. This could lead to hardware using old
4882 	 * (freed) beacon data in some cases, e.g. tx credit starvation
4883 	 * combined with missed TBTT. This is very very rare.
4884 	 *
4885 	 * On non-IOMMU-enabled hosts this is a potential security issue
4886 	 * because the hw could beacon some random data on the air.  On
4887 	 * IOMMU-enabled hosts DMAR faults would occur in most cases and the
4888 	 * target device would crash.
4889 	 *
4890 	 * Since no beacon tx completions (neither implicit nor explicit) are
4891 	 * propagated to the host, the only workaround for this is to allocate a
4892 	 * DMA-coherent buffer for the lifetime of a vif and use it for all
4893 	 * beacon tx commands. Worst case for this approach is some beacons may
4894 	 * become corrupted, e.g. have garbled IEs or out-of-date TIM bitmap.
4895 	 */
4896 	if (vif->type == NL80211_IFTYPE_ADHOC ||
4897 	    vif->type == NL80211_IFTYPE_MESH_POINT ||
4898 	    vif->type == NL80211_IFTYPE_AP) {
4899 		arvif->beacon_buf = dma_zalloc_coherent(ar->dev,
4900 							IEEE80211_MAX_FRAME_LEN,
4901 							&arvif->beacon_paddr,
4902 							GFP_ATOMIC);
4903 		if (!arvif->beacon_buf) {
4904 			ret = -ENOMEM;
4905 			ath10k_warn(ar, "failed to allocate beacon buffer: %d\n",
4906 				    ret);
4907 			goto err;
4908 		}
4909 	}
4910 	if (test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags))
4911 		arvif->nohwcrypt = true;
4912 
4913 	if (arvif->nohwcrypt &&
4914 	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
4915 		ath10k_warn(ar, "cryptmode module param needed for sw crypto\n");
		ret = -EINVAL;
4916 		goto err;
4917 	}
4918 
4919 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n",
4920 		   arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype,
4921 		   arvif->beacon_buf ? "single-buf" : "per-skb");
4922 
4923 	ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
4924 				     arvif->vdev_subtype, vif->addr);
4925 	if (ret) {
4926 		ath10k_warn(ar, "failed to create WMI vdev %i: %d\n",
4927 			    arvif->vdev_id, ret);
4928 		goto err;
4929 	}
4930 
4931 	ar->free_vdev_map &= ~(1LL << arvif->vdev_id);
4932 	list_add(&arvif->list, &ar->arvifs);
4933 
4934 	/* It makes no sense to have firmware do keepalives. mac80211 already
4935 	 * takes care of this with idle connection polling.
4936 	 */
4937 	ret = ath10k_mac_vif_disable_keepalive(arvif);
4938 	if (ret) {
4939 		ath10k_warn(ar, "failed to disable keepalive on vdev %i: %d\n",
4940 			    arvif->vdev_id, ret);
4941 		goto err_vdev_delete;
4942 	}
4943 
4944 	arvif->def_wep_key_idx = -1;
4945 
4946 	vdev_param = ar->wmi.vdev_param->tx_encap_type;
4947 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
4948 					ATH10K_HW_TXRX_NATIVE_WIFI);
4949 	/* 10.X firmware does not support this VDEV parameter. Do not warn */
4950 	if (ret && ret != -EOPNOTSUPP) {
4951 		ath10k_warn(ar, "failed to set vdev %i TX encapsulation: %d\n",
4952 			    arvif->vdev_id, ret);
4953 		goto err_vdev_delete;
4954 	}
4955 
4956 	/* Configuring the number of spatial streams for a monitor interface
4957 	 * causes a target assert in qca9888 and qca6174.
4958 	 */
4959 	if (ar->cfg_tx_chainmask && (vif->type != NL80211_IFTYPE_MONITOR)) {
4960 		u16 nss = get_nss_from_chainmask(ar->cfg_tx_chainmask);
4961 
4962 		vdev_param = ar->wmi.vdev_param->nss;
4963 		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
4964 						nss);
4965 		if (ret) {
4966 			ath10k_warn(ar, "failed to set vdev %i chainmask 0x%x, nss %i: %d\n",
4967 				    arvif->vdev_id, ar->cfg_tx_chainmask, nss,
4968 				    ret);
4969 			goto err_vdev_delete;
4970 		}
4971 	}
4972 
4973 	if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
4974 	    arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
4975 		ret = ath10k_peer_create(ar, vif, NULL, arvif->vdev_id,
4976 					 vif->addr, WMI_PEER_TYPE_DEFAULT);
4977 		if (ret) {
4978 			ath10k_warn(ar, "failed to create vdev %i peer for AP/IBSS: %d\n",
4979 				    arvif->vdev_id, ret);
4980 			goto err_vdev_delete;
4981 		}
4982 
4983 		spin_lock_bh(&ar->data_lock);
4984 
4985 		peer = ath10k_peer_find(ar, arvif->vdev_id, vif->addr);
4986 		if (!peer) {
4987 			ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n",
4988 				    vif->addr, arvif->vdev_id);
4989 			spin_unlock_bh(&ar->data_lock);
4990 			ret = -ENOENT;
4991 			goto err_peer_delete;
4992 		}
4993 
4994 		arvif->peer_id = find_first_bit(peer->peer_ids,
4995 						ATH10K_MAX_NUM_PEER_IDS);
4996 
4997 		spin_unlock_bh(&ar->data_lock);
4998 	} else {
4999 		arvif->peer_id = HTT_INVALID_PEERID;
5000 	}
5001 
5002 	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
5003 		ret = ath10k_mac_set_kickout(arvif);
5004 		if (ret) {
5005 			ath10k_warn(ar, "failed to set vdev %i kickout parameters: %d\n",
5006 				    arvif->vdev_id, ret);
5007 			goto err_peer_delete;
5008 		}
5009 	}
5010 
5011 	if (arvif->vdev_type == WMI_VDEV_TYPE_STA) {
5012 		param = WMI_STA_PS_PARAM_RX_WAKE_POLICY;
5013 		value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
5014 		ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
5015 						  param, value);
5016 		if (ret) {
5017 			ath10k_warn(ar, "failed to set vdev %i RX wake policy: %d\n",
5018 				    arvif->vdev_id, ret);
5019 			goto err_peer_delete;
5020 		}
5021 
5022 		ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif);
5023 		if (ret) {
5024 			ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n",
5025 				    arvif->vdev_id, ret);
5026 			goto err_peer_delete;
5027 		}
5028 
5029 		ret = ath10k_mac_vif_recalc_ps_poll_count(arvif);
5030 		if (ret) {
5031 			ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n",
5032 				    arvif->vdev_id, ret);
5033 			goto err_peer_delete;
5034 		}
5035 	}
5036 
5037 	ret = ath10k_mac_set_txbf_conf(arvif);
5038 	if (ret) {
5039 		ath10k_warn(ar, "failed to set txbf for vdev %d: %d\n",
5040 			    arvif->vdev_id, ret);
5041 		goto err_peer_delete;
5042 	}
5043 
5044 	ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
5045 	if (ret) {
5046 		ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
5047 			    arvif->vdev_id, ret);
5048 		goto err_peer_delete;
5049 	}
5050 
5051 	arvif->txpower = vif->bss_conf.txpower;
5052 	ret = ath10k_mac_txpower_recalc(ar);
5053 	if (ret) {
5054 		ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
5055 		goto err_peer_delete;
5056 	}
5057 
5058 	if (vif->type == NL80211_IFTYPE_MONITOR) {
5059 		ar->monitor_arvif = arvif;
5060 		ret = ath10k_monitor_recalc(ar);
5061 		if (ret) {
5062 			ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
5063 			goto err_peer_delete;
5064 		}
5065 	}
5066 
5067 	spin_lock_bh(&ar->htt.tx_lock);
5068 	if (!ar->tx_paused)
5069 		ieee80211_wake_queue(ar->hw, arvif->vdev_id);
5070 	spin_unlock_bh(&ar->htt.tx_lock);
5071 
5072 	mutex_unlock(&ar->conf_mutex);
5073 	return 0;
5074 
5075 err_peer_delete:
5076 	if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
5077 	    arvif->vdev_type == WMI_VDEV_TYPE_IBSS)
5078 		ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr);
5079 
5080 err_vdev_delete:
5081 	ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
5082 	ar->free_vdev_map |= 1LL << arvif->vdev_id;
5083 	list_del(&arvif->list);
5084 
5085 err:
5086 	if (arvif->beacon_buf) {
5087 		dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
5088 				  arvif->beacon_buf, arvif->beacon_paddr);
5089 		arvif->beacon_buf = NULL;
5090 	}
5091 
5092 	mutex_unlock(&ar->conf_mutex);
5093 
5094 	return ret;
5095 }
5096 
5097 static void ath10k_mac_vif_tx_unlock_all(struct ath10k_vif *arvif)
5098 {
5099 	int i;
5100 
5101 	for (i = 0; i < BITS_PER_LONG; i++)
5102 		ath10k_mac_vif_tx_unlock(arvif, i);
5103 }
5104 
5105 static void ath10k_remove_interface(struct ieee80211_hw *hw,
5106 				    struct ieee80211_vif *vif)
5107 {
5108 	struct ath10k *ar = hw->priv;
5109 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5110 	struct ath10k_peer *peer;
5111 	int ret;
5112 	int i;
5113 
5114 	cancel_work_sync(&arvif->ap_csa_work);
5115 	cancel_delayed_work_sync(&arvif->connection_loss_work);
5116 
5117 	mutex_lock(&ar->conf_mutex);
5118 
5119 	spin_lock_bh(&ar->data_lock);
5120 	ath10k_mac_vif_beacon_cleanup(arvif);
5121 	spin_unlock_bh(&ar->data_lock);
5122 
5123 	ret = ath10k_spectral_vif_stop(arvif);
5124 	if (ret)
5125 		ath10k_warn(ar, "failed to stop spectral for vdev %i: %d\n",
5126 			    arvif->vdev_id, ret);
5127 
5128 	ar->free_vdev_map |= 1LL << arvif->vdev_id;
5129 	list_del(&arvif->list);
5130 
5131 	if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
5132 	    arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
5133 		ret = ath10k_wmi_peer_delete(arvif->ar, arvif->vdev_id,
5134 					     vif->addr);
5135 		if (ret)
5136 			ath10k_warn(ar, "failed to submit AP/IBSS self-peer removal on vdev %i: %d\n",
5137 				    arvif->vdev_id, ret);
5138 
5139 		kfree(arvif->u.ap.noa_data);
5140 	}
5141 
5142 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n",
5143 		   arvif->vdev_id);
5144 
5145 	ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
5146 	if (ret)
5147 		ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n",
5148 			    arvif->vdev_id, ret);
5149 
5150 	/* Some firmware revisions don't notify host about self-peer removal
5151 	 * until after associated vdev is deleted.
5152 	 */
5153 	if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
5154 	    arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
5155 		ret = ath10k_wait_for_peer_deleted(ar, arvif->vdev_id,
5156 						   vif->addr);
5157 		if (ret)
5158 			ath10k_warn(ar, "failed to remove AP self-peer on vdev %i: %d\n",
5159 				    arvif->vdev_id, ret);
5160 
5161 		spin_lock_bh(&ar->data_lock);
5162 		ar->num_peers--;
5163 		spin_unlock_bh(&ar->data_lock);
5164 	}
5165 
5166 	spin_lock_bh(&ar->data_lock);
5167 	for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
5168 		peer = ar->peer_map[i];
5169 		if (!peer)
5170 			continue;
5171 
5172 		if (peer->vif == vif) {
5173 			ath10k_warn(ar, "found vif peer %pM entry on vdev %i after it was supposedly removed\n",
5174 				    vif->addr, arvif->vdev_id);
5175 			peer->vif = NULL;
5176 		}
5177 	}
5178 	spin_unlock_bh(&ar->data_lock);
5179 
5180 	ath10k_peer_cleanup(ar, arvif->vdev_id);
5181 	ath10k_mac_txq_unref(ar, vif->txq);
5182 
5183 	if (vif->type == NL80211_IFTYPE_MONITOR) {
5184 		ar->monitor_arvif = NULL;
5185 		ret = ath10k_monitor_recalc(ar);
5186 		if (ret)
5187 			ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
5188 	}
5189 
5190 	spin_lock_bh(&ar->htt.tx_lock);
5191 	ath10k_mac_vif_tx_unlock_all(arvif);
5192 	spin_unlock_bh(&ar->htt.tx_lock);
5193 
5194 	ath10k_mac_txq_unref(ar, vif->txq);
5195 
5196 	mutex_unlock(&ar->conf_mutex);
5197 }
5198 
5199 /*
5200  * FIXME: Has to be verified.
5201  */
5202 #define SUPPORTED_FILTERS			\
5203 	(FIF_ALLMULTI |				\
5204 	FIF_CONTROL |				\
5205 	FIF_PSPOLL |				\
5206 	FIF_OTHER_BSS |				\
5207 	FIF_BCN_PRBRESP_PROMISC |		\
5208 	FIF_PROBE_REQ |				\
5209 	FIF_FCSFAIL)
5210 
5211 static void ath10k_configure_filter(struct ieee80211_hw *hw,
5212 				    unsigned int changed_flags,
5213 				    unsigned int *total_flags,
5214 				    u64 multicast)
5215 {
5216 	struct ath10k *ar = hw->priv;
5217 	int ret;
5218 
5219 	mutex_lock(&ar->conf_mutex);
5220 
5221 	changed_flags &= SUPPORTED_FILTERS;
5222 	*total_flags &= SUPPORTED_FILTERS;
5223 	ar->filter_flags = *total_flags;
5224 
5225 	ret = ath10k_monitor_recalc(ar);
5226 	if (ret)
5227 		ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
5228 
5229 	mutex_unlock(&ar->conf_mutex);
5230 }
5231 
5232 static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
5233 				    struct ieee80211_vif *vif,
5234 				    struct ieee80211_bss_conf *info,
5235 				    u32 changed)
5236 {
5237 	struct ath10k *ar = hw->priv;
5238 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5239 	int ret = 0;
5240 	u32 vdev_param, pdev_param, slottime, preamble;
5241 
5242 	mutex_lock(&ar->conf_mutex);
5243 
5244 	if (changed & BSS_CHANGED_IBSS)
5245 		ath10k_control_ibss(arvif, info, vif->addr);
5246 
5247 	if (changed & BSS_CHANGED_BEACON_INT) {
5248 		arvif->beacon_interval = info->beacon_int;
5249 		vdev_param = ar->wmi.vdev_param->beacon_interval;
5250 		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5251 						arvif->beacon_interval);
5252 		ath10k_dbg(ar, ATH10K_DBG_MAC,
5253 			   "mac vdev %d beacon_interval %d\n",
5254 			   arvif->vdev_id, arvif->beacon_interval);
5255 
5256 		if (ret)
5257 			ath10k_warn(ar, "failed to set beacon interval for vdev %d: %i\n",
5258 				    arvif->vdev_id, ret);
5259 	}
5260 
5261 	if (changed & BSS_CHANGED_BEACON) {
5262 		ath10k_dbg(ar, ATH10K_DBG_MAC,
5263 			   "vdev %d set beacon tx mode to staggered\n",
5264 			   arvif->vdev_id);
5265 
5266 		pdev_param = ar->wmi.pdev_param->beacon_tx_mode;
5267 		ret = ath10k_wmi_pdev_set_param(ar, pdev_param,
5268 						WMI_BEACON_STAGGERED_MODE);
5269 		if (ret)
5270 			ath10k_warn(ar, "failed to set beacon mode for vdev %d: %i\n",
5271 				    arvif->vdev_id, ret);
5272 
5273 		ret = ath10k_mac_setup_bcn_tmpl(arvif);
5274 		if (ret)
5275 			ath10k_warn(ar, "failed to update beacon template: %d\n",
5276 				    ret);
5277 
5278 		if (ieee80211_vif_is_mesh(vif)) {
5279 			/* mesh doesn't use SSID but firmware needs it */
5280 			strncpy(arvif->u.ap.ssid, "mesh",
5281 				sizeof(arvif->u.ap.ssid));
5282 			arvif->u.ap.ssid_len = 4;
5283 		}
5284 	}
5285 
5286 	if (changed & BSS_CHANGED_AP_PROBE_RESP) {
5287 		ret = ath10k_mac_setup_prb_tmpl(arvif);
5288 		if (ret)
5289 			ath10k_warn(ar, "failed to setup probe resp template on vdev %i: %d\n",
5290 				    arvif->vdev_id, ret);
5291 	}
5292 
5293 	if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
5294 		arvif->dtim_period = info->dtim_period;
5295 
5296 		ath10k_dbg(ar, ATH10K_DBG_MAC,
5297 			   "mac vdev %d dtim_period %d\n",
5298 			   arvif->vdev_id, arvif->dtim_period);
5299 
5300 		vdev_param = ar->wmi.vdev_param->dtim_period;
5301 		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5302 						arvif->dtim_period);
5303 		if (ret)
5304 			ath10k_warn(ar, "failed to set dtim period for vdev %d: %i\n",
5305 				    arvif->vdev_id, ret);
5306 	}
5307 
5308 	if (changed & BSS_CHANGED_SSID &&
5309 	    vif->type == NL80211_IFTYPE_AP) {
5310 		arvif->u.ap.ssid_len = info->ssid_len;
5311 		if (info->ssid_len)
5312 			memcpy(arvif->u.ap.ssid, info->ssid, info->ssid_len);
5313 		arvif->u.ap.hidden_ssid = info->hidden_ssid;
5314 	}
5315 
5316 	if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid))
5317 		ether_addr_copy(arvif->bssid, info->bssid);
5318 
5319 	if (changed & BSS_CHANGED_BEACON_ENABLED)
5320 		ath10k_control_beaconing(arvif, info);
5321 
5322 	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
5323 		arvif->use_cts_prot = info->use_cts_prot;
5324 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n",
5325 			   arvif->vdev_id, info->use_cts_prot);
5326 
5327 		ret = ath10k_recalc_rtscts_prot(arvif);
5328 		if (ret)
5329 			ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
5330 				    arvif->vdev_id, ret);
5331 
5332 		vdev_param = ar->wmi.vdev_param->protection_mode;
5333 		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5334 						info->use_cts_prot ? 1 : 0);
5335 		if (ret)
5336 			ath10k_warn(ar, "failed to set protection mode %d on vdev %i: %d\n",
5337 				    info->use_cts_prot, arvif->vdev_id, ret);
5338 	}
5339 
5340 	if (changed & BSS_CHANGED_ERP_SLOT) {
5341 		if (info->use_short_slot)
5342 			slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */
5344 		else
5345 			slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */
5346 
5347 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n",
5348 			   arvif->vdev_id, slottime);
5349 
5350 		vdev_param = ar->wmi.vdev_param->slot_time;
5351 		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5352 						slottime);
5353 		if (ret)
5354 			ath10k_warn(ar, "failed to set erp slot for vdev %d: %i\n",
5355 				    arvif->vdev_id, ret);
5356 	}
5357 
5358 	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
5359 		if (info->use_short_preamble)
5360 			preamble = WMI_VDEV_PREAMBLE_SHORT;
5361 		else
5362 			preamble = WMI_VDEV_PREAMBLE_LONG;
5363 
5364 		ath10k_dbg(ar, ATH10K_DBG_MAC,
5365 			   "mac vdev %d preamble %d\n",
5366 			   arvif->vdev_id, preamble);
5367 
5368 		vdev_param = ar->wmi.vdev_param->preamble;
5369 		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5370 						preamble);
5371 		if (ret)
5372 			ath10k_warn(ar, "failed to set preamble for vdev %d: %i\n",
5373 				    arvif->vdev_id, ret);
5374 	}
5375 
5376 	if (changed & BSS_CHANGED_ASSOC) {
5377 		if (info->assoc) {
5378 			/* Workaround: Make sure monitor vdev is not running
5379 			 * when associating to prevent some firmware revisions
5380 			 * (e.g. 10.1 and 10.2) from crashing.
5381 			 */
5382 			if (ar->monitor_started)
5383 				ath10k_monitor_stop(ar);
5384 			ath10k_bss_assoc(hw, vif, info);
5385 			ath10k_monitor_recalc(ar);
5386 		} else {
5387 			ath10k_bss_disassoc(hw, vif);
5388 		}
5389 	}
5390 
5391 	if (changed & BSS_CHANGED_TXPOWER) {
5392 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev_id %i txpower %d\n",
5393 			   arvif->vdev_id, info->txpower);
5394 
5395 		arvif->txpower = info->txpower;
5396 		ret = ath10k_mac_txpower_recalc(ar);
5397 		if (ret)
5398 			ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
5399 	}
5400 
5401 	if (changed & BSS_CHANGED_PS) {
5402 		arvif->ps = vif->bss_conf.ps;
5403 
5404 		ret = ath10k_config_ps(ar);
5405 		if (ret)
5406 			ath10k_warn(ar, "failed to setup ps on vdev %i: %d\n",
5407 				    arvif->vdev_id, ret);
5408 	}
5409 
5410 	mutex_unlock(&ar->conf_mutex);
5411 }
5412 
5413 static int ath10k_hw_scan(struct ieee80211_hw *hw,
5414 			  struct ieee80211_vif *vif,
5415 			  struct ieee80211_scan_request *hw_req)
5416 {
5417 	struct ath10k *ar = hw->priv;
5418 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5419 	struct cfg80211_scan_request *req = &hw_req->req;
5420 	struct wmi_start_scan_arg arg;
5421 	int ret = 0;
5422 	int i;
5423 
5424 	mutex_lock(&ar->conf_mutex);
5425 
5426 	spin_lock_bh(&ar->data_lock);
5427 	switch (ar->scan.state) {
5428 	case ATH10K_SCAN_IDLE:
5429 		reinit_completion(&ar->scan.started);
5430 		reinit_completion(&ar->scan.completed);
5431 		ar->scan.state = ATH10K_SCAN_STARTING;
5432 		ar->scan.is_roc = false;
5433 		ar->scan.vdev_id = arvif->vdev_id;
5434 		ret = 0;
5435 		break;
5436 	case ATH10K_SCAN_STARTING:
5437 	case ATH10K_SCAN_RUNNING:
5438 	case ATH10K_SCAN_ABORTING:
5439 		ret = -EBUSY;
5440 		break;
5441 	}
5442 	spin_unlock_bh(&ar->data_lock);
5443 
5444 	if (ret)
5445 		goto exit;
5446 
5447 	memset(&arg, 0, sizeof(arg));
5448 	ath10k_wmi_start_scan_init(ar, &arg);
5449 	arg.vdev_id = arvif->vdev_id;
5450 	arg.scan_id = ATH10K_SCAN_ID;
5451 
5452 	if (req->ie_len) {
5453 		arg.ie_len = req->ie_len;
5454 		memcpy(arg.ie, req->ie, arg.ie_len);
5455 	}
5456 
5457 	if (req->n_ssids) {
5458 		arg.n_ssids = req->n_ssids;
5459 		for (i = 0; i < arg.n_ssids; i++) {
5460 			arg.ssids[i].len  = req->ssids[i].ssid_len;
5461 			arg.ssids[i].ssid = req->ssids[i].ssid;
5462 		}
5463 	} else {
5464 		arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
5465 	}
5466 
5467 	if (req->n_channels) {
5468 		arg.n_channels = req->n_channels;
5469 		for (i = 0; i < arg.n_channels; i++)
5470 			arg.channels[i] = req->channels[i]->center_freq;
5471 	}
5472 
5473 	ret = ath10k_start_scan(ar, &arg);
5474 	if (ret) {
5475 		ath10k_warn(ar, "failed to start hw scan: %d\n", ret);
5476 		spin_lock_bh(&ar->data_lock);
5477 		ar->scan.state = ATH10K_SCAN_IDLE;
5478 		spin_unlock_bh(&ar->data_lock);
5479 	}
5480 
5481 	/* Add a 200ms margin to account for event/command processing */
5482 	ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
5483 				     msecs_to_jiffies(arg.max_scan_time +
5484 						      200));
5485 
5486 exit:
5487 	mutex_unlock(&ar->conf_mutex);
5488 	return ret;
5489 }
5490 
5491 static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw,
5492 				  struct ieee80211_vif *vif)
5493 {
5494 	struct ath10k *ar = hw->priv;
5495 
5496 	mutex_lock(&ar->conf_mutex);
5497 	ath10k_scan_abort(ar);
5498 	mutex_unlock(&ar->conf_mutex);
5499 
5500 	cancel_delayed_work_sync(&ar->scan.timeout);
5501 }
5502 
5503 static void ath10k_set_key_h_def_keyidx(struct ath10k *ar,
5504 					struct ath10k_vif *arvif,
5505 					enum set_key_cmd cmd,
5506 					struct ieee80211_key_conf *key)
5507 {
5508 	u32 vdev_param = arvif->ar->wmi.vdev_param->def_keyid;
5509 	int ret;
5510 
5511 	/* 10.1 firmware branch requires default key index to be set to group
5512 	 * key index after installing it. Otherwise FW/HW Txes corrupted
5513 	 * key index after installing it. Otherwise FW/HW transmits corrupted
5514 	 * frames with multi-vif APs. This is not required for the main firmware
5515 	 *
5516 	 * This is also needed for 636 fw for IBSS-RSN to work more reliably.
5517 	 *
5518 	 * FIXME: It remains unknown if this is required for multi-vif STA
5519 	 * interfaces on 10.1.
5520 	 */
5521 
5522 	if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
5523 	    arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
5524 		return;
5525 
5526 	if (key->cipher == WLAN_CIPHER_SUITE_WEP40)
5527 		return;
5528 
5529 	if (key->cipher == WLAN_CIPHER_SUITE_WEP104)
5530 		return;
5531 
5532 	if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
5533 		return;
5534 
5535 	if (cmd != SET_KEY)
5536 		return;
5537 
5538 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5539 					key->keyidx);
5540 	if (ret)
5541 		ath10k_warn(ar, "failed to set vdev %i group key as default key: %d\n",
5542 			    arvif->vdev_id, ret);
5543 }
5544 
5545 static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
5546 			  struct ieee80211_vif *vif, struct ieee80211_sta *sta,
5547 			  struct ieee80211_key_conf *key)
5548 {
5549 	struct ath10k *ar = hw->priv;
5550 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5551 	struct ath10k_peer *peer;
5552 	const u8 *peer_addr;
5553 	bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
5554 		      key->cipher == WLAN_CIPHER_SUITE_WEP104;
5555 	int ret = 0;
5556 	int ret2;
5557 	u32 flags = 0;
5558 	u32 flags2;
5559 
5560 	/* this one needs to be done in software */
5561 	if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
5562 		return 1;
5563 
5564 	if (arvif->nohwcrypt)
5565 		return 1;
5566 
5567 	if (key->keyidx > WMI_MAX_KEY_INDEX)
5568 		return -ENOSPC;
5569 
5570 	mutex_lock(&ar->conf_mutex);
5571 
5572 	if (sta)
5573 		peer_addr = sta->addr;
5574 	else if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
5575 		peer_addr = vif->bss_conf.bssid;
5576 	else
5577 		peer_addr = vif->addr;
5578 
5579 	key->hw_key_idx = key->keyidx;
5580 
5581 	if (is_wep) {
5582 		if (cmd == SET_KEY)
5583 			arvif->wep_keys[key->keyidx] = key;
5584 		else
5585 			arvif->wep_keys[key->keyidx] = NULL;
5586 	}
5587 
5588 	/* The peer should not disappear midway (unless FW goes awry) since we
5589 	 * already hold conf_mutex. We just make sure it's there now. */
5590 	spin_lock_bh(&ar->data_lock);
5591 	peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
5592 	spin_unlock_bh(&ar->data_lock);
5593 
5594 	if (!peer) {
5595 		if (cmd == SET_KEY) {
5596 			ath10k_warn(ar, "failed to install key for non-existent peer %pM\n",
5597 				    peer_addr);
5598 			ret = -EOPNOTSUPP;
5599 			goto exit;
5600 		} else {
5601 			/* if the peer doesn't exist there is no key to disable
5602 			 * anymore */
5603 			goto exit;
5604 		}
5605 	}
5606 
5607 	if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
5608 		flags |= WMI_KEY_PAIRWISE;
5609 	else
5610 		flags |= WMI_KEY_GROUP;
5611 
5612 	if (is_wep) {
5613 		if (cmd == DISABLE_KEY)
5614 			ath10k_clear_vdev_key(arvif, key);
5615 
5616 		/* When WEP keys are uploaded it's possible that there are
5617 		 * stations associated already (e.g. when merging) without any
5618 		 * keys. Static WEP needs an explicit per-peer key upload.
5619 		 */
5620 		if (vif->type == NL80211_IFTYPE_ADHOC &&
5621 		    cmd == SET_KEY)
5622 			ath10k_mac_vif_update_wep_key(arvif, key);
5623 
5624 		/* 802.1x never sets the def_wep_key_idx so each set_key()
5625 		 * call changes default tx key.
5626 		 *
5627 		 * Static WEP sets def_wep_key_idx via .set_default_unicast_key
5628 		 * after first set_key().
5629 		 */
5630 		if (cmd == SET_KEY && arvif->def_wep_key_idx == -1)
5631 			flags |= WMI_KEY_TX_USAGE;
5632 	}
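	/* Illustrative walk-through of the flag logic above (added note, not
	 * from the original source): the first static WEP group key installed
	 * via SET_KEY while arvif->def_wep_key_idx is still -1 ends up with
	 *
	 *	flags == WMI_KEY_GROUP | WMI_KEY_TX_USAGE
	 *
	 * whereas a pairwise CCMP key simply carries WMI_KEY_PAIRWISE.
	 */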
5633 
5634 	ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags);
5635 	if (ret) {
5636 		WARN_ON(ret > 0);
5637 		ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n",
5638 			    arvif->vdev_id, peer_addr, ret);
5639 		goto exit;
5640 	}
5641 
5642 	/* mac80211 sets static WEP keys as groupwise while firmware requires
5643 	 * them to be installed twice as both pairwise and groupwise.
5644 	 */
5645 	if (is_wep && !sta && vif->type == NL80211_IFTYPE_STATION) {
5646 		flags2 = flags;
5647 		flags2 &= ~WMI_KEY_GROUP;
5648 		flags2 |= WMI_KEY_PAIRWISE;
5649 
5650 		ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags2);
5651 		if (ret) {
5652 			WARN_ON(ret > 0);
5653 			ath10k_warn(ar, "failed to install (ucast) key for vdev %i peer %pM: %d\n",
5654 				    arvif->vdev_id, peer_addr, ret);
5655 			ret2 = ath10k_install_key(arvif, key, DISABLE_KEY,
5656 						  peer_addr, flags);
5657 			if (ret2) {
5658 				WARN_ON(ret2 > 0);
5659 				ath10k_warn(ar, "failed to disable (mcast) key for vdev %i peer %pM: %d\n",
5660 					    arvif->vdev_id, peer_addr, ret2);
5661 			}
5662 			goto exit;
5663 		}
5664 	}
5665 
5666 	ath10k_set_key_h_def_keyidx(ar, arvif, cmd, key);
5667 
5668 	spin_lock_bh(&ar->data_lock);
5669 	peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
5670 	if (peer && cmd == SET_KEY)
5671 		peer->keys[key->keyidx] = key;
5672 	else if (peer && cmd == DISABLE_KEY)
5673 		peer->keys[key->keyidx] = NULL;
5674 	else if (peer == NULL)
5675 		/* impossible unless FW goes crazy */
5676 		ath10k_warn(ar, "Peer %pM disappeared!\n", peer_addr);
5677 	spin_unlock_bh(&ar->data_lock);
5678 
5679 exit:
5680 	mutex_unlock(&ar->conf_mutex);
5681 	return ret;
5682 }
5683 
5684 static void ath10k_set_default_unicast_key(struct ieee80211_hw *hw,
5685 					   struct ieee80211_vif *vif,
5686 					   int keyidx)
5687 {
5688 	struct ath10k *ar = hw->priv;
5689 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5690 	int ret;
5691 
5692 	mutex_lock(&arvif->ar->conf_mutex);
5693 
5694 	if (arvif->ar->state != ATH10K_STATE_ON)
5695 		goto unlock;
5696 
5697 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n",
5698 		   arvif->vdev_id, keyidx);
5699 
5700 	ret = ath10k_wmi_vdev_set_param(arvif->ar,
5701 					arvif->vdev_id,
5702 					arvif->ar->wmi.vdev_param->def_keyid,
5703 					keyidx);
5704 
5705 	if (ret) {
5706 		ath10k_warn(ar, "failed to update wep key index for vdev %d: %d\n",
5707 			    arvif->vdev_id,
5708 			    ret);
5709 		goto unlock;
5710 	}
5711 
5712 	arvif->def_wep_key_idx = keyidx;
5713 
5714 unlock:
5715 	mutex_unlock(&arvif->ar->conf_mutex);
5716 }
5717 
5718 static void ath10k_sta_rc_update_wk(struct work_struct *wk)
5719 {
5720 	struct ath10k *ar;
5721 	struct ath10k_vif *arvif;
5722 	struct ath10k_sta *arsta;
5723 	struct ieee80211_sta *sta;
5724 	struct cfg80211_chan_def def;
5725 	enum nl80211_band band;
5726 	const u8 *ht_mcs_mask;
5727 	const u16 *vht_mcs_mask;
5728 	u32 changed, bw, nss, smps;
5729 	int err;
5730 
5731 	arsta = container_of(wk, struct ath10k_sta, update_wk);
5732 	sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
5733 	arvif = arsta->arvif;
5734 	ar = arvif->ar;
5735 
5736 	if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
5737 		return;
5738 
5739 	band = def.chan->band;
5740 	ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
5741 	vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
5742 
5743 	spin_lock_bh(&ar->data_lock);
5744 
5745 	changed = arsta->changed;
5746 	arsta->changed = 0;
5747 
5748 	bw = arsta->bw;
5749 	nss = arsta->nss;
5750 	smps = arsta->smps;
5751 
5752 	spin_unlock_bh(&ar->data_lock);
5753 
5754 	mutex_lock(&ar->conf_mutex);
5755 
5756 	nss = max_t(u32, 1, nss);
5757 	nss = min(nss, max(ath10k_mac_max_ht_nss(ht_mcs_mask),
5758 			   ath10k_mac_max_vht_nss(vht_mcs_mask)));
5759 
5760 	if (changed & IEEE80211_RC_BW_CHANGED) {
5761 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n",
5762 			   sta->addr, bw);
5763 
5764 		err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
5765 						WMI_PEER_CHAN_WIDTH, bw);
5766 		if (err)
5767 			ath10k_warn(ar, "failed to update STA %pM peer bw %d: %d\n",
5768 				    sta->addr, bw, err);
5769 	}
5770 
5771 	if (changed & IEEE80211_RC_NSS_CHANGED) {
5772 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM nss %d\n",
5773 			   sta->addr, nss);
5774 
5775 		err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
5776 						WMI_PEER_NSS, nss);
5777 		if (err)
5778 			ath10k_warn(ar, "failed to update STA %pM nss %d: %d\n",
5779 				    sta->addr, nss, err);
5780 	}
5781 
5782 	if (changed & IEEE80211_RC_SMPS_CHANGED) {
5783 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM smps %d\n",
5784 			   sta->addr, smps);
5785 
5786 		err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
5787 						WMI_PEER_SMPS_STATE, smps);
5788 		if (err)
5789 			ath10k_warn(ar, "failed to update STA %pM smps %d: %d\n",
5790 				    sta->addr, smps, err);
5791 	}
5792 
5793 	if (changed & IEEE80211_RC_SUPP_RATES_CHANGED ||
5794 	    changed & IEEE80211_RC_NSS_CHANGED) {
5795 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates/nss\n",
5796 			   sta->addr);
5797 
5798 		err = ath10k_station_assoc(ar, arvif->vif, sta, true);
5799 		if (err)
5800 			ath10k_warn(ar, "failed to reassociate station: %pM\n",
5801 				    sta->addr);
5802 	}
5803 
5804 	mutex_unlock(&ar->conf_mutex);
5805 }
5806 
5807 static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif,
5808 				       struct ieee80211_sta *sta)
5809 {
5810 	struct ath10k *ar = arvif->ar;
5811 
5812 	lockdep_assert_held(&ar->conf_mutex);
5813 
5814 	if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
5815 		return 0;
5816 
5817 	if (ar->num_stations >= ar->max_num_stations)
5818 		return -ENOBUFS;
5819 
5820 	ar->num_stations++;
5821 
5822 	return 0;
5823 }
5824 
5825 static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif,
5826 					struct ieee80211_sta *sta)
5827 {
5828 	struct ath10k *ar = arvif->ar;
5829 
5830 	lockdep_assert_held(&ar->conf_mutex);
5831 
5832 	if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
5833 		return;
5834 
5835 	ar->num_stations--;
5836 }
5837 
5838 struct ath10k_mac_tdls_iter_data {
5839 	u32 num_tdls_stations;
5840 	struct ieee80211_vif *curr_vif;
5841 };
5842 
5843 static void ath10k_mac_tdls_vif_stations_count_iter(void *data,
5844 						    struct ieee80211_sta *sta)
5845 {
5846 	struct ath10k_mac_tdls_iter_data *iter_data = data;
5847 	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
5848 	struct ieee80211_vif *sta_vif = arsta->arvif->vif;
5849 
5850 	if (sta->tdls && sta_vif == iter_data->curr_vif)
5851 		iter_data->num_tdls_stations++;
5852 }
5853 
5854 static int ath10k_mac_tdls_vif_stations_count(struct ieee80211_hw *hw,
5855 					      struct ieee80211_vif *vif)
5856 {
5857 	struct ath10k_mac_tdls_iter_data data = {};
5858 
5859 	data.curr_vif = vif;
5860 
5861 	ieee80211_iterate_stations_atomic(hw,
5862 					  ath10k_mac_tdls_vif_stations_count_iter,
5863 					  &data);
5864 	return data.num_tdls_stations;
5865 }
5866 
5867 static void ath10k_mac_tdls_vifs_count_iter(void *data, u8 *mac,
5868 					    struct ieee80211_vif *vif)
5869 {
5870 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5871 	int *num_tdls_vifs = data;
5872 
5873 	if (vif->type != NL80211_IFTYPE_STATION)
5874 		return;
5875 
5876 	if (ath10k_mac_tdls_vif_stations_count(arvif->ar->hw, vif) > 0)
5877 		(*num_tdls_vifs)++;
5878 }
5879 
5880 static int ath10k_mac_tdls_vifs_count(struct ieee80211_hw *hw)
5881 {
5882 	int num_tdls_vifs = 0;
5883 
5884 	ieee80211_iterate_active_interfaces_atomic(hw,
5885 						   IEEE80211_IFACE_ITER_NORMAL,
5886 						   ath10k_mac_tdls_vifs_count_iter,
5887 						   &num_tdls_vifs);
5888 	return num_tdls_vifs;
5889 }
5890 
5891 static int ath10k_sta_state(struct ieee80211_hw *hw,
5892 			    struct ieee80211_vif *vif,
5893 			    struct ieee80211_sta *sta,
5894 			    enum ieee80211_sta_state old_state,
5895 			    enum ieee80211_sta_state new_state)
5896 {
5897 	struct ath10k *ar = hw->priv;
5898 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5899 	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
5900 	struct ath10k_peer *peer;
5901 	int ret = 0;
5902 	int i;
5903 
5904 	if (old_state == IEEE80211_STA_NOTEXIST &&
5905 	    new_state == IEEE80211_STA_NONE) {
5906 		memset(arsta, 0, sizeof(*arsta));
5907 		arsta->arvif = arvif;
5908 		INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk);
5909 
5910 		for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
5911 			ath10k_mac_txq_init(sta->txq[i]);
5912 	}
5913 
5914 	/* cancel must be done outside the mutex to avoid deadlock */
5915 	if ((old_state == IEEE80211_STA_NONE &&
5916 	     new_state == IEEE80211_STA_NOTEXIST))
5917 		cancel_work_sync(&arsta->update_wk);
5918 
5919 	mutex_lock(&ar->conf_mutex);
5920 
5921 	if (old_state == IEEE80211_STA_NOTEXIST &&
5922 	    new_state == IEEE80211_STA_NONE) {
5923 		/*
5924 		 * New station addition.
5925 		 */
5926 		enum wmi_peer_type peer_type = WMI_PEER_TYPE_DEFAULT;
5927 		u32 num_tdls_stations;
5928 		u32 num_tdls_vifs;
5929 
5930 		ath10k_dbg(ar, ATH10K_DBG_MAC,
5931 			   "mac vdev %d peer create %pM (new sta) sta %d / %d peer %d / %d\n",
5932 			   arvif->vdev_id, sta->addr,
5933 			   ar->num_stations + 1, ar->max_num_stations,
5934 			   ar->num_peers + 1, ar->max_num_peers);
5935 
5936 		ret = ath10k_mac_inc_num_stations(arvif, sta);
5937 		if (ret) {
5938 			ath10k_warn(ar, "refusing to associate station: too many connected already (%d)\n",
5939 				    ar->max_num_stations);
5940 			goto exit;
5941 		}
5942 
5943 		if (sta->tdls)
5944 			peer_type = WMI_PEER_TYPE_TDLS;
5945 
5946 		ret = ath10k_peer_create(ar, vif, sta, arvif->vdev_id,
5947 					 sta->addr, peer_type);
5948 		if (ret) {
5949 			ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n",
5950 				    sta->addr, arvif->vdev_id, ret);
5951 			ath10k_mac_dec_num_stations(arvif, sta);
5952 			goto exit;
5953 		}
5954 
5955 		spin_lock_bh(&ar->data_lock);
5956 
5957 		peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr);
5958 		if (!peer) {
5959 			ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n",
5960 				    vif->addr, arvif->vdev_id);
5961 			spin_unlock_bh(&ar->data_lock);
5962 			ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
5963 			ath10k_mac_dec_num_stations(arvif, sta);
5964 			ret = -ENOENT;
5965 			goto exit;
5966 		}
5967 
5968 		arsta->peer_id = find_first_bit(peer->peer_ids,
5969 						ATH10K_MAX_NUM_PEER_IDS);
5970 
5971 		spin_unlock_bh(&ar->data_lock);
5972 
5973 		if (!sta->tdls)
5974 			goto exit;
5975 
5976 		num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif);
5977 		num_tdls_vifs = ath10k_mac_tdls_vifs_count(hw);
5978 
5979 		if (num_tdls_vifs >= ar->max_num_tdls_vdevs &&
5980 		    num_tdls_stations == 0) {
5981 			ath10k_warn(ar, "vdev %i exceeded maximum number of tdls vdevs %i\n",
5982 				    arvif->vdev_id, ar->max_num_tdls_vdevs);
5983 			ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
5984 			ath10k_mac_dec_num_stations(arvif, sta);
5985 			ret = -ENOBUFS;
5986 			goto exit;
5987 		}
5988 
5989 		if (num_tdls_stations == 0) {
5990 			/* This is the first tdls peer in current vif */
5991 			enum wmi_tdls_state state = WMI_TDLS_ENABLE_ACTIVE;
5992 
5993 			ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
5994 							      state);
5995 			if (ret) {
5996 				ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
5997 					    arvif->vdev_id, ret);
5998 				ath10k_peer_delete(ar, arvif->vdev_id,
5999 						   sta->addr);
6000 				ath10k_mac_dec_num_stations(arvif, sta);
6001 				goto exit;
6002 			}
6003 		}
6004 
6005 		ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta,
6006 						  WMI_TDLS_PEER_STATE_PEERING);
6007 		if (ret) {
6008 			ath10k_warn(ar,
6009 				    "failed to update tdls peer %pM for vdev %d when adding a new sta: %i\n",
6010 				    sta->addr, arvif->vdev_id, ret);
6011 			ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
6012 			ath10k_mac_dec_num_stations(arvif, sta);
6013 
6014 			if (num_tdls_stations != 0)
6015 				goto exit;
6016 			ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
6017 							WMI_TDLS_DISABLE);
6018 		}
6019 	} else if ((old_state == IEEE80211_STA_NONE &&
6020 		    new_state == IEEE80211_STA_NOTEXIST)) {
6021 		/*
6022 		 * Existing station deletion.
6023 		 */
6024 		ath10k_dbg(ar, ATH10K_DBG_MAC,
6025 			   "mac vdev %d peer delete %pM sta %pK (sta gone)\n",
6026 			   arvif->vdev_id, sta->addr, sta);
6027 
6028 		ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
6029 		if (ret)
6030 			ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n",
6031 				    sta->addr, arvif->vdev_id, ret);
6032 
6033 		ath10k_mac_dec_num_stations(arvif, sta);
6034 
6035 		spin_lock_bh(&ar->data_lock);
6036 		for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
6037 			peer = ar->peer_map[i];
6038 			if (!peer)
6039 				continue;
6040 
6041 			if (peer->sta == sta) {
6042 				ath10k_warn(ar, "found sta peer %pM (ptr %pK id %d) entry on vdev %i after it was supposedly removed\n",
6043 					    sta->addr, peer, i, arvif->vdev_id);
6044 				peer->sta = NULL;
6045 
6046 				/* Clean up the peer object as well since we
6047 				 * must have failed to do this above.
6048 				 */
6049 				list_del(&peer->list);
6050 				ar->peer_map[i] = NULL;
6051 				kfree(peer);
6052 				ar->num_peers--;
6053 			}
6054 		}
6055 		spin_unlock_bh(&ar->data_lock);
6056 
6057 		for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
6058 			ath10k_mac_txq_unref(ar, sta->txq[i]);
6059 
6060 		if (!sta->tdls)
6061 			goto exit;
6062 
6063 		if (ath10k_mac_tdls_vif_stations_count(hw, vif))
6064 			goto exit;
6065 
6066 		/* This was the last tdls peer in current vif */
6067 		ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
6068 						      WMI_TDLS_DISABLE);
6069 		if (ret) {
6070 			ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
6071 				    arvif->vdev_id, ret);
6072 		}
6073 	} else if (old_state == IEEE80211_STA_AUTH &&
6074 		   new_state == IEEE80211_STA_ASSOC &&
6075 		   (vif->type == NL80211_IFTYPE_AP ||
6076 		    vif->type == NL80211_IFTYPE_MESH_POINT ||
6077 		    vif->type == NL80211_IFTYPE_ADHOC)) {
6078 		/*
6079 		 * New association.
6080 		 */
6081 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM associated\n",
6082 			   sta->addr);
6083 
6084 		ret = ath10k_station_assoc(ar, vif, sta, false);
6085 		if (ret)
6086 			ath10k_warn(ar, "failed to associate station %pM for vdev %i: %i\n",
6087 				    sta->addr, arvif->vdev_id, ret);
6088 	} else if (old_state == IEEE80211_STA_ASSOC &&
6089 		   new_state == IEEE80211_STA_AUTHORIZED &&
6090 		   sta->tdls) {
6091 		/*
6092 		 * Tdls station authorized.
6093 		 */
6094 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac tdls sta %pM authorized\n",
6095 			   sta->addr);
6096 
6097 		ret = ath10k_station_assoc(ar, vif, sta, false);
6098 		if (ret) {
6099 			ath10k_warn(ar, "failed to associate tdls station %pM for vdev %i: %i\n",
6100 				    sta->addr, arvif->vdev_id, ret);
6101 			goto exit;
6102 		}
6103 
6104 		ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta,
6105 						  WMI_TDLS_PEER_STATE_CONNECTED);
6106 		if (ret)
6107 			ath10k_warn(ar, "failed to update tdls peer %pM for vdev %i: %i\n",
6108 				    sta->addr, arvif->vdev_id, ret);
6109 	} else if (old_state == IEEE80211_STA_ASSOC &&
6110 		    new_state == IEEE80211_STA_AUTH &&
6111 		    (vif->type == NL80211_IFTYPE_AP ||
6112 		     vif->type == NL80211_IFTYPE_MESH_POINT ||
6113 		     vif->type == NL80211_IFTYPE_ADHOC)) {
6114 		/*
6115 		 * Disassociation.
6116 		 */
6117 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM disassociated\n",
6118 			   sta->addr);
6119 
6120 		ret = ath10k_station_disassoc(ar, vif, sta);
6121 		if (ret)
6122 			ath10k_warn(ar, "failed to disassociate station: %pM vdev %i: %i\n",
6123 				    sta->addr, arvif->vdev_id, ret);
6124 	}
6125 exit:
6126 	mutex_unlock(&ar->conf_mutex);
6127 	return ret;
6128 }
6129 
6130 static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
6131 				u16 ac, bool enable)
6132 {
6133 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
6134 	struct wmi_sta_uapsd_auto_trig_arg arg = {};
6135 	u32 prio = 0, acc = 0;
6136 	u32 value = 0;
6137 	int ret = 0;
6138 
6139 	lockdep_assert_held(&ar->conf_mutex);
6140 
6141 	if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
6142 		return 0;
6143 
6144 	switch (ac) {
6145 	case IEEE80211_AC_VO:
6146 		value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN |
6147 			WMI_STA_PS_UAPSD_AC3_TRIGGER_EN;
6148 		prio = 7;
6149 		acc = 3;
6150 		break;
6151 	case IEEE80211_AC_VI:
6152 		value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN |
6153 			WMI_STA_PS_UAPSD_AC2_TRIGGER_EN;
6154 		prio = 5;
6155 		acc = 2;
6156 		break;
6157 	case IEEE80211_AC_BE:
6158 		value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN |
6159 			WMI_STA_PS_UAPSD_AC1_TRIGGER_EN;
6160 		prio = 2;
6161 		acc = 1;
6162 		break;
6163 	case IEEE80211_AC_BK:
6164 		value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN |
6165 			WMI_STA_PS_UAPSD_AC0_TRIGGER_EN;
6166 		prio = 0;
6167 		acc = 0;
6168 		break;
6169 	}
6170 
6171 	if (enable)
6172 		arvif->u.sta.uapsd |= value;
6173 	else
6174 		arvif->u.sta.uapsd &= ~value;
6175 
6176 	ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
6177 					  WMI_STA_PS_PARAM_UAPSD,
6178 					  arvif->u.sta.uapsd);
6179 	if (ret) {
6180 		ath10k_warn(ar, "failed to set uapsd params: %d\n", ret);
6181 		goto exit;
6182 	}
6183 
6184 	if (arvif->u.sta.uapsd)
6185 		value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD;
6186 	else
6187 		value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
6188 
6189 	ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
6190 					  WMI_STA_PS_PARAM_RX_WAKE_POLICY,
6191 					  value);
6192 	if (ret)
6193 		ath10k_warn(ar, "failed to set rx wake param: %d\n", ret);
6194 
6195 	ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif);
6196 	if (ret) {
6197 		ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n",
6198 			    arvif->vdev_id, ret);
6199 		return ret;
6200 	}
6201 
6202 	ret = ath10k_mac_vif_recalc_ps_poll_count(arvif);
6203 	if (ret) {
6204 		ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n",
6205 			    arvif->vdev_id, ret);
6206 		return ret;
6207 	}
6208 
6209 	if (test_bit(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, ar->wmi.svc_map) ||
6210 	    test_bit(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, ar->wmi.svc_map)) {
6211 		/* Only userspace can make an educated decision on when to send
6212 		 * trigger frames. The following effectively disables U-APSD
6213 		 * auto-triggering in firmware (which is enabled by default
6214 		 * provided the auto-trigger service is available).
6215 		 */
6216 
6217 		arg.wmm_ac = acc;
6218 		arg.user_priority = prio;
6219 		arg.service_interval = 0;
6220 		arg.suspend_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC;
6221 		arg.delay_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC;
6222 
6223 		ret = ath10k_wmi_vdev_sta_uapsd(ar, arvif->vdev_id,
6224 						arvif->bssid, &arg, 1);
6225 		if (ret) {
6226 			ath10k_warn(ar, "failed to set uapsd auto trigger %d\n",
6227 				    ret);
6228 			return ret;
6229 		}
6230 	}
6231 
6232 exit:
6233 	return ret;
6234 }
6235 
6236 static int ath10k_conf_tx(struct ieee80211_hw *hw,
6237 			  struct ieee80211_vif *vif, u16 ac,
6238 			  const struct ieee80211_tx_queue_params *params)
6239 {
6240 	struct ath10k *ar = hw->priv;
6241 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
6242 	struct wmi_wmm_params_arg *p = NULL;
6243 	int ret;
6244 
6245 	mutex_lock(&ar->conf_mutex);
6246 
6247 	switch (ac) {
6248 	case IEEE80211_AC_VO:
6249 		p = &arvif->wmm_params.ac_vo;
6250 		break;
6251 	case IEEE80211_AC_VI:
6252 		p = &arvif->wmm_params.ac_vi;
6253 		break;
6254 	case IEEE80211_AC_BE:
6255 		p = &arvif->wmm_params.ac_be;
6256 		break;
6257 	case IEEE80211_AC_BK:
6258 		p = &arvif->wmm_params.ac_bk;
6259 		break;
6260 	}
6261 
6262 	if (WARN_ON(!p)) {
6263 		ret = -EINVAL;
6264 		goto exit;
6265 	}
6266 
6267 	p->cwmin = params->cw_min;
6268 	p->cwmax = params->cw_max;
6269 	p->aifs = params->aifs;
6270 
6271 	/*
6272 	 * The channel time duration programmed in the HW is in absolute
6273 	 * microseconds, while mac80211 gives the txop in units of
6274 	 * 32 microseconds.
6275 	 */
6276 	p->txop = params->txop * 32;
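	/* Worked example of the conversion above (illustrative note): the
	 * common EDCA TXOP limit of 3.008 ms for AC_VI is reported by
	 * mac80211 as params->txop == 94 (32 us units), so the value
	 * programmed here becomes 94 * 32 = 3008 microseconds.
	 */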
6277 
6278 	if (ar->wmi.ops->gen_vdev_wmm_conf) {
6279 		ret = ath10k_wmi_vdev_wmm_conf(ar, arvif->vdev_id,
6280 					       &arvif->wmm_params);
6281 		if (ret) {
6282 			ath10k_warn(ar, "failed to set vdev wmm params on vdev %i: %d\n",
6283 				    arvif->vdev_id, ret);
6284 			goto exit;
6285 		}
6286 	} else {
6287 		/* This won't work well with multi-interface cases but it's
6288 		 * better than nothing.
6289 		 */
6290 		ret = ath10k_wmi_pdev_set_wmm_params(ar, &arvif->wmm_params);
6291 		if (ret) {
6292 			ath10k_warn(ar, "failed to set wmm params: %d\n", ret);
6293 			goto exit;
6294 		}
6295 	}
6296 
6297 	ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
6298 	if (ret)
6299 		ath10k_warn(ar, "failed to set sta uapsd: %d\n", ret);
6300 
6301 exit:
6302 	mutex_unlock(&ar->conf_mutex);
6303 	return ret;
6304 }
6305 
6306 #define ATH10K_ROC_TIMEOUT_HZ (2 * HZ)
6307 
6308 static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
6309 				    struct ieee80211_vif *vif,
6310 				    struct ieee80211_channel *chan,
6311 				    int duration,
6312 				    enum ieee80211_roc_type type)
6313 {
6314 	struct ath10k *ar = hw->priv;
6315 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
6316 	struct wmi_start_scan_arg arg;
6317 	int ret = 0;
6318 	u32 scan_time_msec;
6319 
6320 	mutex_lock(&ar->conf_mutex);
6321 
6322 	spin_lock_bh(&ar->data_lock);
6323 	switch (ar->scan.state) {
6324 	case ATH10K_SCAN_IDLE:
6325 		reinit_completion(&ar->scan.started);
6326 		reinit_completion(&ar->scan.completed);
6327 		reinit_completion(&ar->scan.on_channel);
6328 		ar->scan.state = ATH10K_SCAN_STARTING;
6329 		ar->scan.is_roc = true;
6330 		ar->scan.vdev_id = arvif->vdev_id;
6331 		ar->scan.roc_freq = chan->center_freq;
6332 		ar->scan.roc_notify = true;
6333 		ret = 0;
6334 		break;
6335 	case ATH10K_SCAN_STARTING:
6336 	case ATH10K_SCAN_RUNNING:
6337 	case ATH10K_SCAN_ABORTING:
6338 		ret = -EBUSY;
6339 		break;
6340 	}
6341 	spin_unlock_bh(&ar->data_lock);
6342 
6343 	if (ret)
6344 		goto exit;
6345 
6346 	scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2;
6347 
6348 	memset(&arg, 0, sizeof(arg));
6349 	ath10k_wmi_start_scan_init(ar, &arg);
6350 	arg.vdev_id = arvif->vdev_id;
6351 	arg.scan_id = ATH10K_SCAN_ID;
6352 	arg.n_channels = 1;
6353 	arg.channels[0] = chan->center_freq;
6354 	arg.dwell_time_active = scan_time_msec;
6355 	arg.dwell_time_passive = scan_time_msec;
6356 	arg.max_scan_time = scan_time_msec;
6357 	arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
6358 	arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ;
6359 	arg.burst_duration_ms = duration;
6360 
6361 	ret = ath10k_start_scan(ar, &arg);
6362 	if (ret) {
6363 		ath10k_warn(ar, "failed to start roc scan: %d\n", ret);
6364 		spin_lock_bh(&ar->data_lock);
6365 		ar->scan.state = ATH10K_SCAN_IDLE;
6366 		spin_unlock_bh(&ar->data_lock);
6367 		goto exit;
6368 	}
6369 
6370 	ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ);
6371 	if (ret == 0) {
6372 		ath10k_warn(ar, "failed to switch to channel for roc scan\n");
6373 
6374 		ret = ath10k_scan_stop(ar);
6375 		if (ret)
6376 			ath10k_warn(ar, "failed to stop scan: %d\n", ret);
6377 
6378 		ret = -ETIMEDOUT;
6379 		goto exit;
6380 	}
6381 
6382 	ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
6383 				     msecs_to_jiffies(duration));
6384 
6385 	ret = 0;
6386 exit:
6387 	mutex_unlock(&ar->conf_mutex);
6388 	return ret;
6389 }
6390 
6391 static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw)
6392 {
6393 	struct ath10k *ar = hw->priv;
6394 
6395 	mutex_lock(&ar->conf_mutex);
6396 
6397 	spin_lock_bh(&ar->data_lock);
6398 	ar->scan.roc_notify = false;
6399 	spin_unlock_bh(&ar->data_lock);
6400 
6401 	ath10k_scan_abort(ar);
6402 
6403 	mutex_unlock(&ar->conf_mutex);
6404 
6405 	cancel_delayed_work_sync(&ar->scan.timeout);
6406 
6407 	return 0;
6408 }
6409 
6410 /*
6411  * Both the RTS and fragmentation thresholds are interface-specific
6412  * in ath10k, but device-specific in mac80211.
6413  */
6414 
6415 static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
6416 {
6417 	struct ath10k *ar = hw->priv;
6418 	struct ath10k_vif *arvif;
6419 	int ret = 0;
6420 
6421 	mutex_lock(&ar->conf_mutex);
6422 	list_for_each_entry(arvif, &ar->arvifs, list) {
6423 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n",
6424 			   arvif->vdev_id, value);
6425 
6426 		ret = ath10k_mac_set_rts(arvif, value);
6427 		if (ret) {
6428 			ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
6429 				    arvif->vdev_id, ret);
6430 			break;
6431 		}
6432 	}
6433 	mutex_unlock(&ar->conf_mutex);
6434 
6435 	return ret;
6436 }
6437 
6438 static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
6439 {
6440 	/* Even though there's a WMI enum for the fragmentation threshold, no
6441 	 * known firmware actually implements it. Moreover, it is not possible to
6442 	 * leave frame fragmentation to mac80211 because firmware clears the "more
6443 	 * fragments" bit in the frame control field, making it impossible for remote
6444 	 * devices to reassemble frames.
6445 	 *
6446 	 * Hence implement a dummy callback just to say fragmentation isn't
6447 	 * supported. This effectively prevents mac80211 from doing frame
6448 	 * fragmentation in software.
6449 	 */
6450 	return -EOPNOTSUPP;
6451 }
6452 
6453 static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
6454 			 u32 queues, bool drop)
6455 {
6456 	struct ath10k *ar = hw->priv;
6457 	bool skip;
6458 	long time_left;
6459 
6460 	/* mac80211 doesn't care if we really xmit queued frames or not;
6461 	 * we'll collect those frames either way if we stop/delete vdevs. */
6462 	if (drop)
6463 		return;
6464 
6465 	mutex_lock(&ar->conf_mutex);
6466 
6467 	if (ar->state == ATH10K_STATE_WEDGED)
6468 		goto skip;
6469 
6470 	time_left = wait_event_timeout(ar->htt.empty_tx_wq, ({
6471 			bool empty;
6472 
6473 			spin_lock_bh(&ar->htt.tx_lock);
6474 			empty = (ar->htt.num_pending_tx == 0);
6475 			spin_unlock_bh(&ar->htt.tx_lock);
6476 
6477 			skip = (ar->state == ATH10K_STATE_WEDGED) ||
6478 			       test_bit(ATH10K_FLAG_CRASH_FLUSH,
6479 					&ar->dev_flags);
6480 
6481 			(empty || skip);
6482 		}), ATH10K_FLUSH_TIMEOUT_HZ);
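	/* Note added for clarity: the ({ ... }) construct above is a GCC
	 * statement expression, so the wait condition evaluates to
	 * (empty || skip), i.e. the wait completes once the HTT tx queue
	 * drains or once the device is wedged/marked as crashed.
	 */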
6483 
6484 	if (time_left == 0 || skip)
6485 		ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %ld\n",
6486 			    skip, ar->state, time_left);
6487 
6488 skip:
6489 	mutex_unlock(&ar->conf_mutex);
6490 }
6491 
6492 /* TODO: Implement this function properly
6493  * For now it is needed to reply to Probe Requests in IBSS mode.
6494  * Probably we need this information from FW.
6495  */
6496 static int ath10k_tx_last_beacon(struct ieee80211_hw *hw)
6497 {
6498 	return 1;
6499 }
6500 
6501 static void ath10k_reconfig_complete(struct ieee80211_hw *hw,
6502 				     enum ieee80211_reconfig_type reconfig_type)
6503 {
6504 	struct ath10k *ar = hw->priv;
6505 
6506 	if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
6507 		return;
6508 
6509 	mutex_lock(&ar->conf_mutex);
6510 
6511 	/* If the device failed to restart it will be in a different state,
6512 	 * e.g. ATH10K_STATE_WEDGED. */
6513 	if (ar->state == ATH10K_STATE_RESTARTED) {
6514 		ath10k_info(ar, "device successfully recovered\n");
6515 		ar->state = ATH10K_STATE_ON;
6516 		ieee80211_wake_queues(ar->hw);
6517 	}
6518 
6519 	mutex_unlock(&ar->conf_mutex);
6520 }
6521 
6522 static void
6523 ath10k_mac_update_bss_chan_survey(struct ath10k *ar,
6524 				  struct ieee80211_channel *channel)
6525 {
6526 	int ret;
6527 	enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR;
6528 
6529 	lockdep_assert_held(&ar->conf_mutex);
6530 
6531 	if (!test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map) ||
6532 	    (ar->rx_channel != channel))
6533 		return;
6534 
6535 	if (ar->scan.state != ATH10K_SCAN_IDLE) {
6536 		ath10k_dbg(ar, ATH10K_DBG_MAC, "ignoring bss chan info request while scanning...\n");
6537 		return;
6538 	}
6539 
6540 	reinit_completion(&ar->bss_survey_done);
6541 
6542 	ret = ath10k_wmi_pdev_bss_chan_info_request(ar, type);
6543 	if (ret) {
6544 		ath10k_warn(ar, "failed to send pdev bss chan info request\n");
6545 		return;
6546 	}
6547 
6548 	ret = wait_for_completion_timeout(&ar->bss_survey_done, 3 * HZ);
6549 	if (!ret) {
6550 		ath10k_warn(ar, "bss channel survey timed out\n");
6551 		return;
6552 	}
6553 }
6554 
6555 static int ath10k_get_survey(struct ieee80211_hw *hw, int idx,
6556 			     struct survey_info *survey)
6557 {
6558 	struct ath10k *ar = hw->priv;
6559 	struct ieee80211_supported_band *sband;
6560 	struct survey_info *ar_survey = &ar->survey[idx];
6561 	int ret = 0;
6562 
6563 	mutex_lock(&ar->conf_mutex);
6564 
6565 	sband = hw->wiphy->bands[NL80211_BAND_2GHZ];
6566 	if (sband && idx >= sband->n_channels) {
6567 		idx -= sband->n_channels;
6568 		sband = NULL;
6569 	}
6570 
6571 	if (!sband)
6572 		sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
6573 
6574 	if (!sband || idx >= sband->n_channels) {
6575 		ret = -ENOENT;
6576 		goto exit;
6577 	}
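	/* Index mapping example (illustrative note): with the 14 2 GHz
	 * channels defined in ath10k_2ghz_channels below, a survey idx of 16
	 * falls past the 2 GHz band and is remapped to 5 GHz channel index 2,
	 * i.e. the 5220 MHz channel.
	 */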
6578 
6579 	ath10k_mac_update_bss_chan_survey(ar, &sband->channels[idx]);
6580 
6581 	spin_lock_bh(&ar->data_lock);
6582 	memcpy(survey, ar_survey, sizeof(*survey));
6583 	spin_unlock_bh(&ar->data_lock);
6584 
6585 	survey->channel = &sband->channels[idx];
6586 
6587 	if (ar->rx_channel == survey->channel)
6588 		survey->filled |= SURVEY_INFO_IN_USE;
6589 
6590 exit:
6591 	mutex_unlock(&ar->conf_mutex);
6592 	return ret;
6593 }
6594 
6595 static bool
6596 ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar,
6597 					enum nl80211_band band,
6598 					const struct cfg80211_bitrate_mask *mask)
6599 {
6600 	int num_rates = 0;
6601 	int i;
6602 
6603 	num_rates += hweight32(mask->control[band].legacy);
6604 
6605 	for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++)
6606 		num_rates += hweight8(mask->control[band].ht_mcs[i]);
6607 
6608 	for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++)
6609 		num_rates += hweight16(mask->control[band].vht_mcs[i]);
6610 
6611 	return num_rates == 1;
6612 }
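/* Counting example for the helper above (illustrative note): a mask enabling
 * exactly one legacy bit (say BIT(0), the lowest legacy rate) with all HT and
 * VHT MCS maps cleared yields num_rates == 1, so the caller treats it as a
 * fixed-rate request.
 */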
6613 
6614 static bool
6615 ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar,
6616 				       enum nl80211_band band,
6617 				       const struct cfg80211_bitrate_mask *mask,
6618 				       int *nss)
6619 {
6620 	struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
6621 	u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
6622 	u8 ht_nss_mask = 0;
6623 	u8 vht_nss_mask = 0;
6624 	int i;
6625 
6626 	if (mask->control[band].legacy)
6627 		return false;
6628 
6629 	for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
6630 		if (mask->control[band].ht_mcs[i] == 0)
6631 			continue;
6632 		else if (mask->control[band].ht_mcs[i] ==
6633 			 sband->ht_cap.mcs.rx_mask[i])
6634 			ht_nss_mask |= BIT(i);
6635 		else
6636 			return false;
6637 	}
6638 
6639 	for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
6640 		if (mask->control[band].vht_mcs[i] == 0)
6641 			continue;
6642 		else if (mask->control[band].vht_mcs[i] ==
6643 			 ath10k_mac_get_max_vht_mcs_map(vht_mcs_map, i))
6644 			vht_nss_mask |= BIT(i);
6645 		else
6646 			return false;
6647 	}
6648 
6649 	if (ht_nss_mask != vht_nss_mask)
6650 		return false;
6651 
6652 	if (ht_nss_mask == 0)
6653 		return false;
6654 
6655 	if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask)
6656 		return false;
6657 
6658 	*nss = fls(ht_nss_mask);
6659 
6660 	return true;
6661 }
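/* Worked example for the contiguity check above (illustrative note): an HT
 * NSS mask of 0b0011 (streams 1-2 fully enabled) satisfies
 * BIT(fls(0b0011)) - 1 == 0b0011 and yields *nss = 2, while a gappy mask such
 * as 0b0101 fails the check and the function returns false.
 */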
6662 
6663 static int
6664 ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar,
6665 					enum nl80211_band band,
6666 					const struct cfg80211_bitrate_mask *mask,
6667 					u8 *rate, u8 *nss)
6668 {
6669 	struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
6670 	int rate_idx;
6671 	int i;
6672 	u16 bitrate;
6673 	u8 preamble;
6674 	u8 hw_rate;
6675 
6676 	if (hweight32(mask->control[band].legacy) == 1) {
6677 		rate_idx = ffs(mask->control[band].legacy) - 1;
6678 
6679 		hw_rate = sband->bitrates[rate_idx].hw_value;
6680 		bitrate = sband->bitrates[rate_idx].bitrate;
6681 
6682 		if (ath10k_mac_bitrate_is_cck(bitrate))
6683 			preamble = WMI_RATE_PREAMBLE_CCK;
6684 		else
6685 			preamble = WMI_RATE_PREAMBLE_OFDM;
6686 
6687 		*nss = 1;
6688 		*rate = preamble << 6 |
6689 			(*nss - 1) << 4 |
6690 			hw_rate << 0;
6691 
6692 		return 0;
6693 	}
6694 
6695 	for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
6696 		if (hweight8(mask->control[band].ht_mcs[i]) == 1) {
6697 			*nss = i + 1;
6698 			*rate = WMI_RATE_PREAMBLE_HT << 6 |
6699 				(*nss - 1) << 4 |
6700 				(ffs(mask->control[band].ht_mcs[i]) - 1);
6701 
6702 			return 0;
6703 		}
6704 	}
6705 
6706 	for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
6707 		if (hweight16(mask->control[band].vht_mcs[i]) == 1) {
6708 			*nss = i + 1;
6709 			*rate = WMI_RATE_PREAMBLE_VHT << 6 |
6710 				(*nss - 1) << 4 |
6711 				(ffs(mask->control[band].vht_mcs[i]) - 1);
6712 
6713 			return 0;
6714 		}
6715 	}
6716 
6717 	return -EINVAL;
6718 }
6719 
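/* Rate-word layout used above (illustrative note derived from the shifts in
 * the function): bits [7:6] carry the preamble type, bits [5:4] carry
 * (nss - 1) and bits [3:0] carry the hardware rate or MCS index. For example,
 * a mask selecting only HT MCS 5 on the second spatial stream encodes as
 * WMI_RATE_PREAMBLE_HT << 6 | 1 << 4 | 5.
 */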
6720 static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif,
6721 					    u8 rate, u8 nss, u8 sgi, u8 ldpc)
6722 {
6723 	struct ath10k *ar = arvif->ar;
6724 	u32 vdev_param;
6725 	int ret;
6726 
6727 	lockdep_assert_held(&ar->conf_mutex);
6728 
6729 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02hhx nss %hhu sgi %hhu\n",
6730 		   arvif->vdev_id, rate, nss, sgi);
6731 
6732 	vdev_param = ar->wmi.vdev_param->fixed_rate;
6733 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, rate);
6734 	if (ret) {
6735 		ath10k_warn(ar, "failed to set fixed rate param 0x%02x: %d\n",
6736 			    rate, ret);
6737 		return ret;
6738 	}
6739 
6740 	vdev_param = ar->wmi.vdev_param->nss;
6741 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, nss);
6742 	if (ret) {
6743 		ath10k_warn(ar, "failed to set nss param %d: %d\n", nss, ret);
6744 		return ret;
6745 	}
6746 
6747 	vdev_param = ar->wmi.vdev_param->sgi;
6748 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, sgi);
6749 	if (ret) {
6750 		ath10k_warn(ar, "failed to set sgi param %d: %d\n", sgi, ret);
6751 		return ret;
6752 	}
6753 
6754 	vdev_param = ar->wmi.vdev_param->ldpc;
6755 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, ldpc);
6756 	if (ret) {
6757 		ath10k_warn(ar, "failed to set ldpc param %d: %d\n", ldpc, ret);
6758 		return ret;
6759 	}
6760 
6761 	return 0;
6762 }
6763 
6764 static bool
6765 ath10k_mac_can_set_bitrate_mask(struct ath10k *ar,
6766 				enum nl80211_band band,
6767 				const struct cfg80211_bitrate_mask *mask)
6768 {
6769 	int i;
6770 	u16 vht_mcs;
6771 
6772 	/* Due to firmware limitation in WMI_PEER_ASSOC_CMDID it is impossible
6773 	 * to express all VHT MCS rate masks. Effectively only the following
6774 	 * ranges can be used: none, 0-7, 0-8 and 0-9.
6775 	 */
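	/* Illustrative note on the cases below: BIT(8) - 1 == 0x00ff selects
	 * MCS 0-7, BIT(9) - 1 == 0x01ff selects MCS 0-8 and BIT(10) - 1 ==
	 * 0x03ff selects MCS 0-9; any other mask is rejected.
	 */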
6776 	for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
6777 		vht_mcs = mask->control[band].vht_mcs[i];
6778 
6779 		switch (vht_mcs) {
6780 		case 0:
6781 		case BIT(8) - 1:
6782 		case BIT(9) - 1:
6783 		case BIT(10) - 1:
6784 			break;
6785 		default:
6786 			ath10k_warn(ar, "refusing bitrate mask with missing 0-7 VHT MCS rates\n");
6787 			return false;
6788 		}
6789 	}
6790 
6791 	return true;
6792 }
6793 
6794 static void ath10k_mac_set_bitrate_mask_iter(void *data,
6795 					     struct ieee80211_sta *sta)
6796 {
6797 	struct ath10k_vif *arvif = data;
6798 	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
6799 	struct ath10k *ar = arvif->ar;
6800 
6801 	if (arsta->arvif != arvif)
6802 		return;
6803 
6804 	spin_lock_bh(&ar->data_lock);
6805 	arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
6806 	spin_unlock_bh(&ar->data_lock);
6807 
6808 	ieee80211_queue_work(ar->hw, &arsta->update_wk);
6809 }
6810 
6811 static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
6812 					  struct ieee80211_vif *vif,
6813 					  const struct cfg80211_bitrate_mask *mask)
6814 {
6815 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
6816 	struct cfg80211_chan_def def;
6817 	struct ath10k *ar = arvif->ar;
6818 	enum nl80211_band band;
6819 	const u8 *ht_mcs_mask;
6820 	const u16 *vht_mcs_mask;
6821 	u8 rate;
6822 	u8 nss;
6823 	u8 sgi;
6824 	u8 ldpc;
6825 	int single_nss;
6826 	int ret;
6827 
6828 	if (ath10k_mac_vif_chan(vif, &def))
6829 		return -EPERM;
6830 
6831 	band = def.chan->band;
6832 	ht_mcs_mask = mask->control[band].ht_mcs;
6833 	vht_mcs_mask = mask->control[band].vht_mcs;
6834 	ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC);
6835 
6836 	sgi = mask->control[band].gi;
6837 	if (sgi == NL80211_TXRATE_FORCE_LGI)
6838 		return -EINVAL;
6839 
6840 	if (ath10k_mac_bitrate_mask_has_single_rate(ar, band, mask)) {
6841 		ret = ath10k_mac_bitrate_mask_get_single_rate(ar, band, mask,
6842 							      &rate, &nss);
6843 		if (ret) {
6844 			ath10k_warn(ar, "failed to get single rate for vdev %i: %d\n",
6845 				    arvif->vdev_id, ret);
6846 			return ret;
6847 		}
6848 	} else if (ath10k_mac_bitrate_mask_get_single_nss(ar, band, mask,
6849 							  &single_nss)) {
6850 		rate = WMI_FIXED_RATE_NONE;
6851 		nss = single_nss;
6852 	} else {
6853 		rate = WMI_FIXED_RATE_NONE;
6854 		nss = min(ar->num_rf_chains,
6855 			  max(ath10k_mac_max_ht_nss(ht_mcs_mask),
6856 			      ath10k_mac_max_vht_nss(vht_mcs_mask)));
6857 
6858 		if (!ath10k_mac_can_set_bitrate_mask(ar, band, mask))
6859 			return -EINVAL;
6860 
6861 		mutex_lock(&ar->conf_mutex);
6862 
6863 		arvif->bitrate_mask = *mask;
6864 		ieee80211_iterate_stations_atomic(ar->hw,
6865 						  ath10k_mac_set_bitrate_mask_iter,
6866 						  arvif);
6867 
6868 		mutex_unlock(&ar->conf_mutex);
6869 	}
6870 
6871 	mutex_lock(&ar->conf_mutex);
6872 
6873 	ret = ath10k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc);
6874 	if (ret) {
6875 		ath10k_warn(ar, "failed to set fixed rate params on vdev %i: %d\n",
6876 			    arvif->vdev_id, ret);
6877 		goto exit;
6878 	}
6879 
6880 exit:
6881 	mutex_unlock(&ar->conf_mutex);
6882 
6883 	return ret;
6884 }
6885 
6886 static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
6887 				 struct ieee80211_vif *vif,
6888 				 struct ieee80211_sta *sta,
6889 				 u32 changed)
6890 {
6891 	struct ath10k *ar = hw->priv;
6892 	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
6893 	u32 bw, smps;
6894 
6895 	spin_lock_bh(&ar->data_lock);
6896 
6897 	ath10k_dbg(ar, ATH10K_DBG_MAC,
6898 		   "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
6899 		   sta->addr, changed, sta->bandwidth, sta->rx_nss,
6900 		   sta->smps_mode);
6901 
6902 	if (changed & IEEE80211_RC_BW_CHANGED) {
6903 		bw = WMI_PEER_CHWIDTH_20MHZ;
6904 
6905 		switch (sta->bandwidth) {
6906 		case IEEE80211_STA_RX_BW_20:
6907 			bw = WMI_PEER_CHWIDTH_20MHZ;
6908 			break;
6909 		case IEEE80211_STA_RX_BW_40:
6910 			bw = WMI_PEER_CHWIDTH_40MHZ;
6911 			break;
6912 		case IEEE80211_STA_RX_BW_80:
6913 			bw = WMI_PEER_CHWIDTH_80MHZ;
6914 			break;
6915 		case IEEE80211_STA_RX_BW_160:
6916 			ath10k_warn(ar, "Invalid bandwidth %d in rc update for %pM\n",
6917 				    sta->bandwidth, sta->addr);
6918 			bw = WMI_PEER_CHWIDTH_20MHZ;
6919 			break;
6920 		}
6921 
6922 		arsta->bw = bw;
6923 	}
6924 
6925 	if (changed & IEEE80211_RC_NSS_CHANGED)
6926 		arsta->nss = sta->rx_nss;
6927 
6928 	if (changed & IEEE80211_RC_SMPS_CHANGED) {
6929 		smps = WMI_PEER_SMPS_PS_NONE;
6930 
6931 		switch (sta->smps_mode) {
6932 		case IEEE80211_SMPS_AUTOMATIC:
6933 		case IEEE80211_SMPS_OFF:
6934 			smps = WMI_PEER_SMPS_PS_NONE;
6935 			break;
6936 		case IEEE80211_SMPS_STATIC:
6937 			smps = WMI_PEER_SMPS_STATIC;
6938 			break;
6939 		case IEEE80211_SMPS_DYNAMIC:
6940 			smps = WMI_PEER_SMPS_DYNAMIC;
6941 			break;
6942 		case IEEE80211_SMPS_NUM_MODES:
6943 			ath10k_warn(ar, "Invalid smps %d in sta rc update for %pM\n",
6944 				    sta->smps_mode, sta->addr);
6945 			smps = WMI_PEER_SMPS_PS_NONE;
6946 			break;
6947 		}
6948 
6949 		arsta->smps = smps;
6950 	}
6951 
6952 	arsta->changed |= changed;
6953 
6954 	spin_unlock_bh(&ar->data_lock);
6955 
6956 	ieee80211_queue_work(hw, &arsta->update_wk);
6957 }
6958 
6959 static u64 ath10k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
6960 {
6961 	/*
6962 	 * FIXME: Return 0 for the time being. Need to figure out whether the FW
6963 	 * has an API to fetch the 64-bit local TSF.
6964 	 */
6965 
6966 	return 0;
6967 }
6968 
6969 static void ath10k_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
6970 			   u64 tsf)
6971 {
6972 	struct ath10k *ar = hw->priv;
6973 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
6974 	u32 tsf_offset, vdev_param = ar->wmi.vdev_param->set_tsf;
6975 	int ret;
6976 
6977 	/* Workaround:
6978 	 *
6979 	 * The given tsf argument is the entire TSF value, but the firmware
6980 	 * accepts only an offset relative to the current TSF.
6981 	 *
6982 	 * get_tsf() is normally used to obtain that offset, but since
6983 	 * ath10k_get_tsf() is not implemented properly it always returns 0.
6984 	 * Luckily all callers of set_tsf, as of now, also derive the full TSF
6985 	 * value as get_tsf() + tsf_delta, so the final TSF offset passed to
6986 	 * the firmware is arithmetically correct.
6987 	 */
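	/* Worked example (illustrative note): a caller that wants to advance
	 * the TSF by 1000 us typically passes tsf = ath10k_get_tsf() + 1000.
	 * Since ath10k_get_tsf() returns 0, the subtraction below yields
	 * tsf_offset = 1000, which is exactly the delta the firmware expects.
	 */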
6988 	tsf_offset = tsf - ath10k_get_tsf(hw, vif);
6989 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
6990 					vdev_param, tsf_offset);
6991 	if (ret && ret != -EOPNOTSUPP)
6992 		ath10k_warn(ar, "failed to set tsf offset: %d\n", ret);
6993 }
6994 
6995 static int ath10k_ampdu_action(struct ieee80211_hw *hw,
6996 			       struct ieee80211_vif *vif,
6997 			       struct ieee80211_ampdu_params *params)
6998 {
6999 	struct ath10k *ar = hw->priv;
7000 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
7001 	struct ieee80211_sta *sta = params->sta;
7002 	enum ieee80211_ampdu_mlme_action action = params->action;
7003 	u16 tid = params->tid;
7004 
7005 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %hu action %d\n",
7006 		   arvif->vdev_id, sta->addr, tid, action);
7007 
7008 	switch (action) {
7009 	case IEEE80211_AMPDU_RX_START:
7010 	case IEEE80211_AMPDU_RX_STOP:
7011 		/* HTT AddBa/DelBa events trigger mac80211 Rx BA session
7012 		 * creation/removal. Do we need to verify this?
7013 		 */
7014 		return 0;
7015 	case IEEE80211_AMPDU_TX_START:
7016 	case IEEE80211_AMPDU_TX_STOP_CONT:
7017 	case IEEE80211_AMPDU_TX_STOP_FLUSH:
7018 	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
7019 	case IEEE80211_AMPDU_TX_OPERATIONAL:
7020 		/* Firmware offloads Tx aggregation entirely so deny mac80211
7021 		 * Tx aggregation requests.
7022 		 */
7023 		return -EOPNOTSUPP;
7024 	}
7025 
7026 	return -EINVAL;
7027 }
7028 
7029 static void
7030 ath10k_mac_update_rx_channel(struct ath10k *ar,
7031 			     struct ieee80211_chanctx_conf *ctx,
7032 			     struct ieee80211_vif_chanctx_switch *vifs,
7033 			     int n_vifs)
7034 {
7035 	struct cfg80211_chan_def *def = NULL;
7036 
7037 	/* Both locks are required because ar->rx_channel is modified. This
7038 	 * allows readers to hold either lock.
7039 	 */
7040 	lockdep_assert_held(&ar->conf_mutex);
7041 	lockdep_assert_held(&ar->data_lock);
7042 
7043 	WARN_ON(ctx && vifs);
7044 	WARN_ON(vifs && n_vifs != 1);
7045 
7046 	/* FIXME: Sort of an optimization and a workaround. Peers and vifs are
7047 	 * on a linked list now. Doing a lookup peer -> vif -> chanctx for each
7048 	 * ppdu on Rx may reduce performance on low-end systems. It should be
7049 	 * possible to build tables/hashmaps to speed the lookup up (be wary of
7050 	 * CPU data cache line sizes though), but to keep the initial
7051 	 * implementation simple and less intrusive fall back to the slow lookup
7052 	 * only for multi-channel cases. Single-channel cases will keep using
7053 	 * the old channel derivation and thus performance should not be
7054 	 * affected much.
7055 	 */
7056 	rcu_read_lock();
7057 	if (!ctx && ath10k_mac_num_chanctxs(ar) == 1) {
7058 		ieee80211_iter_chan_contexts_atomic(ar->hw,
7059 						    ath10k_mac_get_any_chandef_iter,
7060 						    &def);
7061 
7062 		if (vifs)
7063 			def = &vifs[0].new_ctx->def;
7064 
7065 		ar->rx_channel = def->chan;
7066 	} else if ((ctx && ath10k_mac_num_chanctxs(ar) == 0) ||
7067 		   (ctx && (ar->state == ATH10K_STATE_RESTARTED))) {
7068 		/* During a driver restart due to a firmware assert, mac80211
7069 		 * already has a valid channel context for the given radio, so
7070 		 * channel context iteration returns num_chanctx > 0. Fix up
7071 		 * rx_channel accordingly while the restart is in progress.
7072 		 */
7073 		ar->rx_channel = ctx->def.chan;
7074 	} else {
7075 		ar->rx_channel = NULL;
7076 	}
7077 	rcu_read_unlock();
7078 }
7079 
7080 static void
7081 ath10k_mac_update_vif_chan(struct ath10k *ar,
7082 			   struct ieee80211_vif_chanctx_switch *vifs,
7083 			   int n_vifs)
7084 {
7085 	struct ath10k_vif *arvif;
7086 	int ret;
7087 	int i;
7088 
7089 	lockdep_assert_held(&ar->conf_mutex);
7090 
7091 	/* First stop monitor interface. Some FW versions crash if there's a
7092 	 * lone monitor interface.
7093 	 */
7094 	if (ar->monitor_started)
7095 		ath10k_monitor_stop(ar);
7096 
7097 	for (i = 0; i < n_vifs; i++) {
7098 		arvif = ath10k_vif_to_arvif(vifs[i].vif);
7099 
7100 		ath10k_dbg(ar, ATH10K_DBG_MAC,
7101 			   "mac chanctx switch vdev_id %i freq %hu->%hu width %d->%d\n",
7102 			   arvif->vdev_id,
7103 			   vifs[i].old_ctx->def.chan->center_freq,
7104 			   vifs[i].new_ctx->def.chan->center_freq,
7105 			   vifs[i].old_ctx->def.width,
7106 			   vifs[i].new_ctx->def.width);
7107 
7108 		if (WARN_ON(!arvif->is_started))
7109 			continue;
7110 
7111 		if (WARN_ON(!arvif->is_up))
7112 			continue;
7113 
7114 		ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
7115 		if (ret) {
7116 			ath10k_warn(ar, "failed to down vdev %d: %d\n",
7117 				    arvif->vdev_id, ret);
7118 			continue;
7119 		}
7120 	}
7121 
7122 	/* All relevant vdevs are downed and associated channel resources
7123 	 * should be available for the channel switch now.
7124 	 */
7125 
7126 	spin_lock_bh(&ar->data_lock);
7127 	ath10k_mac_update_rx_channel(ar, NULL, vifs, n_vifs);
7128 	spin_unlock_bh(&ar->data_lock);
7129 
7130 	for (i = 0; i < n_vifs; i++) {
7131 		arvif = ath10k_vif_to_arvif(vifs[i].vif);
7132 
7133 		if (WARN_ON(!arvif->is_started))
7134 			continue;
7135 
7136 		if (WARN_ON(!arvif->is_up))
7137 			continue;
7138 
7139 		ret = ath10k_mac_setup_bcn_tmpl(arvif);
7140 		if (ret)
7141 			ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
7142 				    ret);
7143 
7144 		ret = ath10k_mac_setup_prb_tmpl(arvif);
7145 		if (ret)
7146 			ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
7147 				    ret);
7148 
7149 		ret = ath10k_vdev_restart(arvif, &vifs[i].new_ctx->def);
7150 		if (ret) {
7151 			ath10k_warn(ar, "failed to restart vdev %d: %d\n",
7152 				    arvif->vdev_id, ret);
7153 			continue;
7154 		}
7155 
7156 		ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
7157 					 arvif->bssid);
7158 		if (ret) {
7159 			ath10k_warn(ar, "failed to bring vdev up %d: %d\n",
7160 				    arvif->vdev_id, ret);
7161 			continue;
7162 		}
7163 	}
7164 
7165 	ath10k_monitor_recalc(ar);
7166 }
7167 
7168 static int
7169 ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw,
7170 			  struct ieee80211_chanctx_conf *ctx)
7171 {
7172 	struct ath10k *ar = hw->priv;
7173 
7174 	ath10k_dbg(ar, ATH10K_DBG_MAC,
7175 		   "mac chanctx add freq %hu width %d ptr %pK\n",
7176 		   ctx->def.chan->center_freq, ctx->def.width, ctx);
7177 
7178 	mutex_lock(&ar->conf_mutex);
7179 
7180 	spin_lock_bh(&ar->data_lock);
7181 	ath10k_mac_update_rx_channel(ar, ctx, NULL, 0);
7182 	spin_unlock_bh(&ar->data_lock);
7183 
7184 	ath10k_recalc_radar_detection(ar);
7185 	ath10k_monitor_recalc(ar);
7186 
7187 	mutex_unlock(&ar->conf_mutex);
7188 
7189 	return 0;
7190 }
7191 
7192 static void
7193 ath10k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
7194 			     struct ieee80211_chanctx_conf *ctx)
7195 {
7196 	struct ath10k *ar = hw->priv;
7197 
7198 	ath10k_dbg(ar, ATH10K_DBG_MAC,
7199 		   "mac chanctx remove freq %hu width %d ptr %pK\n",
7200 		   ctx->def.chan->center_freq, ctx->def.width, ctx);
7201 
7202 	mutex_lock(&ar->conf_mutex);
7203 
7204 	spin_lock_bh(&ar->data_lock);
7205 	ath10k_mac_update_rx_channel(ar, NULL, NULL, 0);
7206 	spin_unlock_bh(&ar->data_lock);
7207 
7208 	ath10k_recalc_radar_detection(ar);
7209 	ath10k_monitor_recalc(ar);
7210 
7211 	mutex_unlock(&ar->conf_mutex);
7212 }
7213 
7214 struct ath10k_mac_change_chanctx_arg {
7215 	struct ieee80211_chanctx_conf *ctx;
7216 	struct ieee80211_vif_chanctx_switch *vifs;
7217 	int n_vifs;
7218 	int next_vif;
7219 };
7220 
7221 static void
7222 ath10k_mac_change_chanctx_cnt_iter(void *data, u8 *mac,
7223 				   struct ieee80211_vif *vif)
7224 {
7225 	struct ath10k_mac_change_chanctx_arg *arg = data;
7226 
7227 	if (rcu_access_pointer(vif->chanctx_conf) != arg->ctx)
7228 		return;
7229 
7230 	arg->n_vifs++;
7231 }
7232 
7233 static void
7234 ath10k_mac_change_chanctx_fill_iter(void *data, u8 *mac,
7235 				    struct ieee80211_vif *vif)
7236 {
7237 	struct ath10k_mac_change_chanctx_arg *arg = data;
7238 	struct ieee80211_chanctx_conf *ctx;
7239 
7240 	ctx = rcu_access_pointer(vif->chanctx_conf);
7241 	if (ctx != arg->ctx)
7242 		return;
7243 
7244 	if (WARN_ON(arg->next_vif == arg->n_vifs))
7245 		return;
7246 
7247 	arg->vifs[arg->next_vif].vif = vif;
7248 	arg->vifs[arg->next_vif].old_ctx = ctx;
7249 	arg->vifs[arg->next_vif].new_ctx = ctx;
7250 	arg->next_vif++;
7251 }
7252 
7253 static void
7254 ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw,
7255 			     struct ieee80211_chanctx_conf *ctx,
7256 			     u32 changed)
7257 {
7258 	struct ath10k *ar = hw->priv;
7259 	struct ath10k_mac_change_chanctx_arg arg = { .ctx = ctx };
7260 
7261 	mutex_lock(&ar->conf_mutex);
7262 
7263 	ath10k_dbg(ar, ATH10K_DBG_MAC,
7264 		   "mac chanctx change freq %hu width %d ptr %pK changed %x\n",
7265 		   ctx->def.chan->center_freq, ctx->def.width, ctx, changed);
7266 
7267 	/* This shouldn't really happen because channel switching should use
7268 	 * switch_vif_chanctx().
7269 	 */
7270 	if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL))
7271 		goto unlock;
7272 
7273 	if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH) {
7274 		ieee80211_iterate_active_interfaces_atomic(
7275 					hw,
7276 					IEEE80211_IFACE_ITER_NORMAL,
7277 					ath10k_mac_change_chanctx_cnt_iter,
7278 					&arg);
7279 		if (arg.n_vifs == 0)
7280 			goto radar;
7281 
7282 		arg.vifs = kcalloc(arg.n_vifs, sizeof(arg.vifs[0]),
7283 				   GFP_KERNEL);
7284 		if (!arg.vifs)
7285 			goto radar;
7286 
7287 		ieee80211_iterate_active_interfaces_atomic(
7288 					hw,
7289 					IEEE80211_IFACE_ITER_NORMAL,
7290 					ath10k_mac_change_chanctx_fill_iter,
7291 					&arg);
7292 		ath10k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs);
7293 		kfree(arg.vifs);
7294 	}
7295 
7296 radar:
7297 	ath10k_recalc_radar_detection(ar);
7298 
7299 	/* FIXME: How to configure Rx chains properly? */
7300 
7301 	/* No other actions are actually necessary. Firmware maintains channel
7302 	 * definitions per vdev internally and there's no host-side channel
7303 	 * context abstraction to configure, e.g. channel width.
7304 	 */
7305 
7306 unlock:
7307 	mutex_unlock(&ar->conf_mutex);
7308 }
7309 
7310 static int
7311 ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
7312 				 struct ieee80211_vif *vif,
7313 				 struct ieee80211_chanctx_conf *ctx)
7314 {
7315 	struct ath10k *ar = hw->priv;
7316 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
7317 	int ret;
7318 
7319 	mutex_lock(&ar->conf_mutex);
7320 
7321 	ath10k_dbg(ar, ATH10K_DBG_MAC,
7322 		   "mac chanctx assign ptr %pK vdev_id %i\n",
7323 		   ctx, arvif->vdev_id);
7324 
7325 	if (WARN_ON(arvif->is_started)) {
7326 		mutex_unlock(&ar->conf_mutex);
7327 		return -EBUSY;
7328 	}
7329 
7330 	ret = ath10k_vdev_start(arvif, &ctx->def);
7331 	if (ret) {
7332 		ath10k_warn(ar, "failed to start vdev %i addr %pM on freq %d: %d\n",
7333 			    arvif->vdev_id, vif->addr,
7334 			    ctx->def.chan->center_freq, ret);
7335 		goto err;
7336 	}
7337 
7338 	arvif->is_started = true;
7339 
7340 	ret = ath10k_mac_vif_setup_ps(arvif);
7341 	if (ret) {
7342 		ath10k_warn(ar, "failed to update vdev %i ps: %d\n",
7343 			    arvif->vdev_id, ret);
7344 		goto err_stop;
7345 	}
7346 
7347 	if (vif->type == NL80211_IFTYPE_MONITOR) {
7348 		ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, 0, vif->addr);
7349 		if (ret) {
7350 			ath10k_warn(ar, "failed to up monitor vdev %i: %d\n",
7351 				    arvif->vdev_id, ret);
7352 			goto err_stop;
7353 		}
7354 
7355 		arvif->is_up = true;
7356 	}
7357 
7358 	mutex_unlock(&ar->conf_mutex);
7359 	return 0;
7360 
7361 err_stop:
7362 	ath10k_vdev_stop(arvif);
7363 	arvif->is_started = false;
7364 	ath10k_mac_vif_setup_ps(arvif);
7365 
7366 err:
7367 	mutex_unlock(&ar->conf_mutex);
7368 	return ret;
7369 }
7370 
7371 static void
7372 ath10k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
7373 				   struct ieee80211_vif *vif,
7374 				   struct ieee80211_chanctx_conf *ctx)
7375 {
7376 	struct ath10k *ar = hw->priv;
7377 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
7378 	int ret;
7379 
7380 	mutex_lock(&ar->conf_mutex);
7381 
7382 	ath10k_dbg(ar, ATH10K_DBG_MAC,
7383 		   "mac chanctx unassign ptr %pK vdev_id %i\n",
7384 		   ctx, arvif->vdev_id);
7385 
7386 	WARN_ON(!arvif->is_started);
7387 
7388 	if (vif->type == NL80211_IFTYPE_MONITOR) {
7389 		WARN_ON(!arvif->is_up);
7390 
7391 		ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
7392 		if (ret)
7393 			ath10k_warn(ar, "failed to down monitor vdev %i: %d\n",
7394 				    arvif->vdev_id, ret);
7395 
7396 		arvif->is_up = false;
7397 	}
7398 
7399 	ret = ath10k_vdev_stop(arvif);
7400 	if (ret)
7401 		ath10k_warn(ar, "failed to stop vdev %i: %d\n",
7402 			    arvif->vdev_id, ret);
7403 
7404 	arvif->is_started = false;
7405 
7406 	mutex_unlock(&ar->conf_mutex);
7407 }
7408 
7409 static int
7410 ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
7411 				 struct ieee80211_vif_chanctx_switch *vifs,
7412 				 int n_vifs,
7413 				 enum ieee80211_chanctx_switch_mode mode)
7414 {
7415 	struct ath10k *ar = hw->priv;
7416 
7417 	mutex_lock(&ar->conf_mutex);
7418 
7419 	ath10k_dbg(ar, ATH10K_DBG_MAC,
7420 		   "mac chanctx switch n_vifs %d mode %d\n",
7421 		   n_vifs, mode);
7422 	ath10k_mac_update_vif_chan(ar, vifs, n_vifs);
7423 
7424 	mutex_unlock(&ar->conf_mutex);
7425 	return 0;
7426 }
7427 
7428 static const struct ieee80211_ops ath10k_ops = {
7429 	.tx				= ath10k_mac_op_tx,
7430 	.wake_tx_queue			= ath10k_mac_op_wake_tx_queue,
7431 	.start				= ath10k_start,
7432 	.stop				= ath10k_stop,
7433 	.config				= ath10k_config,
7434 	.add_interface			= ath10k_add_interface,
7435 	.remove_interface		= ath10k_remove_interface,
7436 	.configure_filter		= ath10k_configure_filter,
7437 	.bss_info_changed		= ath10k_bss_info_changed,
7438 	.hw_scan			= ath10k_hw_scan,
7439 	.cancel_hw_scan			= ath10k_cancel_hw_scan,
7440 	.set_key			= ath10k_set_key,
7441 	.set_default_unicast_key        = ath10k_set_default_unicast_key,
7442 	.sta_state			= ath10k_sta_state,
7443 	.conf_tx			= ath10k_conf_tx,
7444 	.remain_on_channel		= ath10k_remain_on_channel,
7445 	.cancel_remain_on_channel	= ath10k_cancel_remain_on_channel,
7446 	.set_rts_threshold		= ath10k_set_rts_threshold,
7447 	.set_frag_threshold		= ath10k_mac_op_set_frag_threshold,
7448 	.flush				= ath10k_flush,
7449 	.tx_last_beacon			= ath10k_tx_last_beacon,
7450 	.set_antenna			= ath10k_set_antenna,
7451 	.get_antenna			= ath10k_get_antenna,
7452 	.reconfig_complete		= ath10k_reconfig_complete,
7453 	.get_survey			= ath10k_get_survey,
7454 	.set_bitrate_mask		= ath10k_mac_op_set_bitrate_mask,
7455 	.sta_rc_update			= ath10k_sta_rc_update,
7456 	.get_tsf			= ath10k_get_tsf,
7457 	.set_tsf			= ath10k_set_tsf,
7458 	.ampdu_action			= ath10k_ampdu_action,
7459 	.get_et_sset_count		= ath10k_debug_get_et_sset_count,
7460 	.get_et_stats			= ath10k_debug_get_et_stats,
7461 	.get_et_strings			= ath10k_debug_get_et_strings,
7462 	.add_chanctx			= ath10k_mac_op_add_chanctx,
7463 	.remove_chanctx			= ath10k_mac_op_remove_chanctx,
7464 	.change_chanctx			= ath10k_mac_op_change_chanctx,
7465 	.assign_vif_chanctx		= ath10k_mac_op_assign_vif_chanctx,
7466 	.unassign_vif_chanctx		= ath10k_mac_op_unassign_vif_chanctx,
7467 	.switch_vif_chanctx		= ath10k_mac_op_switch_vif_chanctx,
7468 
7469 	CFG80211_TESTMODE_CMD(ath10k_tm_cmd)
7470 
7471 #ifdef CONFIG_PM
7472 	.suspend			= ath10k_wow_op_suspend,
7473 	.resume				= ath10k_wow_op_resume,
7474 #endif
7475 #ifdef CONFIG_MAC80211_DEBUGFS
7476 	.sta_add_debugfs		= ath10k_sta_add_debugfs,
7477 	.sta_statistics			= ath10k_sta_statistics,
7478 #endif
7479 };
7480 
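/* Channel table helpers: each entry describes an ieee80211_channel with the
 * given hardware channel number and center frequency (MHz), no special
 * flags, and a maximum TX power of 30 dBm.
 */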
7481 #define CHAN2G(_channel, _freq, _flags) { \
7482 	.band			= NL80211_BAND_2GHZ, \
7483 	.hw_value		= (_channel), \
7484 	.center_freq		= (_freq), \
7485 	.flags			= (_flags), \
7486 	.max_antenna_gain	= 0, \
7487 	.max_power		= 30, \
7488 }
7489 
7490 #define CHAN5G(_channel, _freq, _flags) { \
7491 	.band			= NL80211_BAND_5GHZ, \
7492 	.hw_value		= (_channel), \
7493 	.center_freq		= (_freq), \
7494 	.flags			= (_flags), \
7495 	.max_antenna_gain	= 0, \
7496 	.max_power		= 30, \
7497 }
7498 
7499 static const struct ieee80211_channel ath10k_2ghz_channels[] = {
7500 	CHAN2G(1, 2412, 0),
7501 	CHAN2G(2, 2417, 0),
7502 	CHAN2G(3, 2422, 0),
7503 	CHAN2G(4, 2427, 0),
7504 	CHAN2G(5, 2432, 0),
7505 	CHAN2G(6, 2437, 0),
7506 	CHAN2G(7, 2442, 0),
7507 	CHAN2G(8, 2447, 0),
7508 	CHAN2G(9, 2452, 0),
7509 	CHAN2G(10, 2457, 0),
7510 	CHAN2G(11, 2462, 0),
7511 	CHAN2G(12, 2467, 0),
7512 	CHAN2G(13, 2472, 0),
7513 	CHAN2G(14, 2484, 0),
7514 };
7515 
7516 static const struct ieee80211_channel ath10k_5ghz_channels[] = {
7517 	CHAN5G(36, 5180, 0),
7518 	CHAN5G(40, 5200, 0),
7519 	CHAN5G(44, 5220, 0),
7520 	CHAN5G(48, 5240, 0),
7521 	CHAN5G(52, 5260, 0),
7522 	CHAN5G(56, 5280, 0),
7523 	CHAN5G(60, 5300, 0),
7524 	CHAN5G(64, 5320, 0),
7525 	CHAN5G(100, 5500, 0),
7526 	CHAN5G(104, 5520, 0),
7527 	CHAN5G(108, 5540, 0),
7528 	CHAN5G(112, 5560, 0),
7529 	CHAN5G(116, 5580, 0),
7530 	CHAN5G(120, 5600, 0),
7531 	CHAN5G(124, 5620, 0),
7532 	CHAN5G(128, 5640, 0),
7533 	CHAN5G(132, 5660, 0),
7534 	CHAN5G(136, 5680, 0),
7535 	CHAN5G(140, 5700, 0),
7536 	CHAN5G(144, 5720, 0),
7537 	CHAN5G(149, 5745, 0),
7538 	CHAN5G(153, 5765, 0),
7539 	CHAN5G(157, 5785, 0),
7540 	CHAN5G(161, 5805, 0),
7541 	CHAN5G(165, 5825, 0),
7542 };
7543 
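/* Allocate the ieee80211_hw and the embedded ath10k core. The ops table is
 * duplicated per device so that it can be adjusted for this instance later
 * (e.g. wake_tx_queue is cleared in ath10k_mac_register() when the firmware
 * lacks pull-push flow control) without touching the shared template.
 */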
7544 struct ath10k *ath10k_mac_create(size_t priv_size)
7545 {
7546 	struct ieee80211_hw *hw;
7547 	struct ieee80211_ops *ops;
7548 	struct ath10k *ar;
7549 
7550 	ops = kmemdup(&ath10k_ops, sizeof(ath10k_ops), GFP_KERNEL);
7551 	if (!ops)
7552 		return NULL;
7553 
7554 	hw = ieee80211_alloc_hw(sizeof(struct ath10k) + priv_size, ops);
7555 	if (!hw) {
7556 		kfree(ops);
7557 		return NULL;
7558 	}
7559 
7560 	ar = hw->priv;
7561 	ar->hw = hw;
7562 	ar->ops = ops;
7563 
7564 	return ar;
7565 }
7566 
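/* ar lives inside hw->priv, so grab the duplicated ops pointer before
 * ieee80211_free_hw() releases it together with the rest of the device.
 */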
7567 void ath10k_mac_destroy(struct ath10k *ar)
7568 {
7569 	struct ieee80211_ops *ops = ar->ops;
7570 
7571 	ieee80211_free_hw(ar->hw);
7572 	kfree(ops);
7573 }
7574 
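/* Interface combination limits advertised to cfg80211. Separate tables exist
 * for the main, 10.x, TLV and 10.4 firmware branches; ath10k_mac_register()
 * selects one based on the firmware's WMI op version.
 */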
7575 static const struct ieee80211_iface_limit ath10k_if_limits[] = {
7576 	{
7577 		.max	= 8,
7578 		.types	= BIT(NL80211_IFTYPE_STATION)
7579 			| BIT(NL80211_IFTYPE_P2P_CLIENT)
7580 	},
7581 	{
7582 		.max	= 3,
7583 		.types	= BIT(NL80211_IFTYPE_P2P_GO)
7584 	},
7585 	{
7586 		.max	= 1,
7587 		.types	= BIT(NL80211_IFTYPE_P2P_DEVICE)
7588 	},
7589 	{
7590 		.max	= 7,
7591 		.types	= BIT(NL80211_IFTYPE_AP)
7592 #ifdef CONFIG_MAC80211_MESH
7593 			| BIT(NL80211_IFTYPE_MESH_POINT)
7594 #endif
7595 	},
7596 };
7597 
7598 static const struct ieee80211_iface_limit ath10k_10x_if_limits[] = {
7599 	{
7600 		.max	= 8,
7601 		.types	= BIT(NL80211_IFTYPE_AP)
7602 #ifdef CONFIG_MAC80211_MESH
7603 			| BIT(NL80211_IFTYPE_MESH_POINT)
7604 #endif
7605 	},
7606 	{
7607 		.max	= 1,
7608 		.types	= BIT(NL80211_IFTYPE_STATION)
7609 	},
7610 };
7611 
7612 static const struct ieee80211_iface_combination ath10k_if_comb[] = {
7613 	{
7614 		.limits = ath10k_if_limits,
7615 		.n_limits = ARRAY_SIZE(ath10k_if_limits),
7616 		.max_interfaces = 8,
7617 		.num_different_channels = 1,
7618 		.beacon_int_infra_match = true,
7619 	},
7620 };
7621 
7622 static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = {
7623 	{
7624 		.limits = ath10k_10x_if_limits,
7625 		.n_limits = ARRAY_SIZE(ath10k_10x_if_limits),
7626 		.max_interfaces = 8,
7627 		.num_different_channels = 1,
7628 		.beacon_int_infra_match = true,
7629 #ifdef CONFIG_ATH10K_DFS_CERTIFIED
7630 		.radar_detect_widths =	BIT(NL80211_CHAN_WIDTH_20_NOHT) |
7631 					BIT(NL80211_CHAN_WIDTH_20) |
7632 					BIT(NL80211_CHAN_WIDTH_40) |
7633 					BIT(NL80211_CHAN_WIDTH_80),
7634 #endif
7635 	},
7636 };
7637 
7638 static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = {
7639 	{
7640 		.max = 2,
7641 		.types = BIT(NL80211_IFTYPE_STATION),
7642 	},
7643 	{
7644 		.max = 2,
7645 		.types = BIT(NL80211_IFTYPE_AP) |
7646 #ifdef CONFIG_MAC80211_MESH
7647 			 BIT(NL80211_IFTYPE_MESH_POINT) |
7648 #endif
7649 			 BIT(NL80211_IFTYPE_P2P_CLIENT) |
7650 			 BIT(NL80211_IFTYPE_P2P_GO),
7651 	},
7652 	{
7653 		.max = 1,
7654 		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
7655 	},
7656 };
7657 
7658 static const struct ieee80211_iface_limit ath10k_tlv_qcs_if_limit[] = {
7659 	{
7660 		.max = 2,
7661 		.types = BIT(NL80211_IFTYPE_STATION),
7662 	},
7663 	{
7664 		.max = 2,
7665 		.types = BIT(NL80211_IFTYPE_P2P_CLIENT),
7666 	},
7667 	{
7668 		.max = 1,
7669 		.types = BIT(NL80211_IFTYPE_AP) |
7670 #ifdef CONFIG_MAC80211_MESH
7671 			 BIT(NL80211_IFTYPE_MESH_POINT) |
7672 #endif
7673 			 BIT(NL80211_IFTYPE_P2P_GO),
7674 	},
7675 	{
7676 		.max = 1,
7677 		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
7678 	},
7679 };
7680 
7681 static const struct ieee80211_iface_limit ath10k_tlv_if_limit_ibss[] = {
7682 	{
7683 		.max = 1,
7684 		.types = BIT(NL80211_IFTYPE_STATION),
7685 	},
7686 	{
7687 		.max = 1,
7688 		.types = BIT(NL80211_IFTYPE_ADHOC),
7689 	},
7690 };
7691 
7692 /* FIXME: This is not thoroughly tested. These combinations may over- or
7693  * underestimate hw/fw capabilities.
7694  */
7695 static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = {
7696 	{
7697 		.limits = ath10k_tlv_if_limit,
7698 		.num_different_channels = 1,
7699 		.max_interfaces = 4,
7700 		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
7701 	},
7702 	{
7703 		.limits = ath10k_tlv_if_limit_ibss,
7704 		.num_different_channels = 1,
7705 		.max_interfaces = 2,
7706 		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
7707 	},
7708 };
7709 
7710 static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = {
7711 	{
7712 		.limits = ath10k_tlv_if_limit,
7713 		.num_different_channels = 1,
7714 		.max_interfaces = 4,
7715 		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
7716 	},
7717 	{
7718 		.limits = ath10k_tlv_qcs_if_limit,
7719 		.num_different_channels = 2,
7720 		.max_interfaces = 4,
7721 		.n_limits = ARRAY_SIZE(ath10k_tlv_qcs_if_limit),
7722 	},
7723 	{
7724 		.limits = ath10k_tlv_if_limit_ibss,
7725 		.num_different_channels = 1,
7726 		.max_interfaces = 2,
7727 		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
7728 	},
7729 };
7730 
7731 static const struct ieee80211_iface_limit ath10k_10_4_if_limits[] = {
7732 	{
7733 		.max = 1,
7734 		.types = BIT(NL80211_IFTYPE_STATION),
7735 	},
7736 	{
7737 		.max	= 16,
7738 		.types	= BIT(NL80211_IFTYPE_AP)
7739 #ifdef CONFIG_MAC80211_MESH
7740 			| BIT(NL80211_IFTYPE_MESH_POINT)
7741 #endif
7742 	},
7743 };
7744 
7745 static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = {
7746 	{
7747 		.limits = ath10k_10_4_if_limits,
7748 		.n_limits = ARRAY_SIZE(ath10k_10_4_if_limits),
7749 		.max_interfaces = 16,
7750 		.num_different_channels = 1,
7751 		.beacon_int_infra_match = true,
7752 #ifdef CONFIG_ATH10K_DFS_CERTIFIED
7753 		.radar_detect_widths =	BIT(NL80211_CHAN_WIDTH_20_NOHT) |
7754 					BIT(NL80211_CHAN_WIDTH_20) |
7755 					BIT(NL80211_CHAN_WIDTH_40) |
7756 					BIT(NL80211_CHAN_WIDTH_80),
7757 #endif
7758 	},
7759 };
7760 
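/* Iterator callback for ath10k_get_arvif(): remember the active vif whose
 * vdev id matches the one being looked up.
 */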
7761 static void ath10k_get_arvif_iter(void *data, u8 *mac,
7762 				  struct ieee80211_vif *vif)
7763 {
7764 	struct ath10k_vif_iter *arvif_iter = data;
7765 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
7766 
7767 	if (arvif->vdev_id == arvif_iter->vdev_id)
7768 		arvif_iter->arvif = arvif;
7769 }
7770 
7771 struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id)
7772 {
7773 	struct ath10k_vif_iter arvif_iter;
7774 	u32 flags;
7775 
7776 	memset(&arvif_iter, 0, sizeof(struct ath10k_vif_iter));
7777 	arvif_iter.vdev_id = vdev_id;
7778 
7779 	flags = IEEE80211_IFACE_ITER_RESUME_ALL;
7780 	ieee80211_iterate_active_interfaces_atomic(ar->hw,
7781 						   flags,
7782 						   ath10k_get_arvif_iter,
7783 						   &arvif_iter);
7784 	if (!arvif_iter.arvif) {
7785 		ath10k_warn(ar, "No VIF found for vdev %d\n", vdev_id);
7786 		return NULL;
7787 	}
7788 
7789 	return arvif_iter.arvif;
7790 }
7791 
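/* Advertise the capabilities reported by firmware and hardware to
 * mac80211/cfg80211 and register the ieee80211_hw instance.
 */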
7792 int ath10k_mac_register(struct ath10k *ar)
7793 {
7794 	static const u32 cipher_suites[] = {
7795 		WLAN_CIPHER_SUITE_WEP40,
7796 		WLAN_CIPHER_SUITE_WEP104,
7797 		WLAN_CIPHER_SUITE_TKIP,
7798 		WLAN_CIPHER_SUITE_CCMP,
7799 		WLAN_CIPHER_SUITE_AES_CMAC,
7800 	};
7801 	struct ieee80211_supported_band *band;
7802 	void *channels;
7803 	int ret;
7804 
7805 	SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);
7806 
7807 	SET_IEEE80211_DEV(ar->hw, ar->dev);
7808 
7809 	BUILD_BUG_ON((ARRAY_SIZE(ath10k_2ghz_channels) +
7810 		      ARRAY_SIZE(ath10k_5ghz_channels)) !=
7811 		     ATH10K_NUM_CHANS);
7812 
7813 	if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
7814 		channels = kmemdup(ath10k_2ghz_channels,
7815 				   sizeof(ath10k_2ghz_channels),
7816 				   GFP_KERNEL);
7817 		if (!channels) {
7818 			ret = -ENOMEM;
7819 			goto err_free;
7820 		}
7821 
7822 		band = &ar->mac.sbands[NL80211_BAND_2GHZ];
7823 		band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels);
7824 		band->channels = channels;
7825 
7826 		if (ar->hw_params.cck_rate_map_rev2) {
7827 			band->n_bitrates = ath10k_g_rates_rev2_size;
7828 			band->bitrates = ath10k_g_rates_rev2;
7829 		} else {
7830 			band->n_bitrates = ath10k_g_rates_size;
7831 			band->bitrates = ath10k_g_rates;
7832 		}
7833 
7834 		ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
7835 	}
7836 
7837 	if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
7838 		channels = kmemdup(ath10k_5ghz_channels,
7839 				   sizeof(ath10k_5ghz_channels),
7840 				   GFP_KERNEL);
7841 		if (!channels) {
7842 			ret = -ENOMEM;
7843 			goto err_free;
7844 		}
7845 
7846 		band = &ar->mac.sbands[NL80211_BAND_5GHZ];
7847 		band->n_channels = ARRAY_SIZE(ath10k_5ghz_channels);
7848 		band->channels = channels;
7849 		band->n_bitrates = ath10k_a_rates_size;
7850 		band->bitrates = ath10k_a_rates;
7851 		ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
7852 	}
7853 
7854 	ath10k_mac_setup_ht_vht_cap(ar);
7855 
7856 	ar->hw->wiphy->interface_modes =
7857 		BIT(NL80211_IFTYPE_STATION) |
7858 		BIT(NL80211_IFTYPE_AP) |
7859 		BIT(NL80211_IFTYPE_MESH_POINT);
7860 
7861 	ar->hw->wiphy->available_antennas_rx = ar->cfg_rx_chainmask;
7862 	ar->hw->wiphy->available_antennas_tx = ar->cfg_tx_chainmask;
7863 
7864 	if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->normal_mode_fw.fw_file.fw_features))
7865 		ar->hw->wiphy->interface_modes |=
7866 			BIT(NL80211_IFTYPE_P2P_DEVICE) |
7867 			BIT(NL80211_IFTYPE_P2P_CLIENT) |
7868 			BIT(NL80211_IFTYPE_P2P_GO);
7869 
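	/* Hardware capability flags advertised to mac80211. */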
7870 	ieee80211_hw_set(ar->hw, SIGNAL_DBM);
7871 	ieee80211_hw_set(ar->hw, SUPPORTS_PS);
7872 	ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
7873 	ieee80211_hw_set(ar->hw, MFP_CAPABLE);
7874 	ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS);
7875 	ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
7876 	ieee80211_hw_set(ar->hw, AP_LINK_PS);
7877 	ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
7878 	ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
7879 	ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
7880 	ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
7881 	ieee80211_hw_set(ar->hw, WANT_MONITOR_VIF);
7882 	ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
7883 	ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
7884 
7885 	if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
7886 		ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
7887 
7888 	ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
7889 	ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
7890 
7891 	if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
7892 		ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
7893 
7894 	if (ar->ht_cap_info & WMI_HT_CAP_ENABLED) {
7895 		ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION);
7896 		ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW);
7897 	}
7898 
7899 	ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
7900 	ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
7901 
7902 	ar->hw->vif_data_size = sizeof(struct ath10k_vif);
7903 	ar->hw->sta_data_size = sizeof(struct ath10k_sta);
7904 	ar->hw->txq_data_size = sizeof(struct ath10k_txq);
7905 
7906 	ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;
7907 
7908 	if (test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) {
7909 		ar->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
7910 
7911 		/* Firmware delivers WPS/P2P Probe Request frames to the driver so
7912 		 * that userspace (e.g. wpa_supplicant/hostapd) can generate
7913 		 * correct Probe Responses. Advertising the offload is a bit of a hack.
7914 		 */
7915 		ar->hw->wiphy->probe_resp_offload |=
7916 			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
7917 			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
7918 			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
7919 	}
7920 
7921 	if (test_bit(WMI_SERVICE_TDLS, ar->wmi.svc_map))
7922 		ar->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
7923 
7924 	ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
7925 	ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
7926 	ar->hw->wiphy->max_remain_on_channel_duration = 5000;
7927 
7928 	ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
7929 	ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
7930 				   NL80211_FEATURE_AP_SCAN;
7931 
7932 	ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;
7933 
7934 	ret = ath10k_wow_init(ar);
7935 	if (ret) {
7936 		ath10k_warn(ar, "failed to init wow: %d\n", ret);
7937 		goto err_free;
7938 	}
7939 
7940 	wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
7941 
7942 	/*
7943 	 * On LL hardware the queues are managed entirely by the firmware,
7944 	 * so we only advertise to mac80211 that we can handle them.
7945 	 */
7946 	ar->hw->queues = IEEE80211_MAX_QUEUES;
7947 
7948 	/* vdev_ids are used as hw queue numbers. Make sure offchan tx queue is
7949 	 * something that vdev_ids can't reach so that we don't stop the queue
7950 	 * accidentally.
7951 	 */
7952 	ar->hw->offchannel_tx_hw_queue = IEEE80211_MAX_QUEUES - 1;
7953 
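	/* The supported interface combinations (and IBSS support) depend on
	 * the firmware branch in use, identified by its WMI op version.
	 */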
7954 	switch (ar->running_fw->fw_file.wmi_op_version) {
7955 	case ATH10K_FW_WMI_OP_VERSION_MAIN:
7956 		ar->hw->wiphy->iface_combinations = ath10k_if_comb;
7957 		ar->hw->wiphy->n_iface_combinations =
7958 			ARRAY_SIZE(ath10k_if_comb);
7959 		ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
7960 		break;
7961 	case ATH10K_FW_WMI_OP_VERSION_TLV:
7962 		if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
7963 			ar->hw->wiphy->iface_combinations =
7964 				ath10k_tlv_qcs_if_comb;
7965 			ar->hw->wiphy->n_iface_combinations =
7966 				ARRAY_SIZE(ath10k_tlv_qcs_if_comb);
7967 		} else {
7968 			ar->hw->wiphy->iface_combinations = ath10k_tlv_if_comb;
7969 			ar->hw->wiphy->n_iface_combinations =
7970 				ARRAY_SIZE(ath10k_tlv_if_comb);
7971 		}
7972 		ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
7973 		break;
7974 	case ATH10K_FW_WMI_OP_VERSION_10_1:
7975 	case ATH10K_FW_WMI_OP_VERSION_10_2:
7976 	case ATH10K_FW_WMI_OP_VERSION_10_2_4:
7977 		ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb;
7978 		ar->hw->wiphy->n_iface_combinations =
7979 			ARRAY_SIZE(ath10k_10x_if_comb);
7980 		break;
7981 	case ATH10K_FW_WMI_OP_VERSION_10_4:
7982 		ar->hw->wiphy->iface_combinations = ath10k_10_4_if_comb;
7983 		ar->hw->wiphy->n_iface_combinations =
7984 			ARRAY_SIZE(ath10k_10_4_if_comb);
7985 		break;
7986 	case ATH10K_FW_WMI_OP_VERSION_UNSET:
7987 	case ATH10K_FW_WMI_OP_VERSION_MAX:
7988 		WARN_ON(1);
7989 		ret = -EINVAL;
7990 		goto err_free;
7991 	}
7992 
7993 	if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
7994 		ar->hw->netdev_features = NETIF_F_HW_CSUM;
7995 
7996 	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) {
7997 		/* Init ath dfs pattern detector */
7998 		ar->ath_common.debug_mask = ATH_DBG_DFS;
7999 		ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common,
8000 							     NL80211_DFS_UNSET);
8001 
8002 		if (!ar->dfs_detector)
8003 			ath10k_warn(ar, "failed to initialise DFS pattern detector\n");
8004 	}
8005 
8006 	/* Current wake_tx_queue implementation imposes a significant
8007 	 * performance penalty in some setups. The tx scheduling code needs
8008 	 * more work anyway so disable the wake_tx_queue unless firmware
8009 	 * supports the pull-push mechanism.
8010 	 */
8011 	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
8012 		      ar->running_fw->fw_file.fw_features))
8013 		ar->ops->wake_tx_queue = NULL;
8014 
8015 	ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
8016 			    ath10k_reg_notifier);
8017 	if (ret) {
8018 		ath10k_err(ar, "failed to initialise regulatory: %i\n", ret);
8019 		goto err_dfs_detector_exit;
8020 	}
8021 
8022 	ar->hw->wiphy->cipher_suites = cipher_suites;
8023 	ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
8024 
8025 	ret = ieee80211_register_hw(ar->hw);
8026 	if (ret) {
8027 		ath10k_err(ar, "failed to register ieee80211: %d\n", ret);
8028 		goto err_dfs_detector_exit;
8029 	}
8030 
8031 	if (!ath_is_world_regd(&ar->ath_common.regulatory)) {
8032 		ret = regulatory_hint(ar->hw->wiphy,
8033 				      ar->ath_common.regulatory.alpha2);
8034 		if (ret)
8035 			goto err_unregister;
8036 	}
8037 
8038 	return 0;
8039 
8040 err_unregister:
8041 	ieee80211_unregister_hw(ar->hw);
8042 
8043 err_dfs_detector_exit:
8044 	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
8045 		ar->dfs_detector->exit(ar->dfs_detector);
8046 
8047 err_free:
8048 	kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
8049 	kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
8050 
8051 	SET_IEEE80211_DEV(ar->hw, NULL);
8052 	return ret;
8053 }
8054 
8055 void ath10k_mac_unregister(struct ath10k *ar)
8056 {
8057 	ieee80211_unregister_hw(ar->hw);
8058 
8059 	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
8060 		ar->dfs_detector->exit(ar->dfs_detector);
8061 
8062 	kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
8063 	kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
8064 
8065 	SET_IEEE80211_DEV(ar->hw, NULL);
8066 }
8067