xref: /openbmc/linux/drivers/net/wireless/ath/ath10k/mac.c (revision f2a89d3b)
1 /*
2  * Copyright (c) 2005-2011 Atheros Communications Inc.
3  * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include "mac.h"
19 
20 #include <net/mac80211.h>
21 #include <linux/etherdevice.h>
22 
23 #include "hif.h"
24 #include "core.h"
25 #include "debug.h"
26 #include "wmi.h"
27 #include "htt.h"
28 #include "txrx.h"
29 #include "testmode.h"
31 #include "wmi-tlv.h"
32 #include "wmi-ops.h"
33 #include "wow.h"
34 
35 /*********/
36 /* Rates */
37 /*********/
38 
39 static struct ieee80211_rate ath10k_rates[] = {
40 	{ .bitrate = 10,
41 	  .hw_value = ATH10K_HW_RATE_CCK_LP_1M },
42 	{ .bitrate = 20,
43 	  .hw_value = ATH10K_HW_RATE_CCK_LP_2M,
44 	  .hw_value_short = ATH10K_HW_RATE_CCK_SP_2M,
45 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
46 	{ .bitrate = 55,
47 	  .hw_value = ATH10K_HW_RATE_CCK_LP_5_5M,
48 	  .hw_value_short = ATH10K_HW_RATE_CCK_SP_5_5M,
49 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
50 	{ .bitrate = 110,
51 	  .hw_value = ATH10K_HW_RATE_CCK_LP_11M,
52 	  .hw_value_short = ATH10K_HW_RATE_CCK_SP_11M,
53 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
54 
55 	{ .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
56 	{ .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
57 	{ .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
58 	{ .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
59 	{ .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
60 	{ .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
61 	{ .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
62 	{ .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
63 };
64 
65 static struct ieee80211_rate ath10k_rates_rev2[] = {
66 	{ .bitrate = 10,
67 	  .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_1M },
68 	{ .bitrate = 20,
69 	  .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_2M,
70 	  .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_2M,
71 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
72 	{ .bitrate = 55,
73 	  .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_5_5M,
74 	  .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_5_5M,
75 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
76 	{ .bitrate = 110,
77 	  .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_11M,
78 	  .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_11M,
79 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
80 
81 	{ .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
82 	{ .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
83 	{ .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
84 	{ .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
85 	{ .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
86 	{ .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
87 	{ .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
88 	{ .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
89 };
90 
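/* The first ATH10K_MAC_FIRST_OFDM_RATE_IDX entries of ath10k_rates (and
 * ath10k_rates_rev2) are the four CCK rates. ath10k_g_rates exposes the
 * whole table for 2.4 GHz while ath10k_a_rates skips the CCK entries so
 * that 5 GHz only advertises the eight OFDM rates.
 */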
91 #define ATH10K_MAC_FIRST_OFDM_RATE_IDX 4
92 
93 #define ath10k_a_rates (ath10k_rates + ATH10K_MAC_FIRST_OFDM_RATE_IDX)
94 #define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - \
95 			     ATH10K_MAC_FIRST_OFDM_RATE_IDX)
96 #define ath10k_g_rates (ath10k_rates + 0)
97 #define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))
98 
99 #define ath10k_g_rates_rev2 (ath10k_rates_rev2 + 0)
100 #define ath10k_g_rates_rev2_size (ARRAY_SIZE(ath10k_rates_rev2))
101 
102 static bool ath10k_mac_bitrate_is_cck(int bitrate)
103 {
104 	switch (bitrate) {
105 	case 10:
106 	case 20:
107 	case 55:
108 	case 110:
109 		return true;
110 	}
111 
112 	return false;
113 }
114 
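/* Convert a bitrate in 100 kbps units (the mac80211 convention) into the
 * rate value handed to the firmware: the rate in 500 kbps units with
 * BIT(7) marking CCK rates, e.g. 5.5 Mbps (55) becomes 11 | BIT(7) = 0x8b.
 */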
115 static u8 ath10k_mac_bitrate_to_rate(int bitrate)
116 {
117 	return DIV_ROUND_UP(bitrate, 5) |
118 	       (ath10k_mac_bitrate_is_cck(bitrate) ? BIT(7) : 0);
119 }
120 
121 u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
122 			     u8 hw_rate, bool cck)
123 {
124 	const struct ieee80211_rate *rate;
125 	int i;
126 
127 	for (i = 0; i < sband->n_bitrates; i++) {
128 		rate = &sband->bitrates[i];
129 
130 		if (ath10k_mac_bitrate_is_cck(rate->bitrate) != cck)
131 			continue;
132 
133 		if (rate->hw_value == hw_rate)
134 			return i;
135 		else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE &&
136 			 rate->hw_value_short == hw_rate)
137 			return i;
138 	}
139 
140 	return 0;
141 }
142 
143 u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
144 			     u32 bitrate)
145 {
146 	int i;
147 
148 	for (i = 0; i < sband->n_bitrates; i++)
149 		if (sband->bitrates[i].bitrate == bitrate)
150 			return i;
151 
152 	return 0;
153 }
154 
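/* The VHT MCS map packs two bits per spatial stream. Expand the 2-bit
 * value for the given nss into a bitmask of usable MCS indices, e.g.
 * IEEE80211_VHT_MCS_SUPPORT_0_9 yields BIT(10) - 1 = 0x3ff (MCS 0-9).
 */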
155 static int ath10k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss)
156 {
157 	switch ((mcs_map >> (2 * nss)) & 0x3) {
158 	case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1;
159 	case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1;
160 	case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1;
161 	}
162 	return 0;
163 }
164 
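/* Highest spatial stream count for which the HT MCS mask still enables at
 * least one rate. Falls back to a single stream when the mask is empty.
 */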
165 static u32
166 ath10k_mac_max_ht_nss(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
167 {
168 	int nss;
169 
170 	for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--)
171 		if (ht_mcs_mask[nss])
172 			return nss + 1;
173 
174 	return 1;
175 }
176 
177 static u32
178 ath10k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
179 {
180 	int nss;
181 
182 	for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--)
183 		if (vht_mcs_mask[nss])
184 			return nss + 1;
185 
186 	return 1;
187 }
188 
189 int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val)
190 {
191 	enum wmi_host_platform_type platform_type;
192 	int ret;
193 
194 	if (test_bit(WMI_SERVICE_TX_MODE_DYNAMIC, ar->wmi.svc_map))
195 		platform_type = WMI_HOST_PLATFORM_LOW_PERF;
196 	else
197 		platform_type = WMI_HOST_PLATFORM_HIGH_PERF;
198 
199 	ret = ath10k_wmi_ext_resource_config(ar, platform_type, val);
200 
201 	if (ret && ret != -EOPNOTSUPP) {
202 		ath10k_warn(ar, "failed to configure ext resource: %d\n", ret);
203 		return ret;
204 	}
205 
206 	return 0;
207 }
208 
209 /**********/
210 /* Crypto */
211 /**********/
212 
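/* Translate a mac80211 key into a WMI vdev install key command. For
 * DISABLE_KEY the cipher is forced to WMI_CIPHER_NONE and the key data is
 * dropped, which effectively removes the key from the firmware.
 */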
213 static int ath10k_send_key(struct ath10k_vif *arvif,
214 			   struct ieee80211_key_conf *key,
215 			   enum set_key_cmd cmd,
216 			   const u8 *macaddr, u32 flags)
217 {
218 	struct ath10k *ar = arvif->ar;
219 	struct wmi_vdev_install_key_arg arg = {
220 		.vdev_id = arvif->vdev_id,
221 		.key_idx = key->keyidx,
222 		.key_len = key->keylen,
223 		.key_data = key->key,
224 		.key_flags = flags,
225 		.macaddr = macaddr,
226 	};
227 
228 	lockdep_assert_held(&arvif->ar->conf_mutex);
229 
230 	switch (key->cipher) {
231 	case WLAN_CIPHER_SUITE_CCMP:
232 		arg.key_cipher = WMI_CIPHER_AES_CCM;
233 		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
234 		break;
235 	case WLAN_CIPHER_SUITE_TKIP:
236 		arg.key_cipher = WMI_CIPHER_TKIP;
237 		arg.key_txmic_len = 8;
238 		arg.key_rxmic_len = 8;
239 		break;
240 	case WLAN_CIPHER_SUITE_WEP40:
241 	case WLAN_CIPHER_SUITE_WEP104:
242 		arg.key_cipher = WMI_CIPHER_WEP;
243 		break;
244 	case WLAN_CIPHER_SUITE_AES_CMAC:
245 		WARN_ON(1);
246 		return -EINVAL;
247 	default:
248 		ath10k_warn(ar, "cipher %d is not supported\n", key->cipher);
249 		return -EOPNOTSUPP;
250 	}
251 
252 	if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
253 		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
254 
255 	if (cmd == DISABLE_KEY) {
256 		arg.key_cipher = WMI_CIPHER_NONE;
257 		arg.key_data = NULL;
258 	}
259 
260 	return ath10k_wmi_vdev_install_key(arvif->ar, &arg);
261 }
262 
263 static int ath10k_install_key(struct ath10k_vif *arvif,
264 			      struct ieee80211_key_conf *key,
265 			      enum set_key_cmd cmd,
266 			      const u8 *macaddr, u32 flags)
267 {
268 	struct ath10k *ar = arvif->ar;
269 	int ret;
270 	unsigned long time_left;
271 
272 	lockdep_assert_held(&ar->conf_mutex);
273 
274 	reinit_completion(&ar->install_key_done);
275 
276 	if (arvif->nohwcrypt)
277 		return 1;
278 
279 	ret = ath10k_send_key(arvif, key, cmd, macaddr, flags);
280 	if (ret)
281 		return ret;
282 
283 	time_left = wait_for_completion_timeout(&ar->install_key_done, 3 * HZ);
284 	if (time_left == 0)
285 		return -ETIMEDOUT;
286 
287 	return 0;
288 }
289 
290 static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
291 					const u8 *addr)
292 {
293 	struct ath10k *ar = arvif->ar;
294 	struct ath10k_peer *peer;
295 	int ret;
296 	int i;
297 	u32 flags;
298 
299 	lockdep_assert_held(&ar->conf_mutex);
300 
301 	if (WARN_ON(arvif->vif->type != NL80211_IFTYPE_AP &&
302 		    arvif->vif->type != NL80211_IFTYPE_ADHOC &&
303 		    arvif->vif->type != NL80211_IFTYPE_MESH_POINT))
304 		return -EINVAL;
305 
306 	spin_lock_bh(&ar->data_lock);
307 	peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
308 	spin_unlock_bh(&ar->data_lock);
309 
310 	if (!peer)
311 		return -ENOENT;
312 
313 	for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) {
314 		if (arvif->wep_keys[i] == NULL)
315 			continue;
316 
317 		switch (arvif->vif->type) {
318 		case NL80211_IFTYPE_AP:
319 			flags = WMI_KEY_PAIRWISE;
320 
321 			if (arvif->def_wep_key_idx == i)
322 				flags |= WMI_KEY_TX_USAGE;
323 
324 			ret = ath10k_install_key(arvif, arvif->wep_keys[i],
325 						 SET_KEY, addr, flags);
326 			if (ret < 0)
327 				return ret;
328 			break;
329 		case NL80211_IFTYPE_ADHOC:
330 			ret = ath10k_install_key(arvif, arvif->wep_keys[i],
331 						 SET_KEY, addr,
332 						 WMI_KEY_PAIRWISE);
333 			if (ret < 0)
334 				return ret;
335 
336 			ret = ath10k_install_key(arvif, arvif->wep_keys[i],
337 						 SET_KEY, addr, WMI_KEY_GROUP);
338 			if (ret < 0)
339 				return ret;
340 			break;
341 		default:
342 			WARN_ON(1);
343 			return -EINVAL;
344 		}
345 
346 		spin_lock_bh(&ar->data_lock);
347 		peer->keys[i] = arvif->wep_keys[i];
348 		spin_unlock_bh(&ar->data_lock);
349 	}
350 
351 	/* In some cases (notably with static WEP IBSS with multiple keys)
352 	 * multicast Tx becomes broken. Both pairwise and groupwise keys are
353 	 * installed already. Using WMI_KEY_TX_USAGE in different combinations
354 	 * didn't seem help. Using def_keyid vdev parameter seems to be
355 	 * didn't seem to help. Using the def_keyid vdev parameter seems to be
356 	 *
357 	 * FIXME: Revisit. Perhaps this can be done in a less hacky way.
358 	 */
359 	if (arvif->vif->type != NL80211_IFTYPE_ADHOC)
360 		return 0;
361 
362 	if (arvif->def_wep_key_idx == -1)
363 		return 0;
364 
365 	ret = ath10k_wmi_vdev_set_param(arvif->ar,
366 					arvif->vdev_id,
367 					arvif->ar->wmi.vdev_param->def_keyid,
368 					arvif->def_wep_key_idx);
369 	if (ret) {
370 		ath10k_warn(ar, "failed to re-set def wpa key idx on vdev %i: %d\n",
371 			    arvif->vdev_id, ret);
372 		return ret;
373 	}
374 
375 	return 0;
376 }
377 
378 static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
379 				  const u8 *addr)
380 {
381 	struct ath10k *ar = arvif->ar;
382 	struct ath10k_peer *peer;
383 	int first_errno = 0;
384 	int ret;
385 	int i;
386 	u32 flags = 0;
387 
388 	lockdep_assert_held(&ar->conf_mutex);
389 
390 	spin_lock_bh(&ar->data_lock);
391 	peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
392 	spin_unlock_bh(&ar->data_lock);
393 
394 	if (!peer)
395 		return -ENOENT;
396 
397 	for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
398 		if (peer->keys[i] == NULL)
399 			continue;
400 
401 		/* key flags are not required to delete the key */
402 		ret = ath10k_install_key(arvif, peer->keys[i],
403 					 DISABLE_KEY, addr, flags);
404 		if (ret < 0 && first_errno == 0)
405 			first_errno = ret;
406 
407 		if (ret < 0)
408 			ath10k_warn(ar, "failed to remove peer wep key %d: %d\n",
409 				    i, ret);
410 
411 		spin_lock_bh(&ar->data_lock);
412 		peer->keys[i] = NULL;
413 		spin_unlock_bh(&ar->data_lock);
414 	}
415 
416 	return first_errno;
417 }
418 
419 bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr,
420 				    u8 keyidx)
421 {
422 	struct ath10k_peer *peer;
423 	int i;
424 
425 	lockdep_assert_held(&ar->data_lock);
426 
427 	/* We don't know which vdev this peer belongs to,
428 	 * since WMI doesn't give us that information.
429 	 *
430 	 * FIXME: multi-bss needs to be handled.
431 	 */
432 	peer = ath10k_peer_find(ar, 0, addr);
433 	if (!peer)
434 		return false;
435 
436 	for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
437 		if (peer->keys[i] && peer->keys[i]->keyidx == keyidx)
438 			return true;
439 	}
440 
441 	return false;
442 }
443 
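/* Remove a key from every peer that still references it. data_lock cannot
 * be held across ath10k_install_key() (it sleeps), so the peer list is
 * re-scanned and cleared one entry at a time.
 */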
444 static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
445 				 struct ieee80211_key_conf *key)
446 {
447 	struct ath10k *ar = arvif->ar;
448 	struct ath10k_peer *peer;
449 	u8 addr[ETH_ALEN];
450 	int first_errno = 0;
451 	int ret;
452 	int i;
453 	u32 flags = 0;
454 
455 	lockdep_assert_held(&ar->conf_mutex);
456 
457 	for (;;) {
458 		/* since ath10k_install_key() sleeps we can't hold data_lock all
459 		 * the time, so we try to remove the keys incrementally */
460 		spin_lock_bh(&ar->data_lock);
461 		i = 0;
462 		list_for_each_entry(peer, &ar->peers, list) {
463 			for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
464 				if (peer->keys[i] == key) {
465 					ether_addr_copy(addr, peer->addr);
466 					peer->keys[i] = NULL;
467 					break;
468 				}
469 			}
470 
471 			if (i < ARRAY_SIZE(peer->keys))
472 				break;
473 		}
474 		spin_unlock_bh(&ar->data_lock);
475 
476 		if (i == ARRAY_SIZE(peer->keys))
477 			break;
478 		/* key flags are not required to delete the key */
479 		ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, flags);
480 		if (ret < 0 && first_errno == 0)
481 			first_errno = ret;
482 
483 		if (ret)
484 			ath10k_warn(ar, "failed to remove key for %pM: %d\n",
485 				    addr, ret);
486 	}
487 
488 	return first_errno;
489 }
490 
491 static int ath10k_mac_vif_update_wep_key(struct ath10k_vif *arvif,
492 					 struct ieee80211_key_conf *key)
493 {
494 	struct ath10k *ar = arvif->ar;
495 	struct ath10k_peer *peer;
496 	int ret;
497 
498 	lockdep_assert_held(&ar->conf_mutex);
499 
500 	list_for_each_entry(peer, &ar->peers, list) {
501 		if (ether_addr_equal(peer->addr, arvif->vif->addr))
502 			continue;
503 
504 		if (ether_addr_equal(peer->addr, arvif->bssid))
505 			continue;
506 
507 		if (peer->keys[key->keyidx] == key)
508 			continue;
509 
510 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vif vdev %i key %i needs update\n",
511 			   arvif->vdev_id, key->keyidx);
512 
513 		ret = ath10k_install_peer_wep_keys(arvif, peer->addr);
514 		if (ret) {
515 			ath10k_warn(ar, "failed to update wep keys on vdev %i for peer %pM: %d\n",
516 				    arvif->vdev_id, peer->addr, ret);
517 			return ret;
518 		}
519 	}
520 
521 	return 0;
522 }
523 
524 /*********************/
525 /* General utilities */
526 /*********************/
527 
528 static inline enum wmi_phy_mode
529 chan_to_phymode(const struct cfg80211_chan_def *chandef)
530 {
531 	enum wmi_phy_mode phymode = MODE_UNKNOWN;
532 
533 	switch (chandef->chan->band) {
534 	case NL80211_BAND_2GHZ:
535 		switch (chandef->width) {
536 		case NL80211_CHAN_WIDTH_20_NOHT:
537 			if (chandef->chan->flags & IEEE80211_CHAN_NO_OFDM)
538 				phymode = MODE_11B;
539 			else
540 				phymode = MODE_11G;
541 			break;
542 		case NL80211_CHAN_WIDTH_20:
543 			phymode = MODE_11NG_HT20;
544 			break;
545 		case NL80211_CHAN_WIDTH_40:
546 			phymode = MODE_11NG_HT40;
547 			break;
548 		case NL80211_CHAN_WIDTH_5:
549 		case NL80211_CHAN_WIDTH_10:
550 		case NL80211_CHAN_WIDTH_80:
551 		case NL80211_CHAN_WIDTH_80P80:
552 		case NL80211_CHAN_WIDTH_160:
553 			phymode = MODE_UNKNOWN;
554 			break;
555 		}
556 		break;
557 	case NL80211_BAND_5GHZ:
558 		switch (chandef->width) {
559 		case NL80211_CHAN_WIDTH_20_NOHT:
560 			phymode = MODE_11A;
561 			break;
562 		case NL80211_CHAN_WIDTH_20:
563 			phymode = MODE_11NA_HT20;
564 			break;
565 		case NL80211_CHAN_WIDTH_40:
566 			phymode = MODE_11NA_HT40;
567 			break;
568 		case NL80211_CHAN_WIDTH_80:
569 			phymode = MODE_11AC_VHT80;
570 			break;
571 		case NL80211_CHAN_WIDTH_5:
572 		case NL80211_CHAN_WIDTH_10:
573 		case NL80211_CHAN_WIDTH_80P80:
574 		case NL80211_CHAN_WIDTH_160:
575 			phymode = MODE_UNKNOWN;
576 			break;
577 		}
578 		break;
579 	default:
580 		break;
581 	}
582 
583 	WARN_ON(phymode == MODE_UNKNOWN);
584 	return phymode;
585 }
586 
587 static u8 ath10k_parse_mpdudensity(u8 mpdudensity)
588 {
589 /*
590  * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
591  *   0 for no restriction
592  *   1 for 1/4 us
593  *   2 for 1/2 us
594  *   3 for 1 us
595  *   4 for 2 us
596  *   5 for 4 us
597  *   6 for 8 us
598  *   7 for 16 us
599  */
600 	switch (mpdudensity) {
601 	case 0:
602 		return 0;
603 	case 1:
604 	case 2:
605 	case 3:
606 	/* Our lower layer calculations limit our precision to
607 	   1 microsecond */
608 		return 1;
609 	case 4:
610 		return 2;
611 	case 5:
612 		return 4;
613 	case 6:
614 		return 8;
615 	case 7:
616 		return 16;
617 	default:
618 		return 0;
619 	}
620 }
621 
622 int ath10k_mac_vif_chan(struct ieee80211_vif *vif,
623 			struct cfg80211_chan_def *def)
624 {
625 	struct ieee80211_chanctx_conf *conf;
626 
627 	rcu_read_lock();
628 	conf = rcu_dereference(vif->chanctx_conf);
629 	if (!conf) {
630 		rcu_read_unlock();
631 		return -ENOENT;
632 	}
633 
634 	*def = conf->def;
635 	rcu_read_unlock();
636 
637 	return 0;
638 }
639 
640 static void ath10k_mac_num_chanctxs_iter(struct ieee80211_hw *hw,
641 					 struct ieee80211_chanctx_conf *conf,
642 					 void *data)
643 {
644 	int *num = data;
645 
646 	(*num)++;
647 }
648 
649 static int ath10k_mac_num_chanctxs(struct ath10k *ar)
650 {
651 	int num = 0;
652 
653 	ieee80211_iter_chan_contexts_atomic(ar->hw,
654 					    ath10k_mac_num_chanctxs_iter,
655 					    &num);
656 
657 	return num;
658 }
659 
660 static void
661 ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
662 				struct ieee80211_chanctx_conf *conf,
663 				void *data)
664 {
665 	struct cfg80211_chan_def **def = data;
666 
667 	*def = &conf->def;
668 }
669 
670 static int ath10k_peer_create(struct ath10k *ar,
671 			      struct ieee80211_vif *vif,
672 			      struct ieee80211_sta *sta,
673 			      u32 vdev_id,
674 			      const u8 *addr,
675 			      enum wmi_peer_type peer_type)
676 {
677 	struct ath10k_vif *arvif;
678 	struct ath10k_peer *peer;
679 	int num_peers = 0;
680 	int ret;
681 
682 	lockdep_assert_held(&ar->conf_mutex);
683 
684 	num_peers = ar->num_peers;
685 
686 	/* Each vdev consumes a peer entry as well */
687 	list_for_each_entry(arvif, &ar->arvifs, list)
688 		num_peers++;
689 
690 	if (num_peers >= ar->max_num_peers)
691 		return -ENOBUFS;
692 
693 	ret = ath10k_wmi_peer_create(ar, vdev_id, addr, peer_type);
694 	if (ret) {
695 		ath10k_warn(ar, "failed to create wmi peer %pM on vdev %i: %i\n",
696 			    addr, vdev_id, ret);
697 		return ret;
698 	}
699 
700 	ret = ath10k_wait_for_peer_created(ar, vdev_id, addr);
701 	if (ret) {
702 		ath10k_warn(ar, "failed to wait for created wmi peer %pM on vdev %i: %i\n",
703 			    addr, vdev_id, ret);
704 		return ret;
705 	}
706 
707 	spin_lock_bh(&ar->data_lock);
708 
709 	peer = ath10k_peer_find(ar, vdev_id, addr);
710 	if (!peer) {
711 		spin_unlock_bh(&ar->data_lock);
712 		ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n",
713 			    addr, vdev_id);
714 		ath10k_wmi_peer_delete(ar, vdev_id, addr);
715 		return -ENOENT;
716 	}
717 
718 	peer->vif = vif;
719 	peer->sta = sta;
720 
721 	spin_unlock_bh(&ar->data_lock);
722 
723 	ar->num_peers++;
724 
725 	return 0;
726 }
727 
728 static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
729 {
730 	struct ath10k *ar = arvif->ar;
731 	u32 param;
732 	int ret;
733 
734 	param = ar->wmi.pdev_param->sta_kickout_th;
735 	ret = ath10k_wmi_pdev_set_param(ar, param,
736 					ATH10K_KICKOUT_THRESHOLD);
737 	if (ret) {
738 		ath10k_warn(ar, "failed to set kickout threshold on vdev %i: %d\n",
739 			    arvif->vdev_id, ret);
740 		return ret;
741 	}
742 
743 	param = ar->wmi.vdev_param->ap_keepalive_min_idle_inactive_time_secs;
744 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
745 					ATH10K_KEEPALIVE_MIN_IDLE);
746 	if (ret) {
747 		ath10k_warn(ar, "failed to set keepalive minimum idle time on vdev %i: %d\n",
748 			    arvif->vdev_id, ret);
749 		return ret;
750 	}
751 
752 	param = ar->wmi.vdev_param->ap_keepalive_max_idle_inactive_time_secs;
753 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
754 					ATH10K_KEEPALIVE_MAX_IDLE);
755 	if (ret) {
756 		ath10k_warn(ar, "failed to set keepalive maximum idle time on vdev %i: %d\n",
757 			    arvif->vdev_id, ret);
758 		return ret;
759 	}
760 
761 	param = ar->wmi.vdev_param->ap_keepalive_max_unresponsive_time_secs;
762 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
763 					ATH10K_KEEPALIVE_MAX_UNRESPONSIVE);
764 	if (ret) {
765 		ath10k_warn(ar, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
766 			    arvif->vdev_id, ret);
767 		return ret;
768 	}
769 
770 	return 0;
771 }
772 
773 static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
774 {
775 	struct ath10k *ar = arvif->ar;
776 	u32 vdev_param;
777 
778 	vdev_param = ar->wmi.vdev_param->rts_threshold;
779 	return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
780 }
781 
782 static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
783 {
784 	int ret;
785 
786 	lockdep_assert_held(&ar->conf_mutex);
787 
788 	ret = ath10k_wmi_peer_delete(ar, vdev_id, addr);
789 	if (ret)
790 		return ret;
791 
792 	ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr);
793 	if (ret)
794 		return ret;
795 
796 	ar->num_peers--;
797 
798 	return 0;
799 }
800 
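/* Drop all peers belonging to the given vdev and scrub any stale peer_map
 * references so that later lookups cannot hit freed memory.
 */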
801 static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
802 {
803 	struct ath10k_peer *peer, *tmp;
804 	int peer_id;
805 	int i;
806 
807 	lockdep_assert_held(&ar->conf_mutex);
808 
809 	spin_lock_bh(&ar->data_lock);
810 	list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
811 		if (peer->vdev_id != vdev_id)
812 			continue;
813 
814 		ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n",
815 			    peer->addr, vdev_id);
816 
817 		for_each_set_bit(peer_id, peer->peer_ids,
818 				 ATH10K_MAX_NUM_PEER_IDS) {
819 			ar->peer_map[peer_id] = NULL;
820 		}
821 
822 		/* Double check that peer is properly un-referenced from
823 		 * the peer_map
824 		 */
825 		for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
826 			if (ar->peer_map[i] == peer) {
827 				ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %p idx %d)\n",
828 					    peer->addr, peer, i);
829 				ar->peer_map[i] = NULL;
830 			}
831 		}
832 
833 		list_del(&peer->list);
834 		kfree(peer);
835 		ar->num_peers--;
836 	}
837 	spin_unlock_bh(&ar->data_lock);
838 }
839 
840 static void ath10k_peer_cleanup_all(struct ath10k *ar)
841 {
842 	struct ath10k_peer *peer, *tmp;
843 	int i;
844 
845 	lockdep_assert_held(&ar->conf_mutex);
846 
847 	spin_lock_bh(&ar->data_lock);
848 	list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
849 		list_del(&peer->list);
850 		kfree(peer);
851 	}
852 
853 	for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++)
854 		ar->peer_map[i] = NULL;
855 
856 	spin_unlock_bh(&ar->data_lock);
857 
858 	ar->num_peers = 0;
859 	ar->num_stations = 0;
860 }
861 
862 static int ath10k_mac_tdls_peer_update(struct ath10k *ar, u32 vdev_id,
863 				       struct ieee80211_sta *sta,
864 				       enum wmi_tdls_peer_state state)
865 {
866 	int ret;
867 	struct wmi_tdls_peer_update_cmd_arg arg = {};
868 	struct wmi_tdls_peer_capab_arg cap = {};
869 	struct wmi_channel_arg chan_arg = {};
870 
871 	lockdep_assert_held(&ar->conf_mutex);
872 
873 	arg.vdev_id = vdev_id;
874 	arg.peer_state = state;
875 	ether_addr_copy(arg.addr, sta->addr);
876 
877 	cap.peer_max_sp = sta->max_sp;
878 	cap.peer_uapsd_queues = sta->uapsd_queues;
879 
880 	if (state == WMI_TDLS_PEER_STATE_CONNECTED &&
881 	    !sta->tdls_initiator)
882 		cap.is_peer_responder = 1;
883 
884 	ret = ath10k_wmi_tdls_peer_update(ar, &arg, &cap, &chan_arg);
885 	if (ret) {
886 		ath10k_warn(ar, "failed to update tdls peer %pM on vdev %i: %i\n",
887 			    arg.addr, vdev_id, ret);
888 		return ret;
889 	}
890 
891 	return 0;
892 }
893 
894 /************************/
895 /* Interface management */
896 /************************/
897 
898 void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif)
899 {
900 	struct ath10k *ar = arvif->ar;
901 
902 	lockdep_assert_held(&ar->data_lock);
903 
904 	if (!arvif->beacon)
905 		return;
906 
907 	if (!arvif->beacon_buf)
908 		dma_unmap_single(ar->dev, ATH10K_SKB_CB(arvif->beacon)->paddr,
909 				 arvif->beacon->len, DMA_TO_DEVICE);
910 
911 	if (WARN_ON(arvif->beacon_state != ATH10K_BEACON_SCHEDULED &&
912 		    arvif->beacon_state != ATH10K_BEACON_SENT))
913 		return;
914 
915 	dev_kfree_skb_any(arvif->beacon);
916 
917 	arvif->beacon = NULL;
918 	arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
919 }
920 
921 static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif)
922 {
923 	struct ath10k *ar = arvif->ar;
924 
925 	lockdep_assert_held(&ar->data_lock);
926 
927 	ath10k_mac_vif_beacon_free(arvif);
928 
929 	if (arvif->beacon_buf) {
930 		dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
931 				  arvif->beacon_buf, arvif->beacon_paddr);
932 		arvif->beacon_buf = NULL;
933 	}
934 }
935 
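/* Wait for the firmware response to a preceding vdev start/restart/stop
 * request. The completion is signalled from the corresponding WMI event
 * handler; bail out early if a firmware crash is being recovered from.
 */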
936 static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
937 {
938 	unsigned long time_left;
939 
940 	lockdep_assert_held(&ar->conf_mutex);
941 
942 	if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
943 		return -ESHUTDOWN;
944 
945 	time_left = wait_for_completion_timeout(&ar->vdev_setup_done,
946 						ATH10K_VDEV_SETUP_TIMEOUT_HZ);
947 	if (time_left == 0)
948 		return -ETIMEDOUT;
949 
950 	return 0;
951 }
952 
953 static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
954 {
955 	struct cfg80211_chan_def *chandef = NULL;
956 	struct ieee80211_channel *channel = NULL;
957 	struct wmi_vdev_start_request_arg arg = {};
958 	int ret = 0;
959 
960 	lockdep_assert_held(&ar->conf_mutex);
961 
962 	ieee80211_iter_chan_contexts_atomic(ar->hw,
963 					    ath10k_mac_get_any_chandef_iter,
964 					    &chandef);
965 	if (WARN_ON_ONCE(!chandef))
966 		return -ENOENT;
967 
968 	channel = chandef->chan;
969 
970 	arg.vdev_id = vdev_id;
971 	arg.channel.freq = channel->center_freq;
972 	arg.channel.band_center_freq1 = chandef->center_freq1;
973 
974 	/* TODO: set this up dynamically. What if we don't
975 	   have any vifs? */
976 	arg.channel.mode = chan_to_phymode(chandef);
977 	arg.channel.chan_radar =
978 			!!(channel->flags & IEEE80211_CHAN_RADAR);
979 
980 	arg.channel.min_power = 0;
981 	arg.channel.max_power = channel->max_power * 2;
982 	arg.channel.max_reg_power = channel->max_reg_power * 2;
983 	arg.channel.max_antenna_gain = channel->max_antenna_gain * 2;
984 
985 	reinit_completion(&ar->vdev_setup_done);
986 
987 	ret = ath10k_wmi_vdev_start(ar, &arg);
988 	if (ret) {
989 		ath10k_warn(ar, "failed to request monitor vdev %i start: %d\n",
990 			    vdev_id, ret);
991 		return ret;
992 	}
993 
994 	ret = ath10k_vdev_setup_sync(ar);
995 	if (ret) {
996 		ath10k_warn(ar, "failed to synchronize setup for monitor vdev %i start: %d\n",
997 			    vdev_id, ret);
998 		return ret;
999 	}
1000 
1001 	ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
1002 	if (ret) {
1003 		ath10k_warn(ar, "failed to put up monitor vdev %i: %d\n",
1004 			    vdev_id, ret);
1005 		goto vdev_stop;
1006 	}
1007 
1008 	ar->monitor_vdev_id = vdev_id;
1009 
1010 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i started\n",
1011 		   ar->monitor_vdev_id);
1012 	return 0;
1013 
1014 vdev_stop:
1015 	ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
1016 	if (ret)
1017 		ath10k_warn(ar, "failed to stop monitor vdev %i after start failure: %d\n",
1018 			    ar->monitor_vdev_id, ret);
1019 
1020 	return ret;
1021 }
1022 
1023 static int ath10k_monitor_vdev_stop(struct ath10k *ar)
1024 {
1025 	int ret = 0;
1026 
1027 	lockdep_assert_held(&ar->conf_mutex);
1028 
1029 	ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
1030 	if (ret)
1031 		ath10k_warn(ar, "failed to put down monitor vdev %i: %d\n",
1032 			    ar->monitor_vdev_id, ret);
1033 
1034 	reinit_completion(&ar->vdev_setup_done);
1035 
1036 	ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
1037 	if (ret)
1038 		ath10k_warn(ar, "failed to request monitor vdev %i stop: %d\n",
1039 			    ar->monitor_vdev_id, ret);
1040 
1041 	ret = ath10k_vdev_setup_sync(ar);
1042 	if (ret)
1043 		ath10k_warn(ar, "failed to synchronize monitor vdev %i stop: %d\n",
1044 			    ar->monitor_vdev_id, ret);
1045 
1046 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n",
1047 		   ar->monitor_vdev_id);
1048 	return ret;
1049 }
1050 
1051 static int ath10k_monitor_vdev_create(struct ath10k *ar)
1052 {
1053 	int bit, ret = 0;
1054 
1055 	lockdep_assert_held(&ar->conf_mutex);
1056 
1057 	if (ar->free_vdev_map == 0) {
1058 		ath10k_warn(ar, "failed to find free vdev id for monitor vdev\n");
1059 		return -ENOMEM;
1060 	}
1061 
1062 	bit = __ffs64(ar->free_vdev_map);
1063 
1064 	ar->monitor_vdev_id = bit;
1065 
1066 	ret = ath10k_wmi_vdev_create(ar, ar->monitor_vdev_id,
1067 				     WMI_VDEV_TYPE_MONITOR,
1068 				     0, ar->mac_addr);
1069 	if (ret) {
1070 		ath10k_warn(ar, "failed to request monitor vdev %i creation: %d\n",
1071 			    ar->monitor_vdev_id, ret);
1072 		return ret;
1073 	}
1074 
1075 	ar->free_vdev_map &= ~(1LL << ar->monitor_vdev_id);
1076 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
1077 		   ar->monitor_vdev_id);
1078 
1079 	return 0;
1080 }
1081 
1082 static int ath10k_monitor_vdev_delete(struct ath10k *ar)
1083 {
1084 	int ret = 0;
1085 
1086 	lockdep_assert_held(&ar->conf_mutex);
1087 
1088 	ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
1089 	if (ret) {
1090 		ath10k_warn(ar, "failed to request wmi monitor vdev %i removal: %d\n",
1091 			    ar->monitor_vdev_id, ret);
1092 		return ret;
1093 	}
1094 
1095 	ar->free_vdev_map |= 1LL << ar->monitor_vdev_id;
1096 
1097 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
1098 		   ar->monitor_vdev_id);
1099 	return ret;
1100 }
1101 
1102 static int ath10k_monitor_start(struct ath10k *ar)
1103 {
1104 	int ret;
1105 
1106 	lockdep_assert_held(&ar->conf_mutex);
1107 
1108 	ret = ath10k_monitor_vdev_create(ar);
1109 	if (ret) {
1110 		ath10k_warn(ar, "failed to create monitor vdev: %d\n", ret);
1111 		return ret;
1112 	}
1113 
1114 	ret = ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id);
1115 	if (ret) {
1116 		ath10k_warn(ar, "failed to start monitor vdev: %d\n", ret);
1117 		ath10k_monitor_vdev_delete(ar);
1118 		return ret;
1119 	}
1120 
1121 	ar->monitor_started = true;
1122 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor started\n");
1123 
1124 	return 0;
1125 }
1126 
1127 static int ath10k_monitor_stop(struct ath10k *ar)
1128 {
1129 	int ret;
1130 
1131 	lockdep_assert_held(&ar->conf_mutex);
1132 
1133 	ret = ath10k_monitor_vdev_stop(ar);
1134 	if (ret) {
1135 		ath10k_warn(ar, "failed to stop monitor vdev: %d\n", ret);
1136 		return ret;
1137 	}
1138 
1139 	ret = ath10k_monitor_vdev_delete(ar);
1140 	if (ret) {
1141 		ath10k_warn(ar, "failed to delete monitor vdev: %d\n", ret);
1142 		return ret;
1143 	}
1144 
1145 	ar->monitor_started = false;
1146 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopped\n");
1147 
1148 	return 0;
1149 }
1150 
1151 static bool ath10k_mac_monitor_vdev_is_needed(struct ath10k *ar)
1152 {
1153 	int num_ctx;
1154 
1155 	/* At least one chanctx is required to derive a channel to start
1156 	 * monitor vdev on.
1157 	 */
1158 	num_ctx = ath10k_mac_num_chanctxs(ar);
1159 	if (num_ctx == 0)
1160 		return false;
1161 
1162 	/* If there's already an existing special monitor interface then don't
1163 	 * bother creating another monitor vdev.
1164 	 */
1165 	if (ar->monitor_arvif)
1166 		return false;
1167 
1168 	return ar->monitor ||
1169 	       ar->filter_flags & FIF_OTHER_BSS ||
1170 	       test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
1171 }
1172 
1173 static bool ath10k_mac_monitor_vdev_is_allowed(struct ath10k *ar)
1174 {
1175 	int num_ctx;
1176 
1177 	num_ctx = ath10k_mac_num_chanctxs(ar);
1178 
1179 	/* FIXME: Current interface combinations and cfg80211/mac80211 code
1180 	 * shouldn't allow this but make sure to prevent handling the following
1181 	 * case anyway since multi-channel DFS hasn't been tested at all.
1182 	 */
1183 	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags) && num_ctx > 1)
1184 		return false;
1185 
1186 	return true;
1187 }
1188 
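/* Bring the monitor vdev in sync with the current requirements: it is
 * needed for an explicit monitor request, FIF_OTHER_BSS filtering or a
 * running CAC, and is only allowed when multi-channel DFS is not involved.
 */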
1189 static int ath10k_monitor_recalc(struct ath10k *ar)
1190 {
1191 	bool needed;
1192 	bool allowed;
1193 	int ret;
1194 
1195 	lockdep_assert_held(&ar->conf_mutex);
1196 
1197 	needed = ath10k_mac_monitor_vdev_is_needed(ar);
1198 	allowed = ath10k_mac_monitor_vdev_is_allowed(ar);
1199 
1200 	ath10k_dbg(ar, ATH10K_DBG_MAC,
1201 		   "mac monitor recalc started? %d needed? %d allowed? %d\n",
1202 		   ar->monitor_started, needed, allowed);
1203 
1204 	if (WARN_ON(needed && !allowed)) {
1205 		if (ar->monitor_started) {
1206 			ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopping disallowed monitor\n");
1207 
1208 			ret = ath10k_monitor_stop(ar);
1209 			if (ret)
1210 				ath10k_warn(ar, "failed to stop disallowed monitor: %d\n",
1211 					    ret);
1212 				/* not serious */
1213 		}
1214 
1215 		return -EPERM;
1216 	}
1217 
1218 	if (needed == ar->monitor_started)
1219 		return 0;
1220 
1221 	if (needed)
1222 		return ath10k_monitor_start(ar);
1223 	else
1224 		return ath10k_monitor_stop(ar);
1225 }
1226 
1227 static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
1228 {
1229 	struct ath10k *ar = arvif->ar;
1230 	u32 vdev_param, rts_cts = 0;
1231 
1232 	lockdep_assert_held(&ar->conf_mutex);
1233 
1234 	vdev_param = ar->wmi.vdev_param->enable_rtscts;
1235 
1236 	rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET);
1237 
1238 	if (arvif->num_legacy_stations > 0)
1239 		rts_cts |= SM(WMI_RTSCTS_ACROSS_SW_RETRIES,
1240 			      WMI_RTSCTS_PROFILE);
1241 	else
1242 		rts_cts |= SM(WMI_RTSCTS_FOR_SECOND_RATESERIES,
1243 			      WMI_RTSCTS_PROFILE);
1244 
1245 	return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
1246 					 rts_cts);
1247 }
1248 
1249 static int ath10k_start_cac(struct ath10k *ar)
1250 {
1251 	int ret;
1252 
1253 	lockdep_assert_held(&ar->conf_mutex);
1254 
1255 	set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
1256 
1257 	ret = ath10k_monitor_recalc(ar);
1258 	if (ret) {
1259 		ath10k_warn(ar, "failed to start monitor (cac): %d\n", ret);
1260 		clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
1261 		return ret;
1262 	}
1263 
1264 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n",
1265 		   ar->monitor_vdev_id);
1266 
1267 	return 0;
1268 }
1269 
1270 static int ath10k_stop_cac(struct ath10k *ar)
1271 {
1272 	lockdep_assert_held(&ar->conf_mutex);
1273 
1274 	/* CAC is not running - do nothing */
1275 	if (!test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags))
1276 		return 0;
1277 
1278 	clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
1279 	ath10k_monitor_stop(ar);
1280 
1281 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac finished\n");
1282 
1283 	return 0;
1284 }
1285 
1286 static void ath10k_mac_has_radar_iter(struct ieee80211_hw *hw,
1287 				      struct ieee80211_chanctx_conf *conf,
1288 				      void *data)
1289 {
1290 	bool *ret = data;
1291 
1292 	if (!*ret && conf->radar_enabled)
1293 		*ret = true;
1294 }
1295 
1296 static bool ath10k_mac_has_radar_enabled(struct ath10k *ar)
1297 {
1298 	bool has_radar = false;
1299 
1300 	ieee80211_iter_chan_contexts_atomic(ar->hw,
1301 					    ath10k_mac_has_radar_iter,
1302 					    &has_radar);
1303 
1304 	return has_radar;
1305 }
1306 
1307 static void ath10k_recalc_radar_detection(struct ath10k *ar)
1308 {
1309 	int ret;
1310 
1311 	lockdep_assert_held(&ar->conf_mutex);
1312 
1313 	ath10k_stop_cac(ar);
1314 
1315 	if (!ath10k_mac_has_radar_enabled(ar))
1316 		return;
1317 
1318 	if (ar->num_started_vdevs > 0)
1319 		return;
1320 
1321 	ret = ath10k_start_cac(ar);
1322 	if (ret) {
1323 		/*
1324 		 * Not possible to start CAC on current channel so starting
1325 		 * radiation is not allowed, make this channel DFS_UNAVAILABLE
1326 		 * by indicating that radar was detected.
1327 		 */
1328 		ath10k_warn(ar, "failed to start CAC: %d\n", ret);
1329 		ieee80211_radar_detected(ar->hw);
1330 	}
1331 }
1332 
1333 static int ath10k_vdev_stop(struct ath10k_vif *arvif)
1334 {
1335 	struct ath10k *ar = arvif->ar;
1336 	int ret;
1337 
1338 	lockdep_assert_held(&ar->conf_mutex);
1339 
1340 	reinit_completion(&ar->vdev_setup_done);
1341 
1342 	ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
1343 	if (ret) {
1344 		ath10k_warn(ar, "failed to stop WMI vdev %i: %d\n",
1345 			    arvif->vdev_id, ret);
1346 		return ret;
1347 	}
1348 
1349 	ret = ath10k_vdev_setup_sync(ar);
1350 	if (ret) {
1351 		ath10k_warn(ar, "failed to synchronize setup for vdev %i: %d\n",
1352 			    arvif->vdev_id, ret);
1353 		return ret;
1354 	}
1355 
1356 	WARN_ON(ar->num_started_vdevs == 0);
1357 
1358 	if (ar->num_started_vdevs != 0) {
1359 		ar->num_started_vdevs--;
1360 		ath10k_recalc_radar_detection(ar);
1361 	}
1362 
1363 	return ret;
1364 }
1365 
1366 static int ath10k_vdev_start_restart(struct ath10k_vif *arvif,
1367 				     const struct cfg80211_chan_def *chandef,
1368 				     bool restart)
1369 {
1370 	struct ath10k *ar = arvif->ar;
1371 	struct wmi_vdev_start_request_arg arg = {};
1372 	int ret = 0;
1373 
1374 	lockdep_assert_held(&ar->conf_mutex);
1375 
1376 	reinit_completion(&ar->vdev_setup_done);
1377 
1378 	arg.vdev_id = arvif->vdev_id;
1379 	arg.dtim_period = arvif->dtim_period;
1380 	arg.bcn_intval = arvif->beacon_interval;
1381 
1382 	arg.channel.freq = chandef->chan->center_freq;
1383 	arg.channel.band_center_freq1 = chandef->center_freq1;
1384 	arg.channel.mode = chan_to_phymode(chandef);
1385 
1386 	arg.channel.min_power = 0;
1387 	arg.channel.max_power = chandef->chan->max_power * 2;
1388 	arg.channel.max_reg_power = chandef->chan->max_reg_power * 2;
1389 	arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
1390 
1391 	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
1392 		arg.ssid = arvif->u.ap.ssid;
1393 		arg.ssid_len = arvif->u.ap.ssid_len;
1394 		arg.hidden_ssid = arvif->u.ap.hidden_ssid;
1395 
1396 		/* For now allow DFS for AP mode */
1397 		arg.channel.chan_radar =
1398 			!!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
1399 	} else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
1400 		arg.ssid = arvif->vif->bss_conf.ssid;
1401 		arg.ssid_len = arvif->vif->bss_conf.ssid_len;
1402 	}
1403 
1404 	ath10k_dbg(ar, ATH10K_DBG_MAC,
1405 		   "mac vdev %d start center_freq %d phymode %s\n",
1406 		   arg.vdev_id, arg.channel.freq,
1407 		   ath10k_wmi_phymode_str(arg.channel.mode));
1408 
1409 	if (restart)
1410 		ret = ath10k_wmi_vdev_restart(ar, &arg);
1411 	else
1412 		ret = ath10k_wmi_vdev_start(ar, &arg);
1413 
1414 	if (ret) {
1415 		ath10k_warn(ar, "failed to start WMI vdev %i: %d\n",
1416 			    arg.vdev_id, ret);
1417 		return ret;
1418 	}
1419 
1420 	ret = ath10k_vdev_setup_sync(ar);
1421 	if (ret) {
1422 		ath10k_warn(ar,
1423 			    "failed to synchronize setup for vdev %i restart %d: %d\n",
1424 			    arg.vdev_id, restart, ret);
1425 		return ret;
1426 	}
1427 
1428 	ar->num_started_vdevs++;
1429 	ath10k_recalc_radar_detection(ar);
1430 
1431 	return ret;
1432 }
1433 
1434 static int ath10k_vdev_start(struct ath10k_vif *arvif,
1435 			     const struct cfg80211_chan_def *def)
1436 {
1437 	return ath10k_vdev_start_restart(arvif, def, false);
1438 }
1439 
1440 static int ath10k_vdev_restart(struct ath10k_vif *arvif,
1441 			       const struct cfg80211_chan_def *def)
1442 {
1443 	return ath10k_vdev_start_restart(arvif, def, true);
1444 }
1445 
1446 static int ath10k_mac_setup_bcn_p2p_ie(struct ath10k_vif *arvif,
1447 				       struct sk_buff *bcn)
1448 {
1449 	struct ath10k *ar = arvif->ar;
1450 	struct ieee80211_mgmt *mgmt;
1451 	const u8 *p2p_ie;
1452 	int ret;
1453 
1454 	if (arvif->vif->type != NL80211_IFTYPE_AP || !arvif->vif->p2p)
1455 		return 0;
1456 
1457 	mgmt = (void *)bcn->data;
1458 	p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
1459 					 mgmt->u.beacon.variable,
1460 					 bcn->len - (mgmt->u.beacon.variable -
1461 						     bcn->data));
1462 	if (!p2p_ie)
1463 		return -ENOENT;
1464 
1465 	ret = ath10k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie);
1466 	if (ret) {
1467 		ath10k_warn(ar, "failed to submit p2p go bcn ie for vdev %i: %d\n",
1468 			    arvif->vdev_id, ret);
1469 		return ret;
1470 	}
1471 
1472 	return 0;
1473 }
1474 
1475 static int ath10k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui,
1476 				       u8 oui_type, size_t ie_offset)
1477 {
1478 	size_t len;
1479 	const u8 *next;
1480 	const u8 *end;
1481 	u8 *ie;
1482 
1483 	if (WARN_ON(skb->len < ie_offset))
1484 		return -EINVAL;
1485 
1486 	ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
1487 					   skb->data + ie_offset,
1488 					   skb->len - ie_offset);
1489 	if (!ie)
1490 		return -ENOENT;
1491 
1492 	len = ie[1] + 2;
1493 	end = skb->data + skb->len;
1494 	next = ie + len;
1495 
1496 	if (WARN_ON(next > end))
1497 		return -EINVAL;
1498 
1499 	memmove(ie, next, end - next);
1500 	skb_trim(skb, skb->len - len);
1501 
1502 	return 0;
1503 }
1504 
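/* Push the current beacon template to beacon-offload capable firmware.
 * The P2P IE is stripped from the template because the firmware inserts
 * it on its own, see ath10k_mac_setup_bcn_p2p_ie().
 */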
1505 static int ath10k_mac_setup_bcn_tmpl(struct ath10k_vif *arvif)
1506 {
1507 	struct ath10k *ar = arvif->ar;
1508 	struct ieee80211_hw *hw = ar->hw;
1509 	struct ieee80211_vif *vif = arvif->vif;
1510 	struct ieee80211_mutable_offsets offs = {};
1511 	struct sk_buff *bcn;
1512 	int ret;
1513 
1514 	if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
1515 		return 0;
1516 
1517 	if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
1518 	    arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
1519 		return 0;
1520 
1521 	bcn = ieee80211_beacon_get_template(hw, vif, &offs);
1522 	if (!bcn) {
1523 		ath10k_warn(ar, "failed to get beacon template from mac80211\n");
1524 		return -EPERM;
1525 	}
1526 
1527 	ret = ath10k_mac_setup_bcn_p2p_ie(arvif, bcn);
1528 	if (ret) {
1529 		ath10k_warn(ar, "failed to setup p2p go bcn ie: %d\n", ret);
1530 		kfree_skb(bcn);
1531 		return ret;
1532 	}
1533 
1534 	/* P2P IE is inserted by firmware automatically (as configured above)
1535 	 * so remove it from the base beacon template to avoid duplicate P2P
1536 	 * IEs in beacon frames.
1537 	 */
1538 	ath10k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
1539 				    offsetof(struct ieee80211_mgmt,
1540 					     u.beacon.variable));
1541 
1542 	ret = ath10k_wmi_bcn_tmpl(ar, arvif->vdev_id, offs.tim_offset, bcn, 0,
1543 				  0, NULL, 0);
1544 	kfree_skb(bcn);
1545 
1546 	if (ret) {
1547 		ath10k_warn(ar, "failed to submit beacon template command: %d\n",
1548 			    ret);
1549 		return ret;
1550 	}
1551 
1552 	return 0;
1553 }
1554 
1555 static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif)
1556 {
1557 	struct ath10k *ar = arvif->ar;
1558 	struct ieee80211_hw *hw = ar->hw;
1559 	struct ieee80211_vif *vif = arvif->vif;
1560 	struct sk_buff *prb;
1561 	int ret;
1562 
1563 	if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
1564 		return 0;
1565 
1566 	if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
1567 		return 0;
1568 
1569 	prb = ieee80211_proberesp_get(hw, vif);
1570 	if (!prb) {
1571 		ath10k_warn(ar, "failed to get probe resp template from mac80211\n");
1572 		return -EPERM;
1573 	}
1574 
1575 	ret = ath10k_wmi_prb_tmpl(ar, arvif->vdev_id, prb);
1576 	kfree_skb(prb);
1577 
1578 	if (ret) {
1579 		ath10k_warn(ar, "failed to submit probe resp template command: %d\n",
1580 			    ret);
1581 		return ret;
1582 	}
1583 
1584 	return 0;
1585 }
1586 
1587 static int ath10k_mac_vif_fix_hidden_ssid(struct ath10k_vif *arvif)
1588 {
1589 	struct ath10k *ar = arvif->ar;
1590 	struct cfg80211_chan_def def;
1591 	int ret;
1592 
1593 	/* When the vdev is originally started during assign_vif_chanctx() some
1594 	 * information is missing, notably the SSID. Firmware revisions with
1595 	 * beacon offloading require the SSID to be provided during vdev
1596 	 * (re)start to handle hidden SSID properly.
1597 	 *
1598 	 * Vdev restart must be done after vdev has been both started and
1599 	 * upped. Otherwise some firmware revisions (at least 10.2) fail to
1600 	 * deliver vdev restart response event causing timeouts during vdev
1601 	 * syncing in ath10k.
1602 	 *
1603 	 * Note: The vdev down/up and template reinstallation could be skipped
1604 	 * since only wmi-tlv firmware is known to have beacon offload and
1605 	 * wmi-tlv doesn't seem to misbehave like 10.2 wrt vdev restart
1606 	 * response delivery. It's probably more robust to keep it as is.
1607 	 */
1608 	if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
1609 		return 0;
1610 
1611 	if (WARN_ON(!arvif->is_started))
1612 		return -EINVAL;
1613 
1614 	if (WARN_ON(!arvif->is_up))
1615 		return -EINVAL;
1616 
1617 	if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
1618 		return -EINVAL;
1619 
1620 	ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
1621 	if (ret) {
1622 		ath10k_warn(ar, "failed to bring down ap vdev %i: %d\n",
1623 			    arvif->vdev_id, ret);
1624 		return ret;
1625 	}
1626 
1627 	/* Vdev down resets beacon & presp templates. Reinstall them. Otherwise
1628 	 * firmware will crash upon vdev up.
1629 	 */
1630 
1631 	ret = ath10k_mac_setup_bcn_tmpl(arvif);
1632 	if (ret) {
1633 		ath10k_warn(ar, "failed to update beacon template: %d\n", ret);
1634 		return ret;
1635 	}
1636 
1637 	ret = ath10k_mac_setup_prb_tmpl(arvif);
1638 	if (ret) {
1639 		ath10k_warn(ar, "failed to update presp template: %d\n", ret);
1640 		return ret;
1641 	}
1642 
1643 	ret = ath10k_vdev_restart(arvif, &def);
1644 	if (ret) {
1645 		ath10k_warn(ar, "failed to restart ap vdev %i: %d\n",
1646 			    arvif->vdev_id, ret);
1647 		return ret;
1648 	}
1649 
1650 	ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
1651 				 arvif->bssid);
1652 	if (ret) {
1653 		ath10k_warn(ar, "failed to bring up ap vdev %i: %d\n",
1654 			    arvif->vdev_id, ret);
1655 		return ret;
1656 	}
1657 
1658 	return 0;
1659 }
1660 
1661 static void ath10k_control_beaconing(struct ath10k_vif *arvif,
1662 				     struct ieee80211_bss_conf *info)
1663 {
1664 	struct ath10k *ar = arvif->ar;
1665 	int ret = 0;
1666 
1667 	lockdep_assert_held(&arvif->ar->conf_mutex);
1668 
1669 	if (!info->enable_beacon) {
1670 		ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
1671 		if (ret)
1672 			ath10k_warn(ar, "failed to down vdev_id %i: %d\n",
1673 				    arvif->vdev_id, ret);
1674 
1675 		arvif->is_up = false;
1676 
1677 		spin_lock_bh(&arvif->ar->data_lock);
1678 		ath10k_mac_vif_beacon_free(arvif);
1679 		spin_unlock_bh(&arvif->ar->data_lock);
1680 
1681 		return;
1682 	}
1683 
1684 	arvif->tx_seq_no = 0x1000;
1685 
1686 	arvif->aid = 0;
1687 	ether_addr_copy(arvif->bssid, info->bssid);
1688 
1689 	ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
1690 				 arvif->bssid);
1691 	if (ret) {
1692 		ath10k_warn(ar, "failed to bring up vdev %d: %i\n",
1693 			    arvif->vdev_id, ret);
1694 		return;
1695 	}
1696 
1697 	arvif->is_up = true;
1698 
1699 	ret = ath10k_mac_vif_fix_hidden_ssid(arvif);
1700 	if (ret) {
1701 		ath10k_warn(ar, "failed to fix hidden ssid for vdev %i, expect trouble: %d\n",
1702 			    arvif->vdev_id, ret);
1703 		return;
1704 	}
1705 
1706 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
1707 }
1708 
1709 static void ath10k_control_ibss(struct ath10k_vif *arvif,
1710 				struct ieee80211_bss_conf *info,
1711 				const u8 self_peer[ETH_ALEN])
1712 {
1713 	struct ath10k *ar = arvif->ar;
1714 	u32 vdev_param;
1715 	int ret = 0;
1716 
1717 	lockdep_assert_held(&arvif->ar->conf_mutex);
1718 
1719 	if (!info->ibss_joined) {
1720 		if (is_zero_ether_addr(arvif->bssid))
1721 			return;
1722 
1723 		eth_zero_addr(arvif->bssid);
1724 
1725 		return;
1726 	}
1727 
1728 	vdev_param = arvif->ar->wmi.vdev_param->atim_window;
1729 	ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param,
1730 					ATH10K_DEFAULT_ATIM);
1731 	if (ret)
1732 		ath10k_warn(ar, "failed to set IBSS ATIM for vdev %d: %d\n",
1733 			    arvif->vdev_id, ret);
1734 }
1735 
1736 static int ath10k_mac_vif_recalc_ps_wake_threshold(struct ath10k_vif *arvif)
1737 {
1738 	struct ath10k *ar = arvif->ar;
1739 	u32 param;
1740 	u32 value;
1741 	int ret;
1742 
1743 	lockdep_assert_held(&arvif->ar->conf_mutex);
1744 
1745 	if (arvif->u.sta.uapsd)
1746 		value = WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER;
1747 	else
1748 		value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
1749 
1750 	param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
1751 	ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, value);
1752 	if (ret) {
1753 		ath10k_warn(ar, "failed to submit ps wake threshold %u on vdev %i: %d\n",
1754 			    value, arvif->vdev_id, ret);
1755 		return ret;
1756 	}
1757 
1758 	return 0;
1759 }
1760 
1761 static int ath10k_mac_vif_recalc_ps_poll_count(struct ath10k_vif *arvif)
1762 {
1763 	struct ath10k *ar = arvif->ar;
1764 	u32 param;
1765 	u32 value;
1766 	int ret;
1767 
1768 	lockdep_assert_held(&arvif->ar->conf_mutex);
1769 
1770 	if (arvif->u.sta.uapsd)
1771 		value = WMI_STA_PS_PSPOLL_COUNT_UAPSD;
1772 	else
1773 		value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
1774 
1775 	param = WMI_STA_PS_PARAM_PSPOLL_COUNT;
1776 	ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
1777 					  param, value);
1778 	if (ret) {
1779 		ath10k_warn(ar, "failed to submit ps poll count %u on vdev %i: %d\n",
1780 			    value, arvif->vdev_id, ret);
1781 		return ret;
1782 	}
1783 
1784 	return 0;
1785 }
1786 
1787 static int ath10k_mac_num_vifs_started(struct ath10k *ar)
1788 {
1789 	struct ath10k_vif *arvif;
1790 	int num = 0;
1791 
1792 	lockdep_assert_held(&ar->conf_mutex);
1793 
1794 	list_for_each_entry(arvif, &ar->arvifs, list)
1795 		if (arvif->is_started)
1796 			num++;
1797 
1798 	return num;
1799 }
1800 
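/* Recompute and apply the station powersave mode for a vdev. PS stays
 * enabled on vdevs that are not started (firmware burns extra power
 * otherwise) and an enable request is refused when several vdevs are
 * started but the firmware lacks multi-vif PS support.
 */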
1801 static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
1802 {
1803 	struct ath10k *ar = arvif->ar;
1804 	struct ieee80211_vif *vif = arvif->vif;
1805 	struct ieee80211_conf *conf = &ar->hw->conf;
1806 	enum wmi_sta_powersave_param param;
1807 	enum wmi_sta_ps_mode psmode;
1808 	int ret;
1809 	int ps_timeout;
1810 	bool enable_ps;
1811 
1812 	lockdep_assert_held(&arvif->ar->conf_mutex);
1813 
1814 	if (arvif->vif->type != NL80211_IFTYPE_STATION)
1815 		return 0;
1816 
1817 	enable_ps = arvif->ps;
1818 
1819 	if (enable_ps && ath10k_mac_num_vifs_started(ar) > 1 &&
1820 	    !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT,
1821 		      ar->running_fw->fw_file.fw_features)) {
1822 		ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n",
1823 			    arvif->vdev_id);
1824 		enable_ps = false;
1825 	}
1826 
1827 	if (!arvif->is_started) {
1828 		/* mac80211 can update vif powersave state while disconnected.
1829 		 * Firmware doesn't behave nicely and consumes more power than
1830 		 * necessary if PS is disabled on a non-started vdev. Hence
1831 		 * force-enable PS for non-running vdevs.
1832 		 */
1833 		psmode = WMI_STA_PS_MODE_ENABLED;
1834 	} else if (enable_ps) {
1835 		psmode = WMI_STA_PS_MODE_ENABLED;
1836 		param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
1837 
1838 		ps_timeout = conf->dynamic_ps_timeout;
1839 		if (ps_timeout == 0) {
1840 			/* Firmware doesn't like 0 */
1841 			ps_timeout = ieee80211_tu_to_usec(
1842 				vif->bss_conf.beacon_int) / 1000;
1843 		}
1844 
1845 		ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
1846 						  ps_timeout);
1847 		if (ret) {
1848 			ath10k_warn(ar, "failed to set inactivity time for vdev %d: %i\n",
1849 				    arvif->vdev_id, ret);
1850 			return ret;
1851 		}
1852 	} else {
1853 		psmode = WMI_STA_PS_MODE_DISABLED;
1854 	}
1855 
1856 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d psmode %s\n",
1857 		   arvif->vdev_id, psmode ? "enable" : "disable");
1858 
1859 	ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode);
1860 	if (ret) {
1861 		ath10k_warn(ar, "failed to set PS Mode %d for vdev %d: %d\n",
1862 			    psmode, arvif->vdev_id, ret);
1863 		return ret;
1864 	}
1865 
1866 	return 0;
1867 }
1868 
1869 static int ath10k_mac_vif_disable_keepalive(struct ath10k_vif *arvif)
1870 {
1871 	struct ath10k *ar = arvif->ar;
1872 	struct wmi_sta_keepalive_arg arg = {};
1873 	int ret;
1874 
1875 	lockdep_assert_held(&arvif->ar->conf_mutex);
1876 
1877 	if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
1878 		return 0;
1879 
1880 	if (!test_bit(WMI_SERVICE_STA_KEEP_ALIVE, ar->wmi.svc_map))
1881 		return 0;
1882 
1883 	/* Some firmware revisions have a bug and ignore the `enabled` field.
1884 	 * Instead use the interval to disable the keepalive.
1885 	 */
1886 	arg.vdev_id = arvif->vdev_id;
1887 	arg.enabled = 1;
1888 	arg.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME;
1889 	arg.interval = WMI_STA_KEEPALIVE_INTERVAL_DISABLE;
1890 
1891 	ret = ath10k_wmi_sta_keepalive(ar, &arg);
1892 	if (ret) {
1893 		ath10k_warn(ar, "failed to submit keepalive on vdev %i: %d\n",
1894 			    arvif->vdev_id, ret);
1895 		return ret;
1896 	}
1897 
1898 	return 0;
1899 }
1900 
1901 static void ath10k_mac_vif_ap_csa_count_down(struct ath10k_vif *arvif)
1902 {
1903 	struct ath10k *ar = arvif->ar;
1904 	struct ieee80211_vif *vif = arvif->vif;
1905 	int ret;
1906 
1907 	lockdep_assert_held(&arvif->ar->conf_mutex);
1908 
1909 	if (WARN_ON(!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)))
1910 		return;
1911 
1912 	if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
1913 		return;
1914 
1915 	if (!vif->csa_active)
1916 		return;
1917 
1918 	if (!arvif->is_up)
1919 		return;
1920 
1921 	if (!ieee80211_csa_is_complete(vif)) {
1922 		ieee80211_csa_update_counter(vif);
1923 
1924 		ret = ath10k_mac_setup_bcn_tmpl(arvif);
1925 		if (ret)
1926 			ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
1927 				    ret);
1928 
1929 		ret = ath10k_mac_setup_prb_tmpl(arvif);
1930 		if (ret)
1931 			ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
1932 				    ret);
1933 	} else {
1934 		ieee80211_csa_finish(vif);
1935 	}
1936 }
1937 
1938 static void ath10k_mac_vif_ap_csa_work(struct work_struct *work)
1939 {
1940 	struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
1941 						ap_csa_work);
1942 	struct ath10k *ar = arvif->ar;
1943 
1944 	mutex_lock(&ar->conf_mutex);
1945 	ath10k_mac_vif_ap_csa_count_down(arvif);
1946 	mutex_unlock(&ar->conf_mutex);
1947 }
1948 
1949 static void ath10k_mac_handle_beacon_iter(void *data, u8 *mac,
1950 					  struct ieee80211_vif *vif)
1951 {
1952 	struct sk_buff *skb = data;
1953 	struct ieee80211_mgmt *mgmt = (void *)skb->data;
1954 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
1955 
1956 	if (vif->type != NL80211_IFTYPE_STATION)
1957 		return;
1958 
1959 	if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid))
1960 		return;
1961 
1962 	cancel_delayed_work(&arvif->connection_loss_work);
1963 }
1964 
1965 void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb)
1966 {
1967 	ieee80211_iterate_active_interfaces_atomic(ar->hw,
1968 						   IEEE80211_IFACE_ITER_NORMAL,
1969 						   ath10k_mac_handle_beacon_iter,
1970 						   skb);
1971 }
1972 
1973 static void ath10k_mac_handle_beacon_miss_iter(void *data, u8 *mac,
1974 					       struct ieee80211_vif *vif)
1975 {
1976 	u32 *vdev_id = data;
1977 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
1978 	struct ath10k *ar = arvif->ar;
1979 	struct ieee80211_hw *hw = ar->hw;
1980 
1981 	if (arvif->vdev_id != *vdev_id)
1982 		return;
1983 
1984 	if (!arvif->is_up)
1985 		return;
1986 
1987 	ieee80211_beacon_loss(vif);
1988 
1989 	/* Firmware doesn't report beacon loss events repeatedly. If AP probe
1990 	 * (done by mac80211) succeeds but beacons do not resume then it
1991 	 * doesn't make sense to continue operation. Queue connection loss work
1992 	 * which can be cancelled when beacon is received.
1993 	 */
1994 	ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work,
1995 				     ATH10K_CONNECTION_LOSS_HZ);
1996 }
1997 
1998 void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id)
1999 {
2000 	ieee80211_iterate_active_interfaces_atomic(ar->hw,
2001 						   IEEE80211_IFACE_ITER_NORMAL,
2002 						   ath10k_mac_handle_beacon_miss_iter,
2003 						   &vdev_id);
2004 }
2005 
2006 static void ath10k_mac_vif_sta_connection_loss_work(struct work_struct *work)
2007 {
2008 	struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
2009 						connection_loss_work.work);
2010 	struct ieee80211_vif *vif = arvif->vif;
2011 
2012 	if (!arvif->is_up)
2013 		return;
2014 
2015 	ieee80211_connection_loss(vif);
2016 }
2017 
2018 /**********************/
2019 /* Station management */
2020 /**********************/
2021 
2022 static u32 ath10k_peer_assoc_h_listen_intval(struct ath10k *ar,
2023 					     struct ieee80211_vif *vif)
2024 {
2025 	/* Some firmware revisions have unstable STA powersave when listen
2026 	 * interval is set too high (e.g. 5). The symptom is that the firmware
2027 	 * doesn't generate NullFunc frames properly even if buffered frames have
2028 	 * been indicated in the Beacon TIM and seldom wakes up to pull them, so
2029 	 * pinging the device from the AP side often simply fails.
2030 	 *
2031 	 * As a workaround set it to 1.
2032 	 */
2033 	if (vif->type == NL80211_IFTYPE_STATION)
2034 		return 1;
2035 
2036 	return ar->hw->conf.listen_interval;
2037 }
2038 
2039 static void ath10k_peer_assoc_h_basic(struct ath10k *ar,
2040 				      struct ieee80211_vif *vif,
2041 				      struct ieee80211_sta *sta,
2042 				      struct wmi_peer_assoc_complete_arg *arg)
2043 {
2044 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2045 	u32 aid;
2046 
2047 	lockdep_assert_held(&ar->conf_mutex);
2048 
2049 	if (vif->type == NL80211_IFTYPE_STATION)
2050 		aid = vif->bss_conf.aid;
2051 	else
2052 		aid = sta->aid;
2053 
2054 	ether_addr_copy(arg->addr, sta->addr);
2055 	arg->vdev_id = arvif->vdev_id;
2056 	arg->peer_aid = aid;
2057 	arg->peer_flags |= arvif->ar->wmi.peer_flags->auth;
2058 	arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif);
2059 	arg->peer_num_spatial_streams = 1;
2060 	arg->peer_caps = vif->bss_conf.assoc_capability;
2061 }
2062 
2063 static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
2064 				       struct ieee80211_vif *vif,
2065 				       struct ieee80211_sta *sta,
2066 				       struct wmi_peer_assoc_complete_arg *arg)
2067 {
2068 	struct ieee80211_bss_conf *info = &vif->bss_conf;
2069 	struct cfg80211_chan_def def;
2070 	struct cfg80211_bss *bss;
2071 	const u8 *rsnie = NULL;
2072 	const u8 *wpaie = NULL;
2073 
2074 	lockdep_assert_held(&ar->conf_mutex);
2075 
2076 	if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2077 		return;
2078 
2079 	bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0,
2080 			       IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
2081 	if (bss) {
2082 		const struct cfg80211_bss_ies *ies;
2083 
2084 		rcu_read_lock();
2085 		rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN);
2086 
2087 		ies = rcu_dereference(bss->ies);
2088 
2089 		wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
2090 						WLAN_OUI_TYPE_MICROSOFT_WPA,
2091 						ies->data,
2092 						ies->len);
2093 		rcu_read_unlock();
2094 		cfg80211_put_bss(ar->hw->wiphy, bss);
2095 	}
2096 
2097 	/* FIXME: Is keying this off the RSN/WPA IE the correct approach? */
2098 	if (rsnie || wpaie) {
2099 		ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__);
2100 		arg->peer_flags |= ar->wmi.peer_flags->need_ptk_4_way;
2101 	}
2102 
2103 	if (wpaie) {
2104 		ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__);
2105 		arg->peer_flags |= ar->wmi.peer_flags->need_gtk_2_way;
2106 	}
2107 
2108 	if (sta->mfp &&
2109 	    test_bit(ATH10K_FW_FEATURE_MFP_SUPPORT,
2110 		     ar->running_fw->fw_file.fw_features)) {
2111 		arg->peer_flags |= ar->wmi.peer_flags->pmf;
2112 	}
2113 }
2114 
2115 static void ath10k_peer_assoc_h_rates(struct ath10k *ar,
2116 				      struct ieee80211_vif *vif,
2117 				      struct ieee80211_sta *sta,
2118 				      struct wmi_peer_assoc_complete_arg *arg)
2119 {
2120 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2121 	struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
2122 	struct cfg80211_chan_def def;
2123 	const struct ieee80211_supported_band *sband;
2124 	const struct ieee80211_rate *rates;
2125 	enum nl80211_band band;
2126 	u32 ratemask;
2127 	u8 rate;
2128 	int i;
2129 
2130 	lockdep_assert_held(&ar->conf_mutex);
2131 
2132 	if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2133 		return;
2134 
2135 	band = def.chan->band;
2136 	sband = ar->hw->wiphy->bands[band];
2137 	ratemask = sta->supp_rates[band];
2138 	ratemask &= arvif->bitrate_mask.control[band].legacy;
2139 	rates = sband->bitrates;
2140 
2141 	rateset->num_rates = 0;
2142 
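	/* Walk the legacy rate mask (already intersected with the user's
	 * bitrate mask) bit by bit; each set bit maps to an entry in the
	 * band's bitrate table which ath10k_mac_bitrate_to_rate() converts
	 * into the encoding expected by firmware.
	 */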
2143 	for (i = 0; i < 32; i++, ratemask >>= 1, rates++) {
2144 		if (!(ratemask & 1))
2145 			continue;
2146 
2147 		rate = ath10k_mac_bitrate_to_rate(rates->bitrate);
2148 		rateset->rates[rateset->num_rates] = rate;
2149 		rateset->num_rates++;
2150 	}
2151 }
2152 
2153 static bool
2154 ath10k_peer_assoc_h_ht_masked(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
2155 {
2156 	int nss;
2157 
2158 	for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++)
2159 		if (ht_mcs_mask[nss])
2160 			return false;
2161 
2162 	return true;
2163 }
2164 
2165 static bool
2166 ath10k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
2167 {
2168 	int nss;
2169 
2170 	for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++)
2171 		if (vht_mcs_mask[nss])
2172 			return false;
2173 
2174 	return true;
2175 }
2176 
2177 static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
2178 				   struct ieee80211_vif *vif,
2179 				   struct ieee80211_sta *sta,
2180 				   struct wmi_peer_assoc_complete_arg *arg)
2181 {
2182 	const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2183 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2184 	struct cfg80211_chan_def def;
2185 	enum nl80211_band band;
2186 	const u8 *ht_mcs_mask;
2187 	const u16 *vht_mcs_mask;
2188 	int i, n;
2189 	u8 max_nss;
2190 	u32 stbc;
2191 
2192 	lockdep_assert_held(&ar->conf_mutex);
2193 
2194 	if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2195 		return;
2196 
2197 	if (!ht_cap->ht_supported)
2198 		return;
2199 
2200 	band = def.chan->band;
2201 	ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
2202 	vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
2203 
2204 	if (ath10k_peer_assoc_h_ht_masked(ht_mcs_mask) &&
2205 	    ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
2206 		return;
2207 
2208 	arg->peer_flags |= ar->wmi.peer_flags->ht;
2209 	arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
2210 				    ht_cap->ampdu_factor)) - 1;
2211 
2212 	arg->peer_mpdu_density =
2213 		ath10k_parse_mpdudensity(ht_cap->ampdu_density);
2214 
2215 	arg->peer_ht_caps = ht_cap->cap;
2216 	arg->peer_rate_caps |= WMI_RC_HT_FLAG;
2217 
2218 	if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)
2219 		arg->peer_flags |= ar->wmi.peer_flags->ldbc;
2220 
2221 	if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
2222 		arg->peer_flags |= ar->wmi.peer_flags->bw40;
2223 		arg->peer_rate_caps |= WMI_RC_CW40_FLAG;
2224 	}
2225 
2226 	if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) {
2227 		if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
2228 			arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
2229 
2230 		if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
2231 			arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
2232 	}
2233 
2234 	if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) {
2235 		arg->peer_rate_caps |= WMI_RC_TX_STBC_FLAG;
2236 		arg->peer_flags |= ar->wmi.peer_flags->stbc;
2237 	}
2238 
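	/* Extract the RX STBC stream count from the HT capability field and
	 * shift it into place in the WMI rate caps word.
	 */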
2239 	if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) {
2240 		stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC;
2241 		stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT;
2242 		stbc = stbc << WMI_RC_RX_STBC_FLAG_S;
2243 		arg->peer_rate_caps |= stbc;
2244 		arg->peer_flags |= ar->wmi.peer_flags->stbc;
2245 	}
2246 
2247 	if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2])
2248 		arg->peer_rate_caps |= WMI_RC_TS_FLAG;
2249 	else if (ht_cap->mcs.rx_mask[1])
2250 		arg->peer_rate_caps |= WMI_RC_DS_FLAG;
2251 
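	/* Collect the MCS indices that are both advertised by the peer and
	 * allowed by the user's HT MCS mask; the highest populated MCS octet
	 * determines the number of spatial streams used for this peer.
	 */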
2252 	for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++)
2253 		if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) &&
2254 		    (ht_mcs_mask[i / 8] & BIT(i % 8))) {
2255 			max_nss = (i / 8) + 1;
2256 			arg->peer_ht_rates.rates[n++] = i;
2257 		}
2258 
2259 	/*
2260 	 * This is a workaround for HT-enabled STAs which break the spec
2261 	 * and have no HT capabilities RX mask (no HT RX MCS map).
2262 	 *
2263 	 * As per spec, in section 20.3.5 Modulation and coding scheme (MCS),
2264 	 * MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs.
2265 	 *
2266 	 * Firmware asserts if such a situation occurs.
2267 	 */
2268 	if (n == 0) {
2269 		arg->peer_ht_rates.num_rates = 8;
2270 		for (i = 0; i < arg->peer_ht_rates.num_rates; i++)
2271 			arg->peer_ht_rates.rates[i] = i;
2272 	} else {
2273 		arg->peer_ht_rates.num_rates = n;
2274 		arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss);
2275 	}
2276 
2277 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
2278 		   arg->addr,
2279 		   arg->peer_ht_rates.num_rates,
2280 		   arg->peer_num_spatial_streams);
2281 }
2282 
2283 static int ath10k_peer_assoc_qos_ap(struct ath10k *ar,
2284 				    struct ath10k_vif *arvif,
2285 				    struct ieee80211_sta *sta)
2286 {
2287 	u32 uapsd = 0;
2288 	u32 max_sp = 0;
2289 	int ret = 0;
2290 
2291 	lockdep_assert_held(&ar->conf_mutex);
2292 
2293 	if (sta->wme && sta->uapsd_queues) {
2294 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",
2295 			   sta->uapsd_queues, sta->max_sp);
2296 
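		/* Translate the WMM U-APSD AC bitmap (VO/VI/BK/BE) into the
		 * per-AC delivery- and trigger-enabled bits (AC3..AC0)
		 * expected by firmware.
		 */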
2297 		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
2298 			uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
2299 				 WMI_AP_PS_UAPSD_AC3_TRIGGER_EN;
2300 		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
2301 			uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN |
2302 				 WMI_AP_PS_UAPSD_AC2_TRIGGER_EN;
2303 		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
2304 			uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN |
2305 				 WMI_AP_PS_UAPSD_AC1_TRIGGER_EN;
2306 		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
2307 			uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN |
2308 				 WMI_AP_PS_UAPSD_AC0_TRIGGER_EN;
2309 
2310 		if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP)
2311 			max_sp = sta->max_sp;
2312 
2313 		ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
2314 						 sta->addr,
2315 						 WMI_AP_PS_PEER_PARAM_UAPSD,
2316 						 uapsd);
2317 		if (ret) {
2318 			ath10k_warn(ar, "failed to set ap ps peer param uapsd for vdev %i: %d\n",
2319 				    arvif->vdev_id, ret);
2320 			return ret;
2321 		}
2322 
2323 		ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
2324 						 sta->addr,
2325 						 WMI_AP_PS_PEER_PARAM_MAX_SP,
2326 						 max_sp);
2327 		if (ret) {
2328 			ath10k_warn(ar, "failed to set ap ps peer param max sp for vdev %i: %d\n",
2329 				    arvif->vdev_id, ret);
2330 			return ret;
2331 		}
2332 
2333 		/* TODO: Set this up based on the STA listen interval and beacon
2334 		 * interval. Currently we don't know sta->listen_interval - a
2335 		 * mac80211 patch is required. Use 10 seconds for now.
2336 		 */
2337 		ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr,
2338 						 WMI_AP_PS_PEER_PARAM_AGEOUT_TIME,
2339 						 10);
2340 		if (ret) {
2341 			ath10k_warn(ar, "failed to set ap ps peer param ageout time for vdev %i: %d\n",
2342 				    arvif->vdev_id, ret);
2343 			return ret;
2344 		}
2345 	}
2346 
2347 	return 0;
2348 }
2349 
2350 static u16
2351 ath10k_peer_assoc_h_vht_limit(u16 tx_mcs_set,
2352 			      const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX])
2353 {
2354 	int idx_limit;
2355 	int nss;
2356 	u16 mcs_map;
2357 	u16 mcs;
2358 
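	/* For each spatial stream intersect the peer's tx MCS map with the
	 * user-configured limit, take the highest remaining MCS index and
	 * re-encode it into the 2-bit-per-stream VHT MCS map format. Limits
	 * other than MCS 0-7/0-8/0-9 (or an empty set) are rejected earlier
	 * by ath10k_mac_can_set_bitrate_mask(), hence the WARN_ON here.
	 */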
2359 	for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) {
2360 		mcs_map = ath10k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) &
2361 			  vht_mcs_limit[nss];
2362 
2363 		if (mcs_map)
2364 			idx_limit = fls(mcs_map) - 1;
2365 		else
2366 			idx_limit = -1;
2367 
2368 		switch (idx_limit) {
2369 		case 0: /* fall through */
2370 		case 1: /* fall through */
2371 		case 2: /* fall through */
2372 		case 3: /* fall through */
2373 		case 4: /* fall through */
2374 		case 5: /* fall through */
2375 		case 6: /* fall through */
2376 		default:
2377 			/* see ath10k_mac_can_set_bitrate_mask() */
2378 			WARN_ON(1);
2379 			/* fall through */
2380 		case -1:
2381 			mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED;
2382 			break;
2383 		case 7:
2384 			mcs = IEEE80211_VHT_MCS_SUPPORT_0_7;
2385 			break;
2386 		case 8:
2387 			mcs = IEEE80211_VHT_MCS_SUPPORT_0_8;
2388 			break;
2389 		case 9:
2390 			mcs = IEEE80211_VHT_MCS_SUPPORT_0_9;
2391 			break;
2392 		}
2393 
2394 		tx_mcs_set &= ~(0x3 << (nss * 2));
2395 		tx_mcs_set |= mcs << (nss * 2);
2396 	}
2397 
2398 	return tx_mcs_set;
2399 }
2400 
2401 static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
2402 				    struct ieee80211_vif *vif,
2403 				    struct ieee80211_sta *sta,
2404 				    struct wmi_peer_assoc_complete_arg *arg)
2405 {
2406 	const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
2407 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2408 	struct cfg80211_chan_def def;
2409 	enum nl80211_band band;
2410 	const u16 *vht_mcs_mask;
2411 	u8 ampdu_factor;
2412 
2413 	if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2414 		return;
2415 
2416 	if (!vht_cap->vht_supported)
2417 		return;
2418 
2419 	band = def.chan->band;
2420 	vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
2421 
2422 	if (ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
2423 		return;
2424 
2425 	arg->peer_flags |= ar->wmi.peer_flags->vht;
2426 
2427 	if (def.chan->band == NL80211_BAND_2GHZ)
2428 		arg->peer_flags |= ar->wmi.peer_flags->vht_2g;
2429 
2430 	arg->peer_vht_caps = vht_cap->cap;
2431 
2432 	ampdu_factor = (vht_cap->cap &
2433 			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >>
2434 		       IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
2435 
2436 	/* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to
2437 	 * zero in VHT IE. Using it would result in degraded throughput.
2438 	 * arg->peer_max_mpdu at this point contains HT max_mpdu so keep
2439 	 * it if VHT max_mpdu is smaller. */
2440 	arg->peer_max_mpdu = max(arg->peer_max_mpdu,
2441 				 (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
2442 					ampdu_factor)) - 1);
2443 
2444 	if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
2445 		arg->peer_flags |= ar->wmi.peer_flags->bw80;
2446 
2447 	arg->peer_vht_rates.rx_max_rate =
2448 		__le16_to_cpu(vht_cap->vht_mcs.rx_highest);
2449 	arg->peer_vht_rates.rx_mcs_set =
2450 		__le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
2451 	arg->peer_vht_rates.tx_max_rate =
2452 		__le16_to_cpu(vht_cap->vht_mcs.tx_highest);
2453 	arg->peer_vht_rates.tx_mcs_set = ath10k_peer_assoc_h_vht_limit(
2454 		__le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask);
2455 
2456 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
2457 		   sta->addr, arg->peer_max_mpdu, arg->peer_flags);
2458 }
2459 
2460 static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
2461 				    struct ieee80211_vif *vif,
2462 				    struct ieee80211_sta *sta,
2463 				    struct wmi_peer_assoc_complete_arg *arg)
2464 {
2465 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2466 
2467 	switch (arvif->vdev_type) {
2468 	case WMI_VDEV_TYPE_AP:
2469 		if (sta->wme)
2470 			arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
2471 
2472 		if (sta->wme && sta->uapsd_queues) {
2473 			arg->peer_flags |= arvif->ar->wmi.peer_flags->apsd;
2474 			arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG;
2475 		}
2476 		break;
2477 	case WMI_VDEV_TYPE_STA:
2478 		if (vif->bss_conf.qos)
2479 			arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
2480 		break;
2481 	case WMI_VDEV_TYPE_IBSS:
2482 		if (sta->wme)
2483 			arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
2484 		break;
2485 	default:
2486 		break;
2487 	}
2488 
2489 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM qos %d\n",
2490 		   sta->addr, !!(arg->peer_flags &
2491 		   arvif->ar->wmi.peer_flags->qos));
2492 }
2493 
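/* True if the peer supports at least one rate at or above the first OFDM rate
 * index, i.e. it is not a CCK-only (11b) station.
 */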
2494 static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
2495 {
2496 	return sta->supp_rates[NL80211_BAND_2GHZ] >>
2497 	       ATH10K_MAC_FIRST_OFDM_RATE_IDX;
2498 }
2499 
2500 static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
2501 					struct ieee80211_vif *vif,
2502 					struct ieee80211_sta *sta,
2503 					struct wmi_peer_assoc_complete_arg *arg)
2504 {
2505 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2506 	struct cfg80211_chan_def def;
2507 	enum nl80211_band band;
2508 	const u8 *ht_mcs_mask;
2509 	const u16 *vht_mcs_mask;
2510 	enum wmi_phy_mode phymode = MODE_UNKNOWN;
2511 
2512 	if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2513 		return;
2514 
2515 	band = def.chan->band;
2516 	ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
2517 	vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
2518 
2519 	switch (band) {
2520 	case NL80211_BAND_2GHZ:
2521 		if (sta->vht_cap.vht_supported &&
2522 		    !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
2523 			if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
2524 				phymode = MODE_11AC_VHT40;
2525 			else
2526 				phymode = MODE_11AC_VHT20;
2527 		} else if (sta->ht_cap.ht_supported &&
2528 			   !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
2529 			if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
2530 				phymode = MODE_11NG_HT40;
2531 			else
2532 				phymode = MODE_11NG_HT20;
2533 		} else if (ath10k_mac_sta_has_ofdm_only(sta)) {
2534 			phymode = MODE_11G;
2535 		} else {
2536 			phymode = MODE_11B;
2537 		}
2538 
2539 		break;
2540 	case NL80211_BAND_5GHZ:
2541 		/*
2542 		 * Check VHT first.
2543 		 */
2544 		if (sta->vht_cap.vht_supported &&
2545 		    !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
2546 			if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
2547 				phymode = MODE_11AC_VHT80;
2548 			else if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
2549 				phymode = MODE_11AC_VHT40;
2550 			else if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
2551 				phymode = MODE_11AC_VHT20;
2552 		} else if (sta->ht_cap.ht_supported &&
2553 			   !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
2554 			if (sta->bandwidth >= IEEE80211_STA_RX_BW_40)
2555 				phymode = MODE_11NA_HT40;
2556 			else
2557 				phymode = MODE_11NA_HT20;
2558 		} else {
2559 			phymode = MODE_11A;
2560 		}
2561 
2562 		break;
2563 	default:
2564 		break;
2565 	}
2566 
2567 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM phymode %s\n",
2568 		   sta->addr, ath10k_wmi_phymode_str(phymode));
2569 
2570 	arg->peer_phymode = phymode;
2571 	WARN_ON(phymode == MODE_UNKNOWN);
2572 }
2573 
2574 static int ath10k_peer_assoc_prepare(struct ath10k *ar,
2575 				     struct ieee80211_vif *vif,
2576 				     struct ieee80211_sta *sta,
2577 				     struct wmi_peer_assoc_complete_arg *arg)
2578 {
2579 	lockdep_assert_held(&ar->conf_mutex);
2580 
2581 	memset(arg, 0, sizeof(*arg));
2582 
2583 	ath10k_peer_assoc_h_basic(ar, vif, sta, arg);
2584 	ath10k_peer_assoc_h_crypto(ar, vif, sta, arg);
2585 	ath10k_peer_assoc_h_rates(ar, vif, sta, arg);
2586 	ath10k_peer_assoc_h_ht(ar, vif, sta, arg);
2587 	ath10k_peer_assoc_h_vht(ar, vif, sta, arg);
2588 	ath10k_peer_assoc_h_qos(ar, vif, sta, arg);
2589 	ath10k_peer_assoc_h_phymode(ar, vif, sta, arg);
2590 
2591 	return 0;
2592 }
2593 
2594 static const u32 ath10k_smps_map[] = {
2595 	[WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC,
2596 	[WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC,
2597 	[WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE,
2598 	[WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE,
2599 };
2600 
2601 static int ath10k_setup_peer_smps(struct ath10k *ar, struct ath10k_vif *arvif,
2602 				  const u8 *addr,
2603 				  const struct ieee80211_sta_ht_cap *ht_cap)
2604 {
2605 	int smps;
2606 
2607 	if (!ht_cap->ht_supported)
2608 		return 0;
2609 
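	/* Extract the SM power save field from the HT capabilities and map it
	 * to the firmware's per-peer SMPS state via ath10k_smps_map[].
	 */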
2610 	smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
2611 	smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
2612 
2613 	if (smps >= ARRAY_SIZE(ath10k_smps_map))
2614 		return -EINVAL;
2615 
2616 	return ath10k_wmi_peer_set_param(ar, arvif->vdev_id, addr,
2617 					 WMI_PEER_SMPS_STATE,
2618 					 ath10k_smps_map[smps]);
2619 }
2620 
2621 static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar,
2622 				      struct ieee80211_vif *vif,
2623 				      struct ieee80211_sta_vht_cap vht_cap)
2624 {
2625 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2626 	int ret;
2627 	u32 param;
2628 	u32 value;
2629 
2630 	if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_AFTER_ASSOC)
2631 		return 0;
2632 
2633 	if (!(ar->vht_cap_info &
2634 	      (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
2635 	       IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
2636 	       IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
2637 	       IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)))
2638 		return 0;
2639 
2640 	param = ar->wmi.vdev_param->txbf;
2641 	value = 0;
2642 
2643 	if (WARN_ON(param == WMI_VDEV_PARAM_UNSUPPORTED))
2644 		return 0;
2645 
2646 	/* The following logic is correct: if a remote STA advertises support
2647 	 * for being a beamformer then we should enable beamformee on our end.
2648 	 */
2649 
2650 	if (ar->vht_cap_info &
2651 	    (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
2652 	     IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
2653 		if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
2654 			value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
2655 
2656 		if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
2657 			value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE;
2658 	}
2659 
2660 	if (ar->vht_cap_info &
2661 	    (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
2662 	     IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
2663 		if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
2664 			value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
2665 
2666 		if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
2667 			value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER;
2668 	}
2669 
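	/* MU beamforming appears to require the corresponding SU capability,
	 * so make sure the SU bits are set whenever the MU bits are.
	 */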
2670 	if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFEE)
2671 		value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
2672 
2673 	if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFER)
2674 		value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
2675 
2676 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, value);
2677 	if (ret) {
2678 		ath10k_warn(ar, "failed to submit vdev param txbf 0x%x: %d\n",
2679 			    value, ret);
2680 		return ret;
2681 	}
2682 
2683 	return 0;
2684 }
2685 
2686 /* can be called only in mac80211 callbacks due to `key_count` usage */
2687 static void ath10k_bss_assoc(struct ieee80211_hw *hw,
2688 			     struct ieee80211_vif *vif,
2689 			     struct ieee80211_bss_conf *bss_conf)
2690 {
2691 	struct ath10k *ar = hw->priv;
2692 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2693 	struct ieee80211_sta_ht_cap ht_cap;
2694 	struct ieee80211_sta_vht_cap vht_cap;
2695 	struct wmi_peer_assoc_complete_arg peer_arg;
2696 	struct ieee80211_sta *ap_sta;
2697 	int ret;
2698 
2699 	lockdep_assert_held(&ar->conf_mutex);
2700 
2701 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n",
2702 		   arvif->vdev_id, arvif->bssid, arvif->aid);
2703 
2704 	rcu_read_lock();
2705 
2706 	ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
2707 	if (!ap_sta) {
2708 		ath10k_warn(ar, "failed to find station entry for bss %pM vdev %i\n",
2709 			    bss_conf->bssid, arvif->vdev_id);
2710 		rcu_read_unlock();
2711 		return;
2712 	}
2713 
2714 	/* ap_sta must be accessed only within the RCU section, which must be
2715 	 * left before calling ath10k_setup_peer_smps() as it might sleep. */
2716 	ht_cap = ap_sta->ht_cap;
2717 	vht_cap = ap_sta->vht_cap;
2718 
2719 	ret = ath10k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg);
2720 	if (ret) {
2721 		ath10k_warn(ar, "failed to prepare peer assoc for %pM vdev %i: %d\n",
2722 			    bss_conf->bssid, arvif->vdev_id, ret);
2723 		rcu_read_unlock();
2724 		return;
2725 	}
2726 
2727 	rcu_read_unlock();
2728 
2729 	ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
2730 	if (ret) {
2731 		ath10k_warn(ar, "failed to run peer assoc for %pM vdev %i: %d\n",
2732 			    bss_conf->bssid, arvif->vdev_id, ret);
2733 		return;
2734 	}
2735 
2736 	ret = ath10k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ht_cap);
2737 	if (ret) {
2738 		ath10k_warn(ar, "failed to setup peer SMPS for vdev %i: %d\n",
2739 			    arvif->vdev_id, ret);
2740 		return;
2741 	}
2742 
2743 	ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
2744 	if (ret) {
2745 		ath10k_warn(ar, "failed to recalc txbf for vdev %i on bss %pM: %d\n",
2746 			    arvif->vdev_id, bss_conf->bssid, ret);
2747 		return;
2748 	}
2749 
2750 	ath10k_dbg(ar, ATH10K_DBG_MAC,
2751 		   "mac vdev %d up (associated) bssid %pM aid %d\n",
2752 		   arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
2753 
2754 	WARN_ON(arvif->is_up);
2755 
2756 	arvif->aid = bss_conf->aid;
2757 	ether_addr_copy(arvif->bssid, bss_conf->bssid);
2758 
2759 	ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
2760 	if (ret) {
2761 		ath10k_warn(ar, "failed to set vdev %d up: %d\n",
2762 			    arvif->vdev_id, ret);
2763 		return;
2764 	}
2765 
2766 	arvif->is_up = true;
2767 
2768 	/* Workaround: Some firmware revisions (tested with qca6174
2769 	 * WLAN.RM.2.0-00073) have a buggy powersave state machine and must be
2770 	 * poked with a peer param command.
2771 	 */
2772 	ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, arvif->bssid,
2773 					WMI_PEER_DUMMY_VAR, 1);
2774 	if (ret) {
2775 		ath10k_warn(ar, "failed to poke peer %pM param for ps workaround on vdev %i: %d\n",
2776 			    arvif->bssid, arvif->vdev_id, ret);
2777 		return;
2778 	}
2779 }
2780 
2781 static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
2782 				struct ieee80211_vif *vif)
2783 {
2784 	struct ath10k *ar = hw->priv;
2785 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2786 	struct ieee80211_sta_vht_cap vht_cap = {};
2787 	int ret;
2788 
2789 	lockdep_assert_held(&ar->conf_mutex);
2790 
2791 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n",
2792 		   arvif->vdev_id, arvif->bssid);
2793 
2794 	ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
2795 	if (ret)
2796 		ath10k_warn(ar, "failed to down vdev %i: %d\n",
2797 			    arvif->vdev_id, ret);
2798 
2799 	arvif->def_wep_key_idx = -1;
2800 
2801 	ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
2802 	if (ret) {
2803 		ath10k_warn(ar, "failed to recalc txbf for vdev %i: %d\n",
2804 			    arvif->vdev_id, ret);
2805 		return;
2806 	}
2807 
2808 	arvif->is_up = false;
2809 
2810 	cancel_delayed_work_sync(&arvif->connection_loss_work);
2811 }
2812 
2813 static int ath10k_station_assoc(struct ath10k *ar,
2814 				struct ieee80211_vif *vif,
2815 				struct ieee80211_sta *sta,
2816 				bool reassoc)
2817 {
2818 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2819 	struct wmi_peer_assoc_complete_arg peer_arg;
2820 	int ret = 0;
2821 
2822 	lockdep_assert_held(&ar->conf_mutex);
2823 
2824 	ret = ath10k_peer_assoc_prepare(ar, vif, sta, &peer_arg);
2825 	if (ret) {
2826 		ath10k_warn(ar, "failed to prepare WMI peer assoc for %pM vdev %i: %i\n",
2827 			    sta->addr, arvif->vdev_id, ret);
2828 		return ret;
2829 	}
2830 
2831 	ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
2832 	if (ret) {
2833 		ath10k_warn(ar, "failed to run peer assoc for STA %pM vdev %i: %d\n",
2834 			    sta->addr, arvif->vdev_id, ret);
2835 		return ret;
2836 	}
2837 
2838 	/* Re-assoc is run only to update supported rates for a given station.
2839 	 * It doesn't make much sense to reconfigure the peer completely.
2840 	 */
2841 	if (!reassoc) {
2842 		ret = ath10k_setup_peer_smps(ar, arvif, sta->addr,
2843 					     &sta->ht_cap);
2844 		if (ret) {
2845 			ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n",
2846 				    arvif->vdev_id, ret);
2847 			return ret;
2848 		}
2849 
2850 		ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta);
2851 		if (ret) {
2852 			ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n",
2853 				    sta->addr, arvif->vdev_id, ret);
2854 			return ret;
2855 		}
2856 
2857 		if (!sta->wme) {
2858 			arvif->num_legacy_stations++;
2859 			ret  = ath10k_recalc_rtscts_prot(arvif);
2860 			if (ret) {
2861 				ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
2862 					    arvif->vdev_id, ret);
2863 				return ret;
2864 			}
2865 		}
2866 
2867 		/* Plumb cached keys only for static WEP */
2868 		if (arvif->def_wep_key_idx != -1) {
2869 			ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
2870 			if (ret) {
2871 				ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n",
2872 					    arvif->vdev_id, ret);
2873 				return ret;
2874 			}
2875 		}
2876 	}
2877 
2878 	return ret;
2879 }
2880 
2881 static int ath10k_station_disassoc(struct ath10k *ar,
2882 				   struct ieee80211_vif *vif,
2883 				   struct ieee80211_sta *sta)
2884 {
2885 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2886 	int ret = 0;
2887 
2888 	lockdep_assert_held(&ar->conf_mutex);
2889 
2890 	if (!sta->wme) {
2891 		arvif->num_legacy_stations--;
2892 		ret = ath10k_recalc_rtscts_prot(arvif);
2893 		if (ret) {
2894 			ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
2895 				    arvif->vdev_id, ret);
2896 			return ret;
2897 		}
2898 	}
2899 
2900 	ret = ath10k_clear_peer_keys(arvif, sta->addr);
2901 	if (ret) {
2902 		ath10k_warn(ar, "failed to clear all peer wep keys for vdev %i: %d\n",
2903 			    arvif->vdev_id, ret);
2904 		return ret;
2905 	}
2906 
2907 	return ret;
2908 }
2909 
2910 /**************/
2911 /* Regulatory */
2912 /**************/
2913 
2914 static int ath10k_update_channel_list(struct ath10k *ar)
2915 {
2916 	struct ieee80211_hw *hw = ar->hw;
2917 	struct ieee80211_supported_band **bands;
2918 	enum nl80211_band band;
2919 	struct ieee80211_channel *channel;
2920 	struct wmi_scan_chan_list_arg arg = {0};
2921 	struct wmi_channel_arg *ch;
2922 	bool passive;
2923 	int len;
2924 	int ret;
2925 	int i;
2926 
2927 	lockdep_assert_held(&ar->conf_mutex);
2928 
2929 	bands = hw->wiphy->bands;
2930 	for (band = 0; band < NUM_NL80211_BANDS; band++) {
2931 		if (!bands[band])
2932 			continue;
2933 
2934 		for (i = 0; i < bands[band]->n_channels; i++) {
2935 			if (bands[band]->channels[i].flags &
2936 			    IEEE80211_CHAN_DISABLED)
2937 				continue;
2938 
2939 			arg.n_channels++;
2940 		}
2941 	}
2942 
2943 	len = sizeof(struct wmi_channel_arg) * arg.n_channels;
2944 	arg.channels = kzalloc(len, GFP_KERNEL);
2945 	if (!arg.channels)
2946 		return -ENOMEM;
2947 
2948 	ch = arg.channels;
2949 	for (band = 0; band < NUM_NL80211_BANDS; band++) {
2950 		if (!bands[band])
2951 			continue;
2952 
2953 		for (i = 0; i < bands[band]->n_channels; i++) {
2954 			channel = &bands[band]->channels[i];
2955 
2956 			if (channel->flags & IEEE80211_CHAN_DISABLED)
2957 				continue;
2958 
2959 			ch->allow_ht = true;
2960 
2961 			/* FIXME: when should we really allow VHT? */
2962 			ch->allow_vht = true;
2963 
2964 			ch->allow_ibss =
2965 				!(channel->flags & IEEE80211_CHAN_NO_IR);
2966 
2967 			ch->ht40plus =
2968 				!(channel->flags & IEEE80211_CHAN_NO_HT40PLUS);
2969 
2970 			ch->chan_radar =
2971 				!!(channel->flags & IEEE80211_CHAN_RADAR);
2972 
2973 			passive = channel->flags & IEEE80211_CHAN_NO_IR;
2974 			ch->passive = passive;
2975 
2976 			ch->freq = channel->center_freq;
2977 			ch->band_center_freq1 = channel->center_freq;
2978 			ch->min_power = 0;
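			/* Power limits appear to be passed to firmware in
			 * 0.5 dBm units, hence the doubling below.
			 */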
2979 			ch->max_power = channel->max_power * 2;
2980 			ch->max_reg_power = channel->max_reg_power * 2;
2981 			ch->max_antenna_gain = channel->max_antenna_gain * 2;
2982 			ch->reg_class_id = 0; /* FIXME */
2983 
2984 			/* FIXME: why use only legacy modes, why not any
2985 			 * HT/VHT modes? Would that even make any
2986 			 * difference? */
2987 			if (channel->band == NL80211_BAND_2GHZ)
2988 				ch->mode = MODE_11G;
2989 			else
2990 				ch->mode = MODE_11A;
2991 
2992 			if (WARN_ON_ONCE(ch->mode == MODE_UNKNOWN))
2993 				continue;
2994 
2995 			ath10k_dbg(ar, ATH10K_DBG_WMI,
2996 				   "mac channel [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
2997 				    ch - arg.channels, arg.n_channels,
2998 				   ch->freq, ch->max_power, ch->max_reg_power,
2999 				   ch->max_antenna_gain, ch->mode);
3000 
3001 			ch++;
3002 		}
3003 	}
3004 
3005 	ret = ath10k_wmi_scan_chan_list(ar, &arg);
3006 	kfree(arg.channels);
3007 
3008 	return ret;
3009 }
3010 
3011 static enum wmi_dfs_region
3012 ath10k_mac_get_dfs_region(enum nl80211_dfs_regions dfs_region)
3013 {
3014 	switch (dfs_region) {
3015 	case NL80211_DFS_UNSET:
3016 		return WMI_UNINIT_DFS_DOMAIN;
3017 	case NL80211_DFS_FCC:
3018 		return WMI_FCC_DFS_DOMAIN;
3019 	case NL80211_DFS_ETSI:
3020 		return WMI_ETSI_DFS_DOMAIN;
3021 	case NL80211_DFS_JP:
3022 		return WMI_MKK4_DFS_DOMAIN;
3023 	}
3024 	return WMI_UNINIT_DFS_DOMAIN;
3025 }
3026 
3027 static void ath10k_regd_update(struct ath10k *ar)
3028 {
3029 	struct reg_dmn_pair_mapping *regpair;
3030 	int ret;
3031 	enum wmi_dfs_region wmi_dfs_reg;
3032 	enum nl80211_dfs_regions nl_dfs_reg;
3033 
3034 	lockdep_assert_held(&ar->conf_mutex);
3035 
3036 	ret = ath10k_update_channel_list(ar);
3037 	if (ret)
3038 		ath10k_warn(ar, "failed to update channel list: %d\n", ret);
3039 
3040 	regpair = ar->ath_common.regulatory.regpair;
3041 
3042 	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
3043 		nl_dfs_reg = ar->dfs_detector->region;
3044 		wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg);
3045 	} else {
3046 		wmi_dfs_reg = WMI_UNINIT_DFS_DOMAIN;
3047 	}
3048 
3049 	/* Target allows setting up per-band regdomain but ath_common provides
3050 	 * a combined one only */
3051 	ret = ath10k_wmi_pdev_set_regdomain(ar,
3052 					    regpair->reg_domain,
3053 					    regpair->reg_domain, /* 2ghz */
3054 					    regpair->reg_domain, /* 5ghz */
3055 					    regpair->reg_2ghz_ctl,
3056 					    regpair->reg_5ghz_ctl,
3057 					    wmi_dfs_reg);
3058 	if (ret)
3059 		ath10k_warn(ar, "failed to set pdev regdomain: %d\n", ret);
3060 }
3061 
3062 static void ath10k_reg_notifier(struct wiphy *wiphy,
3063 				struct regulatory_request *request)
3064 {
3065 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
3066 	struct ath10k *ar = hw->priv;
3067 	bool result;
3068 
3069 	ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory);
3070 
3071 	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
3072 		ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs region 0x%x\n",
3073 			   request->dfs_region);
3074 		result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector,
3075 							  request->dfs_region);
3076 		if (!result)
3077 			ath10k_warn(ar, "DFS region 0x%X not supported, will trigger radar for every pulse\n",
3078 				    request->dfs_region);
3079 	}
3080 
3081 	mutex_lock(&ar->conf_mutex);
3082 	if (ar->state == ATH10K_STATE_ON)
3083 		ath10k_regd_update(ar);
3084 	mutex_unlock(&ar->conf_mutex);
3085 }
3086 
3087 /***************/
3088 /* TX handlers */
3089 /***************/
3090 
3091 enum ath10k_mac_tx_path {
3092 	ATH10K_MAC_TX_HTT,
3093 	ATH10K_MAC_TX_HTT_MGMT,
3094 	ATH10K_MAC_TX_WMI_MGMT,
3095 	ATH10K_MAC_TX_UNKNOWN,
3096 };
3097 
3098 void ath10k_mac_tx_lock(struct ath10k *ar, int reason)
3099 {
3100 	lockdep_assert_held(&ar->htt.tx_lock);
3101 
3102 	WARN_ON(reason >= ATH10K_TX_PAUSE_MAX);
3103 	ar->tx_paused |= BIT(reason);
3104 	ieee80211_stop_queues(ar->hw);
3105 }
3106 
3107 static void ath10k_mac_tx_unlock_iter(void *data, u8 *mac,
3108 				      struct ieee80211_vif *vif)
3109 {
3110 	struct ath10k *ar = data;
3111 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
3112 
3113 	if (arvif->tx_paused)
3114 		return;
3115 
3116 	ieee80211_wake_queue(ar->hw, arvif->vdev_id);
3117 }
3118 
3119 void ath10k_mac_tx_unlock(struct ath10k *ar, int reason)
3120 {
3121 	lockdep_assert_held(&ar->htt.tx_lock);
3122 
3123 	WARN_ON(reason >= ATH10K_TX_PAUSE_MAX);
3124 	ar->tx_paused &= ~BIT(reason);
3125 
3126 	if (ar->tx_paused)
3127 		return;
3128 
3129 	ieee80211_iterate_active_interfaces_atomic(ar->hw,
3130 						   IEEE80211_IFACE_ITER_RESUME_ALL,
3131 						   ath10k_mac_tx_unlock_iter,
3132 						   ar);
3133 
3134 	ieee80211_wake_queue(ar->hw, ar->hw->offchannel_tx_hw_queue);
3135 }
3136 
3137 void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason)
3138 {
3139 	struct ath10k *ar = arvif->ar;
3140 
3141 	lockdep_assert_held(&ar->htt.tx_lock);
3142 
3143 	WARN_ON(reason >= BITS_PER_LONG);
3144 	arvif->tx_paused |= BIT(reason);
3145 	ieee80211_stop_queue(ar->hw, arvif->vdev_id);
3146 }
3147 
3148 void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason)
3149 {
3150 	struct ath10k *ar = arvif->ar;
3151 
3152 	lockdep_assert_held(&ar->htt.tx_lock);
3153 
3154 	WARN_ON(reason >= BITS_PER_LONG);
3155 	arvif->tx_paused &= ~BIT(reason);
3156 
3157 	if (ar->tx_paused)
3158 		return;
3159 
3160 	if (arvif->tx_paused)
3161 		return;
3162 
3163 	ieee80211_wake_queue(ar->hw, arvif->vdev_id);
3164 }
3165 
3166 static void ath10k_mac_vif_handle_tx_pause(struct ath10k_vif *arvif,
3167 					   enum wmi_tlv_tx_pause_id pause_id,
3168 					   enum wmi_tlv_tx_pause_action action)
3169 {
3170 	struct ath10k *ar = arvif->ar;
3171 
3172 	lockdep_assert_held(&ar->htt.tx_lock);
3173 
3174 	switch (action) {
3175 	case WMI_TLV_TX_PAUSE_ACTION_STOP:
3176 		ath10k_mac_vif_tx_lock(arvif, pause_id);
3177 		break;
3178 	case WMI_TLV_TX_PAUSE_ACTION_WAKE:
3179 		ath10k_mac_vif_tx_unlock(arvif, pause_id);
3180 		break;
3181 	default:
3182 		ath10k_warn(ar, "received unknown tx pause action %d on vdev %i, ignoring\n",
3183 			    action, arvif->vdev_id);
3184 		break;
3185 	}
3186 }
3187 
3188 struct ath10k_mac_tx_pause {
3189 	u32 vdev_id;
3190 	enum wmi_tlv_tx_pause_id pause_id;
3191 	enum wmi_tlv_tx_pause_action action;
3192 };
3193 
3194 static void ath10k_mac_handle_tx_pause_iter(void *data, u8 *mac,
3195 					    struct ieee80211_vif *vif)
3196 {
3197 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
3198 	struct ath10k_mac_tx_pause *arg = data;
3199 
3200 	if (arvif->vdev_id != arg->vdev_id)
3201 		return;
3202 
3203 	ath10k_mac_vif_handle_tx_pause(arvif, arg->pause_id, arg->action);
3204 }
3205 
3206 void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id,
3207 				     enum wmi_tlv_tx_pause_id pause_id,
3208 				     enum wmi_tlv_tx_pause_action action)
3209 {
3210 	struct ath10k_mac_tx_pause arg = {
3211 		.vdev_id = vdev_id,
3212 		.pause_id = pause_id,
3213 		.action = action,
3214 	};
3215 
3216 	spin_lock_bh(&ar->htt.tx_lock);
3217 	ieee80211_iterate_active_interfaces_atomic(ar->hw,
3218 						   IEEE80211_IFACE_ITER_RESUME_ALL,
3219 						   ath10k_mac_handle_tx_pause_iter,
3220 						   &arg);
3221 	spin_unlock_bh(&ar->htt.tx_lock);
3222 }
3223 
3224 static enum ath10k_hw_txrx_mode
3225 ath10k_mac_tx_h_get_txmode(struct ath10k *ar,
3226 			   struct ieee80211_vif *vif,
3227 			   struct ieee80211_sta *sta,
3228 			   struct sk_buff *skb)
3229 {
3230 	const struct ieee80211_hdr *hdr = (void *)skb->data;
3231 	__le16 fc = hdr->frame_control;
3232 
3233 	if (!vif || vif->type == NL80211_IFTYPE_MONITOR)
3234 		return ATH10K_HW_TXRX_RAW;
3235 
3236 	if (ieee80211_is_mgmt(fc))
3237 		return ATH10K_HW_TXRX_MGMT;
3238 
3239 	/* Workaround:
3240 	 *
3241 	 * NullFunc frames are mostly used to check whether a client or AP is
3242 	 * still reachable and responsive. This implies tx status reports must be
3243 	 * accurate - otherwise either mac80211 or userspace (e.g. hostapd) can
3244 	 * conclude that the other end disappeared and tear down the BSS
3245 	 * connection, or it can never disconnect from the BSS/client (which is
3246 	 * the case).
3247 	 *
3248 	 * Firmware with HTT older than 3.0 delivers incorrect tx status for
3249 	 * NullFunc frames to the driver. However there's an HTT Mgmt Tx command
3250 	 * which seems to deliver correct tx reports for NullFunc frames. The
3251 	 * downside of using it is that it ignores the client powersave state, so
3252 	 * it can end up disconnecting sleeping clients in AP mode. It should fix
3253 	 * STA mode though because APs don't sleep.
3254 	 */
3255 	if (ar->htt.target_version_major < 3 &&
3256 	    (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) &&
3257 	    !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
3258 		      ar->running_fw->fw_file.fw_features))
3259 		return ATH10K_HW_TXRX_MGMT;
3260 
3261 	/* Workaround:
3262 	 *
3263 	 * Some wmi-tlv firmwares for qca6174 have broken Tx key selection for
3264 	 * NativeWifi txmode - it selects AP key instead of peer key. It seems
3265 	 * to work with Ethernet txmode so use it.
3266 	 *
3267 	 * FIXME: Check if raw mode works with TDLS.
3268 	 */
3269 	if (ieee80211_is_data_present(fc) && sta && sta->tdls)
3270 		return ATH10K_HW_TXRX_ETHERNET;
3271 
3272 	if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
3273 		return ATH10K_HW_TXRX_RAW;
3274 
3275 	return ATH10K_HW_TXRX_NATIVE_WIFI;
3276 }
3277 
3278 static bool ath10k_tx_h_use_hwcrypto(struct ieee80211_vif *vif,
3279 				     struct sk_buff *skb)
3280 {
3281 	const struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
3282 	const struct ieee80211_hdr *hdr = (void *)skb->data;
3283 	const u32 mask = IEEE80211_TX_INTFL_DONT_ENCRYPT |
3284 			 IEEE80211_TX_CTL_INJECTED;
3285 
3286 	if (!ieee80211_has_protected(hdr->frame_control))
3287 		return false;
3288 
3289 	if ((info->flags & mask) == mask)
3290 		return false;
3291 
3292 	if (vif)
3293 		return !ath10k_vif_to_arvif(vif)->nohwcrypt;
3294 
3295 	return true;
3296 }
3297 
3298 /* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS
3299  * Control in the header.
3300  */
3301 static void ath10k_tx_h_nwifi(struct ieee80211_hw *hw, struct sk_buff *skb)
3302 {
3303 	struct ieee80211_hdr *hdr = (void *)skb->data;
3304 	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
3305 	u8 *qos_ctl;
3306 
3307 	if (!ieee80211_is_data_qos(hdr->frame_control))
3308 		return;
3309 
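	/* Remove the QoS Control field: shift the bytes preceding it forward
	 * by IEEE80211_QOS_CTL_LEN so they overwrite it, then pull the stale
	 * leading bytes off the front of the skb.
	 */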
3310 	qos_ctl = ieee80211_get_qos_ctl(hdr);
3311 	memmove(skb->data + IEEE80211_QOS_CTL_LEN,
3312 		skb->data, (void *)qos_ctl - (void *)skb->data);
3313 	skb_pull(skb, IEEE80211_QOS_CTL_LEN);
3314 
3315 	/* Some firmware revisions don't handle sending QoS NullFunc well.
3316 	 * These frames are mainly used for CQM purposes so it doesn't really
3317 	 * matter whether QoS NullFunc or NullFunc are sent.
3318 	 */
3319 	hdr = (void *)skb->data;
3320 	if (ieee80211_is_qos_nullfunc(hdr->frame_control))
3321 		cb->flags &= ~ATH10K_SKB_F_QOS;
3322 
3323 	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
3324 }
3325 
3326 static void ath10k_tx_h_8023(struct sk_buff *skb)
3327 {
3328 	struct ieee80211_hdr *hdr;
3329 	struct rfc1042_hdr *rfc1042;
3330 	struct ethhdr *eth;
3331 	size_t hdrlen;
3332 	u8 da[ETH_ALEN];
3333 	u8 sa[ETH_ALEN];
3334 	__be16 type;
3335 
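	/* Convert the 802.11 + LLC/SNAP headers into a plain Ethernet header:
	 * remember DA/SA and the SNAP ethertype, strip the wifi headers and
	 * push an ethhdr in their place.
	 */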
3336 	hdr = (void *)skb->data;
3337 	hdrlen = ieee80211_hdrlen(hdr->frame_control);
3338 	rfc1042 = (void *)skb->data + hdrlen;
3339 
3340 	ether_addr_copy(da, ieee80211_get_DA(hdr));
3341 	ether_addr_copy(sa, ieee80211_get_SA(hdr));
3342 	type = rfc1042->snap_type;
3343 
3344 	skb_pull(skb, hdrlen + sizeof(*rfc1042));
3345 	skb_push(skb, sizeof(*eth));
3346 
3347 	eth = (void *)skb->data;
3348 	ether_addr_copy(eth->h_dest, da);
3349 	ether_addr_copy(eth->h_source, sa);
3350 	eth->h_proto = type;
3351 }
3352 
3353 static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar,
3354 				       struct ieee80211_vif *vif,
3355 				       struct sk_buff *skb)
3356 {
3357 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
3358 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
3359 
3360 	/* This case applies only to P2P_GO */
3361 	if (vif->type != NL80211_IFTYPE_AP || !vif->p2p)
3362 		return;
3363 
3364 	if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) {
3365 		spin_lock_bh(&ar->data_lock);
3366 		if (arvif->u.ap.noa_data)
3367 			if (!pskb_expand_head(skb, 0, arvif->u.ap.noa_len,
3368 					      GFP_ATOMIC))
3369 				memcpy(skb_put(skb, arvif->u.ap.noa_len),
3370 				       arvif->u.ap.noa_data,
3371 				       arvif->u.ap.noa_len);
3372 		spin_unlock_bh(&ar->data_lock);
3373 	}
3374 }
3375 
3376 static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar,
3377 				    struct ieee80211_vif *vif,
3378 				    struct ieee80211_txq *txq,
3379 				    struct sk_buff *skb)
3380 {
3381 	struct ieee80211_hdr *hdr = (void *)skb->data;
3382 	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
3383 
3384 	cb->flags = 0;
3385 	if (!ath10k_tx_h_use_hwcrypto(vif, skb))
3386 		cb->flags |= ATH10K_SKB_F_NO_HWCRYPT;
3387 
3388 	if (ieee80211_is_mgmt(hdr->frame_control))
3389 		cb->flags |= ATH10K_SKB_F_MGMT;
3390 
3391 	if (ieee80211_is_data_qos(hdr->frame_control))
3392 		cb->flags |= ATH10K_SKB_F_QOS;
3393 
3394 	cb->vif = vif;
3395 	cb->txq = txq;
3396 }
3397 
3398 bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar)
3399 {
3400 	/* FIXME: Not really sure when the behaviour changed. At some point new
3401 	 * firmware stopped requiring creation of peer entries for offchannel tx
3402 	 * (and actually creating them causes issues with wmi-htc tx credit
3403 	 * replenishment and reliability). Assume at least 3.4 because that's
3404 	 * when the `freq` field was introduced in the TX_FRM HTT command.
3405 	 */
3406 	return (ar->htt.target_version_major >= 3 &&
3407 		ar->htt.target_version_minor >= 4 &&
3408 		ar->running_fw->fw_file.htt_op_version == ATH10K_FW_HTT_OP_VERSION_TLV);
3409 }
3410 
3411 static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb)
3412 {
3413 	struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
3414 	int ret = 0;
3415 
3416 	spin_lock_bh(&ar->data_lock);
3417 
3418 	if (skb_queue_len(q) == ATH10K_MAX_NUM_MGMT_PENDING) {
3419 		ath10k_warn(ar, "wmi mgmt tx queue is full\n");
3420 		ret = -ENOSPC;
3421 		goto unlock;
3422 	}
3423 
3424 	__skb_queue_tail(q, skb);
3425 	ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
3426 
3427 unlock:
3428 	spin_unlock_bh(&ar->data_lock);
3429 
3430 	return ret;
3431 }
3432 
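/* Map the chosen txrx mode to a delivery path: data frames always go via HTT;
 * management frames go over WMI when the firmware advertises WMI mgmt tx,
 * over regular HTT tx on HTT >= 3.0 and over the HTT mgmt tx command
 * otherwise.
 */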
3433 static enum ath10k_mac_tx_path
3434 ath10k_mac_tx_h_get_txpath(struct ath10k *ar,
3435 			   struct sk_buff *skb,
3436 			   enum ath10k_hw_txrx_mode txmode)
3437 {
3438 	switch (txmode) {
3439 	case ATH10K_HW_TXRX_RAW:
3440 	case ATH10K_HW_TXRX_NATIVE_WIFI:
3441 	case ATH10K_HW_TXRX_ETHERNET:
3442 		return ATH10K_MAC_TX_HTT;
3443 	case ATH10K_HW_TXRX_MGMT:
3444 		if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
3445 			     ar->running_fw->fw_file.fw_features))
3446 			return ATH10K_MAC_TX_WMI_MGMT;
3447 		else if (ar->htt.target_version_major >= 3)
3448 			return ATH10K_MAC_TX_HTT;
3449 		else
3450 			return ATH10K_MAC_TX_HTT_MGMT;
3451 	}
3452 
3453 	return ATH10K_MAC_TX_UNKNOWN;
3454 }
3455 
3456 static int ath10k_mac_tx_submit(struct ath10k *ar,
3457 				enum ath10k_hw_txrx_mode txmode,
3458 				enum ath10k_mac_tx_path txpath,
3459 				struct sk_buff *skb)
3460 {
3461 	struct ath10k_htt *htt = &ar->htt;
3462 	int ret = -EINVAL;
3463 
3464 	switch (txpath) {
3465 	case ATH10K_MAC_TX_HTT:
3466 		ret = ath10k_htt_tx(htt, txmode, skb);
3467 		break;
3468 	case ATH10K_MAC_TX_HTT_MGMT:
3469 		ret = ath10k_htt_mgmt_tx(htt, skb);
3470 		break;
3471 	case ATH10K_MAC_TX_WMI_MGMT:
3472 		ret = ath10k_mac_tx_wmi_mgmt(ar, skb);
3473 		break;
3474 	case ATH10K_MAC_TX_UNKNOWN:
3475 		WARN_ON_ONCE(1);
3476 		ret = -EINVAL;
3477 		break;
3478 	}
3479 
3480 	if (ret) {
3481 		ath10k_warn(ar, "failed to transmit packet, dropping: %d\n",
3482 			    ret);
3483 		ieee80211_free_txskb(ar->hw, skb);
3484 	}
3485 
3486 	return ret;
3487 }
3488 
3489 /* This function consumes the sk_buff regardless of return value as far as
3490  * caller is concerned so no freeing is necessary afterwards.
3491  */
3492 static int ath10k_mac_tx(struct ath10k *ar,
3493 			 struct ieee80211_vif *vif,
3494 			 struct ieee80211_sta *sta,
3495 			 enum ath10k_hw_txrx_mode txmode,
3496 			 enum ath10k_mac_tx_path txpath,
3497 			 struct sk_buff *skb)
3498 {
3499 	struct ieee80211_hw *hw = ar->hw;
3500 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
3501 	int ret;
3502 
3503 	/* CCK rates should be disabled due to P2P; for now this is only logged */
3504 	if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
3505 		ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
3506 
3507 	switch (txmode) {
3508 	case ATH10K_HW_TXRX_MGMT:
3509 	case ATH10K_HW_TXRX_NATIVE_WIFI:
3510 		ath10k_tx_h_nwifi(hw, skb);
3511 		ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb);
3512 		ath10k_tx_h_seq_no(vif, skb);
3513 		break;
3514 	case ATH10K_HW_TXRX_ETHERNET:
3515 		ath10k_tx_h_8023(skb);
3516 		break;
3517 	case ATH10K_HW_TXRX_RAW:
3518 		if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
3519 			WARN_ON_ONCE(1);
3520 			ieee80211_free_txskb(hw, skb);
3521 			return -ENOTSUPP;
3522 		}
3523 	}
3524 
3525 	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
3526 		if (!ath10k_mac_tx_frm_has_freq(ar)) {
3527 			ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n",
3528 				   skb);
3529 
3530 			skb_queue_tail(&ar->offchan_tx_queue, skb);
3531 			ieee80211_queue_work(hw, &ar->offchan_tx_work);
3532 			return 0;
3533 		}
3534 	}
3535 
3536 	ret = ath10k_mac_tx_submit(ar, txmode, txpath, skb);
3537 	if (ret) {
3538 		ath10k_warn(ar, "failed to submit frame: %d\n", ret);
3539 		return ret;
3540 	}
3541 
3542 	return 0;
3543 }
3544 
3545 void ath10k_offchan_tx_purge(struct ath10k *ar)
3546 {
3547 	struct sk_buff *skb;
3548 
3549 	for (;;) {
3550 		skb = skb_dequeue(&ar->offchan_tx_queue);
3551 		if (!skb)
3552 			break;
3553 
3554 		ieee80211_free_txskb(ar->hw, skb);
3555 	}
3556 }
3557 
3558 void ath10k_offchan_tx_work(struct work_struct *work)
3559 {
3560 	struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work);
3561 	struct ath10k_peer *peer;
3562 	struct ath10k_vif *arvif;
3563 	enum ath10k_hw_txrx_mode txmode;
3564 	enum ath10k_mac_tx_path txpath;
3565 	struct ieee80211_hdr *hdr;
3566 	struct ieee80211_vif *vif;
3567 	struct ieee80211_sta *sta;
3568 	struct sk_buff *skb;
3569 	const u8 *peer_addr;
3570 	int vdev_id;
3571 	int ret;
3572 	unsigned long time_left;
3573 	bool tmp_peer_created = false;
3574 
3575 	/* FW requirement: We must create a peer before FW will send out
3576 	 * an offchannel frame. Otherwise the frame will be stuck and
3577 	 * never transmitted. We delete the peer upon tx completion.
3578 	 * It is unlikely that a peer for offchannel tx will already be
3579 	 * present. However it may be in some rare cases, so account for that.
3580 	 * Otherwise we might remove a legitimate peer and break things. */
3581 
3582 	for (;;) {
3583 		skb = skb_dequeue(&ar->offchan_tx_queue);
3584 		if (!skb)
3585 			break;
3586 
3587 		mutex_lock(&ar->conf_mutex);
3588 
3589 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %p\n",
3590 			   skb);
3591 
3592 		hdr = (struct ieee80211_hdr *)skb->data;
3593 		peer_addr = ieee80211_get_DA(hdr);
3594 
3595 		spin_lock_bh(&ar->data_lock);
3596 		vdev_id = ar->scan.vdev_id;
3597 		peer = ath10k_peer_find(ar, vdev_id, peer_addr);
3598 		spin_unlock_bh(&ar->data_lock);
3599 
3600 		if (peer)
3601 			/* FIXME: should this use ath10k_warn()? */
3602 			ath10k_dbg(ar, ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n",
3603 				   peer_addr, vdev_id);
3604 
3605 		if (!peer) {
3606 			ret = ath10k_peer_create(ar, NULL, NULL, vdev_id,
3607 						 peer_addr,
3608 						 WMI_PEER_TYPE_DEFAULT);
3609 			if (ret)
3610 				ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n",
3611 					    peer_addr, vdev_id, ret);
3612 			tmp_peer_created = (ret == 0);
3613 		}
3614 
3615 		spin_lock_bh(&ar->data_lock);
3616 		reinit_completion(&ar->offchan_tx_completed);
3617 		ar->offchan_tx_skb = skb;
3618 		spin_unlock_bh(&ar->data_lock);
3619 
3620 		/* It's safe to access vif and sta - conf_mutex guarantees that
3621 		 * sta_state() and remove_interface() are locked out exclusively
3622 		 * with respect to this offchannel worker.
3623 		 */
3624 		arvif = ath10k_get_arvif(ar, vdev_id);
3625 		if (arvif) {
3626 			vif = arvif->vif;
3627 			sta = ieee80211_find_sta(vif, peer_addr);
3628 		} else {
3629 			vif = NULL;
3630 			sta = NULL;
3631 		}
3632 
3633 		txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
3634 		txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
3635 
3636 		ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
3637 		if (ret) {
3638 			ath10k_warn(ar, "failed to transmit offchannel frame: %d\n",
3639 				    ret);
3640 			/* not serious */
3641 		}
3642 
3643 		time_left =
3644 		wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ);
3645 		if (time_left == 0)
3646 			ath10k_warn(ar, "timed out waiting for offchannel skb %p\n",
3647 				    skb);
3648 
3649 		if (!peer && tmp_peer_created) {
3650 			ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
3651 			if (ret)
3652 				ath10k_warn(ar, "failed to delete peer %pM on vdev %d: %d\n",
3653 					    peer_addr, vdev_id, ret);
3654 		}
3655 
3656 		mutex_unlock(&ar->conf_mutex);
3657 	}
3658 }
3659 
3660 void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar)
3661 {
3662 	struct sk_buff *skb;
3663 
3664 	for (;;) {
3665 		skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
3666 		if (!skb)
3667 			break;
3668 
3669 		ieee80211_free_txskb(ar->hw, skb);
3670 	}
3671 }
3672 
3673 void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
3674 {
3675 	struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work);
3676 	struct sk_buff *skb;
3677 	int ret;
3678 
3679 	for (;;) {
3680 		skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
3681 		if (!skb)
3682 			break;
3683 
3684 		ret = ath10k_wmi_mgmt_tx(ar, skb);
3685 		if (ret) {
3686 			ath10k_warn(ar, "failed to transmit management frame via WMI: %d\n",
3687 				    ret);
3688 			ieee80211_free_txskb(ar->hw, skb);
3689 		}
3690 	}
3691 }
3692 
3693 static void ath10k_mac_txq_init(struct ieee80211_txq *txq)
3694 {
3695 	struct ath10k_txq *artxq;
3696 
3697 	if (!txq)
3698 		return;
3699 
3700 	artxq = (void *)txq->drv_priv;
3701 	INIT_LIST_HEAD(&artxq->list);
3702 }
3703 
3704 static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq)
3705 {
3706 	struct ath10k_txq *artxq;
3707 	struct ath10k_skb_cb *cb;
3708 	struct sk_buff *msdu;
3709 	int msdu_id;
3710 
3711 	if (!txq)
3712 		return;
3713 
3714 	artxq = (void *)txq->drv_priv;
3715 	spin_lock_bh(&ar->txqs_lock);
3716 	if (!list_empty(&artxq->list))
3717 		list_del_init(&artxq->list);
3718 	spin_unlock_bh(&ar->txqs_lock);
3719 
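	/* Frames still pending in HTT may reference this txq; clear the
	 * pointer so their tx completion doesn't touch a queue that is going
	 * away.
	 */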
3720 	spin_lock_bh(&ar->htt.tx_lock);
3721 	idr_for_each_entry(&ar->htt.pending_tx, msdu, msdu_id) {
3722 		cb = ATH10K_SKB_CB(msdu);
3723 		if (cb->txq == txq)
3724 			cb->txq = NULL;
3725 	}
3726 	spin_unlock_bh(&ar->htt.tx_lock);
3727 }
3728 
3729 struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar,
3730 					    u16 peer_id,
3731 					    u8 tid)
3732 {
3733 	struct ath10k_peer *peer;
3734 
3735 	lockdep_assert_held(&ar->data_lock);
3736 
3737 	peer = ar->peer_map[peer_id];
3738 	if (!peer)
3739 		return NULL;
3740 
3741 	if (peer->sta)
3742 		return peer->sta->txq[tid];
3743 	else if (peer->vif)
3744 		return peer->vif->txq;
3745 	else
3746 		return NULL;
3747 }
3748 
3749 static bool ath10k_mac_tx_can_push(struct ieee80211_hw *hw,
3750 				   struct ieee80211_txq *txq)
3751 {
3752 	struct ath10k *ar = hw->priv;
3753 	struct ath10k_txq *artxq = (void *)txq->drv_priv;
3754 
3755 	/* No need to get locks */
3756 
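	/* Pushing is allowed when firmware is in plain push mode, when the
	 * global number of pending HTT frames is below the push threshold, or
	 * when this queue still has room within its firmware quota. These are
	 * heuristic checks, so racy lockless reads are acceptable.
	 */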
3757 	if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH)
3758 		return true;
3759 
3760 	if (ar->htt.num_pending_tx < ar->htt.tx_q_state.num_push_allowed)
3761 		return true;
3762 
3763 	if (artxq->num_fw_queued < artxq->num_push_allowed)
3764 		return true;
3765 
3766 	return false;
3767 }
3768 
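/* Push a single frame from the given mac80211 txq to firmware. Returns the
 * length of the pushed frame on success (so callers can account for it),
 * -ENOENT if the queue turned out to be empty, or another negative errno on
 * failure.
 */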
3769 int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
3770 			   struct ieee80211_txq *txq)
3771 {
3772 	struct ath10k *ar = hw->priv;
3773 	struct ath10k_htt *htt = &ar->htt;
3774 	struct ath10k_txq *artxq = (void *)txq->drv_priv;
3775 	struct ieee80211_vif *vif = txq->vif;
3776 	struct ieee80211_sta *sta = txq->sta;
3777 	enum ath10k_hw_txrx_mode txmode;
3778 	enum ath10k_mac_tx_path txpath;
3779 	struct sk_buff *skb;
3780 	size_t skb_len;
3781 	int ret;
3782 
3783 	spin_lock_bh(&ar->htt.tx_lock);
3784 	ret = ath10k_htt_tx_inc_pending(htt);
3785 	spin_unlock_bh(&ar->htt.tx_lock);
3786 
3787 	if (ret)
3788 		return ret;
3789 
3790 	skb = ieee80211_tx_dequeue(hw, txq);
3791 	if (!skb) {
3792 		spin_lock_bh(&ar->htt.tx_lock);
3793 		ath10k_htt_tx_dec_pending(htt);
3794 		spin_unlock_bh(&ar->htt.tx_lock);
3795 
3796 		return -ENOENT;
3797 	}
3798 
3799 	ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb);
3800 
3801 	skb_len = skb->len;
3802 	txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
3803 	txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
3804 
3805 	ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
3806 	if (unlikely(ret)) {
3807 		ath10k_warn(ar, "failed to push frame: %d\n", ret);
3808 
3809 		spin_lock_bh(&ar->htt.tx_lock);
3810 		ath10k_htt_tx_dec_pending(htt);
3811 		spin_unlock_bh(&ar->htt.tx_lock);
3812 
3813 		return ret;
3814 	}
3815 
3816 	spin_lock_bh(&ar->htt.tx_lock);
3817 	artxq->num_fw_queued++;
3818 	spin_unlock_bh(&ar->htt.tx_lock);
3819 
3820 	return skb_len;
3821 }
3822 
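/* Service pending txqs in round-robin fashion. Each queue may push at most
 * 16 frames per pass so an aggressive sta/tid cannot starve the rest. Empty
 * queues (-ENOENT) are dropped from the rotation; other errors end the pass
 * early.
 */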
3823 void ath10k_mac_tx_push_pending(struct ath10k *ar)
3824 {
3825 	struct ieee80211_hw *hw = ar->hw;
3826 	struct ieee80211_txq *txq;
3827 	struct ath10k_txq *artxq;
3828 	struct ath10k_txq *last;
3829 	int ret;
3830 	int max;
3831 
3832 	if (ar->htt.num_pending_tx >= (ar->htt.max_num_pending_tx / 2))
3833 		return;
3834 
3835 	spin_lock_bh(&ar->txqs_lock);
3836 	rcu_read_lock();
3837 
3838 	last = list_last_entry(&ar->txqs, struct ath10k_txq, list);
3839 	while (!list_empty(&ar->txqs)) {
3840 		artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list);
3841 		txq = container_of((void *)artxq, struct ieee80211_txq,
3842 				   drv_priv);
3843 
3844 		/* Prevent aggressive sta/tid taking over tx queue */
3845 		max = 16;
3846 		ret = 0;
3847 		while (ath10k_mac_tx_can_push(hw, txq) && max--) {
3848 			ret = ath10k_mac_tx_push_txq(hw, txq);
3849 			if (ret < 0)
3850 				break;
3851 		}
3852 
3853 		list_del_init(&artxq->list);
3854 		if (ret != -ENOENT)
3855 			list_add_tail(&artxq->list, &ar->txqs);
3856 
3857 		ath10k_htt_tx_txq_update(hw, txq);
3858 
3859 		if (artxq == last || (ret < 0 && ret != -ENOENT))
3860 			break;
3861 	}
3862 
3863 	rcu_read_unlock();
3864 	spin_unlock_bh(&ar->txqs_lock);
3865 }
3866 
3867 /************/
3868 /* Scanning */
3869 /************/
3870 
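/* Complete an in-flight scan or remain-on-channel request and reset the scan
 * state back to idle. Caller must hold ar->data_lock; ath10k_scan_finish()
 * is the locked wrapper.
 */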
3871 void __ath10k_scan_finish(struct ath10k *ar)
3872 {
3873 	lockdep_assert_held(&ar->data_lock);
3874 
3875 	switch (ar->scan.state) {
3876 	case ATH10K_SCAN_IDLE:
3877 		break;
3878 	case ATH10K_SCAN_RUNNING:
3879 	case ATH10K_SCAN_ABORTING:
3880 		if (!ar->scan.is_roc) {
3881 			struct cfg80211_scan_info info = {
3882 				.aborted = (ar->scan.state ==
3883 					    ATH10K_SCAN_ABORTING),
3884 			};
3885 
3886 			ieee80211_scan_completed(ar->hw, &info);
3887 		} else if (ar->scan.roc_notify) {
3888 			ieee80211_remain_on_channel_expired(ar->hw);
3889 		}
3890 		/* fall through */
3891 	case ATH10K_SCAN_STARTING:
3892 		ar->scan.state = ATH10K_SCAN_IDLE;
3893 		ar->scan_channel = NULL;
3894 		ar->scan.roc_freq = 0;
3895 		ath10k_offchan_tx_purge(ar);
3896 		cancel_delayed_work(&ar->scan.timeout);
3897 		complete_all(&ar->scan.completed);
3898 		break;
3899 	}
3900 }
3901 
3902 void ath10k_scan_finish(struct ath10k *ar)
3903 {
3904 	spin_lock_bh(&ar->data_lock);
3905 	__ath10k_scan_finish(ar);
3906 	spin_unlock_bh(&ar->data_lock);
3907 }
3908 
3909 static int ath10k_scan_stop(struct ath10k *ar)
3910 {
3911 	struct wmi_stop_scan_arg arg = {
3912 		.req_id = 1, /* FIXME */
3913 		.req_type = WMI_SCAN_STOP_ONE,
3914 		.u.scan_id = ATH10K_SCAN_ID,
3915 	};
3916 	int ret;
3917 
3918 	lockdep_assert_held(&ar->conf_mutex);
3919 
3920 	ret = ath10k_wmi_stop_scan(ar, &arg);
3921 	if (ret) {
3922 		ath10k_warn(ar, "failed to stop wmi scan: %d\n", ret);
3923 		goto out;
3924 	}
3925 
3926 	ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ);
3927 	if (ret == 0) {
3928 		ath10k_warn(ar, "failed to receive scan abortion completion: timed out\n");
3929 		ret = -ETIMEDOUT;
3930 	} else if (ret > 0) {
3931 		ret = 0;
3932 	}
3933 
3934 out:
3935 	/* Scan state should be updated upon scan completion but in case
3936 	 * firmware fails to deliver the event (for whatever reason) it is
3937 	 * desirable to clean up the scan state anyway. Firmware may have
3938 	 * simply dropped the scan completion event delivery because the
3939 	 * transport pipe was overflowing with data and/or it can recover on
3940 	 * its own before the next scan request is submitted.
3941 	 */
3942 	spin_lock_bh(&ar->data_lock);
3943 	if (ar->scan.state != ATH10K_SCAN_IDLE)
3944 		__ath10k_scan_finish(ar);
3945 	spin_unlock_bh(&ar->data_lock);
3946 
3947 	return ret;
3948 }
3949 
3950 static void ath10k_scan_abort(struct ath10k *ar)
3951 {
3952 	int ret;
3953 
3954 	lockdep_assert_held(&ar->conf_mutex);
3955 
3956 	spin_lock_bh(&ar->data_lock);
3957 
3958 	switch (ar->scan.state) {
3959 	case ATH10K_SCAN_IDLE:
3960 		/* This can happen if timeout worker kicked in and called
3961 		 * abortion while scan completion was being processed.
3962 		 */
3963 		break;
3964 	case ATH10K_SCAN_STARTING:
3965 	case ATH10K_SCAN_ABORTING:
3966 		ath10k_warn(ar, "refusing scan abortion due to invalid scan state: %s (%d)\n",
3967 			    ath10k_scan_state_str(ar->scan.state),
3968 			    ar->scan.state);
3969 		break;
3970 	case ATH10K_SCAN_RUNNING:
3971 		ar->scan.state = ATH10K_SCAN_ABORTING;
3972 		spin_unlock_bh(&ar->data_lock);
3973 
3974 		ret = ath10k_scan_stop(ar);
3975 		if (ret)
3976 			ath10k_warn(ar, "failed to abort scan: %d\n", ret);
3977 
3978 		spin_lock_bh(&ar->data_lock);
3979 		break;
3980 	}
3981 
3982 	spin_unlock_bh(&ar->data_lock);
3983 }
3984 
3985 void ath10k_scan_timeout_work(struct work_struct *work)
3986 {
3987 	struct ath10k *ar = container_of(work, struct ath10k,
3988 					 scan.timeout.work);
3989 
3990 	mutex_lock(&ar->conf_mutex);
3991 	ath10k_scan_abort(ar);
3992 	mutex_unlock(&ar->conf_mutex);
3993 }
3994 
3995 static int ath10k_start_scan(struct ath10k *ar,
3996 			     const struct wmi_start_scan_arg *arg)
3997 {
3998 	int ret;
3999 
4000 	lockdep_assert_held(&ar->conf_mutex);
4001 
4002 	ret = ath10k_wmi_start_scan(ar, arg);
4003 	if (ret)
4004 		return ret;
4005 
4006 	ret = wait_for_completion_timeout(&ar->scan.started, 1 * HZ);
4007 	if (ret == 0) {
4008 		ret = ath10k_scan_stop(ar);
4009 		if (ret)
4010 			ath10k_warn(ar, "failed to stop scan: %d\n", ret);
4011 
4012 		return -ETIMEDOUT;
4013 	}
4014 
4015 	/* If we failed to start the scan, return error code at
4016 	 * this point.  This is probably due to some issue in the
4017 	 * firmware, but no need to wedge the driver due to that...
4018 	 */
4019 	spin_lock_bh(&ar->data_lock);
4020 	if (ar->scan.state == ATH10K_SCAN_IDLE) {
4021 		spin_unlock_bh(&ar->data_lock);
4022 		return -EINVAL;
4023 	}
4024 	spin_unlock_bh(&ar->data_lock);
4025 
4026 	return 0;
4027 }
4028 
4029 /**********************/
4030 /* mac80211 callbacks */
4031 /**********************/
4032 
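/* mac80211 .tx() callback. Classifies the frame into a tx mode/path and,
 * for HTT-bound frames, accounts for pending (mgmt) tx before handing the
 * skb to ath10k_mac_tx().
 */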
4033 static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
4034 			     struct ieee80211_tx_control *control,
4035 			     struct sk_buff *skb)
4036 {
4037 	struct ath10k *ar = hw->priv;
4038 	struct ath10k_htt *htt = &ar->htt;
4039 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
4040 	struct ieee80211_vif *vif = info->control.vif;
4041 	struct ieee80211_sta *sta = control->sta;
4042 	struct ieee80211_txq *txq = NULL;
4043 	struct ieee80211_hdr *hdr = (void *)skb->data;
4044 	enum ath10k_hw_txrx_mode txmode;
4045 	enum ath10k_mac_tx_path txpath;
4046 	bool is_htt;
4047 	bool is_mgmt;
4048 	bool is_presp;
4049 	int ret;
4050 
4051 	ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb);
4052 
4053 	txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
4054 	txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
4055 	is_htt = (txpath == ATH10K_MAC_TX_HTT ||
4056 		  txpath == ATH10K_MAC_TX_HTT_MGMT);
4057 	is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT);
4058 
4059 	if (is_htt) {
4060 		spin_lock_bh(&ar->htt.tx_lock);
4061 		is_presp = ieee80211_is_probe_resp(hdr->frame_control);
4062 
4063 		ret = ath10k_htt_tx_inc_pending(htt);
4064 		if (ret) {
4065 			ath10k_warn(ar, "failed to increase tx pending count: %d, dropping\n",
4066 				    ret);
4067 			spin_unlock_bh(&ar->htt.tx_lock);
4068 			ieee80211_free_txskb(ar->hw, skb);
4069 			return;
4070 		}
4071 
4072 		ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
4073 		if (ret) {
4074 			ath10k_dbg(ar, ATH10K_DBG_MAC, "failed to increase tx mgmt pending count: %d, dropping\n",
4075 				   ret);
4076 			ath10k_htt_tx_dec_pending(htt);
4077 			spin_unlock_bh(&ar->htt.tx_lock);
4078 			ieee80211_free_txskb(ar->hw, skb);
4079 			return;
4080 		}
4081 		spin_unlock_bh(&ar->htt.tx_lock);
4082 	}
4083 
4084 	ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
4085 	if (ret) {
4086 		ath10k_warn(ar, "failed to transmit frame: %d\n", ret);
4087 		if (is_htt) {
4088 			spin_lock_bh(&ar->htt.tx_lock);
4089 			ath10k_htt_tx_dec_pending(htt);
4090 			if (is_mgmt)
4091 				ath10k_htt_tx_mgmt_dec_pending(htt);
4092 			spin_unlock_bh(&ar->htt.tx_lock);
4093 		}
4094 		return;
4095 	}
4096 }
4097 
4098 static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
4099 					struct ieee80211_txq *txq)
4100 {
4101 	struct ath10k *ar = hw->priv;
4102 	struct ath10k_txq *artxq = (void *)txq->drv_priv;
4103 
4104 	spin_lock_bh(&ar->txqs_lock);
4105 	if (list_empty(&artxq->list))
4106 		list_add_tail(&artxq->list, &ar->txqs);
4107 	spin_unlock_bh(&ar->txqs_lock);
4108 
4109 	ath10k_mac_tx_push_pending(ar);
4110 	ath10k_htt_tx_txq_update(hw, txq);
4111 }
4112 
4113 /* Must not be called with conf_mutex held as workers can use that also. */
4114 void ath10k_drain_tx(struct ath10k *ar)
4115 {
4116 	/* make sure rcu-protected mac80211 tx path itself is drained */
4117 	synchronize_net();
4118 
4119 	ath10k_offchan_tx_purge(ar);
4120 	ath10k_mgmt_over_wmi_tx_purge(ar);
4121 
4122 	cancel_work_sync(&ar->offchan_tx_work);
4123 	cancel_work_sync(&ar->wmi_mgmt_tx_work);
4124 }
4125 
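/* Bring the device down: stop monitor and scan activity, flush peers, stop
 * the core and power down HIF. Caller must hold ar->conf_mutex.
 */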
4126 void ath10k_halt(struct ath10k *ar)
4127 {
4128 	struct ath10k_vif *arvif;
4129 
4130 	lockdep_assert_held(&ar->conf_mutex);
4131 
4132 	clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
4133 	ar->filter_flags = 0;
4134 	ar->monitor = false;
4135 	ar->monitor_arvif = NULL;
4136 
4137 	if (ar->monitor_started)
4138 		ath10k_monitor_stop(ar);
4139 
4140 	ar->monitor_started = false;
4141 	ar->tx_paused = 0;
4142 
4143 	ath10k_scan_finish(ar);
4144 	ath10k_peer_cleanup_all(ar);
4145 	ath10k_core_stop(ar);
4146 	ath10k_hif_power_down(ar);
4147 
4148 	spin_lock_bh(&ar->data_lock);
4149 	list_for_each_entry(arvif, &ar->arvifs, list)
4150 		ath10k_mac_vif_beacon_cleanup(arvif);
4151 	spin_unlock_bh(&ar->data_lock);
4152 }
4153 
4154 static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
4155 {
4156 	struct ath10k *ar = hw->priv;
4157 
4158 	mutex_lock(&ar->conf_mutex);
4159 
4160 	*tx_ant = ar->cfg_tx_chainmask;
4161 	*rx_ant = ar->cfg_rx_chainmask;
4162 
4163 	mutex_unlock(&ar->conf_mutex);
4164 
4165 	return 0;
4166 }
4167 
4168 static void ath10k_check_chain_mask(struct ath10k *ar, u32 cm, const char *dbg)
4169 {
4170 	/* It is not clear that allowing gaps in the chainmask
4171 	 * is helpful. It probably will not do what the user
4172 	 * is hoping for, so warn in that case.
4173 	 */
4174 	if (cm == 15 || cm == 7 || cm == 3 || cm == 1 || cm == 0)
4175 		return;
4176 
4177 	ath10k_warn(ar, "mac %s antenna chainmask may be invalid: 0x%x.  Suggested values: 15, 7, 3, 1 or 0.\n",
4178 		    dbg, cm);
4179 }
4180 
4181 static int ath10k_mac_get_vht_cap_bf_sts(struct ath10k *ar)
4182 {
4183 	int nsts = ar->vht_cap_info;
4184 
4185 	nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
4186 	nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
4187 
4188 	/* If the firmware does not report the number of supported space-time
4189 	 * streams to the host, assume it supports up to 4 BF STS and return
4190 	 * the VHT CAP encoding (nsts - 1).
4191 	 */
4192 	if (nsts == 0)
4193 		return 3;
4194 
4195 	return nsts;
4196 }
4197 
4198 static int ath10k_mac_get_vht_cap_bf_sound_dim(struct ath10k *ar)
4199 {
4200 	int sound_dim = ar->vht_cap_info;
4201 
4202 	sound_dim &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
4203 	sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
4204 
4205 	/* If the sounding dimension is not advertised by the firmware,
4206 	 * let's use a default value of 1
4207 	 */
4208 	if (sound_dim == 0)
4209 		return 1;
4210 
4211 	return sound_dim;
4212 }
4213 
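/* Build the VHT capabilities advertised to mac80211 from the firmware
 * reported vht_cap_info and the configured tx chainmask.
 */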
4214 static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
4215 {
4216 	struct ieee80211_sta_vht_cap vht_cap = {0};
4217 	u16 mcs_map;
4218 	u32 val;
4219 	int i;
4220 
4221 	vht_cap.vht_supported = 1;
4222 	vht_cap.cap = ar->vht_cap_info;
4223 
4224 	if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
4225 				IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
4226 		val = ath10k_mac_get_vht_cap_bf_sts(ar);
4227 		val <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
4228 		val &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
4229 
4230 		vht_cap.cap |= val;
4231 	}
4232 
4233 	if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
4234 				IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
4235 		val = ath10k_mac_get_vht_cap_bf_sound_dim(ar);
4236 		val <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
4237 		val &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
4238 
4239 		vht_cap.cap |= val;
4240 	}
4241 
4242 	mcs_map = 0;
4243 	for (i = 0; i < 8; i++) {
4244 		if ((i < ar->num_rf_chains) && (ar->cfg_tx_chainmask & BIT(i)))
4245 			mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
4246 		else
4247 			mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
4248 	}
4249 
4250 	if (ar->cfg_tx_chainmask <= 1)
4251 		vht_cap.cap &= ~IEEE80211_VHT_CAP_TXSTBC;
4252 
4253 	vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
4254 	vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
4255 
4256 	return vht_cap;
4257 }
4258 
4259 static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar)
4260 {
4261 	int i;
4262 	struct ieee80211_sta_ht_cap ht_cap = {0};
4263 
4264 	if (!(ar->ht_cap_info & WMI_HT_CAP_ENABLED))
4265 		return ht_cap;
4266 
4267 	ht_cap.ht_supported = 1;
4268 	ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
4269 	ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
4270 	ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
4271 	ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
4272 	ht_cap.cap |=
4273 		WLAN_HT_CAP_SM_PS_DISABLED << IEEE80211_HT_CAP_SM_PS_SHIFT;
4274 
4275 	if (ar->ht_cap_info & WMI_HT_CAP_HT20_SGI)
4276 		ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
4277 
4278 	if (ar->ht_cap_info & WMI_HT_CAP_HT40_SGI)
4279 		ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
4280 
4281 	if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) {
4282 		u32 smps;
4283 
4284 		smps   = WLAN_HT_CAP_SM_PS_DYNAMIC;
4285 		smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT;
4286 
4287 		ht_cap.cap |= smps;
4288 	}
4289 
4290 	if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC && (ar->cfg_tx_chainmask > 1))
4291 		ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
4292 
4293 	if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) {
4294 		u32 stbc;
4295 
4296 		stbc   = ar->ht_cap_info;
4297 		stbc  &= WMI_HT_CAP_RX_STBC;
4298 		stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT;
4299 		stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT;
4300 		stbc  &= IEEE80211_HT_CAP_RX_STBC;
4301 
4302 		ht_cap.cap |= stbc;
4303 	}
4304 
4305 	if (ar->ht_cap_info & WMI_HT_CAP_LDPC)
4306 		ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
4307 
4308 	if (ar->ht_cap_info & WMI_HT_CAP_L_SIG_TXOP_PROT)
4309 		ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT;
4310 
4311 	/* max AMSDU is implicitly taken from vht_cap_info */
4312 	if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK)
4313 		ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
4314 
4315 	for (i = 0; i < ar->num_rf_chains; i++) {
4316 		if (ar->cfg_rx_chainmask & BIT(i))
4317 			ht_cap.mcs.rx_mask[i] = 0xFF;
4318 	}
4319 
4320 	ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
4321 
4322 	return ht_cap;
4323 }
4324 
4325 static void ath10k_mac_setup_ht_vht_cap(struct ath10k *ar)
4326 {
4327 	struct ieee80211_supported_band *band;
4328 	struct ieee80211_sta_vht_cap vht_cap;
4329 	struct ieee80211_sta_ht_cap ht_cap;
4330 
4331 	ht_cap = ath10k_get_ht_cap(ar);
4332 	vht_cap = ath10k_create_vht_cap(ar);
4333 
4334 	if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
4335 		band = &ar->mac.sbands[NL80211_BAND_2GHZ];
4336 		band->ht_cap = ht_cap;
4337 	}
4338 	if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
4339 		band = &ar->mac.sbands[NL80211_BAND_5GHZ];
4340 		band->ht_cap = ht_cap;
4341 		band->vht_cap = vht_cap;
4342 	}
4343 }
4344 
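/* Apply tx/rx chainmasks. If the device is up the masks are pushed to
 * firmware immediately and the HT/VHT capabilities are rebuilt to match.
 * Caller must hold ar->conf_mutex.
 */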
4345 static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant)
4346 {
4347 	int ret;
4348 
4349 	lockdep_assert_held(&ar->conf_mutex);
4350 
4351 	ath10k_check_chain_mask(ar, tx_ant, "tx");
4352 	ath10k_check_chain_mask(ar, rx_ant, "rx");
4353 
4354 	ar->cfg_tx_chainmask = tx_ant;
4355 	ar->cfg_rx_chainmask = rx_ant;
4356 
4357 	if ((ar->state != ATH10K_STATE_ON) &&
4358 	    (ar->state != ATH10K_STATE_RESTARTED))
4359 		return 0;
4360 
4361 	ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->tx_chain_mask,
4362 					tx_ant);
4363 	if (ret) {
4364 		ath10k_warn(ar, "failed to set tx-chainmask: %d, req 0x%x\n",
4365 			    ret, tx_ant);
4366 		return ret;
4367 	}
4368 
4369 	ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rx_chain_mask,
4370 					rx_ant);
4371 	if (ret) {
4372 		ath10k_warn(ar, "failed to set rx-chainmask: %d, req 0x%x\n",
4373 			    ret, rx_ant);
4374 		return ret;
4375 	}
4376 
4377 	/* Reload HT/VHT capability */
4378 	ath10k_mac_setup_ht_vht_cap(ar);
4379 
4380 	return 0;
4381 }
4382 
4383 static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
4384 {
4385 	struct ath10k *ar = hw->priv;
4386 	int ret;
4387 
4388 	mutex_lock(&ar->conf_mutex);
4389 	ret = __ath10k_set_antenna(ar, tx_ant, rx_ant);
4390 	mutex_unlock(&ar->conf_mutex);
4391 	return ret;
4392 }
4393 
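/* mac80211 .start() callback: powers up HIF, boots the firmware and applies
 * the baseline pdev parameters (PMF QoS, dynamic BW, ANI, coex, etc.).
 */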
4394 static int ath10k_start(struct ieee80211_hw *hw)
4395 {
4396 	struct ath10k *ar = hw->priv;
4397 	u32 param;
4398 	int ret = 0;
4399 
4400 	/*
4401 	 * This makes sense only when restarting hw. It is harmless to call
4402 	 * unconditionally. This is necessary to make sure no HTT/WMI tx
4403 	 * commands will be submitted while restarting.
4404 	 */
4405 	ath10k_drain_tx(ar);
4406 
4407 	mutex_lock(&ar->conf_mutex);
4408 
4409 	switch (ar->state) {
4410 	case ATH10K_STATE_OFF:
4411 		ar->state = ATH10K_STATE_ON;
4412 		break;
4413 	case ATH10K_STATE_RESTARTING:
4414 		ath10k_halt(ar);
4415 		ar->state = ATH10K_STATE_RESTARTED;
4416 		break;
4417 	case ATH10K_STATE_ON:
4418 	case ATH10K_STATE_RESTARTED:
4419 	case ATH10K_STATE_WEDGED:
4420 		WARN_ON(1);
4421 		ret = -EINVAL;
4422 		goto err;
4423 	case ATH10K_STATE_UTF:
4424 		ret = -EBUSY;
4425 		goto err;
4426 	}
4427 
4428 	ret = ath10k_hif_power_up(ar);
4429 	if (ret) {
4430 		ath10k_err(ar, "Could not init hif: %d\n", ret);
4431 		goto err_off;
4432 	}
4433 
4434 	ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL,
4435 				&ar->normal_mode_fw);
4436 	if (ret) {
4437 		ath10k_err(ar, "Could not init core: %d\n", ret);
4438 		goto err_power_down;
4439 	}
4440 
4441 	param = ar->wmi.pdev_param->pmf_qos;
4442 	ret = ath10k_wmi_pdev_set_param(ar, param, 1);
4443 	if (ret) {
4444 		ath10k_warn(ar, "failed to enable PMF QOS: %d\n", ret);
4445 		goto err_core_stop;
4446 	}
4447 
4448 	param = ar->wmi.pdev_param->dynamic_bw;
4449 	ret = ath10k_wmi_pdev_set_param(ar, param, 1);
4450 	if (ret) {
4451 		ath10k_warn(ar, "failed to enable dynamic BW: %d\n", ret);
4452 		goto err_core_stop;
4453 	}
4454 
4455 	if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
4456 		ret = ath10k_wmi_adaptive_qcs(ar, true);
4457 		if (ret) {
4458 			ath10k_warn(ar, "failed to enable adaptive qcs: %d\n",
4459 				    ret);
4460 			goto err_core_stop;
4461 		}
4462 	}
4463 
4464 	if (test_bit(WMI_SERVICE_BURST, ar->wmi.svc_map)) {
4465 		param = ar->wmi.pdev_param->burst_enable;
4466 		ret = ath10k_wmi_pdev_set_param(ar, param, 0);
4467 		if (ret) {
4468 			ath10k_warn(ar, "failed to disable burst: %d\n", ret);
4469 			goto err_core_stop;
4470 		}
4471 	}
4472 
4473 	__ath10k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask);
4474 
4475 	/*
4476 	 * By default the FW sets the ARP frame AC to voice (6). In that case
4477 	 * the ARP exchange does not work properly with a UAPSD-enabled AP:
4478 	 * ARP requests arriving with access category 0 are processed by the
4479 	 * network stack and answered with access category 0, but the FW
4480 	 * changes the access category to 6. Setting the ARP frame access
4481 	 * category to best effort (0) solves this problem.
4482 	 */
4483 
4484 	param = ar->wmi.pdev_param->arp_ac_override;
4485 	ret = ath10k_wmi_pdev_set_param(ar, param, 0);
4486 	if (ret) {
4487 		ath10k_warn(ar, "failed to set arp ac override parameter: %d\n",
4488 			    ret);
4489 		goto err_core_stop;
4490 	}
4491 
4492 	if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA,
4493 		     ar->running_fw->fw_file.fw_features)) {
4494 		ret = ath10k_wmi_pdev_enable_adaptive_cca(ar, 1,
4495 							  WMI_CCA_DETECT_LEVEL_AUTO,
4496 							  WMI_CCA_DETECT_MARGIN_AUTO);
4497 		if (ret) {
4498 			ath10k_warn(ar, "failed to enable adaptive cca: %d\n",
4499 				    ret);
4500 			goto err_core_stop;
4501 		}
4502 	}
4503 
4504 	param = ar->wmi.pdev_param->ani_enable;
4505 	ret = ath10k_wmi_pdev_set_param(ar, param, 1);
4506 	if (ret) {
4507 		ath10k_warn(ar, "failed to enable ani by default: %d\n",
4508 			    ret);
4509 		goto err_core_stop;
4510 	}
4511 
4512 	ar->ani_enabled = true;
4513 
4514 	if (ath10k_peer_stats_enabled(ar)) {
4515 		param = ar->wmi.pdev_param->peer_stats_update_period;
4516 		ret = ath10k_wmi_pdev_set_param(ar, param,
4517 						PEER_DEFAULT_STATS_UPDATE_PERIOD);
4518 		if (ret) {
4519 			ath10k_warn(ar,
4520 				    "failed to set peer stats period : %d\n",
4521 				    ret);
4522 			goto err_core_stop;
4523 		}
4524 	}
4525 
4526 	param = ar->wmi.pdev_param->enable_btcoex;
4527 	if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) &&
4528 	    test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM,
4529 		     ar->running_fw->fw_file.fw_features)) {
4530 		ret = ath10k_wmi_pdev_set_param(ar, param, 0);
4531 		if (ret) {
4532 			ath10k_warn(ar,
4533 				    "failed to set btcoex param: %d\n", ret);
4534 			goto err_core_stop;
4535 		}
4536 		clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags);
4537 	}
4538 
4539 	ar->num_started_vdevs = 0;
4540 	ath10k_regd_update(ar);
4541 
4542 	ath10k_spectral_start(ar);
4543 	ath10k_thermal_set_throttling(ar);
4544 
4545 	mutex_unlock(&ar->conf_mutex);
4546 	return 0;
4547 
4548 err_core_stop:
4549 	ath10k_core_stop(ar);
4550 
4551 err_power_down:
4552 	ath10k_hif_power_down(ar);
4553 
4554 err_off:
4555 	ar->state = ATH10K_STATE_OFF;
4556 
4557 err:
4558 	mutex_unlock(&ar->conf_mutex);
4559 	return ret;
4560 }
4561 
4562 static void ath10k_stop(struct ieee80211_hw *hw)
4563 {
4564 	struct ath10k *ar = hw->priv;
4565 
4566 	ath10k_drain_tx(ar);
4567 
4568 	mutex_lock(&ar->conf_mutex);
4569 	if (ar->state != ATH10K_STATE_OFF) {
4570 		ath10k_halt(ar);
4571 		ar->state = ATH10K_STATE_OFF;
4572 	}
4573 	mutex_unlock(&ar->conf_mutex);
4574 
4575 	cancel_delayed_work_sync(&ar->scan.timeout);
4576 	cancel_work_sync(&ar->restart_work);
4577 }
4578 
4579 static int ath10k_config_ps(struct ath10k *ar)
4580 {
4581 	struct ath10k_vif *arvif;
4582 	int ret = 0;
4583 
4584 	lockdep_assert_held(&ar->conf_mutex);
4585 
4586 	list_for_each_entry(arvif, &ar->arvifs, list) {
4587 		ret = ath10k_mac_vif_setup_ps(arvif);
4588 		if (ret) {
4589 			ath10k_warn(ar, "failed to setup powersave: %d\n", ret);
4590 			break;
4591 		}
4592 	}
4593 
4594 	return ret;
4595 }
4596 
4597 static int ath10k_mac_txpower_setup(struct ath10k *ar, int txpower)
4598 {
4599 	int ret;
4600 	u32 param;
4601 
4602 	lockdep_assert_held(&ar->conf_mutex);
4603 
4604 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac txpower %d\n", txpower);
4605 
4606 	param = ar->wmi.pdev_param->txpower_limit2g;
4607 	ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2);
4608 	if (ret) {
4609 		ath10k_warn(ar, "failed to set 2g txpower %d: %d\n",
4610 			    txpower, ret);
4611 		return ret;
4612 	}
4613 
4614 	param = ar->wmi.pdev_param->txpower_limit5g;
4615 	ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2);
4616 	if (ret) {
4617 		ath10k_warn(ar, "failed to set 5g txpower %d: %d\n",
4618 			    txpower, ret);
4619 		return ret;
4620 	}
4621 
4622 	return 0;
4623 }
4624 
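/* Firmware exposes a single pdev-wide txpower limit, so program the minimum
 * of all vif txpower requests.
 */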
4625 static int ath10k_mac_txpower_recalc(struct ath10k *ar)
4626 {
4627 	struct ath10k_vif *arvif;
4628 	int ret, txpower = -1;
4629 
4630 	lockdep_assert_held(&ar->conf_mutex);
4631 
4632 	list_for_each_entry(arvif, &ar->arvifs, list) {
4633 		WARN_ON(arvif->txpower < 0);
4634 
4635 		if (txpower == -1)
4636 			txpower = arvif->txpower;
4637 		else
4638 			txpower = min(txpower, arvif->txpower);
4639 	}
4640 
4641 	if (WARN_ON(txpower == -1))
4642 		return -EINVAL;
4643 
4644 	ret = ath10k_mac_txpower_setup(ar, txpower);
4645 	if (ret) {
4646 		ath10k_warn(ar, "failed to setup tx power %d: %d\n",
4647 			    txpower, ret);
4648 		return ret;
4649 	}
4650 
4651 	return 0;
4652 }
4653 
4654 static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
4655 {
4656 	struct ath10k *ar = hw->priv;
4657 	struct ieee80211_conf *conf = &hw->conf;
4658 	int ret = 0;
4659 
4660 	mutex_lock(&ar->conf_mutex);
4661 
4662 	if (changed & IEEE80211_CONF_CHANGE_PS)
4663 		ath10k_config_ps(ar);
4664 
4665 	if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
4666 		ar->monitor = conf->flags & IEEE80211_CONF_MONITOR;
4667 		ret = ath10k_monitor_recalc(ar);
4668 		if (ret)
4669 			ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
4670 	}
4671 
4672 	mutex_unlock(&ar->conf_mutex);
4673 	return ret;
4674 }
4675 
4676 static u32 get_nss_from_chainmask(u16 chain_mask)
4677 {
4678 	if ((chain_mask & 0xf) == 0xf)
4679 		return 4;
4680 	else if ((chain_mask & 0x7) == 0x7)
4681 		return 3;
4682 	else if ((chain_mask & 0x3) == 0x3)
4683 		return 2;
4684 	return 1;
4685 }
4686 
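/* Program SU/MU beamformer/beamformee vdev parameters. Only needed for
 * firmware that expects TXBF configuration before association.
 */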
4687 static int ath10k_mac_set_txbf_conf(struct ath10k_vif *arvif)
4688 {
4689 	u32 value = 0;
4690 	struct ath10k *ar = arvif->ar;
4691 	int nsts;
4692 	int sound_dim;
4693 
4694 	if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_BEFORE_ASSOC)
4695 		return 0;
4696 
4697 	nsts = ath10k_mac_get_vht_cap_bf_sts(ar);
4698 	if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
4699 				IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE))
4700 		value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET);
4701 
4702 	sound_dim = ath10k_mac_get_vht_cap_bf_sound_dim(ar);
4703 	if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
4704 				IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))
4705 		value |= SM(sound_dim, WMI_BF_SOUND_DIM_OFFSET);
4706 
4707 	if (!value)
4708 		return 0;
4709 
4710 	if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
4711 		value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
4712 
4713 	if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
4714 		value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFER |
4715 			  WMI_VDEV_PARAM_TXBF_SU_TX_BFER);
4716 
4717 	if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
4718 		value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
4719 
4720 	if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
4721 		value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFEE |
4722 			  WMI_VDEV_PARAM_TXBF_SU_TX_BFEE);
4723 
4724 	return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
4725 					 ar->wmi.vdev_param->txbf, value);
4726 }
4727 
4728 /*
4729  * TODO:
4730  * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE,
4731  * because we will send mgmt frames without CCK. This requirement
4732  * for P2P_FIND/GO_NEG should be handled by checking CCK flag
4733  * in the TX packet.
4734  */
4735 static int ath10k_add_interface(struct ieee80211_hw *hw,
4736 				struct ieee80211_vif *vif)
4737 {
4738 	struct ath10k *ar = hw->priv;
4739 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
4740 	struct ath10k_peer *peer;
4741 	enum wmi_sta_powersave_param param;
4742 	int ret = 0;
4743 	u32 value;
4744 	int bit;
4745 	int i;
4746 	u32 vdev_param;
4747 
4748 	vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
4749 
4750 	mutex_lock(&ar->conf_mutex);
4751 
4752 	memset(arvif, 0, sizeof(*arvif));
4753 	ath10k_mac_txq_init(vif->txq);
4754 
4755 	arvif->ar = ar;
4756 	arvif->vif = vif;
4757 
4758 	INIT_LIST_HEAD(&arvif->list);
4759 	INIT_WORK(&arvif->ap_csa_work, ath10k_mac_vif_ap_csa_work);
4760 	INIT_DELAYED_WORK(&arvif->connection_loss_work,
4761 			  ath10k_mac_vif_sta_connection_loss_work);
4762 
4763 	for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
4764 		arvif->bitrate_mask.control[i].legacy = 0xffffffff;
4765 		memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
4766 		       sizeof(arvif->bitrate_mask.control[i].ht_mcs));
4767 		memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
4768 		       sizeof(arvif->bitrate_mask.control[i].vht_mcs));
4769 	}
4770 
4771 	if (ar->num_peers >= ar->max_num_peers) {
4772 		ath10k_warn(ar, "refusing vdev creation due to insufficient peer entry resources in firmware\n");
4773 		ret = -ENOBUFS;
4774 		goto err;
4775 	}
4776 
4777 	if (ar->free_vdev_map == 0) {
4778 		ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n");
4779 		ret = -EBUSY;
4780 		goto err;
4781 	}
4782 	bit = __ffs64(ar->free_vdev_map);
4783 
4784 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac create vdev %i map %llx\n",
4785 		   bit, ar->free_vdev_map);
4786 
4787 	arvif->vdev_id = bit;
4788 	arvif->vdev_subtype =
4789 		ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_NONE);
4790 
4791 	switch (vif->type) {
4792 	case NL80211_IFTYPE_P2P_DEVICE:
4793 		arvif->vdev_type = WMI_VDEV_TYPE_STA;
4794 		arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
4795 					(ar, WMI_VDEV_SUBTYPE_P2P_DEVICE);
4796 		break;
4797 	case NL80211_IFTYPE_UNSPECIFIED:
4798 	case NL80211_IFTYPE_STATION:
4799 		arvif->vdev_type = WMI_VDEV_TYPE_STA;
4800 		if (vif->p2p)
4801 			arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
4802 					(ar, WMI_VDEV_SUBTYPE_P2P_CLIENT);
4803 		break;
4804 	case NL80211_IFTYPE_ADHOC:
4805 		arvif->vdev_type = WMI_VDEV_TYPE_IBSS;
4806 		break;
4807 	case NL80211_IFTYPE_MESH_POINT:
4808 		if (test_bit(WMI_SERVICE_MESH_11S, ar->wmi.svc_map)) {
4809 			arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
4810 						(ar, WMI_VDEV_SUBTYPE_MESH_11S);
4811 		} else if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
4812 			ret = -EINVAL;
4813 			ath10k_warn(ar, "must load driver with rawmode=1 to add mesh interfaces\n");
4814 			goto err;
4815 		}
4816 		arvif->vdev_type = WMI_VDEV_TYPE_AP;
4817 		break;
4818 	case NL80211_IFTYPE_AP:
4819 		arvif->vdev_type = WMI_VDEV_TYPE_AP;
4820 
4821 		if (vif->p2p)
4822 			arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
4823 						(ar, WMI_VDEV_SUBTYPE_P2P_GO);
4824 		break;
4825 	case NL80211_IFTYPE_MONITOR:
4826 		arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
4827 		break;
4828 	default:
4829 		WARN_ON(1);
4830 		break;
4831 	}
4832 
4833 	/* Using vdev_id as the queue number makes it very easy to do per-vif
4834 	 * tx queue locking. This shouldn't wrap due to interface combinations
4835 	 * but do a modulo for correctness' sake and to prevent using offchannel
4836 	 * tx queues for regular vif tx.
4837 	 */
4838 	vif->cab_queue = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);
4839 	for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++)
4840 		vif->hw_queue[i] = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);
4841 
4842 	/* Some firmware revisions don't wait for beacon tx completion before
4843 	 * sending another SWBA event. This could lead to hardware using old
4844 	 * (freed) beacon data in some cases, e.g. tx credit starvation
4845 	 * combined with missed TBTT. This is very very rare.
4846 	 *
4847 	 * On non-IOMMU-enabled hosts this could be a security issue because
4848 	 * the hw could beacon some random data over the air. On IOMMU-enabled
4849 	 * hosts DMAR faults would occur in most cases and the target device
4850 	 * would crash.
4851 	 *
4852 	 * Since there are no beacon tx completions (implicit or explicit)
4853 	 * propagated to the host, the only workaround for this is to allocate a
4854 	 * DMA-coherent buffer for the lifetime of a vif and use it for all
4855 	 * beacon tx commands. Worst case for this approach is some beacons may
4856 	 * become corrupted, e.g. have garbled IEs or out-of-date TIM bitmap.
4857 	 */
4858 	if (vif->type == NL80211_IFTYPE_ADHOC ||
4859 	    vif->type == NL80211_IFTYPE_MESH_POINT ||
4860 	    vif->type == NL80211_IFTYPE_AP) {
4861 		arvif->beacon_buf = dma_zalloc_coherent(ar->dev,
4862 							IEEE80211_MAX_FRAME_LEN,
4863 							&arvif->beacon_paddr,
4864 							GFP_ATOMIC);
4865 		if (!arvif->beacon_buf) {
4866 			ret = -ENOMEM;
4867 			ath10k_warn(ar, "failed to allocate beacon buffer: %d\n",
4868 				    ret);
4869 			goto err;
4870 		}
4871 	}
4872 	if (test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags))
4873 		arvif->nohwcrypt = true;
4874 
4875 	if (arvif->nohwcrypt &&
4876 	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
		ret = -EINVAL;
4877 		ath10k_warn(ar, "cryptmode module param needed for sw crypto\n");
4878 		goto err;
4879 	}
4880 
4881 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n",
4882 		   arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype,
4883 		   arvif->beacon_buf ? "single-buf" : "per-skb");
4884 
4885 	ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
4886 				     arvif->vdev_subtype, vif->addr);
4887 	if (ret) {
4888 		ath10k_warn(ar, "failed to create WMI vdev %i: %d\n",
4889 			    arvif->vdev_id, ret);
4890 		goto err;
4891 	}
4892 
4893 	ar->free_vdev_map &= ~(1LL << arvif->vdev_id);
4894 	list_add(&arvif->list, &ar->arvifs);
4895 
4896 	/* It makes no sense to have firmware do keepalives. mac80211 already
4897 	 * takes care of this with idle connection polling.
4898 	 */
4899 	ret = ath10k_mac_vif_disable_keepalive(arvif);
4900 	if (ret) {
4901 		ath10k_warn(ar, "failed to disable keepalive on vdev %i: %d\n",
4902 			    arvif->vdev_id, ret);
4903 		goto err_vdev_delete;
4904 	}
4905 
4906 	arvif->def_wep_key_idx = -1;
4907 
4908 	vdev_param = ar->wmi.vdev_param->tx_encap_type;
4909 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
4910 					ATH10K_HW_TXRX_NATIVE_WIFI);
4911 	/* 10.X firmware does not support this VDEV parameter. Do not warn */
4912 	if (ret && ret != -EOPNOTSUPP) {
4913 		ath10k_warn(ar, "failed to set vdev %i TX encapsulation: %d\n",
4914 			    arvif->vdev_id, ret);
4915 		goto err_vdev_delete;
4916 	}
4917 
4918 	/* Configuring the number of spatial streams for a monitor interface
4919 	 * causes a target assert in qca9888 and qca6174.
4920 	 */
4921 	if (ar->cfg_tx_chainmask && (vif->type != NL80211_IFTYPE_MONITOR)) {
4922 		u16 nss = get_nss_from_chainmask(ar->cfg_tx_chainmask);
4923 
4924 		vdev_param = ar->wmi.vdev_param->nss;
4925 		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
4926 						nss);
4927 		if (ret) {
4928 			ath10k_warn(ar, "failed to set vdev %i chainmask 0x%x, nss %i: %d\n",
4929 				    arvif->vdev_id, ar->cfg_tx_chainmask, nss,
4930 				    ret);
4931 			goto err_vdev_delete;
4932 		}
4933 	}
4934 
4935 	if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
4936 	    arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
4937 		ret = ath10k_peer_create(ar, vif, NULL, arvif->vdev_id,
4938 					 vif->addr, WMI_PEER_TYPE_DEFAULT);
4939 		if (ret) {
4940 			ath10k_warn(ar, "failed to create vdev %i peer for AP/IBSS: %d\n",
4941 				    arvif->vdev_id, ret);
4942 			goto err_vdev_delete;
4943 		}
4944 
4945 		spin_lock_bh(&ar->data_lock);
4946 
4947 		peer = ath10k_peer_find(ar, arvif->vdev_id, vif->addr);
4948 		if (!peer) {
4949 			ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n",
4950 				    vif->addr, arvif->vdev_id);
4951 			spin_unlock_bh(&ar->data_lock);
4952 			ret = -ENOENT;
4953 			goto err_peer_delete;
4954 		}
4955 
4956 		arvif->peer_id = find_first_bit(peer->peer_ids,
4957 						ATH10K_MAX_NUM_PEER_IDS);
4958 
4959 		spin_unlock_bh(&ar->data_lock);
4960 	} else {
4961 		arvif->peer_id = HTT_INVALID_PEERID;
4962 	}
4963 
4964 	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
4965 		ret = ath10k_mac_set_kickout(arvif);
4966 		if (ret) {
4967 			ath10k_warn(ar, "failed to set vdev %i kickout parameters: %d\n",
4968 				    arvif->vdev_id, ret);
4969 			goto err_peer_delete;
4970 		}
4971 	}
4972 
4973 	if (arvif->vdev_type == WMI_VDEV_TYPE_STA) {
4974 		param = WMI_STA_PS_PARAM_RX_WAKE_POLICY;
4975 		value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
4976 		ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
4977 						  param, value);
4978 		if (ret) {
4979 			ath10k_warn(ar, "failed to set vdev %i RX wake policy: %d\n",
4980 				    arvif->vdev_id, ret);
4981 			goto err_peer_delete;
4982 		}
4983 
4984 		ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif);
4985 		if (ret) {
4986 			ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n",
4987 				    arvif->vdev_id, ret);
4988 			goto err_peer_delete;
4989 		}
4990 
4991 		ret = ath10k_mac_vif_recalc_ps_poll_count(arvif);
4992 		if (ret) {
4993 			ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n",
4994 				    arvif->vdev_id, ret);
4995 			goto err_peer_delete;
4996 		}
4997 	}
4998 
4999 	ret = ath10k_mac_set_txbf_conf(arvif);
5000 	if (ret) {
5001 		ath10k_warn(ar, "failed to set txbf for vdev %d: %d\n",
5002 			    arvif->vdev_id, ret);
5003 		goto err_peer_delete;
5004 	}
5005 
5006 	ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
5007 	if (ret) {
5008 		ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
5009 			    arvif->vdev_id, ret);
5010 		goto err_peer_delete;
5011 	}
5012 
5013 	arvif->txpower = vif->bss_conf.txpower;
5014 	ret = ath10k_mac_txpower_recalc(ar);
5015 	if (ret) {
5016 		ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
5017 		goto err_peer_delete;
5018 	}
5019 
5020 	if (vif->type == NL80211_IFTYPE_MONITOR) {
5021 		ar->monitor_arvif = arvif;
5022 		ret = ath10k_monitor_recalc(ar);
5023 		if (ret) {
5024 			ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
5025 			goto err_peer_delete;
5026 		}
5027 	}
5028 
5029 	spin_lock_bh(&ar->htt.tx_lock);
5030 	if (!ar->tx_paused)
5031 		ieee80211_wake_queue(ar->hw, arvif->vdev_id);
5032 	spin_unlock_bh(&ar->htt.tx_lock);
5033 
5034 	mutex_unlock(&ar->conf_mutex);
5035 	return 0;
5036 
5037 err_peer_delete:
5038 	if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
5039 	    arvif->vdev_type == WMI_VDEV_TYPE_IBSS)
5040 		ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr);
5041 
5042 err_vdev_delete:
5043 	ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
5044 	ar->free_vdev_map |= 1LL << arvif->vdev_id;
5045 	list_del(&arvif->list);
5046 
5047 err:
5048 	if (arvif->beacon_buf) {
5049 		dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
5050 				  arvif->beacon_buf, arvif->beacon_paddr);
5051 		arvif->beacon_buf = NULL;
5052 	}
5053 
5054 	mutex_unlock(&ar->conf_mutex);
5055 
5056 	return ret;
5057 }
5058 
5059 static void ath10k_mac_vif_tx_unlock_all(struct ath10k_vif *arvif)
5060 {
5061 	int i;
5062 
5063 	for (i = 0; i < BITS_PER_LONG; i++)
5064 		ath10k_mac_vif_tx_unlock(arvif, i);
5065 }
5066 
5067 static void ath10k_remove_interface(struct ieee80211_hw *hw,
5068 				    struct ieee80211_vif *vif)
5069 {
5070 	struct ath10k *ar = hw->priv;
5071 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5072 	struct ath10k_peer *peer;
5073 	int ret;
5074 	int i;
5075 
5076 	cancel_work_sync(&arvif->ap_csa_work);
5077 	cancel_delayed_work_sync(&arvif->connection_loss_work);
5078 
5079 	mutex_lock(&ar->conf_mutex);
5080 
5081 	spin_lock_bh(&ar->data_lock);
5082 	ath10k_mac_vif_beacon_cleanup(arvif);
5083 	spin_unlock_bh(&ar->data_lock);
5084 
5085 	ret = ath10k_spectral_vif_stop(arvif);
5086 	if (ret)
5087 		ath10k_warn(ar, "failed to stop spectral for vdev %i: %d\n",
5088 			    arvif->vdev_id, ret);
5089 
5090 	ar->free_vdev_map |= 1LL << arvif->vdev_id;
5091 	list_del(&arvif->list);
5092 
5093 	if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
5094 	    arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
5095 		ret = ath10k_wmi_peer_delete(arvif->ar, arvif->vdev_id,
5096 					     vif->addr);
5097 		if (ret)
5098 			ath10k_warn(ar, "failed to submit AP/IBSS self-peer removal on vdev %i: %d\n",
5099 				    arvif->vdev_id, ret);
5100 
5101 		kfree(arvif->u.ap.noa_data);
5102 	}
5103 
5104 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n",
5105 		   arvif->vdev_id);
5106 
5107 	ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
5108 	if (ret)
5109 		ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n",
5110 			    arvif->vdev_id, ret);
5111 
5112 	/* Some firmware revisions don't notify host about self-peer removal
5113 	 * until after associated vdev is deleted.
5114 	 */
5115 	if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
5116 	    arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
5117 		ret = ath10k_wait_for_peer_deleted(ar, arvif->vdev_id,
5118 						   vif->addr);
5119 		if (ret)
5120 			ath10k_warn(ar, "failed to remove AP self-peer on vdev %i: %d\n",
5121 				    arvif->vdev_id, ret);
5122 
5123 		spin_lock_bh(&ar->data_lock);
5124 		ar->num_peers--;
5125 		spin_unlock_bh(&ar->data_lock);
5126 	}
5127 
5128 	spin_lock_bh(&ar->data_lock);
5129 	for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
5130 		peer = ar->peer_map[i];
5131 		if (!peer)
5132 			continue;
5133 
5134 		if (peer->vif == vif) {
5135 			ath10k_warn(ar, "found vif peer %pM entry on vdev %i after it was supposedly removed\n",
5136 				    vif->addr, arvif->vdev_id);
5137 			peer->vif = NULL;
5138 		}
5139 	}
5140 	spin_unlock_bh(&ar->data_lock);
5141 
5142 	ath10k_peer_cleanup(ar, arvif->vdev_id);
5143 	ath10k_mac_txq_unref(ar, vif->txq);
5144 
5145 	if (vif->type == NL80211_IFTYPE_MONITOR) {
5146 		ar->monitor_arvif = NULL;
5147 		ret = ath10k_monitor_recalc(ar);
5148 		if (ret)
5149 			ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
5150 	}
5151 
5152 	spin_lock_bh(&ar->htt.tx_lock);
5153 	ath10k_mac_vif_tx_unlock_all(arvif);
5154 	spin_unlock_bh(&ar->htt.tx_lock);
5155 
5156 	ath10k_mac_txq_unref(ar, vif->txq);
5157 
5158 	mutex_unlock(&ar->conf_mutex);
5159 }
5160 
5161 /*
5162  * FIXME: Has to be verified.
5163  */
5164 #define SUPPORTED_FILTERS			\
5165 	(FIF_ALLMULTI |				\
5166 	FIF_CONTROL |				\
5167 	FIF_PSPOLL |				\
5168 	FIF_OTHER_BSS |				\
5169 	FIF_BCN_PRBRESP_PROMISC |		\
5170 	FIF_PROBE_REQ |				\
5171 	FIF_FCSFAIL)
5172 
5173 static void ath10k_configure_filter(struct ieee80211_hw *hw,
5174 				    unsigned int changed_flags,
5175 				    unsigned int *total_flags,
5176 				    u64 multicast)
5177 {
5178 	struct ath10k *ar = hw->priv;
5179 	int ret;
5180 
5181 	mutex_lock(&ar->conf_mutex);
5182 
5183 	changed_flags &= SUPPORTED_FILTERS;
5184 	*total_flags &= SUPPORTED_FILTERS;
5185 	ar->filter_flags = *total_flags;
5186 
5187 	ret = ath10k_monitor_recalc(ar);
5188 	if (ret)
5189 		ath10k_warn(ar, "failed to recalc montior: %d\n", ret);
5190 
5191 	mutex_unlock(&ar->conf_mutex);
5192 }
5193 
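/* mac80211 .bss_info_changed() callback: translate BSS configuration changes
 * (beacon interval, DTIM, SSID, ERP, PS, txpower, ...) into the corresponding
 * WMI vdev/pdev parameter updates.
 */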
5194 static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
5195 				    struct ieee80211_vif *vif,
5196 				    struct ieee80211_bss_conf *info,
5197 				    u32 changed)
5198 {
5199 	struct ath10k *ar = hw->priv;
5200 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5201 	int ret = 0;
5202 	u32 vdev_param, pdev_param, slottime, preamble;
5203 
5204 	mutex_lock(&ar->conf_mutex);
5205 
5206 	if (changed & BSS_CHANGED_IBSS)
5207 		ath10k_control_ibss(arvif, info, vif->addr);
5208 
5209 	if (changed & BSS_CHANGED_BEACON_INT) {
5210 		arvif->beacon_interval = info->beacon_int;
5211 		vdev_param = ar->wmi.vdev_param->beacon_interval;
5212 		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5213 						arvif->beacon_interval);
5214 		ath10k_dbg(ar, ATH10K_DBG_MAC,
5215 			   "mac vdev %d beacon_interval %d\n",
5216 			   arvif->vdev_id, arvif->beacon_interval);
5217 
5218 		if (ret)
5219 			ath10k_warn(ar, "failed to set beacon interval for vdev %d: %i\n",
5220 				    arvif->vdev_id, ret);
5221 	}
5222 
5223 	if (changed & BSS_CHANGED_BEACON) {
5224 		ath10k_dbg(ar, ATH10K_DBG_MAC,
5225 			   "vdev %d set beacon tx mode to staggered\n",
5226 			   arvif->vdev_id);
5227 
5228 		pdev_param = ar->wmi.pdev_param->beacon_tx_mode;
5229 		ret = ath10k_wmi_pdev_set_param(ar, pdev_param,
5230 						WMI_BEACON_STAGGERED_MODE);
5231 		if (ret)
5232 			ath10k_warn(ar, "failed to set beacon mode for vdev %d: %i\n",
5233 				    arvif->vdev_id, ret);
5234 
5235 		ret = ath10k_mac_setup_bcn_tmpl(arvif);
5236 		if (ret)
5237 			ath10k_warn(ar, "failed to update beacon template: %d\n",
5238 				    ret);
5239 
5240 		if (ieee80211_vif_is_mesh(vif)) {
5241 			/* mesh doesn't use SSID but firmware needs it */
5242 			strncpy(arvif->u.ap.ssid, "mesh",
5243 				sizeof(arvif->u.ap.ssid));
5244 			arvif->u.ap.ssid_len = 4;
5245 		}
5246 	}
5247 
5248 	if (changed & BSS_CHANGED_AP_PROBE_RESP) {
5249 		ret = ath10k_mac_setup_prb_tmpl(arvif);
5250 		if (ret)
5251 			ath10k_warn(ar, "failed to setup probe resp template on vdev %i: %d\n",
5252 				    arvif->vdev_id, ret);
5253 	}
5254 
5255 	if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
5256 		arvif->dtim_period = info->dtim_period;
5257 
5258 		ath10k_dbg(ar, ATH10K_DBG_MAC,
5259 			   "mac vdev %d dtim_period %d\n",
5260 			   arvif->vdev_id, arvif->dtim_period);
5261 
5262 		vdev_param = ar->wmi.vdev_param->dtim_period;
5263 		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5264 						arvif->dtim_period);
5265 		if (ret)
5266 			ath10k_warn(ar, "failed to set dtim period for vdev %d: %i\n",
5267 				    arvif->vdev_id, ret);
5268 	}
5269 
5270 	if (changed & BSS_CHANGED_SSID &&
5271 	    vif->type == NL80211_IFTYPE_AP) {
5272 		arvif->u.ap.ssid_len = info->ssid_len;
5273 		if (info->ssid_len)
5274 			memcpy(arvif->u.ap.ssid, info->ssid, info->ssid_len);
5275 		arvif->u.ap.hidden_ssid = info->hidden_ssid;
5276 	}
5277 
5278 	if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid))
5279 		ether_addr_copy(arvif->bssid, info->bssid);
5280 
5281 	if (changed & BSS_CHANGED_BEACON_ENABLED)
5282 		ath10k_control_beaconing(arvif, info);
5283 
5284 	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
5285 		arvif->use_cts_prot = info->use_cts_prot;
5286 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n",
5287 			   arvif->vdev_id, info->use_cts_prot);
5288 
5289 		ret = ath10k_recalc_rtscts_prot(arvif);
5290 		if (ret)
5291 			ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
5292 				    arvif->vdev_id, ret);
5293 
5294 		vdev_param = ar->wmi.vdev_param->protection_mode;
5295 		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5296 						info->use_cts_prot ? 1 : 0);
5297 		if (ret)
5298 			ath10k_warn(ar, "failed to set protection mode %d on vdev %i: %d\n",
5299 				    info->use_cts_prot, arvif->vdev_id, ret);
5300 	}
5301 
5302 	if (changed & BSS_CHANGED_ERP_SLOT) {
5303 		if (info->use_short_slot)
5304 			slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */
5306 		else
5307 			slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */
5308 
5309 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n",
5310 			   arvif->vdev_id, slottime);
5311 
5312 		vdev_param = ar->wmi.vdev_param->slot_time;
5313 		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5314 						slottime);
5315 		if (ret)
5316 			ath10k_warn(ar, "failed to set erp slot for vdev %d: %i\n",
5317 				    arvif->vdev_id, ret);
5318 	}
5319 
5320 	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
5321 		if (info->use_short_preamble)
5322 			preamble = WMI_VDEV_PREAMBLE_SHORT;
5323 		else
5324 			preamble = WMI_VDEV_PREAMBLE_LONG;
5325 
5326 		ath10k_dbg(ar, ATH10K_DBG_MAC,
5327 			   "mac vdev %d preamble %d\n",
5328 			   arvif->vdev_id, preamble);
5329 
5330 		vdev_param = ar->wmi.vdev_param->preamble;
5331 		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5332 						preamble);
5333 		if (ret)
5334 			ath10k_warn(ar, "failed to set preamble for vdev %d: %i\n",
5335 				    arvif->vdev_id, ret);
5336 	}
5337 
5338 	if (changed & BSS_CHANGED_ASSOC) {
5339 		if (info->assoc) {
5340 			/* Workaround: Make sure monitor vdev is not running
5341 			 * when associating to prevent some firmware revisions
5342 			 * (e.g. 10.1 and 10.2) from crashing.
5343 			 */
5344 			if (ar->monitor_started)
5345 				ath10k_monitor_stop(ar);
5346 			ath10k_bss_assoc(hw, vif, info);
5347 			ath10k_monitor_recalc(ar);
5348 		} else {
5349 			ath10k_bss_disassoc(hw, vif);
5350 		}
5351 	}
5352 
5353 	if (changed & BSS_CHANGED_TXPOWER) {
5354 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev_id %i txpower %d\n",
5355 			   arvif->vdev_id, info->txpower);
5356 
5357 		arvif->txpower = info->txpower;
5358 		ret = ath10k_mac_txpower_recalc(ar);
5359 		if (ret)
5360 			ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
5361 	}
5362 
5363 	if (changed & BSS_CHANGED_PS) {
5364 		arvif->ps = vif->bss_conf.ps;
5365 
5366 		ret = ath10k_config_ps(ar);
5367 		if (ret)
5368 			ath10k_warn(ar, "failed to setup ps on vdev %i: %d\n",
5369 				    arvif->vdev_id, ret);
5370 	}
5371 
5372 	mutex_unlock(&ar->conf_mutex);
5373 }
5374 
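/* mac80211 .hw_scan() callback: translates the cfg80211 scan request into a
 * WMI start-scan command and arms a timeout in case firmware never reports
 * completion.
 */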
5375 static int ath10k_hw_scan(struct ieee80211_hw *hw,
5376 			  struct ieee80211_vif *vif,
5377 			  struct ieee80211_scan_request *hw_req)
5378 {
5379 	struct ath10k *ar = hw->priv;
5380 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5381 	struct cfg80211_scan_request *req = &hw_req->req;
5382 	struct wmi_start_scan_arg arg;
5383 	int ret = 0;
5384 	int i;
5385 
5386 	mutex_lock(&ar->conf_mutex);
5387 
5388 	spin_lock_bh(&ar->data_lock);
5389 	switch (ar->scan.state) {
5390 	case ATH10K_SCAN_IDLE:
5391 		reinit_completion(&ar->scan.started);
5392 		reinit_completion(&ar->scan.completed);
5393 		ar->scan.state = ATH10K_SCAN_STARTING;
5394 		ar->scan.is_roc = false;
5395 		ar->scan.vdev_id = arvif->vdev_id;
5396 		ret = 0;
5397 		break;
5398 	case ATH10K_SCAN_STARTING:
5399 	case ATH10K_SCAN_RUNNING:
5400 	case ATH10K_SCAN_ABORTING:
5401 		ret = -EBUSY;
5402 		break;
5403 	}
5404 	spin_unlock_bh(&ar->data_lock);
5405 
5406 	if (ret)
5407 		goto exit;
5408 
5409 	memset(&arg, 0, sizeof(arg));
5410 	ath10k_wmi_start_scan_init(ar, &arg);
5411 	arg.vdev_id = arvif->vdev_id;
5412 	arg.scan_id = ATH10K_SCAN_ID;
5413 
5414 	if (req->ie_len) {
5415 		arg.ie_len = req->ie_len;
5416 		memcpy(arg.ie, req->ie, arg.ie_len);
5417 	}
5418 
5419 	if (req->n_ssids) {
5420 		arg.n_ssids = req->n_ssids;
5421 		for (i = 0; i < arg.n_ssids; i++) {
5422 			arg.ssids[i].len  = req->ssids[i].ssid_len;
5423 			arg.ssids[i].ssid = req->ssids[i].ssid;
5424 		}
5425 	} else {
5426 		arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
5427 	}
5428 
5429 	if (req->n_channels) {
5430 		arg.n_channels = req->n_channels;
5431 		for (i = 0; i < arg.n_channels; i++)
5432 			arg.channels[i] = req->channels[i]->center_freq;
5433 	}
5434 
5435 	ret = ath10k_start_scan(ar, &arg);
5436 	if (ret) {
5437 		ath10k_warn(ar, "failed to start hw scan: %d\n", ret);
5438 		spin_lock_bh(&ar->data_lock);
5439 		ar->scan.state = ATH10K_SCAN_IDLE;
5440 		spin_unlock_bh(&ar->data_lock);
5441 	}
5442 
5443 	/* Add a 200ms margin to account for event/command processing */
5444 	ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
5445 				     msecs_to_jiffies(arg.max_scan_time +
5446 						      200));
5447 
5448 exit:
5449 	mutex_unlock(&ar->conf_mutex);
5450 	return ret;
5451 }
5452 
5453 static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw,
5454 				  struct ieee80211_vif *vif)
5455 {
5456 	struct ath10k *ar = hw->priv;
5457 
5458 	mutex_lock(&ar->conf_mutex);
5459 	ath10k_scan_abort(ar);
5460 	mutex_unlock(&ar->conf_mutex);
5461 
5462 	cancel_delayed_work_sync(&ar->scan.timeout);
5463 }
5464 
5465 static void ath10k_set_key_h_def_keyidx(struct ath10k *ar,
5466 					struct ath10k_vif *arvif,
5467 					enum set_key_cmd cmd,
5468 					struct ieee80211_key_conf *key)
5469 {
5470 	u32 vdev_param = arvif->ar->wmi.vdev_param->def_keyid;
5471 	int ret;
5472 
5473 	/* 10.1 firmware branch requires default key index to be set to group
5474 	 * key index after installing it. Otherwise FW/HW Txes corrupted
5475 	 * frames with multi-vif APs. This is not required for main firmware
5476 	 * branch (e.g. 636).
5477 	 *
5478 	 * This is also needed for 636 fw for IBSS-RSN to work more reliably.
5479 	 *
5480 	 * FIXME: It remains unknown if this is required for multi-vif STA
5481 	 * interfaces on 10.1.
5482 	 */
5483 
5484 	if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
5485 	    arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
5486 		return;
5487 
5488 	if (key->cipher == WLAN_CIPHER_SUITE_WEP40)
5489 		return;
5490 
5491 	if (key->cipher == WLAN_CIPHER_SUITE_WEP104)
5492 		return;
5493 
5494 	if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
5495 		return;
5496 
5497 	if (cmd != SET_KEY)
5498 		return;
5499 
5500 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5501 					key->keyidx);
5502 	if (ret)
5503 		ath10k_warn(ar, "failed to set vdev %i group key as default key: %d\n",
5504 			    arvif->vdev_id, ret);
5505 }
5506 
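/* mac80211 .set_key() callback: installs or removes a key for the target
 * peer via WMI. Static WEP on a station vif is additionally installed as a
 * pairwise key because firmware requires both flavours.
 */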
5507 static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
5508 			  struct ieee80211_vif *vif, struct ieee80211_sta *sta,
5509 			  struct ieee80211_key_conf *key)
5510 {
5511 	struct ath10k *ar = hw->priv;
5512 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5513 	struct ath10k_peer *peer;
5514 	const u8 *peer_addr;
5515 	bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
5516 		      key->cipher == WLAN_CIPHER_SUITE_WEP104;
5517 	int ret = 0;
5518 	int ret2;
5519 	u32 flags = 0;
5520 	u32 flags2;
5521 
5522 	/* this one needs to be done in software */
5523 	if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
5524 		return 1;
5525 
5526 	if (arvif->nohwcrypt)
5527 		return 1;
5528 
5529 	if (key->keyidx > WMI_MAX_KEY_INDEX)
5530 		return -ENOSPC;
5531 
5532 	mutex_lock(&ar->conf_mutex);
5533 
5534 	if (sta)
5535 		peer_addr = sta->addr;
5536 	else if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
5537 		peer_addr = vif->bss_conf.bssid;
5538 	else
5539 		peer_addr = vif->addr;
5540 
5541 	key->hw_key_idx = key->keyidx;
5542 
5543 	if (is_wep) {
5544 		if (cmd == SET_KEY)
5545 			arvif->wep_keys[key->keyidx] = key;
5546 		else
5547 			arvif->wep_keys[key->keyidx] = NULL;
5548 	}
5549 
5550 	/* The peer should not disappear midway (unless FW goes awry) since we
5551 	 * already hold conf_mutex. Just make sure it's there now. */
5552 	spin_lock_bh(&ar->data_lock);
5553 	peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
5554 	spin_unlock_bh(&ar->data_lock);
5555 
5556 	if (!peer) {
5557 		if (cmd == SET_KEY) {
5558 			ath10k_warn(ar, "failed to install key for non-existent peer %pM\n",
5559 				    peer_addr);
5560 			ret = -EOPNOTSUPP;
5561 			goto exit;
5562 		} else {
5563 			/* if the peer doesn't exist there is no key to disable
5564 			 * anymore */
5565 			goto exit;
5566 		}
5567 	}
5568 
5569 	if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
5570 		flags |= WMI_KEY_PAIRWISE;
5571 	else
5572 		flags |= WMI_KEY_GROUP;
5573 
5574 	if (is_wep) {
5575 		if (cmd == DISABLE_KEY)
5576 			ath10k_clear_vdev_key(arvif, key);
5577 
5578 		/* When WEP keys are uploaded it's possible that there are
5579 		 * stations associated already (e.g. when merging) without any
5580 		 * keys. Static WEP needs an explicit per-peer key upload.
5581 		 */
5582 		if (vif->type == NL80211_IFTYPE_ADHOC &&
5583 		    cmd == SET_KEY)
5584 			ath10k_mac_vif_update_wep_key(arvif, key);
5585 
5586 		/* 802.1x never sets the def_wep_key_idx so each set_key()
5587 		 * call changes default tx key.
5588 		 *
5589 		 * Static WEP sets def_wep_key_idx via .set_default_unicast_key
5590 		 * after first set_key().
5591 		 */
5592 		if (cmd == SET_KEY && arvif->def_wep_key_idx == -1)
5593 			flags |= WMI_KEY_TX_USAGE;
5594 	}
5595 
5596 	ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags);
5597 	if (ret) {
5598 		WARN_ON(ret > 0);
5599 		ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n",
5600 			    arvif->vdev_id, peer_addr, ret);
5601 		goto exit;
5602 	}
5603 
5604 	/* mac80211 sets static WEP keys as groupwise while firmware requires
5605 	 * them to be installed twice as both pairwise and groupwise.
5606 	 */
5607 	if (is_wep && !sta && vif->type == NL80211_IFTYPE_STATION) {
5608 		flags2 = flags;
5609 		flags2 &= ~WMI_KEY_GROUP;
5610 		flags2 |= WMI_KEY_PAIRWISE;
5611 
5612 		ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags2);
5613 		if (ret) {
5614 			WARN_ON(ret > 0);
5615 			ath10k_warn(ar, "failed to install (ucast) key for vdev %i peer %pM: %d\n",
5616 				    arvif->vdev_id, peer_addr, ret);
5617 			ret2 = ath10k_install_key(arvif, key, DISABLE_KEY,
5618 						  peer_addr, flags);
5619 			if (ret2) {
5620 				WARN_ON(ret2 > 0);
5621 				ath10k_warn(ar, "failed to disable (mcast) key for vdev %i peer %pM: %d\n",
5622 					    arvif->vdev_id, peer_addr, ret2);
5623 			}
5624 			goto exit;
5625 		}
5626 	}
5627 
5628 	ath10k_set_key_h_def_keyidx(ar, arvif, cmd, key);
5629 
5630 	spin_lock_bh(&ar->data_lock);
5631 	peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
5632 	if (peer && cmd == SET_KEY)
5633 		peer->keys[key->keyidx] = key;
5634 	else if (peer && cmd == DISABLE_KEY)
5635 		peer->keys[key->keyidx] = NULL;
5636 	else if (peer == NULL)
5637 		/* impossible unless FW goes crazy */
5638 		ath10k_warn(ar, "Peer %pM disappeared!\n", peer_addr);
5639 	spin_unlock_bh(&ar->data_lock);
5640 
5641 exit:
5642 	mutex_unlock(&ar->conf_mutex);
5643 	return ret;
5644 }
5645 
5646 static void ath10k_set_default_unicast_key(struct ieee80211_hw *hw,
5647 					   struct ieee80211_vif *vif,
5648 					   int keyidx)
5649 {
5650 	struct ath10k *ar = hw->priv;
5651 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5652 	int ret;
5653 
5654 	mutex_lock(&arvif->ar->conf_mutex);
5655 
5656 	if (arvif->ar->state != ATH10K_STATE_ON)
5657 		goto unlock;
5658 
5659 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n",
5660 		   arvif->vdev_id, keyidx);
5661 
5662 	ret = ath10k_wmi_vdev_set_param(arvif->ar,
5663 					arvif->vdev_id,
5664 					arvif->ar->wmi.vdev_param->def_keyid,
5665 					keyidx);
5666 
5667 	if (ret) {
5668 		ath10k_warn(ar, "failed to update wep key index for vdev %d: %d\n",
5669 			    arvif->vdev_id,
5670 			    ret);
5671 		goto unlock;
5672 	}
5673 
5674 	arvif->def_wep_key_idx = keyidx;
5675 
5676 unlock:
5677 	mutex_unlock(&arvif->ar->conf_mutex);
5678 }
5679 
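/* Deferred worker for per-station rate control updates. The atomic
 * ath10k_sta_rc_update() callback further below only records the new
 * bandwidth, NSS and SMPS state under data_lock; this worker snapshots
 * those values and pushes them to firmware via WMI peer params under
 * conf_mutex, where sleeping is allowed.
 */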
5680 static void ath10k_sta_rc_update_wk(struct work_struct *wk)
5681 {
5682 	struct ath10k *ar;
5683 	struct ath10k_vif *arvif;
5684 	struct ath10k_sta *arsta;
5685 	struct ieee80211_sta *sta;
5686 	struct cfg80211_chan_def def;
5687 	enum nl80211_band band;
5688 	const u8 *ht_mcs_mask;
5689 	const u16 *vht_mcs_mask;
5690 	u32 changed, bw, nss, smps;
5691 	int err;
5692 
5693 	arsta = container_of(wk, struct ath10k_sta, update_wk);
5694 	sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
5695 	arvif = arsta->arvif;
5696 	ar = arvif->ar;
5697 
5698 	if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
5699 		return;
5700 
5701 	band = def.chan->band;
5702 	ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
5703 	vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
5704 
5705 	spin_lock_bh(&ar->data_lock);
5706 
5707 	changed = arsta->changed;
5708 	arsta->changed = 0;
5709 
5710 	bw = arsta->bw;
5711 	nss = arsta->nss;
5712 	smps = arsta->smps;
5713 
5714 	spin_unlock_bh(&ar->data_lock);
5715 
5716 	mutex_lock(&ar->conf_mutex);
5717 
5718 	nss = max_t(u32, 1, nss);
5719 	nss = min(nss, max(ath10k_mac_max_ht_nss(ht_mcs_mask),
5720 			   ath10k_mac_max_vht_nss(vht_mcs_mask)));
5721 
5722 	if (changed & IEEE80211_RC_BW_CHANGED) {
5723 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n",
5724 			   sta->addr, bw);
5725 
5726 		err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
5727 						WMI_PEER_CHAN_WIDTH, bw);
5728 		if (err)
5729 			ath10k_warn(ar, "failed to update STA %pM peer bw %d: %d\n",
5730 				    sta->addr, bw, err);
5731 	}
5732 
5733 	if (changed & IEEE80211_RC_NSS_CHANGED) {
5734 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM nss %d\n",
5735 			   sta->addr, nss);
5736 
5737 		err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
5738 						WMI_PEER_NSS, nss);
5739 		if (err)
5740 			ath10k_warn(ar, "failed to update STA %pM nss %d: %d\n",
5741 				    sta->addr, nss, err);
5742 	}
5743 
5744 	if (changed & IEEE80211_RC_SMPS_CHANGED) {
5745 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM smps %d\n",
5746 			   sta->addr, smps);
5747 
5748 		err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
5749 						WMI_PEER_SMPS_STATE, smps);
5750 		if (err)
5751 			ath10k_warn(ar, "failed to update STA %pM smps %d: %d\n",
5752 				    sta->addr, smps, err);
5753 	}
5754 
5755 	if (changed & IEEE80211_RC_SUPP_RATES_CHANGED ||
5756 	    changed & IEEE80211_RC_NSS_CHANGED) {
5757 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates/nss\n",
5758 			   sta->addr);
5759 
5760 		err = ath10k_station_assoc(ar, arvif->vif, sta, true);
5761 		if (err)
5762 			ath10k_warn(ar, "failed to reassociate station: %pM\n",
5763 				    sta->addr);
5764 	}
5765 
5766 	mutex_unlock(&ar->conf_mutex);
5767 }
5768 
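/* Station accounting helpers. A peer created on a non-TDLS station vdev
 * (i.e. the AP being associated with) does not count against
 * ar->max_num_stations; only TDLS peers and stations on other vdev types
 * consume a station slot.
 */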
5769 static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif,
5770 				       struct ieee80211_sta *sta)
5771 {
5772 	struct ath10k *ar = arvif->ar;
5773 
5774 	lockdep_assert_held(&ar->conf_mutex);
5775 
5776 	if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
5777 		return 0;
5778 
5779 	if (ar->num_stations >= ar->max_num_stations)
5780 		return -ENOBUFS;
5781 
5782 	ar->num_stations++;
5783 
5784 	return 0;
5785 }
5786 
5787 static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif,
5788 					struct ieee80211_sta *sta)
5789 {
5790 	struct ath10k *ar = arvif->ar;
5791 
5792 	lockdep_assert_held(&ar->conf_mutex);
5793 
5794 	if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
5795 		return;
5796 
5797 	ar->num_stations--;
5798 }
5799 
5800 struct ath10k_mac_tdls_iter_data {
5801 	u32 num_tdls_stations;
5802 	struct ieee80211_vif *curr_vif;
5803 };
5804 
5805 static void ath10k_mac_tdls_vif_stations_count_iter(void *data,
5806 						    struct ieee80211_sta *sta)
5807 {
5808 	struct ath10k_mac_tdls_iter_data *iter_data = data;
5809 	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
5810 	struct ieee80211_vif *sta_vif = arsta->arvif->vif;
5811 
5812 	if (sta->tdls && sta_vif == iter_data->curr_vif)
5813 		iter_data->num_tdls_stations++;
5814 }
5815 
5816 static int ath10k_mac_tdls_vif_stations_count(struct ieee80211_hw *hw,
5817 					      struct ieee80211_vif *vif)
5818 {
5819 	struct ath10k_mac_tdls_iter_data data = {};
5820 
5821 	data.curr_vif = vif;
5822 
5823 	ieee80211_iterate_stations_atomic(hw,
5824 					  ath10k_mac_tdls_vif_stations_count_iter,
5825 					  &data);
5826 	return data.num_tdls_stations;
5827 }
5828 
5829 static void ath10k_mac_tdls_vifs_count_iter(void *data, u8 *mac,
5830 					    struct ieee80211_vif *vif)
5831 {
5832 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5833 	int *num_tdls_vifs = data;
5834 
5835 	if (vif->type != NL80211_IFTYPE_STATION)
5836 		return;
5837 
5838 	if (ath10k_mac_tdls_vif_stations_count(arvif->ar->hw, vif) > 0)
5839 		(*num_tdls_vifs)++;
5840 }
5841 
5842 static int ath10k_mac_tdls_vifs_count(struct ieee80211_hw *hw)
5843 {
5844 	int num_tdls_vifs = 0;
5845 
5846 	ieee80211_iterate_active_interfaces_atomic(hw,
5847 						   IEEE80211_IFACE_ITER_NORMAL,
5848 						   ath10k_mac_tdls_vifs_count_iter,
5849 						   &num_tdls_vifs);
5850 	return num_tdls_vifs;
5851 }
5852 
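/* mac80211 station state machine glue. The transitions handled below are:
 *   NOTEXIST -> NONE        create the firmware peer (plus TDLS bookkeeping)
 *   NONE     -> NOTEXIST    delete the firmware peer and clean up any stale
 *                           peer_map entries
 *   AUTH     -> ASSOC       associate the station (AP/mesh/IBSS only)
 *   ASSOC    -> AUTHORIZED  mark TDLS peers as connected
 *   ASSOC    -> AUTH        disassociate the station (AP/mesh/IBSS only)
 */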
5853 static int ath10k_sta_state(struct ieee80211_hw *hw,
5854 			    struct ieee80211_vif *vif,
5855 			    struct ieee80211_sta *sta,
5856 			    enum ieee80211_sta_state old_state,
5857 			    enum ieee80211_sta_state new_state)
5858 {
5859 	struct ath10k *ar = hw->priv;
5860 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5861 	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
5862 	struct ath10k_peer *peer;
5863 	int ret = 0;
5864 	int i;
5865 
5866 	if (old_state == IEEE80211_STA_NOTEXIST &&
5867 	    new_state == IEEE80211_STA_NONE) {
5868 		memset(arsta, 0, sizeof(*arsta));
5869 		arsta->arvif = arvif;
5870 		INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk);
5871 
5872 		for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
5873 			ath10k_mac_txq_init(sta->txq[i]);
5874 	}
5875 
5876 	/* cancel must be done outside the mutex to avoid deadlock */
5877 	if ((old_state == IEEE80211_STA_NONE &&
5878 	     new_state == IEEE80211_STA_NOTEXIST))
5879 		cancel_work_sync(&arsta->update_wk);
5880 
5881 	mutex_lock(&ar->conf_mutex);
5882 
5883 	if (old_state == IEEE80211_STA_NOTEXIST &&
5884 	    new_state == IEEE80211_STA_NONE) {
5885 		/*
5886 		 * New station addition.
5887 		 */
5888 		enum wmi_peer_type peer_type = WMI_PEER_TYPE_DEFAULT;
5889 		u32 num_tdls_stations;
5890 		u32 num_tdls_vifs;
5891 
5892 		ath10k_dbg(ar, ATH10K_DBG_MAC,
5893 			   "mac vdev %d peer create %pM (new sta) sta %d / %d peer %d / %d\n",
5894 			   arvif->vdev_id, sta->addr,
5895 			   ar->num_stations + 1, ar->max_num_stations,
5896 			   ar->num_peers + 1, ar->max_num_peers);
5897 
5898 		ret = ath10k_mac_inc_num_stations(arvif, sta);
5899 		if (ret) {
5900 			ath10k_warn(ar, "refusing to associate station: too many connected already (%d)\n",
5901 				    ar->max_num_stations);
5902 			goto exit;
5903 		}
5904 
5905 		if (sta->tdls)
5906 			peer_type = WMI_PEER_TYPE_TDLS;
5907 
5908 		ret = ath10k_peer_create(ar, vif, sta, arvif->vdev_id,
5909 					 sta->addr, peer_type);
5910 		if (ret) {
5911 			ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n",
5912 				    sta->addr, arvif->vdev_id, ret);
5913 			ath10k_mac_dec_num_stations(arvif, sta);
5914 			goto exit;
5915 		}
5916 
5917 		spin_lock_bh(&ar->data_lock);
5918 
5919 		peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr);
5920 		if (!peer) {
5921 			ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n",
5922 				    vif->addr, arvif->vdev_id);
5923 			spin_unlock_bh(&ar->data_lock);
5924 			ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
5925 			ath10k_mac_dec_num_stations(arvif, sta);
5926 			ret = -ENOENT;
5927 			goto exit;
5928 		}
5929 
5930 		arsta->peer_id = find_first_bit(peer->peer_ids,
5931 						ATH10K_MAX_NUM_PEER_IDS);
5932 
5933 		spin_unlock_bh(&ar->data_lock);
5934 
5935 		if (!sta->tdls)
5936 			goto exit;
5937 
5938 		num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif);
5939 		num_tdls_vifs = ath10k_mac_tdls_vifs_count(hw);
5940 
5941 		if (num_tdls_vifs >= ar->max_num_tdls_vdevs &&
5942 		    num_tdls_stations == 0) {
5943 			ath10k_warn(ar, "vdev %i exceeded maximum number of tdls vdevs %i\n",
5944 				    arvif->vdev_id, ar->max_num_tdls_vdevs);
5945 			ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
5946 			ath10k_mac_dec_num_stations(arvif, sta);
5947 			ret = -ENOBUFS;
5948 			goto exit;
5949 		}
5950 
5951 		if (num_tdls_stations == 0) {
5952 			/* This is the first tdls peer in current vif */
5953 			enum wmi_tdls_state state = WMI_TDLS_ENABLE_ACTIVE;
5954 
5955 			ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
5956 							      state);
5957 			if (ret) {
5958 				ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
5959 					    arvif->vdev_id, ret);
5960 				ath10k_peer_delete(ar, arvif->vdev_id,
5961 						   sta->addr);
5962 				ath10k_mac_dec_num_stations(arvif, sta);
5963 				goto exit;
5964 			}
5965 		}
5966 
5967 		ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta,
5968 						  WMI_TDLS_PEER_STATE_PEERING);
5969 		if (ret) {
5970 			ath10k_warn(ar,
5971 				    "failed to update tdls peer %pM for vdev %d when adding a new sta: %i\n",
5972 				    sta->addr, arvif->vdev_id, ret);
5973 			ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
5974 			ath10k_mac_dec_num_stations(arvif, sta);
5975 
5976 			if (num_tdls_stations != 0)
5977 				goto exit;
5978 			ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
5979 							WMI_TDLS_DISABLE);
5980 		}
5981 	} else if ((old_state == IEEE80211_STA_NONE &&
5982 		    new_state == IEEE80211_STA_NOTEXIST)) {
5983 		/*
5984 		 * Existing station deletion.
5985 		 */
5986 		ath10k_dbg(ar, ATH10K_DBG_MAC,
5987 			   "mac vdev %d peer delete %pM (sta gone)\n",
5988 			   arvif->vdev_id, sta->addr);
5989 
5990 		ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
5991 		if (ret)
5992 			ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n",
5993 				    sta->addr, arvif->vdev_id, ret);
5994 
5995 		ath10k_mac_dec_num_stations(arvif, sta);
5996 
5997 		spin_lock_bh(&ar->data_lock);
5998 		for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
5999 			peer = ar->peer_map[i];
6000 			if (!peer)
6001 				continue;
6002 
6003 			if (peer->sta == sta) {
6004 				ath10k_warn(ar, "found sta peer %pM (ptr %p id %d) entry on vdev %i after it was supposedly removed\n",
6005 					    sta->addr, peer, i, arvif->vdev_id);
6006 				peer->sta = NULL;
6007 
6008 				/* Clean up the peer object as well since we
6009 				 * must have failed to do this above.
6010 				 */
6011 				list_del(&peer->list);
6012 				ar->peer_map[i] = NULL;
6013 				kfree(peer);
6014 				ar->num_peers--;
6015 			}
6016 		}
6017 		spin_unlock_bh(&ar->data_lock);
6018 
6019 		for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
6020 			ath10k_mac_txq_unref(ar, sta->txq[i]);
6021 
6022 		if (!sta->tdls)
6023 			goto exit;
6024 
6025 		if (ath10k_mac_tdls_vif_stations_count(hw, vif))
6026 			goto exit;
6027 
6028 		/* This was the last tdls peer in current vif */
6029 		ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
6030 						      WMI_TDLS_DISABLE);
6031 		if (ret) {
6032 			ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
6033 				    arvif->vdev_id, ret);
6034 		}
6035 	} else if (old_state == IEEE80211_STA_AUTH &&
6036 		   new_state == IEEE80211_STA_ASSOC &&
6037 		   (vif->type == NL80211_IFTYPE_AP ||
6038 		    vif->type == NL80211_IFTYPE_MESH_POINT ||
6039 		    vif->type == NL80211_IFTYPE_ADHOC)) {
6040 		/*
6041 		 * New association.
6042 		 */
6043 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM associated\n",
6044 			   sta->addr);
6045 
6046 		ret = ath10k_station_assoc(ar, vif, sta, false);
6047 		if (ret)
6048 			ath10k_warn(ar, "failed to associate station %pM for vdev %i: %i\n",
6049 				    sta->addr, arvif->vdev_id, ret);
6050 	} else if (old_state == IEEE80211_STA_ASSOC &&
6051 		   new_state == IEEE80211_STA_AUTHORIZED &&
6052 		   sta->tdls) {
6053 		/*
6054 		 * Tdls station authorized.
6055 		 */
6056 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac tdls sta %pM authorized\n",
6057 			   sta->addr);
6058 
6059 		ret = ath10k_station_assoc(ar, vif, sta, false);
6060 		if (ret) {
6061 			ath10k_warn(ar, "failed to associate tdls station %pM for vdev %i: %i\n",
6062 				    sta->addr, arvif->vdev_id, ret);
6063 			goto exit;
6064 		}
6065 
6066 		ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta,
6067 						  WMI_TDLS_PEER_STATE_CONNECTED);
6068 		if (ret)
6069 			ath10k_warn(ar, "failed to update tdls peer %pM for vdev %i: %i\n",
6070 				    sta->addr, arvif->vdev_id, ret);
6071 	} else if (old_state == IEEE80211_STA_ASSOC &&
6072 		    new_state == IEEE80211_STA_AUTH &&
6073 		    (vif->type == NL80211_IFTYPE_AP ||
6074 		     vif->type == NL80211_IFTYPE_MESH_POINT ||
6075 		     vif->type == NL80211_IFTYPE_ADHOC)) {
6076 		/*
6077 		 * Disassociation.
6078 		 */
6079 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM disassociated\n",
6080 			   sta->addr);
6081 
6082 		ret = ath10k_station_disassoc(ar, vif, sta);
6083 		if (ret)
6084 			ath10k_warn(ar, "failed to disassociate station: %pM vdev %i: %i\n",
6085 				    sta->addr, arvif->vdev_id, ret);
6086 	}
6087 exit:
6088 	mutex_unlock(&ar->conf_mutex);
6089 	return ret;
6090 }
6091 
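/* Translate a per-AC uAPSD setting into the firmware's STA powersave
 * parameters: accumulate the per-AC delivery/trigger bits in
 * arvif->u.sta.uapsd, switch the rx wake policy accordingly and, when the
 * firmware offers the auto-trigger service, effectively disable it so that
 * userspace stays in control of trigger frames.
 */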
6092 static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
6093 				u16 ac, bool enable)
6094 {
6095 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
6096 	struct wmi_sta_uapsd_auto_trig_arg arg = {};
6097 	u32 prio = 0, acc = 0;
6098 	u32 value = 0;
6099 	int ret = 0;
6100 
6101 	lockdep_assert_held(&ar->conf_mutex);
6102 
6103 	if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
6104 		return 0;
6105 
6106 	switch (ac) {
6107 	case IEEE80211_AC_VO:
6108 		value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN |
6109 			WMI_STA_PS_UAPSD_AC3_TRIGGER_EN;
6110 		prio = 7;
6111 		acc = 3;
6112 		break;
6113 	case IEEE80211_AC_VI:
6114 		value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN |
6115 			WMI_STA_PS_UAPSD_AC2_TRIGGER_EN;
6116 		prio = 5;
6117 		acc = 2;
6118 		break;
6119 	case IEEE80211_AC_BE:
6120 		value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN |
6121 			WMI_STA_PS_UAPSD_AC1_TRIGGER_EN;
6122 		prio = 2;
6123 		acc = 1;
6124 		break;
6125 	case IEEE80211_AC_BK:
6126 		value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN |
6127 			WMI_STA_PS_UAPSD_AC0_TRIGGER_EN;
6128 		prio = 0;
6129 		acc = 0;
6130 		break;
6131 	}
6132 
6133 	if (enable)
6134 		arvif->u.sta.uapsd |= value;
6135 	else
6136 		arvif->u.sta.uapsd &= ~value;
6137 
6138 	ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
6139 					  WMI_STA_PS_PARAM_UAPSD,
6140 					  arvif->u.sta.uapsd);
6141 	if (ret) {
6142 		ath10k_warn(ar, "failed to set uapsd params: %d\n", ret);
6143 		goto exit;
6144 	}
6145 
6146 	if (arvif->u.sta.uapsd)
6147 		value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD;
6148 	else
6149 		value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
6150 
6151 	ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
6152 					  WMI_STA_PS_PARAM_RX_WAKE_POLICY,
6153 					  value);
6154 	if (ret)
6155 		ath10k_warn(ar, "failed to set rx wake param: %d\n", ret);
6156 
6157 	ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif);
6158 	if (ret) {
6159 		ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n",
6160 			    arvif->vdev_id, ret);
6161 		return ret;
6162 	}
6163 
6164 	ret = ath10k_mac_vif_recalc_ps_poll_count(arvif);
6165 	if (ret) {
6166 		ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n",
6167 			    arvif->vdev_id, ret);
6168 		return ret;
6169 	}
6170 
6171 	if (test_bit(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, ar->wmi.svc_map) ||
6172 	    test_bit(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, ar->wmi.svc_map)) {
6173 		/* Only userspace can make an educated decision about when to send
6174 		 * a trigger frame. The following effectively disables the u-UAPSD
6175 		 * autotrigger in firmware (which is enabled by default
6176 		 * provided the autotrigger service is available).
6177 		 */
6178 
6179 		arg.wmm_ac = acc;
6180 		arg.user_priority = prio;
6181 		arg.service_interval = 0;
6182 		arg.suspend_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC;
6183 		arg.delay_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC;
6184 
6185 		ret = ath10k_wmi_vdev_sta_uapsd(ar, arvif->vdev_id,
6186 						arvif->bssid, &arg, 1);
6187 		if (ret) {
6188 			ath10k_warn(ar, "failed to set uapsd auto trigger %d\n",
6189 				    ret);
6190 			return ret;
6191 		}
6192 	}
6193 
6194 exit:
6195 	return ret;
6196 }
6197 
6198 static int ath10k_conf_tx(struct ieee80211_hw *hw,
6199 			  struct ieee80211_vif *vif, u16 ac,
6200 			  const struct ieee80211_tx_queue_params *params)
6201 {
6202 	struct ath10k *ar = hw->priv;
6203 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
6204 	struct wmi_wmm_params_arg *p = NULL;
6205 	int ret;
6206 
6207 	mutex_lock(&ar->conf_mutex);
6208 
6209 	switch (ac) {
6210 	case IEEE80211_AC_VO:
6211 		p = &arvif->wmm_params.ac_vo;
6212 		break;
6213 	case IEEE80211_AC_VI:
6214 		p = &arvif->wmm_params.ac_vi;
6215 		break;
6216 	case IEEE80211_AC_BE:
6217 		p = &arvif->wmm_params.ac_be;
6218 		break;
6219 	case IEEE80211_AC_BK:
6220 		p = &arvif->wmm_params.ac_bk;
6221 		break;
6222 	}
6223 
6224 	if (WARN_ON(!p)) {
6225 		ret = -EINVAL;
6226 		goto exit;
6227 	}
6228 
6229 	p->cwmin = params->cw_min;
6230 	p->cwmax = params->cw_max;
6231 	p->aifs = params->aifs;
6232 
6233 	/*
6234 	 * The channel time duration programmed in the HW is in absolute
6235 	 * microseconds, while mac80211 gives the txop in units of
6236 	 * 32 microseconds.
6237 	 */
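	/* For example, a typical AC_VI txop limit of 94 (i.e. 3.008 ms in
	 * 32 usec units) is programmed as 94 * 32 = 3008 usec.
	 */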
6238 	p->txop = params->txop * 32;
6239 
6240 	if (ar->wmi.ops->gen_vdev_wmm_conf) {
6241 		ret = ath10k_wmi_vdev_wmm_conf(ar, arvif->vdev_id,
6242 					       &arvif->wmm_params);
6243 		if (ret) {
6244 			ath10k_warn(ar, "failed to set vdev wmm params on vdev %i: %d\n",
6245 				    arvif->vdev_id, ret);
6246 			goto exit;
6247 		}
6248 	} else {
6249 		/* This won't work well with multi-interface cases but it's
6250 		 * better than nothing.
6251 		 */
6252 		ret = ath10k_wmi_pdev_set_wmm_params(ar, &arvif->wmm_params);
6253 		if (ret) {
6254 			ath10k_warn(ar, "failed to set wmm params: %d\n", ret);
6255 			goto exit;
6256 		}
6257 	}
6258 
6259 	ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
6260 	if (ret)
6261 		ath10k_warn(ar, "failed to set sta uapsd: %d\n", ret);
6262 
6263 exit:
6264 	mutex_unlock(&ar->conf_mutex);
6265 	return ret;
6266 }
6267 
6268 #define ATH10K_ROC_TIMEOUT_HZ (2 * HZ)
6269 
6270 static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
6271 				    struct ieee80211_vif *vif,
6272 				    struct ieee80211_channel *chan,
6273 				    int duration,
6274 				    enum ieee80211_roc_type type)
6275 {
6276 	struct ath10k *ar = hw->priv;
6277 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
6278 	struct wmi_start_scan_arg arg;
6279 	int ret = 0;
6280 	u32 scan_time_msec;
6281 
6282 	mutex_lock(&ar->conf_mutex);
6283 
6284 	spin_lock_bh(&ar->data_lock);
6285 	switch (ar->scan.state) {
6286 	case ATH10K_SCAN_IDLE:
6287 		reinit_completion(&ar->scan.started);
6288 		reinit_completion(&ar->scan.completed);
6289 		reinit_completion(&ar->scan.on_channel);
6290 		ar->scan.state = ATH10K_SCAN_STARTING;
6291 		ar->scan.is_roc = true;
6292 		ar->scan.vdev_id = arvif->vdev_id;
6293 		ar->scan.roc_freq = chan->center_freq;
6294 		ar->scan.roc_notify = true;
6295 		ret = 0;
6296 		break;
6297 	case ATH10K_SCAN_STARTING:
6298 	case ATH10K_SCAN_RUNNING:
6299 	case ATH10K_SCAN_ABORTING:
6300 		ret = -EBUSY;
6301 		break;
6302 	}
6303 	spin_unlock_bh(&ar->data_lock);
6304 
6305 	if (ret)
6306 		goto exit;
6307 
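	/* The scan dwell times are padded to twice the advertised maximum ROC
	 * duration; the requested duration itself is conveyed through
	 * burst_duration_ms and backed up by the ar->scan.timeout delayed work
	 * queued once the channel has been reached.
	 */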
6308 	scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2;
6309 
6310 	memset(&arg, 0, sizeof(arg));
6311 	ath10k_wmi_start_scan_init(ar, &arg);
6312 	arg.vdev_id = arvif->vdev_id;
6313 	arg.scan_id = ATH10K_SCAN_ID;
6314 	arg.n_channels = 1;
6315 	arg.channels[0] = chan->center_freq;
6316 	arg.dwell_time_active = scan_time_msec;
6317 	arg.dwell_time_passive = scan_time_msec;
6318 	arg.max_scan_time = scan_time_msec;
6319 	arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
6320 	arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ;
6321 	arg.burst_duration_ms = duration;
6322 
6323 	ret = ath10k_start_scan(ar, &arg);
6324 	if (ret) {
6325 		ath10k_warn(ar, "failed to start roc scan: %d\n", ret);
6326 		spin_lock_bh(&ar->data_lock);
6327 		ar->scan.state = ATH10K_SCAN_IDLE;
6328 		spin_unlock_bh(&ar->data_lock);
6329 		goto exit;
6330 	}
6331 
6332 	ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ);
6333 	if (ret == 0) {
6334 		ath10k_warn(ar, "failed to switch to channel for roc scan\n");
6335 
6336 		ret = ath10k_scan_stop(ar);
6337 		if (ret)
6338 			ath10k_warn(ar, "failed to stop scan: %d\n", ret);
6339 
6340 		ret = -ETIMEDOUT;
6341 		goto exit;
6342 	}
6343 
6344 	ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
6345 				     msecs_to_jiffies(duration));
6346 
6347 	ret = 0;
6348 exit:
6349 	mutex_unlock(&ar->conf_mutex);
6350 	return ret;
6351 }
6352 
6353 static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw)
6354 {
6355 	struct ath10k *ar = hw->priv;
6356 
6357 	mutex_lock(&ar->conf_mutex);
6358 
6359 	spin_lock_bh(&ar->data_lock);
6360 	ar->scan.roc_notify = false;
6361 	spin_unlock_bh(&ar->data_lock);
6362 
6363 	ath10k_scan_abort(ar);
6364 
6365 	mutex_unlock(&ar->conf_mutex);
6366 
6367 	cancel_delayed_work_sync(&ar->scan.timeout);
6368 
6369 	return 0;
6370 }
6371 
6372 /*
6373  * Both the RTS and fragmentation thresholds are interface-specific
6374  * in ath10k, but device-specific in mac80211.
6375  */
6376 
6377 static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
6378 {
6379 	struct ath10k *ar = hw->priv;
6380 	struct ath10k_vif *arvif;
6381 	int ret = 0;
6382 
6383 	mutex_lock(&ar->conf_mutex);
6384 	list_for_each_entry(arvif, &ar->arvifs, list) {
6385 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n",
6386 			   arvif->vdev_id, value);
6387 
6388 		ret = ath10k_mac_set_rts(arvif, value);
6389 		if (ret) {
6390 			ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
6391 				    arvif->vdev_id, ret);
6392 			break;
6393 		}
6394 	}
6395 	mutex_unlock(&ar->conf_mutex);
6396 
6397 	return ret;
6398 }
6399 
6400 static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
6401 {
6402 	/* Even though there's a WMI enum for the fragmentation threshold, no
6403 	 * known firmware actually implements it. Moreover, it is not possible
6404 	 * to delegate frame fragmentation to mac80211 because firmware clears
6405 	 * the "more fragments" bit in frame control, making it impossible for
6406 	 * remote devices to reassemble frames.
6407 	 *
6408 	 * Hence implement a dummy callback just to say fragmentation isn't
6409 	 * supported. This effectively prevents mac80211 from doing frame
6410 	 * fragmentation in software.
6411 	 */
6412 	return -EOPNOTSUPP;
6413 }
6414 
6415 static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
6416 			 u32 queues, bool drop)
6417 {
6418 	struct ath10k *ar = hw->priv;
6419 	bool skip;
6420 	long time_left;
6421 
6422 	/* mac80211 doesn't care whether we really transmit queued frames or not;
6423 	 * we'll collect those frames either way if we stop/delete vdevs */
6424 	if (drop)
6425 		return;
6426 
6427 	mutex_lock(&ar->conf_mutex);
6428 
6429 	if (ar->state == ATH10K_STATE_WEDGED)
6430 		goto skip;
6431 
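	/* Wait (up to ATH10K_FLUSH_TIMEOUT_HZ) until HTT reports no pending tx
	 * frames, bailing out early if the device gets wedged or a crash flush
	 * is in progress. The ({ ... }) statement expression below is the wait
	 * condition re-evaluated on every wakeup.
	 */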
6432 	time_left = wait_event_timeout(ar->htt.empty_tx_wq, ({
6433 			bool empty;
6434 
6435 			spin_lock_bh(&ar->htt.tx_lock);
6436 			empty = (ar->htt.num_pending_tx == 0);
6437 			spin_unlock_bh(&ar->htt.tx_lock);
6438 
6439 			skip = (ar->state == ATH10K_STATE_WEDGED) ||
6440 			       test_bit(ATH10K_FLAG_CRASH_FLUSH,
6441 					&ar->dev_flags);
6442 
6443 			(empty || skip);
6444 		}), ATH10K_FLUSH_TIMEOUT_HZ);
6445 
6446 	if (time_left == 0 || skip)
6447 		ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %ld\n",
6448 			    skip, ar->state, time_left);
6449 
6450 skip:
6451 	mutex_unlock(&ar->conf_mutex);
6452 }
6453 
6454 /* TODO: Implement this function properly
6455  * For now it is needed to reply to Probe Requests in IBSS mode.
6456  * Probably we need this information from FW.
6457  */
6458 static int ath10k_tx_last_beacon(struct ieee80211_hw *hw)
6459 {
6460 	return 1;
6461 }
6462 
6463 static void ath10k_reconfig_complete(struct ieee80211_hw *hw,
6464 				     enum ieee80211_reconfig_type reconfig_type)
6465 {
6466 	struct ath10k *ar = hw->priv;
6467 
6468 	if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
6469 		return;
6470 
6471 	mutex_lock(&ar->conf_mutex);
6472 
6473 	/* If device failed to restart it will be in a different state, e.g.
6474 	 * ATH10K_STATE_WEDGED */
6475 	if (ar->state == ATH10K_STATE_RESTARTED) {
6476 		ath10k_info(ar, "device successfully recovered\n");
6477 		ar->state = ATH10K_STATE_ON;
6478 		ieee80211_wake_queues(ar->hw);
6479 	}
6480 
6481 	mutex_unlock(&ar->conf_mutex);
6482 }
6483 
6484 static void
6485 ath10k_mac_update_bss_chan_survey(struct ath10k *ar,
6486 				  struct ieee80211_channel *channel)
6487 {
6488 	int ret;
6489 	enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR;
6490 
6491 	lockdep_assert_held(&ar->conf_mutex);
6492 
6493 	if (!test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map) ||
6494 	    (ar->rx_channel != channel))
6495 		return;
6496 
6497 	if (ar->scan.state != ATH10K_SCAN_IDLE) {
6498 		ath10k_dbg(ar, ATH10K_DBG_MAC, "ignoring bss chan info request while scanning..\n");
6499 		return;
6500 	}
6501 
6502 	reinit_completion(&ar->bss_survey_done);
6503 
6504 	ret = ath10k_wmi_pdev_bss_chan_info_request(ar, type);
6505 	if (ret) {
6506 		ath10k_warn(ar, "failed to send pdev bss chan info request\n");
6507 		return;
6508 	}
6509 
6510 	ret = wait_for_completion_timeout(&ar->bss_survey_done, 3 * HZ);
6511 	if (!ret) {
6512 		ath10k_warn(ar, "bss channel survey timed out\n");
6513 		return;
6514 	}
6515 }
6516 
6517 static int ath10k_get_survey(struct ieee80211_hw *hw, int idx,
6518 			     struct survey_info *survey)
6519 {
6520 	struct ath10k *ar = hw->priv;
6521 	struct ieee80211_supported_band *sband;
6522 	struct survey_info *ar_survey = &ar->survey[idx];
6523 	int ret = 0;
6524 
6525 	mutex_lock(&ar->conf_mutex);
6526 
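	/* The survey index is flat across bands: it first walks the 2 GHz
	 * channel list (when that band is registered) and then continues into
	 * the 5 GHz channel list.
	 */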
6527 	sband = hw->wiphy->bands[NL80211_BAND_2GHZ];
6528 	if (sband && idx >= sband->n_channels) {
6529 		idx -= sband->n_channels;
6530 		sband = NULL;
6531 	}
6532 
6533 	if (!sband)
6534 		sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
6535 
6536 	if (!sband || idx >= sband->n_channels) {
6537 		ret = -ENOENT;
6538 		goto exit;
6539 	}
6540 
6541 	ath10k_mac_update_bss_chan_survey(ar, survey->channel);
6542 
6543 	spin_lock_bh(&ar->data_lock);
6544 	memcpy(survey, ar_survey, sizeof(*survey));
6545 	spin_unlock_bh(&ar->data_lock);
6546 
6547 	survey->channel = &sband->channels[idx];
6548 
6549 	if (ar->rx_channel == survey->channel)
6550 		survey->filled |= SURVEY_INFO_IN_USE;
6551 
6552 exit:
6553 	mutex_unlock(&ar->conf_mutex);
6554 	return ret;
6555 }
6556 
6557 static bool
6558 ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar,
6559 					enum nl80211_band band,
6560 					const struct cfg80211_bitrate_mask *mask)
6561 {
6562 	int num_rates = 0;
6563 	int i;
6564 
6565 	num_rates += hweight32(mask->control[band].legacy);
6566 
6567 	for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++)
6568 		num_rates += hweight8(mask->control[band].ht_mcs[i]);
6569 
6570 	for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++)
6571 		num_rates += hweight16(mask->control[band].vht_mcs[i]);
6572 
6573 	return num_rates == 1;
6574 }
6575 
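/* Returns true (and the stream count in *nss) only when the mask selects
 * every supported HT and VHT MCS rate for spatial streams 1..n and nothing
 * else, i.e. no legacy rates, the per-stream selections form a contiguous
 * run starting at the first stream, and the HT and VHT selections cover the
 * same streams.
 */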
6576 static bool
6577 ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar,
6578 				       enum nl80211_band band,
6579 				       const struct cfg80211_bitrate_mask *mask,
6580 				       int *nss)
6581 {
6582 	struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
6583 	u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
6584 	u8 ht_nss_mask = 0;
6585 	u8 vht_nss_mask = 0;
6586 	int i;
6587 
6588 	if (mask->control[band].legacy)
6589 		return false;
6590 
6591 	for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
6592 		if (mask->control[band].ht_mcs[i] == 0)
6593 			continue;
6594 		else if (mask->control[band].ht_mcs[i] ==
6595 			 sband->ht_cap.mcs.rx_mask[i])
6596 			ht_nss_mask |= BIT(i);
6597 		else
6598 			return false;
6599 	}
6600 
6601 	for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
6602 		if (mask->control[band].vht_mcs[i] == 0)
6603 			continue;
6604 		else if (mask->control[band].vht_mcs[i] ==
6605 			 ath10k_mac_get_max_vht_mcs_map(vht_mcs_map, i))
6606 			vht_nss_mask |= BIT(i);
6607 		else
6608 			return false;
6609 	}
6610 
6611 	if (ht_nss_mask != vht_nss_mask)
6612 		return false;
6613 
6614 	if (ht_nss_mask == 0)
6615 		return false;
6616 
6617 	if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask)
6618 		return false;
6619 
6620 	*nss = fls(ht_nss_mask);
6621 
6622 	return true;
6623 }
6624 
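/* Build a WMI fixed rate code from a single-rate mask. As encoded below the
 * code packs the preamble type into bits [7:6], (nss - 1) into bits [5:4]
 * and the hw rate / MCS index into bits [3:0]. For example, a mask allowing
 * only HT MCS 5 on the second spatial stream yields *nss = 2 and, assuming
 * the usual wmi_rate_preamble ordering where WMI_RATE_PREAMBLE_HT == 2,
 * *rate = (2 << 6) | (1 << 4) | 5 = 0x95.
 */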
6625 static int
6626 ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar,
6627 					enum nl80211_band band,
6628 					const struct cfg80211_bitrate_mask *mask,
6629 					u8 *rate, u8 *nss)
6630 {
6631 	struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
6632 	int rate_idx;
6633 	int i;
6634 	u16 bitrate;
6635 	u8 preamble;
6636 	u8 hw_rate;
6637 
6638 	if (hweight32(mask->control[band].legacy) == 1) {
6639 		rate_idx = ffs(mask->control[band].legacy) - 1;
6640 
6641 		hw_rate = sband->bitrates[rate_idx].hw_value;
6642 		bitrate = sband->bitrates[rate_idx].bitrate;
6643 
6644 		if (ath10k_mac_bitrate_is_cck(bitrate))
6645 			preamble = WMI_RATE_PREAMBLE_CCK;
6646 		else
6647 			preamble = WMI_RATE_PREAMBLE_OFDM;
6648 
6649 		*nss = 1;
6650 		*rate = preamble << 6 |
6651 			(*nss - 1) << 4 |
6652 			hw_rate << 0;
6653 
6654 		return 0;
6655 	}
6656 
6657 	for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
6658 		if (hweight8(mask->control[band].ht_mcs[i]) == 1) {
6659 			*nss = i + 1;
6660 			*rate = WMI_RATE_PREAMBLE_HT << 6 |
6661 				(*nss - 1) << 4 |
6662 				(ffs(mask->control[band].ht_mcs[i]) - 1);
6663 
6664 			return 0;
6665 		}
6666 	}
6667 
6668 	for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
6669 		if (hweight16(mask->control[band].vht_mcs[i]) == 1) {
6670 			*nss = i + 1;
6671 			*rate = WMI_RATE_PREAMBLE_VHT << 6 |
6672 				(*nss - 1) << 4 |
6673 				(ffs(mask->control[band].vht_mcs[i]) - 1);
6674 
6675 			return 0;
6676 		}
6677 	}
6678 
6679 	return -EINVAL;
6680 }
6681 
6682 static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif,
6683 					    u8 rate, u8 nss, u8 sgi, u8 ldpc)
6684 {
6685 	struct ath10k *ar = arvif->ar;
6686 	u32 vdev_param;
6687 	int ret;
6688 
6689 	lockdep_assert_held(&ar->conf_mutex);
6690 
6691 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02hhx nss %hhu sgi %hhu\n",
6692 		   arvif->vdev_id, rate, nss, sgi);
6693 
6694 	vdev_param = ar->wmi.vdev_param->fixed_rate;
6695 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, rate);
6696 	if (ret) {
6697 		ath10k_warn(ar, "failed to set fixed rate param 0x%02x: %d\n",
6698 			    rate, ret);
6699 		return ret;
6700 	}
6701 
6702 	vdev_param = ar->wmi.vdev_param->nss;
6703 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, nss);
6704 	if (ret) {
6705 		ath10k_warn(ar, "failed to set nss param %d: %d\n", nss, ret);
6706 		return ret;
6707 	}
6708 
6709 	vdev_param = ar->wmi.vdev_param->sgi;
6710 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, sgi);
6711 	if (ret) {
6712 		ath10k_warn(ar, "failed to set sgi param %d: %d\n", sgi, ret);
6713 		return ret;
6714 	}
6715 
6716 	vdev_param = ar->wmi.vdev_param->ldpc;
6717 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, ldpc);
6718 	if (ret) {
6719 		ath10k_warn(ar, "failed to set ldpc param %d: %d\n", ldpc, ret);
6720 		return ret;
6721 	}
6722 
6723 	return 0;
6724 }
6725 
6726 static bool
6727 ath10k_mac_can_set_bitrate_mask(struct ath10k *ar,
6728 				enum nl80211_band band,
6729 				const struct cfg80211_bitrate_mask *mask)
6730 {
6731 	int i;
6732 	u16 vht_mcs;
6733 
6734 	/* Due to firmware limitation in WMI_PEER_ASSOC_CMDID it is impossible
6735 	 * to express all VHT MCS rate masks. Effectively only the following
6736 	 * ranges can be used: none, 0-7, 0-8 and 0-9.
6737 	 */
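	/* I.e. each per-stream vht_mcs mask must be one of 0x0000 (none),
	 * 0x00ff (MCS 0-7), 0x01ff (MCS 0-8) or 0x03ff (MCS 0-9).
	 */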
6738 	for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
6739 		vht_mcs = mask->control[band].vht_mcs[i];
6740 
6741 		switch (vht_mcs) {
6742 		case 0:
6743 		case BIT(8) - 1:
6744 		case BIT(9) - 1:
6745 		case BIT(10) - 1:
6746 			break;
6747 		default:
6748 			ath10k_warn(ar, "refusing bitrate mask with missing 0-7 VHT MCS rates\n");
6749 			return false;
6750 		}
6751 	}
6752 
6753 	return true;
6754 }
6755 
6756 static void ath10k_mac_set_bitrate_mask_iter(void *data,
6757 					     struct ieee80211_sta *sta)
6758 {
6759 	struct ath10k_vif *arvif = data;
6760 	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
6761 	struct ath10k *ar = arvif->ar;
6762 
6763 	if (arsta->arvif != arvif)
6764 		return;
6765 
6766 	spin_lock_bh(&ar->data_lock);
6767 	arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
6768 	spin_unlock_bh(&ar->data_lock);
6769 
6770 	ieee80211_queue_work(ar->hw, &arsta->update_wk);
6771 }
6772 
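/* .set_bitrate_mask handler. Three cases are distinguished below: a mask
 * selecting exactly one rate is programmed as a fixed rate, a mask that
 * cleanly selects streams 1..n is programmed as a fixed NSS, and any other
 * mask (subject to the VHT MCS range limitation above) is stored in
 * arvif->bitrate_mask and applied per station by triggering reassociation
 * from the rc update worker.
 */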
6773 static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
6774 					  struct ieee80211_vif *vif,
6775 					  const struct cfg80211_bitrate_mask *mask)
6776 {
6777 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
6778 	struct cfg80211_chan_def def;
6779 	struct ath10k *ar = arvif->ar;
6780 	enum nl80211_band band;
6781 	const u8 *ht_mcs_mask;
6782 	const u16 *vht_mcs_mask;
6783 	u8 rate;
6784 	u8 nss;
6785 	u8 sgi;
6786 	u8 ldpc;
6787 	int single_nss;
6788 	int ret;
6789 
6790 	if (ath10k_mac_vif_chan(vif, &def))
6791 		return -EPERM;
6792 
6793 	band = def.chan->band;
6794 	ht_mcs_mask = mask->control[band].ht_mcs;
6795 	vht_mcs_mask = mask->control[band].vht_mcs;
6796 	ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC);
6797 
6798 	sgi = mask->control[band].gi;
6799 	if (sgi == NL80211_TXRATE_FORCE_LGI)
6800 		return -EINVAL;
6801 
6802 	if (ath10k_mac_bitrate_mask_has_single_rate(ar, band, mask)) {
6803 		ret = ath10k_mac_bitrate_mask_get_single_rate(ar, band, mask,
6804 							      &rate, &nss);
6805 		if (ret) {
6806 			ath10k_warn(ar, "failed to get single rate for vdev %i: %d\n",
6807 				    arvif->vdev_id, ret);
6808 			return ret;
6809 		}
6810 	} else if (ath10k_mac_bitrate_mask_get_single_nss(ar, band, mask,
6811 							  &single_nss)) {
6812 		rate = WMI_FIXED_RATE_NONE;
6813 		nss = single_nss;
6814 	} else {
6815 		rate = WMI_FIXED_RATE_NONE;
6816 		nss = min(ar->num_rf_chains,
6817 			  max(ath10k_mac_max_ht_nss(ht_mcs_mask),
6818 			      ath10k_mac_max_vht_nss(vht_mcs_mask)));
6819 
6820 		if (!ath10k_mac_can_set_bitrate_mask(ar, band, mask))
6821 			return -EINVAL;
6822 
6823 		mutex_lock(&ar->conf_mutex);
6824 
6825 		arvif->bitrate_mask = *mask;
6826 		ieee80211_iterate_stations_atomic(ar->hw,
6827 						  ath10k_mac_set_bitrate_mask_iter,
6828 						  arvif);
6829 
6830 		mutex_unlock(&ar->conf_mutex);
6831 	}
6832 
6833 	mutex_lock(&ar->conf_mutex);
6834 
6835 	ret = ath10k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc);
6836 	if (ret) {
6837 		ath10k_warn(ar, "failed to set fixed rate params on vdev %i: %d\n",
6838 			    arvif->vdev_id, ret);
6839 		goto exit;
6840 	}
6841 
6842 exit:
6843 	mutex_unlock(&ar->conf_mutex);
6844 
6845 	return ret;
6846 }
6847 
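/* Record rate control changes for a station. Only the translated peer state
 * is stored here under data_lock (presumably because this callback may be
 * invoked in atomic context); the actual WMI commands are issued from
 * ath10k_sta_rc_update_wk(), which is queued at the end.
 */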
6848 static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
6849 				 struct ieee80211_vif *vif,
6850 				 struct ieee80211_sta *sta,
6851 				 u32 changed)
6852 {
6853 	struct ath10k *ar = hw->priv;
6854 	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
6855 	u32 bw, smps;
6856 
6857 	spin_lock_bh(&ar->data_lock);
6858 
6859 	ath10k_dbg(ar, ATH10K_DBG_MAC,
6860 		   "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
6861 		   sta->addr, changed, sta->bandwidth, sta->rx_nss,
6862 		   sta->smps_mode);
6863 
6864 	if (changed & IEEE80211_RC_BW_CHANGED) {
6865 		bw = WMI_PEER_CHWIDTH_20MHZ;
6866 
6867 		switch (sta->bandwidth) {
6868 		case IEEE80211_STA_RX_BW_20:
6869 			bw = WMI_PEER_CHWIDTH_20MHZ;
6870 			break;
6871 		case IEEE80211_STA_RX_BW_40:
6872 			bw = WMI_PEER_CHWIDTH_40MHZ;
6873 			break;
6874 		case IEEE80211_STA_RX_BW_80:
6875 			bw = WMI_PEER_CHWIDTH_80MHZ;
6876 			break;
6877 		case IEEE80211_STA_RX_BW_160:
6878 			ath10k_warn(ar, "Invalid bandwidth %d in rc update for %pM\n",
6879 				    sta->bandwidth, sta->addr);
6880 			bw = WMI_PEER_CHWIDTH_20MHZ;
6881 			break;
6882 		}
6883 
6884 		arsta->bw = bw;
6885 	}
6886 
6887 	if (changed & IEEE80211_RC_NSS_CHANGED)
6888 		arsta->nss = sta->rx_nss;
6889 
6890 	if (changed & IEEE80211_RC_SMPS_CHANGED) {
6891 		smps = WMI_PEER_SMPS_PS_NONE;
6892 
6893 		switch (sta->smps_mode) {
6894 		case IEEE80211_SMPS_AUTOMATIC:
6895 		case IEEE80211_SMPS_OFF:
6896 			smps = WMI_PEER_SMPS_PS_NONE;
6897 			break;
6898 		case IEEE80211_SMPS_STATIC:
6899 			smps = WMI_PEER_SMPS_STATIC;
6900 			break;
6901 		case IEEE80211_SMPS_DYNAMIC:
6902 			smps = WMI_PEER_SMPS_DYNAMIC;
6903 			break;
6904 		case IEEE80211_SMPS_NUM_MODES:
6905 			ath10k_warn(ar, "Invalid smps %d in sta rc update for %pM\n",
6906 				    sta->smps_mode, sta->addr);
6907 			smps = WMI_PEER_SMPS_PS_NONE;
6908 			break;
6909 		}
6910 
6911 		arsta->smps = smps;
6912 	}
6913 
6914 	arsta->changed |= changed;
6915 
6916 	spin_unlock_bh(&ar->data_lock);
6917 
6918 	ieee80211_queue_work(hw, &arsta->update_wk);
6919 }
6920 
6921 static u64 ath10k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
6922 {
6923 	/*
6924 	 * FIXME: Return 0 for time being. Need to figure out whether FW
6925 	 * has the API to fetch 64-bit local TSF
6926 	 */
6927 
6928 	return 0;
6929 }
6930 
6931 static void ath10k_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
6932 			   u64 tsf)
6933 {
6934 	struct ath10k *ar = hw->priv;
6935 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
6936 	u32 tsf_offset, vdev_param = ar->wmi.vdev_param->set_tsf;
6937 	int ret;
6938 
6939 	/* Workaround:
6940 	 *
6941 	 * The given tsf argument is the entire TSF value, but firmware accepts
6942 	 * only a TSF offset relative to the current TSF.
6943 	 *
6944 	 * The get_tsf function is used to get the offset value; however, since
6945 	 * ath10k_get_tsf is not implemented properly, it always returns 0.
6946 	 * Luckily all the callers of set_tsf, as of now, also rely on the
6947 	 * get_tsf function to build the entire tsf value (get_tsf() + tsf_delta),
6948 	 * so the final tsf offset passed to firmware is arithmetically correct.
6949 	 */
6950 	tsf_offset = tsf - ath10k_get_tsf(hw, vif);
6951 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
6952 					vdev_param, tsf_offset);
6953 	if (ret && ret != -EOPNOTSUPP)
6954 		ath10k_warn(ar, "failed to set tsf offset: %d\n", ret);
6955 }
6956 
6957 static int ath10k_ampdu_action(struct ieee80211_hw *hw,
6958 			       struct ieee80211_vif *vif,
6959 			       struct ieee80211_ampdu_params *params)
6960 {
6961 	struct ath10k *ar = hw->priv;
6962 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
6963 	struct ieee80211_sta *sta = params->sta;
6964 	enum ieee80211_ampdu_mlme_action action = params->action;
6965 	u16 tid = params->tid;
6966 
6967 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %hu action %d\n",
6968 		   arvif->vdev_id, sta->addr, tid, action);
6969 
6970 	switch (action) {
6971 	case IEEE80211_AMPDU_RX_START:
6972 	case IEEE80211_AMPDU_RX_STOP:
6973 		/* HTT AddBa/DelBa events trigger mac80211 Rx BA session
6974 		 * creation/removal. Do we need to verify this?
6975 		 */
6976 		return 0;
6977 	case IEEE80211_AMPDU_TX_START:
6978 	case IEEE80211_AMPDU_TX_STOP_CONT:
6979 	case IEEE80211_AMPDU_TX_STOP_FLUSH:
6980 	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
6981 	case IEEE80211_AMPDU_TX_OPERATIONAL:
6982 		/* Firmware offloads Tx aggregation entirely so deny mac80211
6983 		 * Tx aggregation requests.
6984 		 */
6985 		return -EOPNOTSUPP;
6986 	}
6987 
6988 	return -EINVAL;
6989 }
6990 
6991 static void
6992 ath10k_mac_update_rx_channel(struct ath10k *ar,
6993 			     struct ieee80211_chanctx_conf *ctx,
6994 			     struct ieee80211_vif_chanctx_switch *vifs,
6995 			     int n_vifs)
6996 {
6997 	struct cfg80211_chan_def *def = NULL;
6998 
6999 	/* Both locks are required because ar->rx_channel is modified. This
7000 	 * allows readers to hold either lock.
7001 	 */
7002 	lockdep_assert_held(&ar->conf_mutex);
7003 	lockdep_assert_held(&ar->data_lock);
7004 
7005 	WARN_ON(ctx && vifs);
7006 	WARN_ON(vifs && n_vifs != 1);
7007 
7008 	/* FIXME: Sort of an optimization and a workaround. Peers and vifs are
7009 	 * on a linked list now. Doing a lookup peer -> vif -> chanctx for each
7010 	 * ppdu on Rx may reduce performance on low-end systems. It should be
7011 	 * possible to make tables/hashmaps to speed the lookup up (be wary of
7012 	 * cpu data cache line sizes though), but to keep the initial
7013 	 * implementation simple and less intrusive, fall back to the slow lookup
7014 	 * only for multi-channel cases. Single-channel cases will keep using
7015 	 * the old channel derivation and thus performance should not be
7016 	 * affected much.
7017 	 */
7018 	rcu_read_lock();
7019 	if (!ctx && ath10k_mac_num_chanctxs(ar) == 1) {
7020 		ieee80211_iter_chan_contexts_atomic(ar->hw,
7021 						    ath10k_mac_get_any_chandef_iter,
7022 						    &def);
7023 
7024 		if (vifs)
7025 			def = &vifs[0].new_ctx->def;
7026 
7027 		ar->rx_channel = def->chan;
7028 	} else if ((ctx && ath10k_mac_num_chanctxs(ar) == 0) ||
7029 		   (ctx && (ar->state == ATH10K_STATE_RESTARTED))) {
7030 		/* During a driver restart due to a firmware assert, mac80211
7031 		 * already has a valid channel context for the given radio, so
7032 		 * the channel context iteration returns num_chanctx > 0. Fix
7033 		 * rx_channel while the restart is in progress.
7034 		 */
7035 		ar->rx_channel = ctx->def.chan;
7036 	} else {
7037 		ar->rx_channel = NULL;
7038 	}
7039 	rcu_read_unlock();
7040 }
7041 
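/* Switch a set of vdevs to new channel definitions. The sequence is: stop
 * any lone monitor vdev, bring the affected vdevs down, update
 * ar->rx_channel, refresh beacon/probe templates, restart each vdev on its
 * new channel definition and bring it back up, then recalculate the monitor
 * state.
 */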
7042 static void
7043 ath10k_mac_update_vif_chan(struct ath10k *ar,
7044 			   struct ieee80211_vif_chanctx_switch *vifs,
7045 			   int n_vifs)
7046 {
7047 	struct ath10k_vif *arvif;
7048 	int ret;
7049 	int i;
7050 
7051 	lockdep_assert_held(&ar->conf_mutex);
7052 
7053 	/* First stop monitor interface. Some FW versions crash if there's a
7054 	 * lone monitor interface.
7055 	 */
7056 	if (ar->monitor_started)
7057 		ath10k_monitor_stop(ar);
7058 
7059 	for (i = 0; i < n_vifs; i++) {
7060 		arvif = ath10k_vif_to_arvif(vifs[i].vif);
7061 
7062 		ath10k_dbg(ar, ATH10K_DBG_MAC,
7063 			   "mac chanctx switch vdev_id %i freq %hu->%hu width %d->%d\n",
7064 			   arvif->vdev_id,
7065 			   vifs[i].old_ctx->def.chan->center_freq,
7066 			   vifs[i].new_ctx->def.chan->center_freq,
7067 			   vifs[i].old_ctx->def.width,
7068 			   vifs[i].new_ctx->def.width);
7069 
7070 		if (WARN_ON(!arvif->is_started))
7071 			continue;
7072 
7073 		if (WARN_ON(!arvif->is_up))
7074 			continue;
7075 
7076 		ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
7077 		if (ret) {
7078 			ath10k_warn(ar, "failed to down vdev %d: %d\n",
7079 				    arvif->vdev_id, ret);
7080 			continue;
7081 		}
7082 	}
7083 
7084 	/* All relevant vdevs are downed and associated channel resources
7085 	 * should be available for the channel switch now.
7086 	 */
7087 
7088 	spin_lock_bh(&ar->data_lock);
7089 	ath10k_mac_update_rx_channel(ar, NULL, vifs, n_vifs);
7090 	spin_unlock_bh(&ar->data_lock);
7091 
7092 	for (i = 0; i < n_vifs; i++) {
7093 		arvif = ath10k_vif_to_arvif(vifs[i].vif);
7094 
7095 		if (WARN_ON(!arvif->is_started))
7096 			continue;
7097 
7098 		if (WARN_ON(!arvif->is_up))
7099 			continue;
7100 
7101 		ret = ath10k_mac_setup_bcn_tmpl(arvif);
7102 		if (ret)
7103 			ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
7104 				    ret);
7105 
7106 		ret = ath10k_mac_setup_prb_tmpl(arvif);
7107 		if (ret)
7108 			ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
7109 				    ret);
7110 
7111 		ret = ath10k_vdev_restart(arvif, &vifs[i].new_ctx->def);
7112 		if (ret) {
7113 			ath10k_warn(ar, "failed to restart vdev %d: %d\n",
7114 				    arvif->vdev_id, ret);
7115 			continue;
7116 		}
7117 
7118 		ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
7119 					 arvif->bssid);
7120 		if (ret) {
7121 			ath10k_warn(ar, "failed to bring vdev up %d: %d\n",
7122 				    arvif->vdev_id, ret);
7123 			continue;
7124 		}
7125 	}
7126 
7127 	ath10k_monitor_recalc(ar);
7128 }
7129 
7130 static int
7131 ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw,
7132 			  struct ieee80211_chanctx_conf *ctx)
7133 {
7134 	struct ath10k *ar = hw->priv;
7135 
7136 	ath10k_dbg(ar, ATH10K_DBG_MAC,
7137 		   "mac chanctx add freq %hu width %d ptr %p\n",
7138 		   ctx->def.chan->center_freq, ctx->def.width, ctx);
7139 
7140 	mutex_lock(&ar->conf_mutex);
7141 
7142 	spin_lock_bh(&ar->data_lock);
7143 	ath10k_mac_update_rx_channel(ar, ctx, NULL, 0);
7144 	spin_unlock_bh(&ar->data_lock);
7145 
7146 	ath10k_recalc_radar_detection(ar);
7147 	ath10k_monitor_recalc(ar);
7148 
7149 	mutex_unlock(&ar->conf_mutex);
7150 
7151 	return 0;
7152 }
7153 
7154 static void
7155 ath10k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
7156 			     struct ieee80211_chanctx_conf *ctx)
7157 {
7158 	struct ath10k *ar = hw->priv;
7159 
7160 	ath10k_dbg(ar, ATH10K_DBG_MAC,
7161 		   "mac chanctx remove freq %hu width %d ptr %p\n",
7162 		   ctx->def.chan->center_freq, ctx->def.width, ctx);
7163 
7164 	mutex_lock(&ar->conf_mutex);
7165 
7166 	spin_lock_bh(&ar->data_lock);
7167 	ath10k_mac_update_rx_channel(ar, NULL, NULL, 0);
7168 	spin_unlock_bh(&ar->data_lock);
7169 
7170 	ath10k_recalc_radar_detection(ar);
7171 	ath10k_monitor_recalc(ar);
7172 
7173 	mutex_unlock(&ar->conf_mutex);
7174 }
7175 
7176 struct ath10k_mac_change_chanctx_arg {
7177 	struct ieee80211_chanctx_conf *ctx;
7178 	struct ieee80211_vif_chanctx_switch *vifs;
7179 	int n_vifs;
7180 	int next_vif;
7181 };
7182 
7183 static void
7184 ath10k_mac_change_chanctx_cnt_iter(void *data, u8 *mac,
7185 				   struct ieee80211_vif *vif)
7186 {
7187 	struct ath10k_mac_change_chanctx_arg *arg = data;
7188 
7189 	if (rcu_access_pointer(vif->chanctx_conf) != arg->ctx)
7190 		return;
7191 
7192 	arg->n_vifs++;
7193 }
7194 
7195 static void
7196 ath10k_mac_change_chanctx_fill_iter(void *data, u8 *mac,
7197 				    struct ieee80211_vif *vif)
7198 {
7199 	struct ath10k_mac_change_chanctx_arg *arg = data;
7200 	struct ieee80211_chanctx_conf *ctx;
7201 
7202 	ctx = rcu_access_pointer(vif->chanctx_conf);
7203 	if (ctx != arg->ctx)
7204 		return;
7205 
7206 	if (WARN_ON(arg->next_vif == arg->n_vifs))
7207 		return;
7208 
7209 	arg->vifs[arg->next_vif].vif = vif;
7210 	arg->vifs[arg->next_vif].old_ctx = ctx;
7211 	arg->vifs[arg->next_vif].new_ctx = ctx;
7212 	arg->next_vif++;
7213 }
7214 
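/* Width changes on a channel context are handled with two passes over the
 * active interfaces: the first pass counts the vifs bound to this context,
 * the second fills a vif_chanctx_switch array (with old_ctx == new_ctx)
 * which is then handed to ath10k_mac_update_vif_chan().
 */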
7215 static void
7216 ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw,
7217 			     struct ieee80211_chanctx_conf *ctx,
7218 			     u32 changed)
7219 {
7220 	struct ath10k *ar = hw->priv;
7221 	struct ath10k_mac_change_chanctx_arg arg = { .ctx = ctx };
7222 
7223 	mutex_lock(&ar->conf_mutex);
7224 
7225 	ath10k_dbg(ar, ATH10K_DBG_MAC,
7226 		   "mac chanctx change freq %hu width %d ptr %p changed %x\n",
7227 		   ctx->def.chan->center_freq, ctx->def.width, ctx, changed);
7228 
7229 	/* This shouldn't really happen because channel switching should use
7230 	 * switch_vif_chanctx().
7231 	 */
7232 	if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL))
7233 		goto unlock;
7234 
7235 	if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH) {
7236 		ieee80211_iterate_active_interfaces_atomic(
7237 					hw,
7238 					IEEE80211_IFACE_ITER_NORMAL,
7239 					ath10k_mac_change_chanctx_cnt_iter,
7240 					&arg);
7241 		if (arg.n_vifs == 0)
7242 			goto radar;
7243 
7244 		arg.vifs = kcalloc(arg.n_vifs, sizeof(arg.vifs[0]),
7245 				   GFP_KERNEL);
7246 		if (!arg.vifs)
7247 			goto radar;
7248 
7249 		ieee80211_iterate_active_interfaces_atomic(
7250 					hw,
7251 					IEEE80211_IFACE_ITER_NORMAL,
7252 					ath10k_mac_change_chanctx_fill_iter,
7253 					&arg);
7254 		ath10k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs);
7255 		kfree(arg.vifs);
7256 	}
7257 
7258 radar:
7259 	ath10k_recalc_radar_detection(ar);
7260 
7261 	/* FIXME: How to configure Rx chains properly? */
7262 
7263 	/* No other actions are actually necessary. Firmware maintains channel
7264 	 * definitions per vdev internally and there's no host-side channel
7265 	 * context abstraction to configure, e.g. channel width.
7266 	 */
7267 
7268 unlock:
7269 	mutex_unlock(&ar->conf_mutex);
7270 }
7271 
7272 static int
7273 ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
7274 				 struct ieee80211_vif *vif,
7275 				 struct ieee80211_chanctx_conf *ctx)
7276 {
7277 	struct ath10k *ar = hw->priv;
7278 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
7279 	int ret;
7280 
7281 	mutex_lock(&ar->conf_mutex);
7282 
7283 	ath10k_dbg(ar, ATH10K_DBG_MAC,
7284 		   "mac chanctx assign ptr %p vdev_id %i\n",
7285 		   ctx, arvif->vdev_id);
7286 
7287 	if (WARN_ON(arvif->is_started)) {
7288 		mutex_unlock(&ar->conf_mutex);
7289 		return -EBUSY;
7290 	}
7291 
7292 	ret = ath10k_vdev_start(arvif, &ctx->def);
7293 	if (ret) {
7294 		ath10k_warn(ar, "failed to start vdev %i addr %pM on freq %d: %d\n",
7295 			    arvif->vdev_id, vif->addr,
7296 			    ctx->def.chan->center_freq, ret);
7297 		goto err;
7298 	}
7299 
7300 	arvif->is_started = true;
7301 
7302 	ret = ath10k_mac_vif_setup_ps(arvif);
7303 	if (ret) {
7304 		ath10k_warn(ar, "failed to update vdev %i ps: %d\n",
7305 			    arvif->vdev_id, ret);
7306 		goto err_stop;
7307 	}
7308 
7309 	if (vif->type == NL80211_IFTYPE_MONITOR) {
7310 		ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, 0, vif->addr);
7311 		if (ret) {
7312 			ath10k_warn(ar, "failed to up monitor vdev %i: %d\n",
7313 				    arvif->vdev_id, ret);
7314 			goto err_stop;
7315 		}
7316 
7317 		arvif->is_up = true;
7318 	}
7319 
7320 	mutex_unlock(&ar->conf_mutex);
7321 	return 0;
7322 
7323 err_stop:
7324 	ath10k_vdev_stop(arvif);
7325 	arvif->is_started = false;
7326 	ath10k_mac_vif_setup_ps(arvif);
7327 
7328 err:
7329 	mutex_unlock(&ar->conf_mutex);
7330 	return ret;
7331 }
7332 
7333 static void
7334 ath10k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
7335 				   struct ieee80211_vif *vif,
7336 				   struct ieee80211_chanctx_conf *ctx)
7337 {
7338 	struct ath10k *ar = hw->priv;
7339 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
7340 	int ret;
7341 
7342 	mutex_lock(&ar->conf_mutex);
7343 
7344 	ath10k_dbg(ar, ATH10K_DBG_MAC,
7345 		   "mac chanctx unassign ptr %p vdev_id %i\n",
7346 		   ctx, arvif->vdev_id);
7347 
7348 	WARN_ON(!arvif->is_started);
7349 
7350 	if (vif->type == NL80211_IFTYPE_MONITOR) {
7351 		WARN_ON(!arvif->is_up);
7352 
7353 		ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
7354 		if (ret)
7355 			ath10k_warn(ar, "failed to down monitor vdev %i: %d\n",
7356 				    arvif->vdev_id, ret);
7357 
7358 		arvif->is_up = false;
7359 	}
7360 
7361 	ret = ath10k_vdev_stop(arvif);
7362 	if (ret)
7363 		ath10k_warn(ar, "failed to stop vdev %i: %d\n",
7364 			    arvif->vdev_id, ret);
7365 
7366 	arvif->is_started = false;
7367 
7368 	mutex_unlock(&ar->conf_mutex);
7369 }
7370 
7371 static int
7372 ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
7373 				 struct ieee80211_vif_chanctx_switch *vifs,
7374 				 int n_vifs,
7375 				 enum ieee80211_chanctx_switch_mode mode)
7376 {
7377 	struct ath10k *ar = hw->priv;
7378 
7379 	mutex_lock(&ar->conf_mutex);
7380 
7381 	ath10k_dbg(ar, ATH10K_DBG_MAC,
7382 		   "mac chanctx switch n_vifs %d mode %d\n",
7383 		   n_vifs, mode);
7384 	ath10k_mac_update_vif_chan(ar, vifs, n_vifs);
7385 
7386 	mutex_unlock(&ar->conf_mutex);
7387 	return 0;
7388 }
7389 
7390 static const struct ieee80211_ops ath10k_ops = {
7391 	.tx				= ath10k_mac_op_tx,
7392 	.wake_tx_queue			= ath10k_mac_op_wake_tx_queue,
7393 	.start				= ath10k_start,
7394 	.stop				= ath10k_stop,
7395 	.config				= ath10k_config,
7396 	.add_interface			= ath10k_add_interface,
7397 	.remove_interface		= ath10k_remove_interface,
7398 	.configure_filter		= ath10k_configure_filter,
7399 	.bss_info_changed		= ath10k_bss_info_changed,
7400 	.hw_scan			= ath10k_hw_scan,
7401 	.cancel_hw_scan			= ath10k_cancel_hw_scan,
7402 	.set_key			= ath10k_set_key,
7403 	.set_default_unicast_key        = ath10k_set_default_unicast_key,
7404 	.sta_state			= ath10k_sta_state,
7405 	.conf_tx			= ath10k_conf_tx,
7406 	.remain_on_channel		= ath10k_remain_on_channel,
7407 	.cancel_remain_on_channel	= ath10k_cancel_remain_on_channel,
7408 	.set_rts_threshold		= ath10k_set_rts_threshold,
7409 	.set_frag_threshold		= ath10k_mac_op_set_frag_threshold,
7410 	.flush				= ath10k_flush,
7411 	.tx_last_beacon			= ath10k_tx_last_beacon,
7412 	.set_antenna			= ath10k_set_antenna,
7413 	.get_antenna			= ath10k_get_antenna,
7414 	.reconfig_complete		= ath10k_reconfig_complete,
7415 	.get_survey			= ath10k_get_survey,
7416 	.set_bitrate_mask		= ath10k_mac_op_set_bitrate_mask,
7417 	.sta_rc_update			= ath10k_sta_rc_update,
7418 	.get_tsf			= ath10k_get_tsf,
7419 	.set_tsf			= ath10k_set_tsf,
7420 	.ampdu_action			= ath10k_ampdu_action,
7421 	.get_et_sset_count		= ath10k_debug_get_et_sset_count,
7422 	.get_et_stats			= ath10k_debug_get_et_stats,
7423 	.get_et_strings			= ath10k_debug_get_et_strings,
7424 	.add_chanctx			= ath10k_mac_op_add_chanctx,
7425 	.remove_chanctx			= ath10k_mac_op_remove_chanctx,
7426 	.change_chanctx			= ath10k_mac_op_change_chanctx,
7427 	.assign_vif_chanctx		= ath10k_mac_op_assign_vif_chanctx,
7428 	.unassign_vif_chanctx		= ath10k_mac_op_unassign_vif_chanctx,
7429 	.switch_vif_chanctx		= ath10k_mac_op_switch_vif_chanctx,
7430 
7431 	CFG80211_TESTMODE_CMD(ath10k_tm_cmd)
7432 
7433 #ifdef CONFIG_PM
7434 	.suspend			= ath10k_wow_op_suspend,
7435 	.resume				= ath10k_wow_op_resume,
7436 #endif
7437 #ifdef CONFIG_MAC80211_DEBUGFS
7438 	.sta_add_debugfs		= ath10k_sta_add_debugfs,
7439 	.sta_statistics			= ath10k_sta_statistics,
7440 #endif
7441 };
7442 
7443 #define CHAN2G(_channel, _freq, _flags) { \
7444 	.band			= NL80211_BAND_2GHZ, \
7445 	.hw_value		= (_channel), \
7446 	.center_freq		= (_freq), \
7447 	.flags			= (_flags), \
7448 	.max_antenna_gain	= 0, \
7449 	.max_power		= 30, \
7450 }
7451 
7452 #define CHAN5G(_channel, _freq, _flags) { \
7453 	.band			= NL80211_BAND_5GHZ, \
7454 	.hw_value		= (_channel), \
7455 	.center_freq		= (_freq), \
7456 	.flags			= (_flags), \
7457 	.max_antenna_gain	= 0, \
7458 	.max_power		= 30, \
7459 }
7460 
7461 static const struct ieee80211_channel ath10k_2ghz_channels[] = {
7462 	CHAN2G(1, 2412, 0),
7463 	CHAN2G(2, 2417, 0),
7464 	CHAN2G(3, 2422, 0),
7465 	CHAN2G(4, 2427, 0),
7466 	CHAN2G(5, 2432, 0),
7467 	CHAN2G(6, 2437, 0),
7468 	CHAN2G(7, 2442, 0),
7469 	CHAN2G(8, 2447, 0),
7470 	CHAN2G(9, 2452, 0),
7471 	CHAN2G(10, 2457, 0),
7472 	CHAN2G(11, 2462, 0),
7473 	CHAN2G(12, 2467, 0),
7474 	CHAN2G(13, 2472, 0),
7475 	CHAN2G(14, 2484, 0),
7476 };
7477 
7478 static const struct ieee80211_channel ath10k_5ghz_channels[] = {
7479 	CHAN5G(36, 5180, 0),
7480 	CHAN5G(40, 5200, 0),
7481 	CHAN5G(44, 5220, 0),
7482 	CHAN5G(48, 5240, 0),
7483 	CHAN5G(52, 5260, 0),
7484 	CHAN5G(56, 5280, 0),
7485 	CHAN5G(60, 5300, 0),
7486 	CHAN5G(64, 5320, 0),
7487 	CHAN5G(100, 5500, 0),
7488 	CHAN5G(104, 5520, 0),
7489 	CHAN5G(108, 5540, 0),
7490 	CHAN5G(112, 5560, 0),
7491 	CHAN5G(116, 5580, 0),
7492 	CHAN5G(120, 5600, 0),
7493 	CHAN5G(124, 5620, 0),
7494 	CHAN5G(128, 5640, 0),
7495 	CHAN5G(132, 5660, 0),
7496 	CHAN5G(136, 5680, 0),
7497 	CHAN5G(140, 5700, 0),
7498 	CHAN5G(144, 5720, 0),
7499 	CHAN5G(149, 5745, 0),
7500 	CHAN5G(153, 5765, 0),
7501 	CHAN5G(157, 5785, 0),
7502 	CHAN5G(161, 5805, 0),
7503 	CHAN5G(165, 5825, 0),
7504 };
7505 
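/* Allocate the ieee80211_hw with room for struct ath10k plus the
 * bus-specific private area (priv_size). The ieee80211_ops table is
 * duplicated per device so that it can be patched at runtime, e.g.
 * ath10k_mac_register() clears wake_tx_queue when the firmware lacks
 * pull-push flow control.
 */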
7506 struct ath10k *ath10k_mac_create(size_t priv_size)
7507 {
7508 	struct ieee80211_hw *hw;
7509 	struct ieee80211_ops *ops;
7510 	struct ath10k *ar;
7511 
7512 	ops = kmemdup(&ath10k_ops, sizeof(ath10k_ops), GFP_KERNEL);
7513 	if (!ops)
7514 		return NULL;
7515 
7516 	hw = ieee80211_alloc_hw(sizeof(struct ath10k) + priv_size, ops);
7517 	if (!hw) {
7518 		kfree(ops);
7519 		return NULL;
7520 	}
7521 
7522 	ar = hw->priv;
7523 	ar->hw = hw;
7524 	ar->ops = ops;
7525 
7526 	return ar;
7527 }
7528 
7529 void ath10k_mac_destroy(struct ath10k *ar)
7530 {
7531 	struct ieee80211_ops *ops = ar->ops;
7532 
7533 	ieee80211_free_hw(ar->hw);
7534 	kfree(ops);
7535 }
7536 
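/* Interface concurrency limits advertised to mac80211. Which of the
 * tables below is used depends on the firmware WMI interface selected
 * in ath10k_mac_register().
 */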
7537 static const struct ieee80211_iface_limit ath10k_if_limits[] = {
7538 	{
7539 		.max	= 8,
7540 		.types	= BIT(NL80211_IFTYPE_STATION)
7541 			| BIT(NL80211_IFTYPE_P2P_CLIENT)
7542 	},
7543 	{
7544 		.max	= 3,
7545 		.types	= BIT(NL80211_IFTYPE_P2P_GO)
7546 	},
7547 	{
7548 		.max	= 1,
7549 		.types	= BIT(NL80211_IFTYPE_P2P_DEVICE)
7550 	},
7551 	{
7552 		.max	= 7,
7553 		.types	= BIT(NL80211_IFTYPE_AP)
7554 #ifdef CONFIG_MAC80211_MESH
7555 			| BIT(NL80211_IFTYPE_MESH_POINT)
7556 #endif
7557 	},
7558 };
7559 
7560 static const struct ieee80211_iface_limit ath10k_10x_if_limits[] = {
7561 	{
7562 		.max	= 8,
7563 		.types	= BIT(NL80211_IFTYPE_AP)
7564 #ifdef CONFIG_MAC80211_MESH
7565 			| BIT(NL80211_IFTYPE_MESH_POINT)
7566 #endif
7567 	},
7568 	{
7569 		.max	= 1,
7570 		.types	= BIT(NL80211_IFTYPE_STATION)
7571 	},
7572 };
7573 
7574 static const struct ieee80211_iface_combination ath10k_if_comb[] = {
7575 	{
7576 		.limits = ath10k_if_limits,
7577 		.n_limits = ARRAY_SIZE(ath10k_if_limits),
7578 		.max_interfaces = 8,
7579 		.num_different_channels = 1,
7580 		.beacon_int_infra_match = true,
7581 	},
7582 };
7583 
7584 static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = {
7585 	{
7586 		.limits = ath10k_10x_if_limits,
7587 		.n_limits = ARRAY_SIZE(ath10k_10x_if_limits),
7588 		.max_interfaces = 8,
7589 		.num_different_channels = 1,
7590 		.beacon_int_infra_match = true,
7591 #ifdef CONFIG_ATH10K_DFS_CERTIFIED
7592 		.radar_detect_widths =	BIT(NL80211_CHAN_WIDTH_20_NOHT) |
7593 					BIT(NL80211_CHAN_WIDTH_20) |
7594 					BIT(NL80211_CHAN_WIDTH_40) |
7595 					BIT(NL80211_CHAN_WIDTH_80),
7596 #endif
7597 	},
7598 };
7599 
7600 static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = {
7601 	{
7602 		.max = 2,
7603 		.types = BIT(NL80211_IFTYPE_STATION),
7604 	},
7605 	{
7606 		.max = 2,
7607 		.types = BIT(NL80211_IFTYPE_AP) |
7608 #ifdef CONFIG_MAC80211_MESH
7609 			 BIT(NL80211_IFTYPE_MESH_POINT) |
7610 #endif
7611 			 BIT(NL80211_IFTYPE_P2P_CLIENT) |
7612 			 BIT(NL80211_IFTYPE_P2P_GO),
7613 	},
7614 	{
7615 		.max = 1,
7616 		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
7617 	},
7618 };
7619 
7620 static const struct ieee80211_iface_limit ath10k_tlv_qcs_if_limit[] = {
7621 	{
7622 		.max = 2,
7623 		.types = BIT(NL80211_IFTYPE_STATION),
7624 	},
7625 	{
7626 		.max = 2,
7627 		.types = BIT(NL80211_IFTYPE_P2P_CLIENT),
7628 	},
7629 	{
7630 		.max = 1,
7631 		.types = BIT(NL80211_IFTYPE_AP) |
7632 #ifdef CONFIG_MAC80211_MESH
7633 			 BIT(NL80211_IFTYPE_MESH_POINT) |
7634 #endif
7635 			 BIT(NL80211_IFTYPE_P2P_GO),
7636 	},
7637 	{
7638 		.max = 1,
7639 		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
7640 	},
7641 };
7642 
7643 static const struct ieee80211_iface_limit ath10k_tlv_if_limit_ibss[] = {
7644 	{
7645 		.max = 1,
7646 		.types = BIT(NL80211_IFTYPE_STATION),
7647 	},
7648 	{
7649 		.max = 1,
7650 		.types = BIT(NL80211_IFTYPE_ADHOC),
7651 	},
7652 };
7653 
7654 /* FIXME: This is not thoroughly tested. These combinations may over- or
7655  * underestimate hw/fw capabilities.
7656  */
7657 static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = {
7658 	{
7659 		.limits = ath10k_tlv_if_limit,
7660 		.num_different_channels = 1,
7661 		.max_interfaces = 4,
7662 		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
7663 	},
7664 	{
7665 		.limits = ath10k_tlv_if_limit_ibss,
7666 		.num_different_channels = 1,
7667 		.max_interfaces = 2,
7668 		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
7669 	},
7670 };
7671 
7672 static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = {
7673 	{
7674 		.limits = ath10k_tlv_if_limit,
7675 		.num_different_channels = 1,
7676 		.max_interfaces = 4,
7677 		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
7678 	},
7679 	{
7680 		.limits = ath10k_tlv_qcs_if_limit,
7681 		.num_different_channels = 2,
7682 		.max_interfaces = 4,
7683 		.n_limits = ARRAY_SIZE(ath10k_tlv_qcs_if_limit),
7684 	},
7685 	{
7686 		.limits = ath10k_tlv_if_limit_ibss,
7687 		.num_different_channels = 1,
7688 		.max_interfaces = 2,
7689 		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
7690 	},
7691 };
7692 
7693 static const struct ieee80211_iface_limit ath10k_10_4_if_limits[] = {
7694 	{
7695 		.max = 1,
7696 		.types = BIT(NL80211_IFTYPE_STATION),
7697 	},
7698 	{
7699 		.max	= 16,
7700 		.types	= BIT(NL80211_IFTYPE_AP)
7701 #ifdef CONFIG_MAC80211_MESH
7702 			| BIT(NL80211_IFTYPE_MESH_POINT)
7703 #endif
7704 	},
7705 };
7706 
7707 static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = {
7708 	{
7709 		.limits = ath10k_10_4_if_limits,
7710 		.n_limits = ARRAY_SIZE(ath10k_10_4_if_limits),
7711 		.max_interfaces = 16,
7712 		.num_different_channels = 1,
7713 		.beacon_int_infra_match = true,
7714 #ifdef CONFIG_ATH10K_DFS_CERTIFIED
7715 		.radar_detect_widths =	BIT(NL80211_CHAN_WIDTH_20_NOHT) |
7716 					BIT(NL80211_CHAN_WIDTH_20) |
7717 					BIT(NL80211_CHAN_WIDTH_40) |
7718 					BIT(NL80211_CHAN_WIDTH_80),
7719 #endif
7720 	},
7721 };
7722 
7723 static void ath10k_get_arvif_iter(void *data, u8 *mac,
7724 				  struct ieee80211_vif *vif)
7725 {
7726 	struct ath10k_vif_iter *arvif_iter = data;
7727 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
7728 
7729 	if (arvif->vdev_id == arvif_iter->vdev_id)
7730 		arvif_iter->arvif = arvif;
7731 }
7732 
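/* Map a firmware vdev id back to its driver vif by walking all active
 * interfaces atomically. Returns NULL (and warns) if no interface is
 * currently using that vdev id.
 */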
7733 struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id)
7734 {
7735 	struct ath10k_vif_iter arvif_iter;
7736 	u32 flags;
7737 
7738 	memset(&arvif_iter, 0, sizeof(struct ath10k_vif_iter));
7739 	arvif_iter.vdev_id = vdev_id;
7740 
7741 	flags = IEEE80211_IFACE_ITER_RESUME_ALL;
7742 	ieee80211_iterate_active_interfaces_atomic(ar->hw,
7743 						   flags,
7744 						   ath10k_get_arvif_iter,
7745 						   &arvif_iter);
7746 	if (!arvif_iter.arvif) {
7747 		ath10k_warn(ar, "No VIF found for vdev %d\n", vdev_id);
7748 		return NULL;
7749 	}
7750 
7751 	return arvif_iter.arvif;
7752 }
7753 
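/* Populate wiphy bands, capabilities and interface combinations from
 * the capabilities reported by the running firmware, then register the
 * device with mac80211.
 */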
7754 int ath10k_mac_register(struct ath10k *ar)
7755 {
7756 	static const u32 cipher_suites[] = {
7757 		WLAN_CIPHER_SUITE_WEP40,
7758 		WLAN_CIPHER_SUITE_WEP104,
7759 		WLAN_CIPHER_SUITE_TKIP,
7760 		WLAN_CIPHER_SUITE_CCMP,
7761 		WLAN_CIPHER_SUITE_AES_CMAC,
7762 	};
7763 	struct ieee80211_supported_band *band;
7764 	void *channels;
7765 	int ret;
7766 
7767 	SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);
7768 
7769 	SET_IEEE80211_DEV(ar->hw, ar->dev);
7770 
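	/* The static channel tables must add up to exactly ATH10K_NUM_CHANS,
	 * which other parts of the driver assume.
	 */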
7771 	BUILD_BUG_ON((ARRAY_SIZE(ath10k_2ghz_channels) +
7772 		      ARRAY_SIZE(ath10k_5ghz_channels)) !=
7773 		     ATH10K_NUM_CHANS);
7774 
7775 	if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
7776 		channels = kmemdup(ath10k_2ghz_channels,
7777 				   sizeof(ath10k_2ghz_channels),
7778 				   GFP_KERNEL);
7779 		if (!channels) {
7780 			ret = -ENOMEM;
7781 			goto err_free;
7782 		}
7783 
7784 		band = &ar->mac.sbands[NL80211_BAND_2GHZ];
7785 		band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels);
7786 		band->channels = channels;
7787 
7788 		if (ar->hw_params.cck_rate_map_rev2) {
7789 			band->n_bitrates = ath10k_g_rates_rev2_size;
7790 			band->bitrates = ath10k_g_rates_rev2;
7791 		} else {
7792 			band->n_bitrates = ath10k_g_rates_size;
7793 			band->bitrates = ath10k_g_rates;
7794 		}
7795 
7796 		ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
7797 	}
7798 
7799 	if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
7800 		channels = kmemdup(ath10k_5ghz_channels,
7801 				   sizeof(ath10k_5ghz_channels),
7802 				   GFP_KERNEL);
7803 		if (!channels) {
7804 			ret = -ENOMEM;
7805 			goto err_free;
7806 		}
7807 
7808 		band = &ar->mac.sbands[NL80211_BAND_5GHZ];
7809 		band->n_channels = ARRAY_SIZE(ath10k_5ghz_channels);
7810 		band->channels = channels;
7811 		band->n_bitrates = ath10k_a_rates_size;
7812 		band->bitrates = ath10k_a_rates;
7813 		ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
7814 	}
7815 
7816 	ath10k_mac_setup_ht_vht_cap(ar);
7817 
7818 	ar->hw->wiphy->interface_modes =
7819 		BIT(NL80211_IFTYPE_STATION) |
7820 		BIT(NL80211_IFTYPE_AP) |
7821 		BIT(NL80211_IFTYPE_MESH_POINT);
7822 
7823 	ar->hw->wiphy->available_antennas_rx = ar->cfg_rx_chainmask;
7824 	ar->hw->wiphy->available_antennas_tx = ar->cfg_tx_chainmask;
7825 
7826 	if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->normal_mode_fw.fw_file.fw_features))
7827 		ar->hw->wiphy->interface_modes |=
7828 			BIT(NL80211_IFTYPE_P2P_DEVICE) |
7829 			BIT(NL80211_IFTYPE_P2P_CLIENT) |
7830 			BIT(NL80211_IFTYPE_P2P_GO);
7831 
7832 	ieee80211_hw_set(ar->hw, SIGNAL_DBM);
7833 	ieee80211_hw_set(ar->hw, SUPPORTS_PS);
7834 	ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
7835 	ieee80211_hw_set(ar->hw, MFP_CAPABLE);
7836 	ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS);
7837 	ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
7838 	ieee80211_hw_set(ar->hw, AP_LINK_PS);
7839 	ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
7840 	ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
7841 	ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
7842 	ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
7843 	ieee80211_hw_set(ar->hw, WANT_MONITOR_VIF);
7844 	ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
7845 	ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
7846 
7847 	if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
7848 		ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
7849 
7850 	ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
7851 	ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
7852 
7853 	if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
7854 		ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
7855 
7856 	if (ar->ht_cap_info & WMI_HT_CAP_ENABLED) {
7857 		ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION);
7858 		ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW);
7859 	}
7860 
7861 	ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
7862 	ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
7863 
7864 	ar->hw->vif_data_size = sizeof(struct ath10k_vif);
7865 	ar->hw->sta_data_size = sizeof(struct ath10k_sta);
7866 	ar->hw->txq_data_size = sizeof(struct ath10k_txq);
7867 
7868 	ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;
7869 
7870 	if (test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) {
7871 		ar->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
7872 
7873 		/* Firmware delivers WPS/P2P Probe Request frames to the driver so
7874 		 * that userspace (e.g. wpa_supplicant/hostapd) can generate
7875 		 * correct Probe Responses.
7876 		 */
7877 		ar->hw->wiphy->probe_resp_offload |=
7878 			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
7879 			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
7880 			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
7881 	}
7882 
7883 	if (test_bit(WMI_SERVICE_TDLS, ar->wmi.svc_map))
7884 		ar->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
7885 
7886 	ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
7887 	ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
7888 	ar->hw->wiphy->max_remain_on_channel_duration = 5000;
7889 
7890 	ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
7891 	ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
7892 				   NL80211_FEATURE_AP_SCAN;
7893 
7894 	ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;
7895 
7896 	ret = ath10k_wow_init(ar);
7897 	if (ret) {
7898 		ath10k_warn(ar, "failed to init wow: %d\n", ret);
7899 		goto err_free;
7900 	}
7901 
7902 	wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
7903 
7904 	/* On low-latency (LL) hardware the queues are managed entirely by the
7905 	 * firmware, so all we advertise to mac80211 is the maximum number of
7906 	 * hardware queues it may use.
7907 	 */
7908 	ar->hw->queues = IEEE80211_MAX_QUEUES;
7909 
7910 	/* vdev_ids are used as hw queue numbers. Make sure offchan tx queue is
7911 	 * something that vdev_ids can't reach so that we don't stop the queue
7912 	 * accidentally.
7913 	 */
7914 	ar->hw->offchannel_tx_hw_queue = IEEE80211_MAX_QUEUES - 1;
7915 
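	/* The advertised interface combinations depend on the firmware WMI
	 * interface: main and TLV firmware also get IBSS (ADHOC) support, TLV
	 * firmware with adaptive OCS gets the QCS tables, while 10.x and 10.4
	 * firmware only register their AP-centric combination tables.
	 */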
7916 	switch (ar->running_fw->fw_file.wmi_op_version) {
7917 	case ATH10K_FW_WMI_OP_VERSION_MAIN:
7918 		ar->hw->wiphy->iface_combinations = ath10k_if_comb;
7919 		ar->hw->wiphy->n_iface_combinations =
7920 			ARRAY_SIZE(ath10k_if_comb);
7921 		ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
7922 		break;
7923 	case ATH10K_FW_WMI_OP_VERSION_TLV:
7924 		if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
7925 			ar->hw->wiphy->iface_combinations =
7926 				ath10k_tlv_qcs_if_comb;
7927 			ar->hw->wiphy->n_iface_combinations =
7928 				ARRAY_SIZE(ath10k_tlv_qcs_if_comb);
7929 		} else {
7930 			ar->hw->wiphy->iface_combinations = ath10k_tlv_if_comb;
7931 			ar->hw->wiphy->n_iface_combinations =
7932 				ARRAY_SIZE(ath10k_tlv_if_comb);
7933 		}
7934 		ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
7935 		break;
7936 	case ATH10K_FW_WMI_OP_VERSION_10_1:
7937 	case ATH10K_FW_WMI_OP_VERSION_10_2:
7938 	case ATH10K_FW_WMI_OP_VERSION_10_2_4:
7939 		ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb;
7940 		ar->hw->wiphy->n_iface_combinations =
7941 			ARRAY_SIZE(ath10k_10x_if_comb);
7942 		break;
7943 	case ATH10K_FW_WMI_OP_VERSION_10_4:
7944 		ar->hw->wiphy->iface_combinations = ath10k_10_4_if_comb;
7945 		ar->hw->wiphy->n_iface_combinations =
7946 			ARRAY_SIZE(ath10k_10_4_if_comb);
7947 		break;
7948 	case ATH10K_FW_WMI_OP_VERSION_UNSET:
7949 	case ATH10K_FW_WMI_OP_VERSION_MAX:
7950 		WARN_ON(1);
7951 		ret = -EINVAL;
7952 		goto err_free;
7953 	}
7954 
7955 	if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
7956 		ar->hw->netdev_features = NETIF_F_HW_CSUM;
7957 
7958 	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) {
7959 		/* Init ath dfs pattern detector */
7960 		ar->ath_common.debug_mask = ATH_DBG_DFS;
7961 		ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common,
7962 							     NL80211_DFS_UNSET);
7963 
7964 		if (!ar->dfs_detector)
7965 			ath10k_warn(ar, "failed to initialise DFS pattern detector\n");
7966 	}
7967 
7968 	/* Current wake_tx_queue implementation imposes a significant
7969 	 * performance penalty in some setups. The tx scheduling code needs
7970 	 * more work anyway so disable the wake_tx_queue unless firmware
7971 	 * supports the pull-push mechanism.
7972 	 */
7973 	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
7974 		      ar->running_fw->fw_file.fw_features))
7975 		ar->ops->wake_tx_queue = NULL;
7976 
7977 	ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
7978 			    ath10k_reg_notifier);
7979 	if (ret) {
7980 		ath10k_err(ar, "failed to initialise regulatory: %i\n", ret);
7981 		goto err_dfs_detector_exit;
7982 	}
7983 
7984 	ar->hw->wiphy->cipher_suites = cipher_suites;
7985 	ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
7986 
7987 	ret = ieee80211_register_hw(ar->hw);
7988 	if (ret) {
7989 		ath10k_err(ar, "failed to register ieee80211: %d\n", ret);
7990 		goto err_dfs_detector_exit;
7991 	}
7992 
7993 	if (!ath_is_world_regd(&ar->ath_common.regulatory)) {
7994 		ret = regulatory_hint(ar->hw->wiphy,
7995 				      ar->ath_common.regulatory.alpha2);
7996 		if (ret)
7997 			goto err_unregister;
7998 	}
7999 
8000 	return 0;
8001 
8002 err_unregister:
8003 	ieee80211_unregister_hw(ar->hw);
8004 
8005 err_dfs_detector_exit:
8006 	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
8007 		ar->dfs_detector->exit(ar->dfs_detector);
8008 
8009 err_free:
8010 	kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
8011 	kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
8012 
8013 	SET_IEEE80211_DEV(ar->hw, NULL);
8014 	return ret;
8015 }
8016 
8017 void ath10k_mac_unregister(struct ath10k *ar)
8018 {
8019 	ieee80211_unregister_hw(ar->hw);
8020 
8021 	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
8022 		ar->dfs_detector->exit(ar->dfs_detector);
8023 
8024 	kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
8025 	kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
8026 
8027 	SET_IEEE80211_DEV(ar->hw, NULL);
8028 }
8029