xref: /openbmc/linux/drivers/net/wireless/intel/iwlwifi/mvm/sta.c (revision 943126417891372d56aa3fe46295cbf53db31370)
1 /******************************************************************************
2  *
3  * This file is provided under a dual BSD/GPLv2 license.  When using or
4  * redistributing this file, you may do so under either license.
5  *
6  * GPL LICENSE SUMMARY
7  *
8  * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
9  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
11  * Copyright(c) 2018 Intel Corporation
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of version 2 of the GNU General Public License as
15  * published by the Free Software Foundation.
16  *
17  * This program is distributed in the hope that it will be useful, but
18  * WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
20  * General Public License for more details.
21  *
22  * The full GNU General Public License is included in this distribution
23  * in the file called COPYING.
24  *
25  * Contact Information:
26  *  Intel Linux Wireless <linuxwifi@intel.com>
27  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
28  *
29  * BSD LICENSE
30  *
31  * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
32  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
33  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
34  * Copyright(c) 2018 Intel Corporation
35  * All rights reserved.
36  *
37  * Redistribution and use in source and binary forms, with or without
38  * modification, are permitted provided that the following conditions
39  * are met:
40  *
41  *  * Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  *  * Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in
45  *    the documentation and/or other materials provided with the
46  *    distribution.
47  *  * Neither the name Intel Corporation nor the names of its
48  *    contributors may be used to endorse or promote products derived
49  *    from this software without specific prior written permission.
50  *
51  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
52  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
53  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
54  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
55  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
56  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
57  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
58  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
59  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
60  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
61  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
62  *
63  *****************************************************************************/
64 #include <net/mac80211.h>
65 
66 #include "mvm.h"
67 #include "sta.h"
68 #include "rs.h"
69 
70 static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm);
71 
72 static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
73 				u32 sta_id,
74 				struct ieee80211_key_conf *key, bool mcast,
75 				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
76 				u8 key_offset, bool mfp);
77 
78 /*
79  * Newer versions of the ADD_STA command added new fields at the end of the
80  * structure, so sending the size of the relevant API's structure is enough to
81  * support both API versions.
82  */
83 static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
84 {
85 	if (iwl_mvm_has_new_rx_api(mvm) ||
86 	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
87 		return sizeof(struct iwl_mvm_add_sta_cmd);
88 	else
89 		return sizeof(struct iwl_mvm_add_sta_cmd_v7);
90 }
91 
92 static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
93 				    enum nl80211_iftype iftype)
94 {
95 	int sta_id;
96 	u32 reserved_ids = 0;
97 
98 	BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
99 	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));
100 
101 	lockdep_assert_held(&mvm->mutex);
102 
103 	/* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. Reserve it. */
104 	if (iftype != NL80211_IFTYPE_STATION)
105 		reserved_ids = BIT(0);
106 
107 	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
108 	for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
109 		if (BIT(sta_id) & reserved_ids)
110 			continue;
111 
112 		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
113 					       lockdep_is_held(&mvm->mutex)))
114 			return sta_id;
115 	}
116 	return IWL_MVM_INVALID_STA;
117 }
118 
119 /* send station add/update command to firmware */
120 int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
121 			   bool update, unsigned int flags)
122 {
123 	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
124 	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
125 		.sta_id = mvm_sta->sta_id,
126 		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
127 		.add_modify = update ? 1 : 0,
128 		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
129 						 STA_FLG_MIMO_EN_MSK |
130 						 STA_FLG_RTS_MIMO_PROT),
131 		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
132 	};
133 	int ret;
134 	u32 status;
135 	u32 agg_size = 0, mpdu_dens = 0;
136 
137 	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
138 		add_sta_cmd.station_type = mvm_sta->sta_type;
139 
140 	if (!update || (flags & STA_MODIFY_QUEUES)) {
141 		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);
142 
143 		if (!iwl_mvm_has_new_tx_api(mvm)) {
144 			add_sta_cmd.tfd_queue_msk =
145 				cpu_to_le32(mvm_sta->tfd_queue_msk);
146 
147 			if (flags & STA_MODIFY_QUEUES)
148 				add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
149 		} else {
150 			WARN_ON(flags & STA_MODIFY_QUEUES);
151 		}
152 	}
153 
154 	switch (sta->bandwidth) {
155 	case IEEE80211_STA_RX_BW_160:
156 		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
157 		/* fall through */
158 	case IEEE80211_STA_RX_BW_80:
159 		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
160 		/* fall through */
161 	case IEEE80211_STA_RX_BW_40:
162 		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
163 		/* fall through */
164 	case IEEE80211_STA_RX_BW_20:
165 		if (sta->ht_cap.ht_supported)
166 			add_sta_cmd.station_flags |=
167 				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
168 		break;
169 	}
170 
171 	switch (sta->rx_nss) {
172 	case 1:
173 		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
174 		break;
175 	case 2:
176 		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
177 		break;
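	/*
	 * rx_nss above 2: presumably MIMO3 is the highest setting the
	 * station flags can express, so it is used for 3-8 streams alike.
	 */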
178 	case 3 ... 8:
179 		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
180 		break;
181 	}
182 
183 	switch (sta->smps_mode) {
184 	case IEEE80211_SMPS_AUTOMATIC:
185 	case IEEE80211_SMPS_NUM_MODES:
186 		WARN_ON(1);
187 		break;
188 	case IEEE80211_SMPS_STATIC:
189 		/* override NSS */
190 		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
191 		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
192 		break;
193 	case IEEE80211_SMPS_DYNAMIC:
194 		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
195 		break;
196 	case IEEE80211_SMPS_OFF:
197 		/* nothing */
198 		break;
199 	}
200 
201 	if (sta->ht_cap.ht_supported) {
202 		add_sta_cmd.station_flags_msk |=
203 			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
204 				    STA_FLG_AGG_MPDU_DENS_MSK);
205 
206 		mpdu_dens = sta->ht_cap.ampdu_density;
207 	}
208 
209 	if (sta->vht_cap.vht_supported) {
210 		agg_size = sta->vht_cap.cap &
211 			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
212 		agg_size >>=
213 			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
214 	} else if (sta->ht_cap.ht_supported) {
215 		agg_size = sta->ht_cap.ampdu_factor;
216 	}
217 
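	/* Pack the aggregation size exponent and MPDU density chosen above */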
218 	add_sta_cmd.station_flags |=
219 		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
220 	add_sta_cmd.station_flags |=
221 		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
222 	if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC)
223 		add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);
224 
225 	if (sta->wme) {
226 		add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;
227 
228 		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
229 			add_sta_cmd.uapsd_acs |= BIT(AC_BK);
230 		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
231 			add_sta_cmd.uapsd_acs |= BIT(AC_BE);
232 		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
233 			add_sta_cmd.uapsd_acs |= BIT(AC_VI);
234 		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
235 			add_sta_cmd.uapsd_acs |= BIT(AC_VO);
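		/*
		 * The trigger-enabled ACs were collected in the low nibble;
		 * mirroring them into the high nibble presumably fills in the
		 * delivery-enabled ACs the firmware expects. max_sp encodes
		 * the service period in pairs of frames, with 0 meaning "all
		 * buffered frames", which is mapped to 128 below.
		 */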
236 		add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
237 		add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
238 	}
239 
240 	status = ADD_STA_SUCCESS;
241 	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
242 					  iwl_mvm_add_sta_cmd_size(mvm),
243 					  &add_sta_cmd, &status);
244 	if (ret)
245 		return ret;
246 
247 	switch (status & IWL_ADD_STA_STATUS_MASK) {
248 	case ADD_STA_SUCCESS:
249 		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
250 		break;
251 	default:
252 		ret = -EIO;
253 		IWL_ERR(mvm, "ADD_STA failed\n");
254 		break;
255 	}
256 
257 	return ret;
258 }
259 
260 static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
261 {
262 	struct iwl_mvm_baid_data *data =
263 		from_timer(data, t, session_timer);
264 	struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
265 	struct iwl_mvm_baid_data *ba_data;
266 	struct ieee80211_sta *sta;
267 	struct iwl_mvm_sta *mvm_sta;
268 	unsigned long timeout;
269 
270 	rcu_read_lock();
271 
272 	ba_data = rcu_dereference(*rcu_ptr);
273 
274 	if (WARN_ON(!ba_data))
275 		goto unlock;
276 
277 	if (!ba_data->timeout)
278 		goto unlock;
279 
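	/*
	 * Be lenient: allow up to twice the negotiated BA timeout since the
	 * last Rx before tearing the session down; if frames arrived within
	 * that window, just push the timer out and check again later.
	 */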
280 	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
281 	if (time_is_after_jiffies(timeout)) {
282 		mod_timer(&ba_data->session_timer, timeout);
283 		goto unlock;
284 	}
285 
286 	/* Timer expired */
287 	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);
288 
289 	/*
290 	 * sta should be valid unless the following happens:
291 	 * The firmware asserts, which triggers a reconfig flow, but
292 	 * the reconfig fails before we set the pointer to sta into
293 	 * the fw_id_to_mac_id pointer table. Mac80211 can't stop
294 	 * A-MPDU and hence the timer continues to run. Then, the
295 	 * timer expires and sta is NULL.
296 	 */
297 	if (!sta)
298 		goto unlock;
299 
300 	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
301 	ieee80211_rx_ba_timer_expired(mvm_sta->vif,
302 				      sta->addr, ba_data->tid);
303 unlock:
304 	rcu_read_unlock();
305 }
306 
307 /* Disable aggregations for a bitmap of TIDs for a given station */
308 static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
309 					unsigned long disable_agg_tids,
310 					bool remove_queue)
311 {
312 	struct iwl_mvm_add_sta_cmd cmd = {};
313 	struct ieee80211_sta *sta;
314 	struct iwl_mvm_sta *mvmsta;
315 	u32 status;
316 	u8 sta_id;
317 	int ret;
318 
319 	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
320 		return -EINVAL;
321 
322 	spin_lock_bh(&mvm->queue_info_lock);
323 	sta_id = mvm->queue_info[queue].ra_sta_id;
324 	spin_unlock_bh(&mvm->queue_info_lock);
325 
326 	rcu_read_lock();
327 
328 	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
329 
330 	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
331 		rcu_read_unlock();
332 		return -EINVAL;
333 	}
334 
335 	mvmsta = iwl_mvm_sta_from_mac80211(sta);
336 
337 	mvmsta->tid_disable_agg |= disable_agg_tids;
338 
339 	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
340 	cmd.sta_id = mvmsta->sta_id;
341 	cmd.add_modify = STA_MODE_MODIFY;
342 	cmd.modify_mask = STA_MODIFY_QUEUES;
343 	if (disable_agg_tids)
344 		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
345 	if (remove_queue)
346 		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
347 	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
348 	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
349 
350 	rcu_read_unlock();
351 
352 	/* Notify FW of queue removal from the STA queues */
353 	status = ADD_STA_SUCCESS;
354 	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
355 					  iwl_mvm_add_sta_cmd_size(mvm),
356 					  &cmd, &status);
357 
358 	return ret;
359 }
360 
361 static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue,
362 			       int mac80211_queue, u8 tid, u8 flags)
363 {
364 	struct iwl_scd_txq_cfg_cmd cmd = {
365 		.scd_queue = queue,
366 		.action = SCD_CFG_DISABLE_QUEUE,
367 	};
368 	bool remove_mac_queue = mac80211_queue != IEEE80211_INVAL_HW_QUEUE;
369 	int ret;
370 
371 	if (WARN_ON(remove_mac_queue && mac80211_queue >= IEEE80211_MAX_QUEUES))
372 		return -EINVAL;
373 
374 	if (iwl_mvm_has_new_tx_api(mvm)) {
375 		spin_lock_bh(&mvm->queue_info_lock);
376 
377 		if (remove_mac_queue)
378 			mvm->hw_queue_to_mac80211[queue] &=
379 				~BIT(mac80211_queue);
380 
381 		spin_unlock_bh(&mvm->queue_info_lock);
382 
383 		iwl_trans_txq_free(mvm->trans, queue);
384 
385 		return 0;
386 	}
387 
388 	spin_lock_bh(&mvm->queue_info_lock);
389 
390 	if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0)) {
391 		spin_unlock_bh(&mvm->queue_info_lock);
392 		return 0;
393 	}
394 
395 	mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
396 
397 	/*
398 	 * If there is another TID with the same AC - don't remove the MAC queue
399 	 * from the mapping
400 	 */
401 	if (tid < IWL_MAX_TID_COUNT) {
402 		unsigned long tid_bitmap =
403 			mvm->queue_info[queue].tid_bitmap;
404 		int ac = tid_to_mac80211_ac[tid];
405 		int i;
406 
407 		for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) {
408 			if (tid_to_mac80211_ac[i] == ac)
409 				remove_mac_queue = false;
410 		}
411 	}
412 
413 	if (remove_mac_queue)
414 		mvm->hw_queue_to_mac80211[queue] &=
415 			~BIT(mac80211_queue);
416 
417 	cmd.action = mvm->queue_info[queue].tid_bitmap ?
418 		SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
419 	if (cmd.action == SCD_CFG_DISABLE_QUEUE)
420 		mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;
421 
422 	IWL_DEBUG_TX_QUEUES(mvm,
423 			    "Disabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n",
424 			    queue,
425 			    mvm->queue_info[queue].tid_bitmap,
426 			    mvm->hw_queue_to_mac80211[queue]);
427 
428 	/* If the queue is still enabled - nothing left to do in this func */
429 	if (cmd.action == SCD_CFG_ENABLE_QUEUE) {
430 		spin_unlock_bh(&mvm->queue_info_lock);
431 		return 0;
432 	}
433 
434 	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
435 	cmd.tid = mvm->queue_info[queue].txq_tid;
436 
437 	/* Make sure queue info is correct even though we overwrite it */
438 	WARN(mvm->queue_info[queue].tid_bitmap ||
439 	     mvm->hw_queue_to_mac80211[queue],
440 	     "TXQ #%d info out-of-sync - mac map=0x%x, tids=0x%x\n",
441 	     queue, mvm->hw_queue_to_mac80211[queue],
442 	     mvm->queue_info[queue].tid_bitmap);
443 
444 	/* If we are here - the queue is freed and we can zero out these vals */
445 	mvm->queue_info[queue].tid_bitmap = 0;
446 	mvm->hw_queue_to_mac80211[queue] = 0;
447 
448 	/* Regardless of whether this is a reserved TXQ for a STA - mark it as not reserved */
449 	mvm->queue_info[queue].reserved = false;
450 
451 	spin_unlock_bh(&mvm->queue_info_lock);
452 
453 	iwl_trans_txq_disable(mvm->trans, queue, false);
454 	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
455 				   sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);
456 
457 	if (ret)
458 		IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
459 			queue, ret);
460 	return ret;
461 }
462 
463 static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
464 {
465 	struct ieee80211_sta *sta;
466 	struct iwl_mvm_sta *mvmsta;
467 	unsigned long tid_bitmap;
468 	unsigned long agg_tids = 0;
469 	u8 sta_id;
470 	int tid;
471 
472 	lockdep_assert_held(&mvm->mutex);
473 
474 	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
475 		return -EINVAL;
476 
477 	spin_lock_bh(&mvm->queue_info_lock);
478 	sta_id = mvm->queue_info[queue].ra_sta_id;
479 	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
480 	spin_unlock_bh(&mvm->queue_info_lock);
481 
482 	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
483 					lockdep_is_held(&mvm->mutex));
484 
485 	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
486 		return -EINVAL;
487 
488 	mvmsta = iwl_mvm_sta_from_mac80211(sta);
489 
490 	spin_lock_bh(&mvmsta->lock);
491 	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
492 		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
493 			agg_tids |= BIT(tid);
494 	}
495 	spin_unlock_bh(&mvmsta->lock);
496 
497 	return agg_tids;
498 }
499 
500 /*
501  * Remove a queue from a station's resources.
502  * Note that this only marks the queue as free. It DOESN'T delete a BA agreement, and
503  * doesn't disable the queue
504  */
505 static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
506 {
507 	struct ieee80211_sta *sta;
508 	struct iwl_mvm_sta *mvmsta;
509 	unsigned long tid_bitmap;
510 	unsigned long disable_agg_tids = 0;
511 	u8 sta_id;
512 	int tid;
513 
514 	lockdep_assert_held(&mvm->mutex);
515 
516 	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
517 		return -EINVAL;
518 
519 	spin_lock_bh(&mvm->queue_info_lock);
520 	sta_id = mvm->queue_info[queue].ra_sta_id;
521 	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
522 	spin_unlock_bh(&mvm->queue_info_lock);
523 
524 	rcu_read_lock();
525 
526 	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
527 
528 	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
529 		rcu_read_unlock();
530 		return 0;
531 	}
532 
533 	mvmsta = iwl_mvm_sta_from_mac80211(sta);
534 
535 	spin_lock_bh(&mvmsta->lock);
536 	/* Unmap MAC queues and TIDs from this queue */
537 	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
538 		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
539 			disable_agg_tids |= BIT(tid);
540 		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
541 	}
542 
543 	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
544 	spin_unlock_bh(&mvmsta->lock);
545 
546 	rcu_read_unlock();
547 
548 	return disable_agg_tids;
549 }
550 
551 static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
552 				       u8 new_sta_id)
553 {
554 	struct iwl_mvm_sta *mvmsta;
555 	u8 txq_curr_ac, sta_id, tid;
556 	unsigned long disable_agg_tids = 0;
557 	bool same_sta;
558 	int ret;
559 
560 	lockdep_assert_held(&mvm->mutex);
561 
562 	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
563 		return -EINVAL;
564 
565 	spin_lock_bh(&mvm->queue_info_lock);
566 	txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
567 	sta_id = mvm->queue_info[queue].ra_sta_id;
568 	tid = mvm->queue_info[queue].txq_tid;
569 	spin_unlock_bh(&mvm->queue_info_lock);
570 
571 	same_sta = sta_id == new_sta_id;
572 
573 	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
574 	if (WARN_ON(!mvmsta))
575 		return -EINVAL;
576 
577 	disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
578 	/* Disable the queue */
579 	if (disable_agg_tids)
580 		iwl_mvm_invalidate_sta_queue(mvm, queue,
581 					     disable_agg_tids, false);
582 
583 	ret = iwl_mvm_disable_txq(mvm, queue,
584 				  mvmsta->vif->hw_queue[txq_curr_ac],
585 				  tid, 0);
586 	if (ret) {
587 		IWL_ERR(mvm,
588 			"Failed to free inactive queue %d (ret=%d)\n",
589 			queue, ret);
590 
591 		return ret;
592 	}
593 
594 	/* If TXQ is allocated to another STA, update removal in FW */
595 	if (!same_sta)
596 		iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);
597 
598 	return 0;
599 }
600 
601 static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
602 				    unsigned long tfd_queue_mask, u8 ac)
603 {
604 	int queue = 0;
605 	u8 ac_to_queue[IEEE80211_NUM_ACS];
606 	int i;
607 
608 	/*
609 	 * This protects us against grabbing a queue that's being reconfigured
610 	 * by the inactivity checker.
611 	 */
612 	lockdep_assert_held(&mvm->mutex);
613 	lockdep_assert_held(&mvm->queue_info_lock);
614 
615 	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
616 		return -EINVAL;
617 
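	/*
	 * Fill every entry with the "invalid" marker; this works because
	 * ac_to_queue is an array of u8 and memset() writes the fill value
	 * into each byte.
	 */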
618 	memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));
619 
620 	/* See what ACs the existing queues for this STA have */
621 	for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
622 		/* Only DATA queues can be shared */
623 		if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
624 		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
625 			continue;
626 
627 		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
628 	}
629 
630 	/*
631 	 * The queue to share is chosen only from DATA queues as follows (in
632 	 * descending priority):
633 	 * 1. An AC_BE queue
634 	 * 2. Same AC queue
635 	 * 3. Highest AC queue that is lower than new AC
636 	 * 4. Any existing AC (there always is at least 1 DATA queue)
637 	 */
638 
639 	/* Priority 1: An AC_BE queue */
640 	if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
641 		queue = ac_to_queue[IEEE80211_AC_BE];
642 	/* Priority 2: Same AC queue */
643 	else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
644 		queue = ac_to_queue[ac];
645 	/* Priority 3a: If new AC is VO and VI exists - use VI */
646 	else if (ac == IEEE80211_AC_VO &&
647 		 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
648 		queue = ac_to_queue[IEEE80211_AC_VI];
649 	/* Priority 3b: No BE, so the only AC lower than the new one is BK */
650 	else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
651 		queue = ac_to_queue[IEEE80211_AC_BK];
652 	/* Priority 4a: No BE nor BK - use VI if exists */
653 	else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
654 		queue = ac_to_queue[IEEE80211_AC_VI];
655 	/* Priority 4b: No BE, BK nor VI - use VO if exists */
656 	else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
657 		queue = ac_to_queue[IEEE80211_AC_VO];
658 
659 	/* Make sure queue found (or not) is legal */
660 	if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
661 	    !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
662 	    (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
663 		IWL_ERR(mvm, "No DATA queues available to share\n");
664 		return -ENOSPC;
665 	}
666 
667 	return queue;
668 }
669 
670 /*
671  * If a given queue has a higher AC than the TID stream that is being compared
672  * to, the queue needs to be redirected to the lower AC. This function does that
673  * in such a case; otherwise, if no redirection is required, it does nothing
674  * unless the %force param is true.
675  */
676 static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
677 				      int ac, int ssn, unsigned int wdg_timeout,
678 				      bool force)
679 {
680 	struct iwl_scd_txq_cfg_cmd cmd = {
681 		.scd_queue = queue,
682 		.action = SCD_CFG_DISABLE_QUEUE,
683 	};
684 	bool shared_queue;
685 	unsigned long mq;
686 	int ret;
687 
688 	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
689 		return -EINVAL;
690 
691 	/*
692 	 * If the AC is lower than current one - FIFO needs to be redirected to
693 	 * the lowest one of the streams in the queue. Check if this is needed
694 	 * here.
695 	 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK has
696 	 * value 3 and VO has value 0; to check if ac X is lower than ac Y,
697 	 * we need to check if the numerical value of X is LARGER than that of Y.
698 	 */
699 	spin_lock_bh(&mvm->queue_info_lock);
700 	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
701 		spin_unlock_bh(&mvm->queue_info_lock);
702 
703 		IWL_DEBUG_TX_QUEUES(mvm,
704 				    "No redirection needed on TXQ #%d\n",
705 				    queue);
706 		return 0;
707 	}
708 
709 	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
710 	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
711 	cmd.tid = mvm->queue_info[queue].txq_tid;
712 	mq = mvm->hw_queue_to_mac80211[queue];
713 	shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;
714 	spin_unlock_bh(&mvm->queue_info_lock);
715 
716 	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
717 			    queue, iwl_mvm_ac_to_tx_fifo[ac]);
718 
719 	/* Stop MAC queues and wait for this queue to empty */
720 	iwl_mvm_stop_mac_queues(mvm, mq);
721 	ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
722 	if (ret) {
723 		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
724 			queue);
725 		ret = -EIO;
726 		goto out;
727 	}
728 
729 	/* Before redirecting the queue we need to de-activate it */
730 	iwl_trans_txq_disable(mvm->trans, queue, false);
731 	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
732 	if (ret)
733 		IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
734 			ret);
735 
736 	/* Make sure the SCD wrptr is correctly set before reconfiguring */
737 	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);
738 
739 	/* Update the TID "owner" of the queue */
740 	spin_lock_bh(&mvm->queue_info_lock);
741 	mvm->queue_info[queue].txq_tid = tid;
742 	spin_unlock_bh(&mvm->queue_info_lock);
743 
744 	/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */
745 
746 	/* Redirect to lower AC */
747 	iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
748 			     cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);
749 
750 	/* Update AC marking of the queue */
751 	spin_lock_bh(&mvm->queue_info_lock);
752 	mvm->queue_info[queue].mac80211_ac = ac;
753 	spin_unlock_bh(&mvm->queue_info_lock);
754 
755 	/*
756 	 * Mark queue as shared in transport if shared
757 	 * Note this has to be done after queue enablement because enablement
758 	 * can also set this value, and there is no indication there of shared
759 	 * queues
760 	 */
761 	if (shared_queue)
762 		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
763 
764 out:
765 	/* Continue using the MAC queues */
766 	iwl_mvm_start_mac_queues(mvm, mq);
767 
768 	return ret;
769 }
770 
771 static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
772 				   u8 minq, u8 maxq)
773 {
774 	int i;
775 
776 	lockdep_assert_held(&mvm->queue_info_lock);
777 
778 	/* This should not be hit with new TX path */
779 	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
780 		return -ENOSPC;
781 
782 	/* Start by looking for a free queue */
783 	for (i = minq; i <= maxq; i++)
784 		if (mvm->queue_info[i].tid_bitmap == 0 &&
785 		    mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
786 			return i;
787 
788 	return -ENOSPC;
789 }
790 
791 static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
792 				   u8 sta_id, u8 tid, unsigned int timeout)
793 {
794 	int queue, size = IWL_DEFAULT_QUEUE_SIZE;
795 
796 	if (tid == IWL_MAX_TID_COUNT) {
797 		tid = IWL_MGMT_TID;
798 		size = IWL_MGMT_QUEUE_SIZE;
799 	}
800 	queue = iwl_trans_txq_alloc(mvm->trans,
801 				    cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
802 				    sta_id, tid, SCD_QUEUE_CFG, size, timeout);
803 
804 	if (queue < 0) {
805 		IWL_DEBUG_TX_QUEUES(mvm,
806 				    "Failed allocating TXQ for sta %d tid %d, ret: %d\n",
807 				    sta_id, tid, queue);
808 		return queue;
809 	}
810 
811 	IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
812 			    queue, sta_id, tid);
813 
814 	mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
815 	IWL_DEBUG_TX_QUEUES(mvm,
816 			    "Enabling TXQ #%d (mac80211 map:0x%x)\n",
817 			    queue, mvm->hw_queue_to_mac80211[queue]);
818 
819 	return queue;
820 }
821 
822 static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
823 					struct ieee80211_sta *sta, u8 ac,
824 					int tid)
825 {
826 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
827 	unsigned int wdg_timeout =
828 		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
829 	u8 mac_queue = mvmsta->vif->hw_queue[ac];
830 	int queue = -1;
831 
832 	lockdep_assert_held(&mvm->mutex);
833 
834 	IWL_DEBUG_TX_QUEUES(mvm,
835 			    "Allocating queue for sta %d on tid %d\n",
836 			    mvmsta->sta_id, tid);
837 	queue = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, mvmsta->sta_id, tid,
838 					wdg_timeout);
839 	if (queue < 0)
840 		return queue;
841 
842 	IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);
843 
844 	spin_lock_bh(&mvmsta->lock);
845 	mvmsta->tid_data[tid].txq_id = queue;
846 	spin_unlock_bh(&mvmsta->lock);
847 
848 	return 0;
849 }
850 
851 static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
852 				       int mac80211_queue, u8 sta_id, u8 tid)
853 {
854 	bool enable_queue = true;
855 
856 	spin_lock_bh(&mvm->queue_info_lock);
857 
858 	/* Make sure this TID isn't already enabled */
859 	if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
860 		spin_unlock_bh(&mvm->queue_info_lock);
861 		IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
862 			queue, tid);
863 		return false;
864 	}
865 
866 	/* Update mappings and refcounts */
867 	if (mvm->queue_info[queue].tid_bitmap)
868 		enable_queue = false;
869 
870 	if (mac80211_queue != IEEE80211_INVAL_HW_QUEUE) {
871 		WARN(mac80211_queue >=
872 		     BITS_PER_BYTE * sizeof(mvm->hw_queue_to_mac80211[0]),
873 		     "cannot track mac80211 queue %d (queue %d, sta %d, tid %d)\n",
874 		     mac80211_queue, queue, sta_id, tid);
875 		mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
876 	}
877 
878 	mvm->queue_info[queue].tid_bitmap |= BIT(tid);
879 	mvm->queue_info[queue].ra_sta_id = sta_id;
880 
881 	if (enable_queue) {
882 		if (tid != IWL_MAX_TID_COUNT)
883 			mvm->queue_info[queue].mac80211_ac =
884 				tid_to_mac80211_ac[tid];
885 		else
886 			mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;
887 
888 		mvm->queue_info[queue].txq_tid = tid;
889 	}
890 
891 	IWL_DEBUG_TX_QUEUES(mvm,
892 			    "Enabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n",
893 			    queue, mvm->queue_info[queue].tid_bitmap,
894 			    mvm->hw_queue_to_mac80211[queue]);
895 
896 	spin_unlock_bh(&mvm->queue_info_lock);
897 
898 	return enable_queue;
899 }
900 
901 static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue,
902 			       int mac80211_queue, u16 ssn,
903 			       const struct iwl_trans_txq_scd_cfg *cfg,
904 			       unsigned int wdg_timeout)
905 {
906 	struct iwl_scd_txq_cfg_cmd cmd = {
907 		.scd_queue = queue,
908 		.action = SCD_CFG_ENABLE_QUEUE,
909 		.window = cfg->frame_limit,
910 		.sta_id = cfg->sta_id,
911 		.ssn = cpu_to_le16(ssn),
912 		.tx_fifo = cfg->fifo,
913 		.aggregate = cfg->aggregate,
914 		.tid = cfg->tid,
915 	};
916 	bool inc_ssn;
917 
918 	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
919 		return false;
920 
921 	/* Send the enabling command if we need to */
922 	if (!iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue,
923 					cfg->sta_id, cfg->tid))
924 		return false;
925 
926 	inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
927 					   NULL, wdg_timeout);
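	/*
	 * The transport may have had to bump the SSN while enabling the
	 * queue; if it did, keep the SCD config command in sync with what
	 * was actually programmed.
	 */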
928 	if (inc_ssn)
929 		le16_add_cpu(&cmd.ssn, 1);
930 
931 	WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
932 	     "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);
933 
934 	return inc_ssn;
935 }
936 
937 static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
938 {
939 	struct iwl_scd_txq_cfg_cmd cmd = {
940 		.scd_queue = queue,
941 		.action = SCD_CFG_UPDATE_QUEUE_TID,
942 	};
943 	int tid;
944 	unsigned long tid_bitmap;
945 	int ret;
946 
947 	lockdep_assert_held(&mvm->mutex);
948 
949 	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
950 		return;
951 
952 	spin_lock_bh(&mvm->queue_info_lock);
953 	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
954 	spin_unlock_bh(&mvm->queue_info_lock);
955 
956 	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
957 		return;
958 
959 	/* Find any TID for queue */
960 	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
961 	cmd.tid = tid;
962 	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
963 
964 	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
965 	if (ret) {
966 		IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
967 			queue, ret);
968 		return;
969 	}
970 
971 	spin_lock_bh(&mvm->queue_info_lock);
972 	mvm->queue_info[queue].txq_tid = tid;
973 	spin_unlock_bh(&mvm->queue_info_lock);
974 	IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
975 			    queue, tid);
976 }
977 
978 static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
979 {
980 	struct ieee80211_sta *sta;
981 	struct iwl_mvm_sta *mvmsta;
982 	u8 sta_id;
983 	int tid = -1;
984 	unsigned long tid_bitmap;
985 	unsigned int wdg_timeout;
986 	int ssn;
987 	int ret = true;
988 
989 	/* queue sharing is disabled on new TX path */
990 	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
991 		return;
992 
993 	lockdep_assert_held(&mvm->mutex);
994 
995 	spin_lock_bh(&mvm->queue_info_lock);
996 	sta_id = mvm->queue_info[queue].ra_sta_id;
997 	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
998 	spin_unlock_bh(&mvm->queue_info_lock);
999 
1000 	/* Find TID for queue, and make sure it is the only one on the queue */
1001 	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
1002 	if (tid_bitmap != BIT(tid)) {
1003 		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
1004 			queue, tid_bitmap);
1005 		return;
1006 	}
1007 
1008 	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
1009 			    tid);
1010 
1011 	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1012 					lockdep_is_held(&mvm->mutex));
1013 
1014 	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
1015 		return;
1016 
1017 	mvmsta = iwl_mvm_sta_from_mac80211(sta);
1018 	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
1019 
1020 	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
1021 
1022 	ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
1023 					 tid_to_mac80211_ac[tid], ssn,
1024 					 wdg_timeout, true);
1025 	if (ret) {
1026 		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
1027 		return;
1028 	}
1029 
1030 	/* If aggs should be turned back on - do it */
1031 	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
1032 		struct iwl_mvm_add_sta_cmd cmd = {0};
1033 
1034 		mvmsta->tid_disable_agg &= ~BIT(tid);
1035 
1036 		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
1037 		cmd.sta_id = mvmsta->sta_id;
1038 		cmd.add_modify = STA_MODE_MODIFY;
1039 		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
1040 		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
1041 		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
1042 
1043 		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
1044 					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
1045 		if (!ret) {
1046 			IWL_DEBUG_TX_QUEUES(mvm,
1047 					    "TXQ #%d is now aggregated again\n",
1048 					    queue);
1049 
1050 			/* Mark queue internally as aggregating again */
1051 			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
1052 		}
1053 	}
1054 
1055 	spin_lock_bh(&mvm->queue_info_lock);
1056 	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
1057 	spin_unlock_bh(&mvm->queue_info_lock);
1058 }
1059 
1060 /*
1061  * Remove inactive TIDs of a given queue.
1062  * If all queue TIDs are inactive - mark the queue as inactive
1063  * If only some of the queue TIDs are inactive - unmap them from the queue
1064  *
1065  * Returns %true if all TIDs were removed and the queue could be reused.
1066  */
1067 static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
1068 					 struct iwl_mvm_sta *mvmsta, int queue,
1069 					 unsigned long tid_bitmap,
1070 					 unsigned long *unshare_queues,
1071 					 unsigned long *changetid_queues)
1072 {
1073 	int tid;
1074 
1075 	lockdep_assert_held(&mvmsta->lock);
1076 	lockdep_assert_held(&mvm->queue_info_lock);
1077 
1078 	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
1079 		return false;
1080 
1081 	/* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
1082 	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
1083 		/* If some TFDs are still queued - don't mark TID as inactive */
1084 		if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
1085 			tid_bitmap &= ~BIT(tid);
1086 
1087 		/* Don't mark as inactive any TID that has an active BA */
1088 		if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
1089 			tid_bitmap &= ~BIT(tid);
1090 	}
1091 
1092 	/* If all TIDs in the queue are inactive - return it can be reused */
1093 	if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
1094 		IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue);
1095 		return true;
1096 	}
1097 
1098 	/*
1099 	 * If we are here, this is a shared queue and not all TIDs timed-out.
1100 	 * Remove the ones that did.
1101 	 */
1102 	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
1103 		int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]];
1104 		u16 tid_bitmap;
1105 
1106 		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
1107 		mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue);
1108 		mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
1109 
1110 		tid_bitmap = mvm->queue_info[queue].tid_bitmap;
1111 
1112 		/*
1113 		 * We need to take into account a situation in which a TXQ was
1114 		 * allocated to TID x, and then turned shared by adding TIDs y
1115 		 * and z. If TID x becomes inactive and is removed from the TXQ,
1116 		 * ownership must be given to one of the remaining TIDs.
1117 		 * This is mainly because if TID x continues - a new queue can't
1118 		 * be allocated for it as long as it is an owner of another TXQ.
1119 		 *
1120 		 * Mark this queue in the right bitmap, we'll send the command
1121 		 * to the firmware later.
1122 		 */
1123 		if (!(tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
1124 			set_bit(queue, changetid_queues);
1125 
1126 		IWL_DEBUG_TX_QUEUES(mvm,
1127 				    "Removing inactive TID %d from shared Q:%d\n",
1128 				    tid, queue);
1129 	}
1130 
1131 	IWL_DEBUG_TX_QUEUES(mvm,
1132 			    "TXQ #%d left with tid bitmap 0x%x\n", queue,
1133 			    mvm->queue_info[queue].tid_bitmap);
1134 
1135 	/*
1136 	 * There may be different TIDs with the same mac queues, so make
1137 	 * sure all TIDs have existing corresponding mac queues enabled
1138 	 */
1139 	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
1140 	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
1141 		mvm->hw_queue_to_mac80211[queue] |=
1142 			BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]);
1143 	}
1144 
1145 	/* If the queue is marked as shared - "unshare" it */
1146 	if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
1147 	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
1148 		IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
1149 				    queue);
1150 		set_bit(queue, unshare_queues);
1151 	}
1152 
1153 	return false;
1154 }
1155 
1156 /*
1157  * Check for inactivity - this includes checking if any queue
1158  * can be unshared and finding one (and only one) that can be
1159  * reused.
1160  * This function is also invoked as a sort of clean-up task,
1161  * in which case @alloc_for_sta is IWL_MVM_INVALID_STA.
1162  *
1163  * Returns the queue number, or -ENOSPC.
1164  */
1165 static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
1166 {
1167 	unsigned long now = jiffies;
1168 	unsigned long unshare_queues = 0;
1169 	unsigned long changetid_queues = 0;
1170 	int i, ret, free_queue = -ENOSPC;
1171 
1172 	lockdep_assert_held(&mvm->mutex);
1173 
1174 	if (iwl_mvm_has_new_tx_api(mvm))
1175 		return -ENOSPC;
1176 
1177 	spin_lock_bh(&mvm->queue_info_lock);
1178 
1179 	rcu_read_lock();
1180 
1181 	/* we skip the CMD queue below by starting at 1 */
1182 	BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0);
1183 
1184 	for (i = 1; i < IWL_MAX_HW_QUEUES; i++) {
1185 		struct ieee80211_sta *sta;
1186 		struct iwl_mvm_sta *mvmsta;
1187 		u8 sta_id;
1188 		int tid;
1189 		unsigned long inactive_tid_bitmap = 0;
1190 		unsigned long queue_tid_bitmap;
1191 
1192 		queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
1193 		if (!queue_tid_bitmap)
1194 			continue;
1195 
1196 		/* If TXQ isn't in active use anyway - nothing to do here... */
1197 		if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
1198 		    mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED)
1199 			continue;
1200 
1201 		/* Check to see if there are inactive TIDs on this queue */
1202 		for_each_set_bit(tid, &queue_tid_bitmap,
1203 				 IWL_MAX_TID_COUNT + 1) {
1204 			if (time_after(mvm->queue_info[i].last_frame_time[tid] +
1205 				       IWL_MVM_DQA_QUEUE_TIMEOUT, now))
1206 				continue;
1207 
1208 			inactive_tid_bitmap |= BIT(tid);
1209 		}
1210 
1211 		/* If all TIDs are active - finish check on this queue */
1212 		if (!inactive_tid_bitmap)
1213 			continue;
1214 
1215 		/*
1216 		 * If we are here - the queue hasn't been served recently and is
1217 		 * in use
1218 		 */
1219 
1220 		sta_id = mvm->queue_info[i].ra_sta_id;
1221 		sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
1222 
1223 		/*
1224 		 * If the STA doesn't exist anymore, it isn't an error. It could
1225 		 * be that it was removed since getting the queues, and in this
1226 		 * case it should've inactivated its queues anyway.
1227 		 */
1228 		if (IS_ERR_OR_NULL(sta))
1229 			continue;
1230 
1231 		mvmsta = iwl_mvm_sta_from_mac80211(sta);
1232 
1233 		/* this isn't so nice, but works OK due to the way we loop */
1234 		spin_unlock(&mvm->queue_info_lock);
1235 
1236 		/* and we need this locking order */
1237 		spin_lock(&mvmsta->lock);
1238 		spin_lock(&mvm->queue_info_lock);
1239 		ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
1240 						   inactive_tid_bitmap,
1241 						   &unshare_queues,
1242 						   &changetid_queues);
1243 		if (ret >= 0 && free_queue < 0)
1244 			free_queue = ret;
1245 		/* only unlock sta lock - we still need the queue info lock */
1246 		spin_unlock(&mvmsta->lock);
1247 	}
1248 
1249 	rcu_read_unlock();
1250 	spin_unlock_bh(&mvm->queue_info_lock);
1251 
1252 	/* Reconfigure queues requiring reconfiguration */
1253 	for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
1254 		iwl_mvm_unshare_queue(mvm, i);
1255 	for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES)
1256 		iwl_mvm_change_queue_tid(mvm, i);
1257 
1258 	if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
1259 		ret = iwl_mvm_free_inactive_queue(mvm, free_queue,
1260 						  alloc_for_sta);
1261 		if (ret)
1262 			return ret;
1263 	}
1264 
1265 	return free_queue;
1266 }
1267 
1268 static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
1269 				   struct ieee80211_sta *sta, u8 ac, int tid,
1270 				   struct ieee80211_hdr *hdr)
1271 {
1272 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1273 	struct iwl_trans_txq_scd_cfg cfg = {
1274 		.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
1275 		.sta_id = mvmsta->sta_id,
1276 		.tid = tid,
1277 		.frame_limit = IWL_FRAME_LIMIT,
1278 	};
1279 	unsigned int wdg_timeout =
1280 		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
1281 	u8 mac_queue = mvmsta->vif->hw_queue[ac];
1282 	int queue = -1;
1283 	unsigned long disable_agg_tids = 0;
1284 	enum iwl_mvm_agg_state queue_state;
1285 	bool shared_queue = false, inc_ssn;
1286 	int ssn;
1287 	unsigned long tfd_queue_mask;
1288 	int ret;
1289 
1290 	lockdep_assert_held(&mvm->mutex);
1291 
1292 	if (iwl_mvm_has_new_tx_api(mvm))
1293 		return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
1294 
1295 	spin_lock_bh(&mvmsta->lock);
1296 	tfd_queue_mask = mvmsta->tfd_queue_msk;
1297 	spin_unlock_bh(&mvmsta->lock);
1298 
1299 	spin_lock_bh(&mvm->queue_info_lock);
1300 
1301 	/*
1302 	 * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
1303 	 * exists
1304 	 */
1305 	if (!ieee80211_is_data_qos(hdr->frame_control) ||
1306 	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
1307 		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
1308 						IWL_MVM_DQA_MIN_MGMT_QUEUE,
1309 						IWL_MVM_DQA_MAX_MGMT_QUEUE);
1310 		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
1311 			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
1312 					    queue);
1313 
1314 		/* If no such queue is found, we'll use a DATA queue instead */
1315 	}
1316 
1317 	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
1318 	    (mvm->queue_info[mvmsta->reserved_queue].status ==
1319 			IWL_MVM_QUEUE_RESERVED)) {
1320 		queue = mvmsta->reserved_queue;
1321 		mvm->queue_info[queue].reserved = true;
1322 		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
1323 	}
1324 
1325 	if (queue < 0)
1326 		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
1327 						IWL_MVM_DQA_MIN_DATA_QUEUE,
1328 						IWL_MVM_DQA_MAX_DATA_QUEUE);
1329 	if (queue < 0) {
1330 		spin_unlock_bh(&mvm->queue_info_lock);
1331 
1332 		/* try harder - perhaps kill an inactive queue */
1333 		queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
1334 
1335 		spin_lock_bh(&mvm->queue_info_lock);
1336 	}
1337 
1338 	/* No free queue - we'll have to share */
1339 	if (queue <= 0) {
1340 		queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
1341 		if (queue > 0) {
1342 			shared_queue = true;
1343 			mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
1344 		}
1345 	}
1346 
1347 	/*
1348 	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
1349 	 * to make sure no one else takes it.
1350 	 * This allows us to avoid re-acquiring the lock at the end of the
1351 	 * configuration. On error we'll mark it back as free.
1352 	 */
1353 	if (queue > 0 && !shared_queue)
1354 		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
1355 
1356 	spin_unlock_bh(&mvm->queue_info_lock);
1357 
1358 	/* This shouldn't happen - out of queues */
1359 	if (WARN_ON(queue <= 0)) {
1360 		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
1361 			tid, cfg.sta_id);
1362 		return queue;
1363 	}
1364 
1365 	/*
1366 	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
1367 	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
1368 	 * as aggregatable.
1369 	 * Mark all DATA queues as allowed to be aggregated at some point
1370 	 */
1371 	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
1372 			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
1373 
1374 	IWL_DEBUG_TX_QUEUES(mvm,
1375 			    "Allocating %squeue #%d to sta %d on tid %d\n",
1376 			    shared_queue ? "shared " : "", queue,
1377 			    mvmsta->sta_id, tid);
1378 
1379 	if (shared_queue) {
1380 		/* Disable any open aggs on this queue */
1381 		disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);
1382 
1383 		if (disable_agg_tids) {
1384 			IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
1385 					    queue);
1386 			iwl_mvm_invalidate_sta_queue(mvm, queue,
1387 						     disable_agg_tids, false);
1388 		}
1389 	}
1390 
1391 	ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
1392 	inc_ssn = iwl_mvm_enable_txq(mvm, queue, mac_queue,
1393 				     ssn, &cfg, wdg_timeout);
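	/*
	 * The sequence number occupies bits 4-15 of seq_ctrl, so bumping the
	 * SN by one means adding 0x10 to the header field; keep the local
	 * ssn variable in sync as well.
	 */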
1394 	if (inc_ssn) {
1395 		ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
1396 		le16_add_cpu(&hdr->seq_ctrl, 0x10);
1397 	}
1398 
1399 	/*
1400 	 * Mark queue as shared in transport if shared
1401 	 * Note this has to be done after queue enablement because enablement
1402 	 * can also set this value, and there is no indication there of shared
1403 	 * queues
1404 	 */
1405 	if (shared_queue)
1406 		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
1407 
1408 	spin_lock_bh(&mvmsta->lock);
1409 	/*
1410 	 * This looks racy, but it is not. We have only one packet for
1411 	 * this ra/tid in our Tx path since we stop the Qdisc when we
1412 	 * need to allocate a new TFD queue.
1413 	 */
1414 	if (inc_ssn)
1415 		mvmsta->tid_data[tid].seq_number += 0x10;
1416 	mvmsta->tid_data[tid].txq_id = queue;
1417 	mvmsta->tfd_queue_msk |= BIT(queue);
1418 	queue_state = mvmsta->tid_data[tid].state;
1419 
1420 	if (mvmsta->reserved_queue == queue)
1421 		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
1422 	spin_unlock_bh(&mvmsta->lock);
1423 
1424 	if (!shared_queue) {
1425 		ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
1426 		if (ret)
1427 			goto out_err;
1428 
1429 		/* If we need to re-enable aggregations... */
1430 		if (queue_state == IWL_AGG_ON) {
1431 			ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
1432 			if (ret)
1433 				goto out_err;
1434 		}
1435 	} else {
1436 		/* Redirect queue, if needed */
1437 		ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
1438 						 wdg_timeout, false);
1439 		if (ret)
1440 			goto out_err;
1441 	}
1442 
1443 	return 0;
1444 
1445 out_err:
1446 	iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);
1447 
1448 	return ret;
1449 }
1450 
1451 static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
1452 {
1453 	if (tid == IWL_MAX_TID_COUNT)
1454 		return IEEE80211_AC_VO; /* MGMT */
1455 
1456 	return tid_to_mac80211_ac[tid];
1457 }
1458 
1459 static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
1460 				       struct ieee80211_sta *sta, int tid)
1461 {
1462 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1463 	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
1464 	struct sk_buff *skb;
1465 	struct ieee80211_hdr *hdr;
1466 	struct sk_buff_head deferred_tx;
1467 	u8 mac_queue;
1468 	bool no_queue = false; /* Marks if there is a problem with the queue */
1469 	u8 ac;
1470 
1471 	lockdep_assert_held(&mvm->mutex);
1472 
1473 	skb = skb_peek(&tid_data->deferred_tx_frames);
1474 	if (!skb)
1475 		return;
1476 	hdr = (void *)skb->data;
1477 
1478 	ac = iwl_mvm_tid_to_ac_queue(tid);
1479 	mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;
1480 
1481 	if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE &&
1482 	    iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
1483 		IWL_ERR(mvm,
1484 			"Can't alloc TXQ for sta %d tid %d - dropping frame\n",
1485 			mvmsta->sta_id, tid);
1486 
1487 		/*
1488 		 * Mark queue as problematic so later the deferred traffic is
1489 		 * freed, as we can do nothing with it
1490 		 */
1491 		no_queue = true;
1492 	}
1493 
1494 	__skb_queue_head_init(&deferred_tx);
1495 
1496 	/* Disable bottom-halves when entering TX path */
1497 	local_bh_disable();
1498 	spin_lock(&mvmsta->lock);
1499 	skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
1500 	mvmsta->deferred_traffic_tid_map &= ~BIT(tid);
1501 	spin_unlock(&mvmsta->lock);
1502 
1503 	while ((skb = __skb_dequeue(&deferred_tx)))
1504 		if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
1505 			ieee80211_free_txskb(mvm->hw, skb);
1506 	local_bh_enable();
1507 
1508 	/* Wake queue */
1509 	iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
1510 }
1511 
1512 void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
1513 {
1514 	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
1515 					   add_stream_wk);
1516 	struct ieee80211_sta *sta;
1517 	struct iwl_mvm_sta *mvmsta;
1518 	unsigned long deferred_tid_traffic;
1519 	int sta_id, tid;
1520 
1521 	mutex_lock(&mvm->mutex);
1522 
1523 	iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
1524 
1525 	/* Go over all stations with deferred traffic */
1526 	for_each_set_bit(sta_id, mvm->sta_deferred_frames,
1527 			 IWL_MVM_STATION_COUNT) {
1528 		clear_bit(sta_id, mvm->sta_deferred_frames);
1529 		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1530 						lockdep_is_held(&mvm->mutex));
1531 		if (IS_ERR_OR_NULL(sta))
1532 			continue;
1533 
1534 		mvmsta = iwl_mvm_sta_from_mac80211(sta);
1535 		deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;
1536 
1537 		for_each_set_bit(tid, &deferred_tid_traffic,
1538 				 IWL_MAX_TID_COUNT + 1)
1539 			iwl_mvm_tx_deferred_stream(mvm, sta, tid);
1540 	}
1541 
1542 	mutex_unlock(&mvm->mutex);
1543 }
1544 
1545 static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
1546 				      struct ieee80211_sta *sta,
1547 				      enum nl80211_iftype vif_type)
1548 {
1549 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1550 	int queue;
1551 
1552 	/* queue reserving is disabled on new TX path */
1553 	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
1554 		return 0;
1555 
1556 	/* run the general cleanup/unsharing of queues */
1557 	iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
1558 
1559 	spin_lock_bh(&mvm->queue_info_lock);
1560 
1561 	/* Make sure we have free resources for this STA */
1562 	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
1563 	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
1564 	    (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
1565 	     IWL_MVM_QUEUE_FREE))
1566 		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
1567 	else
1568 		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
1569 						IWL_MVM_DQA_MIN_DATA_QUEUE,
1570 						IWL_MVM_DQA_MAX_DATA_QUEUE);
1571 	if (queue < 0) {
1572 		spin_unlock_bh(&mvm->queue_info_lock);
1573 		/* try again - this time kick out a queue if needed */
1574 		queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
1575 		if (queue < 0) {
1576 			IWL_ERR(mvm, "No available queues for new station\n");
1577 			return -ENOSPC;
1578 		}
1579 		spin_lock_bh(&mvm->queue_info_lock);
1580 	}
1581 	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
1582 
1583 	spin_unlock_bh(&mvm->queue_info_lock);
1584 
1585 	mvmsta->reserved_queue = queue;
1586 
1587 	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
1588 			    queue, mvmsta->sta_id);
1589 
1590 	return 0;
1591 }
1592 
1593 /*
1594  * In DQA mode, after a HW restart the queues should be allocated as before, in
1595  * order to avoid race conditions when there are shared queues. This function
1596  * does the re-mapping and queue allocation.
1597  *
1598  * Note that re-enabling aggregations isn't done in this function.
1599  */
1600 static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
1601 						 struct iwl_mvm_sta *mvm_sta)
1602 {
1603 	unsigned int wdg_timeout =
1604 			iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
1605 	int i;
1606 	struct iwl_trans_txq_scd_cfg cfg = {
1607 		.sta_id = mvm_sta->sta_id,
1608 		.frame_limit = IWL_FRAME_LIMIT,
1609 	};
1610 
1611 	/* Make sure reserved queue is still marked as such (if allocated) */
1612 	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
1613 		mvm->queue_info[mvm_sta->reserved_queue].status =
1614 			IWL_MVM_QUEUE_RESERVED;
1615 
1616 	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
1617 		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
1618 		int txq_id = tid_data->txq_id;
1619 		int ac;
1620 		u8 mac_queue;
1621 
1622 		if (txq_id == IWL_MVM_INVALID_QUEUE)
1623 			continue;
1624 
1625 		skb_queue_head_init(&tid_data->deferred_tx_frames);
1626 
1627 		ac = tid_to_mac80211_ac[i];
1628 		mac_queue = mvm_sta->vif->hw_queue[ac];
1629 
1630 		if (iwl_mvm_has_new_tx_api(mvm)) {
1631 			IWL_DEBUG_TX_QUEUES(mvm,
1632 					    "Re-mapping sta %d tid %d\n",
1633 					    mvm_sta->sta_id, i);
1634 			txq_id = iwl_mvm_tvqm_enable_txq(mvm, mac_queue,
1635 							 mvm_sta->sta_id,
1636 							 i, wdg_timeout);
1637 			tid_data->txq_id = txq_id;
1638 
1639 			/*
1640 			 * Since we don't set the seq number after reset, and HW
1641 			 * sets it now, FW reset will cause the seq num to start
1642 			 * at 0 again, so the driver needs to update it
1643 			 * internally as well to keep it in sync with the real value
1644 			 */
1645 			tid_data->seq_number = 0;
1646 		} else {
1647 			u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
1648 
1649 			cfg.tid = i;
1650 			cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
1651 			cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
1652 					 txq_id ==
1653 					 IWL_MVM_DQA_BSS_CLIENT_QUEUE);
1654 
1655 			IWL_DEBUG_TX_QUEUES(mvm,
1656 					    "Re-mapping sta %d tid %d to queue %d\n",
1657 					    mvm_sta->sta_id, i, txq_id);
1658 
1659 			iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg,
1660 					   wdg_timeout);
1661 			mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
1662 		}
1663 	}
1664 }
1665 
1666 static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
1667 				      struct iwl_mvm_int_sta *sta,
1668 				      const u8 *addr,
1669 				      u16 mac_id, u16 color)
1670 {
1671 	struct iwl_mvm_add_sta_cmd cmd;
1672 	int ret;
1673 	u32 status = ADD_STA_SUCCESS;
1674 
1675 	lockdep_assert_held(&mvm->mutex);
1676 
1677 	memset(&cmd, 0, sizeof(cmd));
1678 	cmd.sta_id = sta->sta_id;
1679 	cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
1680 							     color));
1681 	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
1682 		cmd.station_type = sta->type;
1683 
1684 	if (!iwl_mvm_has_new_tx_api(mvm))
1685 		cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
1686 	cmd.tid_disable_tx = cpu_to_le16(0xffff);
1687 
1688 	if (addr)
1689 		memcpy(cmd.addr, addr, ETH_ALEN);
1690 
1691 	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1692 					  iwl_mvm_add_sta_cmd_size(mvm),
1693 					  &cmd, &status);
1694 	if (ret)
1695 		return ret;
1696 
1697 	switch (status & IWL_ADD_STA_STATUS_MASK) {
1698 	case ADD_STA_SUCCESS:
1699 		IWL_DEBUG_INFO(mvm, "Internal station added.\n");
1700 		return 0;
1701 	default:
1702 		ret = -EIO;
1703 		IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
1704 			status);
1705 		break;
1706 	}
1707 	return ret;
1708 }
1709 
1710 int iwl_mvm_add_sta(struct iwl_mvm *mvm,
1711 		    struct ieee80211_vif *vif,
1712 		    struct ieee80211_sta *sta)
1713 {
1714 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1715 	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1716 	struct iwl_mvm_rxq_dup_data *dup_data;
1717 	int i, ret, sta_id;
1718 	bool sta_update = false;
1719 	unsigned int sta_flags = 0;
1720 
1721 	lockdep_assert_held(&mvm->mutex);
1722 
1723 	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1724 		sta_id = iwl_mvm_find_free_sta_id(mvm,
1725 						  ieee80211_vif_type_p2p(vif));
1726 	else
1727 		sta_id = mvm_sta->sta_id;
1728 
1729 	if (sta_id == IWL_MVM_INVALID_STA)
1730 		return -ENOSPC;
1731 
1732 	spin_lock_init(&mvm_sta->lock);
1733 
1734 	/* if this is a HW restart re-alloc existing queues */
1735 	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1736 		struct iwl_mvm_int_sta tmp_sta = {
1737 			.sta_id = sta_id,
1738 			.type = mvm_sta->sta_type,
1739 		};
1740 
1741 		/*
1742 		 * First add an empty station since allocating
1743 		 * a queue requires a valid station
1744 		 */
1745 		ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
1746 						 mvmvif->id, mvmvif->color);
1747 		if (ret)
1748 			goto err;
1749 
1750 		iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
1751 		sta_update = true;
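		/*
		 * STA_MODIFY_QUEUES updates the station's TFD queue mask,
		 * which isn't used with the new TX API.
		 */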
1752 		sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
1753 		goto update_fw;
1754 	}
1755 
1756 	mvm_sta->sta_id = sta_id;
1757 	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
1758 						      mvmvif->color);
1759 	mvm_sta->vif = vif;
1760 	if (!mvm->trans->cfg->gen2)
1761 		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
1762 	else
1763 		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
1764 	mvm_sta->tx_protection = 0;
1765 	mvm_sta->tt_tx_protection = false;
1766 	mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;
1767 
1768 	/* HW restart, don't assume the memory has been zeroed */
1769 	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
1770 	mvm_sta->tfd_queue_msk = 0;
1771 
1772 	/* for HW restart - reset everything but the sequence number */
1773 	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
1774 		u16 seq = mvm_sta->tid_data[i].seq_number;
1775 		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
1776 		mvm_sta->tid_data[i].seq_number = seq;
1777 
1778 		/*
1779 		 * Mark all queues for this STA as unallocated and defer TX
1780 		 * frames until the queue is allocated
1781 		 */
1782 		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
1783 		skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
1784 	}
1785 	mvm_sta->deferred_traffic_tid_map = 0;
1786 	mvm_sta->agg_tids = 0;
1787 
1788 	if (iwl_mvm_has_new_rx_api(mvm) &&
1789 	    !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1790 		int q;
1791 
1792 		dup_data = kcalloc(mvm->trans->num_rx_queues,
1793 				   sizeof(*dup_data), GFP_KERNEL);
1794 		if (!dup_data)
1795 			return -ENOMEM;
1796 		/*
1797 		 * Initialize all the last_seq values to 0xffff which can never
1798 		 * compare equal to the frame's seq_ctrl in the check in
1799 		 * iwl_mvm_is_dup() since the lower 4 bits are the fragment
1800 		 * number and fragmented packets don't reach that function.
1801 		 *
1802 		 * This thus allows receiving a packet with seqno 0 and the
1803 		 * retry bit set as the very first packet on a new TID.
1804 		 */
1805 		for (q = 0; q < mvm->trans->num_rx_queues; q++)
1806 			memset(dup_data[q].last_seq, 0xff,
1807 			       sizeof(dup_data[q].last_seq));
1808 		mvm_sta->dup_data = dup_data;
1809 	}
1810 
1811 	if (!iwl_mvm_has_new_tx_api(mvm)) {
1812 		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
1813 						 ieee80211_vif_type_p2p(vif));
1814 		if (ret)
1815 			goto err;
1816 	}
1817 
1818 	/*
1819 	 * if rs is registered with mac80211, then "add station" will be handled
1820 	 * via the corresponding ops, otherwise we need to notify rate scaling here
1821 	 */
1822 	if (iwl_mvm_has_tlc_offload(mvm))
1823 		iwl_mvm_rs_add_sta(mvm, mvm_sta);
1824 
1825 update_fw:
1826 	ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
1827 	if (ret)
1828 		goto err;
1829 
1830 	if (vif->type == NL80211_IFTYPE_STATION) {
1831 		if (!sta->tdls) {
1832 			WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
1833 			mvmvif->ap_sta_id = sta_id;
1834 		} else {
1835 			WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
1836 		}
1837 	}
1838 
1839 	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);
1840 
1841 	return 0;
1842 
1843 err:
1844 	return ret;
1845 }
1846 
1847 int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
1848 		      bool drain)
1849 {
1850 	struct iwl_mvm_add_sta_cmd cmd = {};
1851 	int ret;
1852 	u32 status;
1853 
1854 	lockdep_assert_held(&mvm->mutex);
1855 
1856 	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
1857 	cmd.sta_id = mvmsta->sta_id;
1858 	cmd.add_modify = STA_MODE_MODIFY;
1859 	cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
1860 	cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
1861 
1862 	status = ADD_STA_SUCCESS;
1863 	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1864 					  iwl_mvm_add_sta_cmd_size(mvm),
1865 					  &cmd, &status);
1866 	if (ret)
1867 		return ret;
1868 
1869 	switch (status & IWL_ADD_STA_STATUS_MASK) {
1870 	case ADD_STA_SUCCESS:
1871 		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
1872 			       mvmsta->sta_id);
1873 		break;
1874 	default:
1875 		ret = -EIO;
1876 		IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
1877 			mvmsta->sta_id);
1878 		break;
1879 	}
1880 
1881 	return ret;
1882 }
1883 
1884 /*
1885  * Remove a station from the FW table. Before sending the command to remove
1886  * the station validate that the station is indeed known to the driver (sanity
1887  * only).
1888  */
1889 static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
1890 {
1891 	struct ieee80211_sta *sta;
1892 	struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
1893 		.sta_id = sta_id,
1894 	};
1895 	int ret;
1896 
1897 	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1898 					lockdep_is_held(&mvm->mutex));
1899 
1900 	/* Note: internal stations are marked as error values */
1901 	if (!sta) {
1902 		IWL_ERR(mvm, "Invalid station id\n");
1903 		return -EINVAL;
1904 	}
1905 
1906 	ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
1907 				   sizeof(rm_sta_cmd), &rm_sta_cmd);
1908 	if (ret) {
1909 		IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
1910 		return ret;
1911 	}
1912 
1913 	return 0;
1914 }
1915 
1916 static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
1917 				       struct ieee80211_vif *vif,
1918 				       struct iwl_mvm_sta *mvm_sta)
1919 {
1920 	int ac;
1921 	int i;
1922 
1923 	lockdep_assert_held(&mvm->mutex);
1924 
1925 	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
1926 		if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
1927 			continue;
1928 
1929 		ac = iwl_mvm_tid_to_ac_queue(i);
1930 		iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
1931 				    vif->hw_queue[ac], i, 0);
1932 		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
1933 	}
1934 }
1935 
1936 int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
1937 				  struct iwl_mvm_sta *mvm_sta)
1938 {
1939 	int i;
1940 
1941 	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
1942 		u16 txq_id;
1943 		int ret;
1944 
1945 		spin_lock_bh(&mvm_sta->lock);
1946 		txq_id = mvm_sta->tid_data[i].txq_id;
1947 		spin_unlock_bh(&mvm_sta->lock);
1948 
1949 		if (txq_id == IWL_MVM_INVALID_QUEUE)
1950 			continue;
1951 
1952 		ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
1953 		if (ret)
1954 			return ret;
1955 	}
1956 
1957 	return 0;
1958 }
1959 
1960 int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
1961 		   struct ieee80211_vif *vif,
1962 		   struct ieee80211_sta *sta)
1963 {
1964 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1965 	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1966 	u8 sta_id = mvm_sta->sta_id;
1967 	int ret;
1968 
1969 	lockdep_assert_held(&mvm->mutex);
1970 
1971 	if (iwl_mvm_has_new_rx_api(mvm))
1972 		kfree(mvm_sta->dup_data);
1973 
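	/*
	 * Ask the FW to drain pending frames for this station, flush and wait
	 * for its TX queues to empty, then clear the drain flag again below.
	 */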
1974 	ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
1975 	if (ret)
1976 		return ret;
1977 
1978 	/* flush its queues here since we are freeing mvm_sta */
1979 	ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
1980 	if (ret)
1981 		return ret;
1982 	if (iwl_mvm_has_new_tx_api(mvm)) {
1983 		ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
1984 	} else {
1985 		u32 q_mask = mvm_sta->tfd_queue_msk;
1986 
1987 		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
1988 						     q_mask);
1989 	}
1990 	if (ret)
1991 		return ret;
1992 
1993 	ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
1994 
1995 	iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
1996 
1997 	/* If there is a TXQ still marked as reserved - free it */
1998 	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
1999 		u8 reserved_txq = mvm_sta->reserved_queue;
2000 		enum iwl_mvm_queue_status *status;
2001 
2002 		/*
2003 		 * If no traffic has gone through the reserved TXQ - it
2004 		 * is still marked as IWL_MVM_QUEUE_RESERVED, and
2005 		 * should be manually marked as free again
2006 		 */
2007 		spin_lock_bh(&mvm->queue_info_lock);
2008 		status = &mvm->queue_info[reserved_txq].status;
2009 		if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
2010 			 (*status != IWL_MVM_QUEUE_FREE),
2011 			 "sta_id %d reserved txq %d status %d",
2012 			 sta_id, reserved_txq, *status)) {
2013 			spin_unlock_bh(&mvm->queue_info_lock);
2014 			return -EINVAL;
2015 		}
2016 
2017 		*status = IWL_MVM_QUEUE_FREE;
2018 		spin_unlock_bh(&mvm->queue_info_lock);
2019 	}
2020 
2021 	if (vif->type == NL80211_IFTYPE_STATION &&
2022 	    mvmvif->ap_sta_id == sta_id) {
2023 		/* if associated - we can't remove the AP STA now */
2024 		if (vif->bss_conf.assoc)
2025 			return ret;
2026 
2027 		/* unassoc - go ahead - remove the AP STA now */
2028 		mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
2029 
2030 		/* clear d0i3_ap_sta_id if no longer relevant */
2031 		if (mvm->d0i3_ap_sta_id == sta_id)
2032 			mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
2033 	}
2034 
2035 	/*
2036 	 * This shouldn't happen - the TDLS channel switch should be canceled
2037 	 * before the STA is removed.
2038 	 */
2039 	if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
2040 		mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
2041 		cancel_delayed_work(&mvm->tdls_cs.dwork);
2042 	}
2043 
2044 	/*
2045 	 * Make sure that the tx response code sees the station as -EBUSY and
2046 	 * calls the drain worker.
2047 	 */
2048 	spin_lock_bh(&mvm_sta->lock);
2049 	spin_unlock_bh(&mvm_sta->lock);
2050 
2051 	ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
2052 	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
2053 
2054 	return ret;
2055 }
2056 
2057 int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
2058 		      struct ieee80211_vif *vif,
2059 		      u8 sta_id)
2060 {
2061 	int ret = iwl_mvm_rm_sta_common(mvm, sta_id);
2062 
2063 	lockdep_assert_held(&mvm->mutex);
2064 
2065 	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
2066 	return ret;
2067 }
2068 
2069 int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
2070 			     struct iwl_mvm_int_sta *sta,
2071 			     u32 qmask, enum nl80211_iftype iftype,
2072 			     enum iwl_sta_type type)
2073 {
2074 	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
2075 	    sta->sta_id == IWL_MVM_INVALID_STA) {
2076 		sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
2077 		if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
2078 			return -ENOSPC;
2079 	}
2080 
2081 	sta->tfd_queue_msk = qmask;
2082 	sta->type = type;
2083 
2084 	/* put a non-NULL value so iterating over the stations won't stop */
2085 	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
2086 	return 0;
2087 }
2088 
2089 void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
2090 {
2091 	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
2092 	memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
2093 	sta->sta_id = IWL_MVM_INVALID_STA;
2094 }
2095 
2096 static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue,
2097 					  u8 sta_id, u8 fifo)
2098 {
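	/*
	 * Use the device's default TX queue watchdog timeout, unless TFD
	 * queue hang detection was disabled via the module parameter.
	 */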
2099 	unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
2100 					mvm->cfg->base_params->wd_timeout :
2101 					IWL_WATCHDOG_DISABLED;
2102 
2103 	if (iwl_mvm_has_new_tx_api(mvm)) {
2104 		int tvqm_queue =
2105 			iwl_mvm_tvqm_enable_txq(mvm, *queue, sta_id,
2106 						IWL_MAX_TID_COUNT,
2107 						wdg_timeout);
2108 		*queue = tvqm_queue;
2109 	} else {
2110 		struct iwl_trans_txq_scd_cfg cfg = {
2111 			.fifo = fifo,
2112 			.sta_id = sta_id,
2113 			.tid = IWL_MAX_TID_COUNT,
2114 			.aggregate = false,
2115 			.frame_limit = IWL_FRAME_LIMIT,
2116 		};
2117 
2118 		iwl_mvm_enable_txq(mvm, *queue, *queue, 0, &cfg, wdg_timeout);
2119 	}
2120 }
2121 
2122 int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
2123 {
2124 	int ret;
2125 
2126 	lockdep_assert_held(&mvm->mutex);
2127 
2128 	/* Allocate aux station and assign to it the aux queue */
2129 	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
2130 				       NL80211_IFTYPE_UNSPECIFIED,
2131 				       IWL_STA_AUX_ACTIVITY);
2132 	if (ret)
2133 		return ret;
2134 
2135 	/* Map Aux queue to fifo - needs to happen before adding Aux station */
2136 	if (!iwl_mvm_has_new_tx_api(mvm))
2137 		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
2138 					      mvm->aux_sta.sta_id,
2139 					      IWL_MVM_TX_FIFO_MCAST);
2140 
2141 	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
2142 					 MAC_INDEX_AUX, 0);
2143 	if (ret) {
2144 		iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
2145 		return ret;
2146 	}
2147 
2148 	/*
2149 	 * For 22000 firmware and on we cannot add a queue to a station unknown
2150 	 * to the firmware, so enable the queue here - after the station was added
2151 	 */
2152 	if (iwl_mvm_has_new_tx_api(mvm))
2153 		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
2154 					      mvm->aux_sta.sta_id,
2155 					      IWL_MVM_TX_FIFO_MCAST);
2156 
2157 	return 0;
2158 }
2159 
2160 int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2161 {
2162 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2163 	int ret;
2164 
2165 	lockdep_assert_held(&mvm->mutex);
2166 
2167 	/* Map snif queue to fifo - must happen before adding snif station */
2168 	if (!iwl_mvm_has_new_tx_api(mvm))
2169 		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
2170 					      mvm->snif_sta.sta_id,
2171 					      IWL_MVM_TX_FIFO_BE);
2172 
2173 	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
2174 					 mvmvif->id, 0);
2175 	if (ret)
2176 		return ret;
2177 
2178 	/*
2179 	 * For 22000 firmware and on we cannot add a queue to a station unknown
2180 	 * to the firmware, so enable the queue here - after the station was added
2181 	 */
2182 	if (iwl_mvm_has_new_tx_api(mvm))
2183 		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
2184 					      mvm->snif_sta.sta_id,
2185 					      IWL_MVM_TX_FIFO_BE);
2186 
2187 	return 0;
2188 }
2189 
2190 int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2191 {
2192 	int ret;
2193 
2194 	lockdep_assert_held(&mvm->mutex);
2195 
2196 	iwl_mvm_disable_txq(mvm, mvm->snif_queue, mvm->snif_queue,
2197 			    IWL_MAX_TID_COUNT, 0);
2198 	ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
2199 	if (ret)
2200 		IWL_WARN(mvm, "Failed sending remove station\n");
2201 
2202 	return ret;
2203 }
2204 
2205 void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
2206 {
2207 	iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
2208 }
2209 
2210 void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
2211 {
2212 	lockdep_assert_held(&mvm->mutex);
2213 
2214 	iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
2215 }
2216 
2217 /*
2218  * Send the add station command for the vif's broadcast station.
2219  * Assumes that the station was already allocated.
2220  *
2221  * @mvm: the mvm component
2222  * @vif: the interface to which the broadcast station is added
2223  * @bsta: the broadcast station to add.
2224  */
2225 int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2226 {
2227 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2228 	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
2229 	static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
2230 	const u8 *baddr = _baddr;
2231 	int queue;
2232 	int ret;
2233 	unsigned int wdg_timeout =
2234 		iwl_mvm_get_wd_timeout(mvm, vif, false, false);
2235 	struct iwl_trans_txq_scd_cfg cfg = {
2236 		.fifo = IWL_MVM_TX_FIFO_VO,
2237 		.sta_id = mvmvif->bcast_sta.sta_id,
2238 		.tid = IWL_MAX_TID_COUNT,
2239 		.aggregate = false,
2240 		.frame_limit = IWL_FRAME_LIMIT,
2241 	};
2242 
2243 	lockdep_assert_held(&mvm->mutex);
2244 
2245 	if (!iwl_mvm_has_new_tx_api(mvm)) {
2246 		if (vif->type == NL80211_IFTYPE_AP ||
2247 		    vif->type == NL80211_IFTYPE_ADHOC)
2248 			queue = mvm->probe_queue;
2249 		else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
2250 			queue = mvm->p2p_dev_queue;
2251 		else if (WARN(1, "Missing required TXQ for adding bcast STA\n"))
2252 			return -EINVAL;
2253 
2254 		bsta->tfd_queue_msk |= BIT(queue);
2255 
2256 		iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0,
2257 				   &cfg, wdg_timeout);
2258 	}
2259 
2260 	if (vif->type == NL80211_IFTYPE_ADHOC)
2261 		baddr = vif->bss_conf.bssid;
2262 
2263 	if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
2264 		return -ENOSPC;
2265 
2266 	ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
2267 					 mvmvif->id, mvmvif->color);
2268 	if (ret)
2269 		return ret;
2270 
2271 	/*
2272 	 * For 22000 firmware and on we cannot add a queue to a station unknown
2273 	 * to the firmware, so enable the queue here - after the station was added
2274 	 */
2275 	if (iwl_mvm_has_new_tx_api(mvm)) {
2276 		queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0],
2277 						bsta->sta_id,
2278 						IWL_MAX_TID_COUNT,
2279 						wdg_timeout);
2280 
2281 		if (vif->type == NL80211_IFTYPE_AP ||
2282 		    vif->type == NL80211_IFTYPE_ADHOC)
2283 			mvm->probe_queue = queue;
2284 		else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
2285 			mvm->p2p_dev_queue = queue;
2286 	}
2287 
2288 	return 0;
2289 }
2290 
2291 static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
2292 					  struct ieee80211_vif *vif)
2293 {
2294 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2295 	int queue;
2296 
2297 	lockdep_assert_held(&mvm->mutex);
2298 
2299 	iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, 0);
2300 
2301 	switch (vif->type) {
2302 	case NL80211_IFTYPE_AP:
2303 	case NL80211_IFTYPE_ADHOC:
2304 		queue = mvm->probe_queue;
2305 		break;
2306 	case NL80211_IFTYPE_P2P_DEVICE:
2307 		queue = mvm->p2p_dev_queue;
2308 		break;
2309 	default:
2310 		WARN(1, "Can't free bcast queue on vif type %d\n",
2311 		     vif->type);
2312 		return;
2313 	}
2314 
2315 	iwl_mvm_disable_txq(mvm, queue, vif->hw_queue[0], IWL_MAX_TID_COUNT, 0);
2316 	if (iwl_mvm_has_new_tx_api(mvm))
2317 		return;
2318 
2319 	WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
2320 	mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
2321 }
2322 
2323 /* Send the FW a request to remove the station from its internal data
2324  * structures, but DO NOT remove the entry from the local data structures. */
2325 int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2326 {
2327 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2328 	int ret;
2329 
2330 	lockdep_assert_held(&mvm->mutex);
2331 
2332 	iwl_mvm_free_bcast_sta_queues(mvm, vif);
2333 
2334 	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
2335 	if (ret)
2336 		IWL_WARN(mvm, "Failed sending remove station\n");
2337 	return ret;
2338 }
2339 
2340 int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2341 {
2342 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2343 
2344 	lockdep_assert_held(&mvm->mutex);
2345 
2346 	return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0,
2347 					ieee80211_vif_type_p2p(vif),
2348 					IWL_STA_GENERAL_PURPOSE);
2349 }
2350 
2351 /* Allocate a new station entry for the broadcast station to the given vif,
2352  * and send it to the FW.
2353  * Note that each P2P mac should have its own broadcast station.
2354  *
2355  * @mvm: the mvm component
2356  * @vif: the interface to which the broadcast station is added
2357  * @bsta: the broadcast station to add. */
2358 int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2359 {
2360 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2361 	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
2362 	int ret;
2363 
2364 	lockdep_assert_held(&mvm->mutex);
2365 
2366 	ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
2367 	if (ret)
2368 		return ret;
2369 
2370 	ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
2371 
2372 	if (ret)
2373 		iwl_mvm_dealloc_int_sta(mvm, bsta);
2374 
2375 	return ret;
2376 }
2377 
2378 void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2379 {
2380 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2381 
2382 	iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
2383 }
2384 
2385 /*
2386  * Send the FW a request to remove the station from its internal data
2387  * structures, and in addition remove it from the local data structure.
2388  */
2389 int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2390 {
2391 	int ret;
2392 
2393 	lockdep_assert_held(&mvm->mutex);
2394 
2395 	ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);
2396 
2397 	iwl_mvm_dealloc_bcast_sta(mvm, vif);
2398 
2399 	return ret;
2400 }
2401 
2402 /*
2403  * Allocate a new station entry for the multicast station to the given vif,
2404  * and send it to the FW.
2405  * Note that each AP/GO mac should have its own multicast station.
2406  *
2407  * @mvm: the mvm component
2408  * @vif: the interface to which the multicast station is added
2409  */
2410 int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2411 {
2412 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2413 	struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
2414 	static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
2415 	const u8 *maddr = _maddr;
2416 	struct iwl_trans_txq_scd_cfg cfg = {
2417 		.fifo = IWL_MVM_TX_FIFO_MCAST,
2418 		.sta_id = msta->sta_id,
2419 		.tid = 0,
2420 		.aggregate = false,
2421 		.frame_limit = IWL_FRAME_LIMIT,
2422 	};
2423 	unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
2424 	int ret;
2425 
2426 	lockdep_assert_held(&mvm->mutex);
2427 
2428 	if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
2429 		    vif->type != NL80211_IFTYPE_ADHOC))
2430 		return -ENOTSUPP;
2431 
2432 	/*
2433 	 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
2434 	 * invalid, so make sure we use the queue we want.
2435 	 * Note that this is done here as we want to avoid making DQA
2436 	 * changes in mac80211 layer.
2437 	 */
2438 	if (vif->type == NL80211_IFTYPE_ADHOC) {
2439 		vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
2440 		mvmvif->cab_queue = vif->cab_queue;
2441 	}
2442 
2443 	/*
2444 	 * While in previous FWs we had to exclude cab queue from TFD queue
2445 	 * mask, now it is needed like any other queue.
2446 	 */
2447 	if (!iwl_mvm_has_new_tx_api(mvm) &&
2448 	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
2449 		iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
2450 				   &cfg, timeout);
2451 		msta->tfd_queue_msk |= BIT(vif->cab_queue);
2452 	}
2453 	ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
2454 					 mvmvif->id, mvmvif->color);
2455 	if (ret) {
2456 		iwl_mvm_dealloc_int_sta(mvm, msta);
2457 		return ret;
2458 	}
2459 
2460 	/*
2461 	 * Enable cab queue after the ADD_STA command is sent.
2462 	 * This is needed for 22000 firmware which won't accept SCD_QUEUE_CFG
2463 	 * command with unknown station id, and for FW that doesn't support
2464 	 * station API since the cab queue is not included in the
2465 	 * tfd_queue_mask.
2466 	 */
2467 	if (iwl_mvm_has_new_tx_api(mvm)) {
2468 		int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue,
2469 						    msta->sta_id,
2470 						    0,
2471 						    timeout);
2472 		mvmvif->cab_queue = queue;
2473 	} else if (!fw_has_api(&mvm->fw->ucode_capa,
2474 			       IWL_UCODE_TLV_API_STA_TYPE))
2475 		iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
2476 				   &cfg, timeout);
2477 
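	/*
	 * If the AP interface was configured with a static WEP key, install
	 * it on the multicast station now that it is known to the FW.
	 */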
2478 	if (mvmvif->ap_wep_key) {
2479 		u8 key_offset = iwl_mvm_set_fw_key_idx(mvm);
2480 
2481 		if (key_offset == STA_KEY_IDX_INVALID)
2482 			return -ENOSPC;
2483 
2484 		ret = iwl_mvm_send_sta_key(mvm, mvmvif->mcast_sta.sta_id,
2485 					   mvmvif->ap_wep_key, 1, 0, NULL, 0,
2486 					   key_offset, 0);
2487 		if (ret)
2488 			return ret;
2489 	}
2490 
2491 	return 0;
2492 }
2493 
2494 /*
2495  * Send the FW a request to remove the station from its internal data
2496  * structures, and in addition remove it from the local data structure.
2497  */
2498 int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2499 {
2500 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2501 	int ret;
2502 
2503 	lockdep_assert_held(&mvm->mutex);
2504 
2505 	iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);
2506 
2507 	iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
2508 			    0, 0);
2509 
2510 	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
2511 	if (ret)
2512 		IWL_WARN(mvm, "Failed sending remove station\n");
2513 
2514 	return ret;
2515 }
2516 
2517 #define IWL_MAX_RX_BA_SESSIONS 16
2518 
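/*
 * Synchronously notify all RX queues that a BAID is being removed, so that
 * any per-queue processing for this BAID finishes before its data is freed.
 */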
2519 static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
2520 {
2521 	struct iwl_mvm_delba_notif notif = {
2522 		.metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
2523 		.metadata.sync = 1,
2524 		.delba.baid = baid,
2525 	};
2526 	iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
2527 };
2528 }
2529 static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
2530 				 struct iwl_mvm_baid_data *data)
2531 {
2532 	int i;
2533 
2534 	iwl_mvm_sync_rxq_del_ba(mvm, data->baid);
2535 
2536 	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2537 		int j;
2538 		struct iwl_mvm_reorder_buffer *reorder_buf =
2539 			&data->reorder_buf[i];
2540 		struct iwl_mvm_reorder_buf_entry *entries =
2541 			&data->entries[i * data->entries_per_queue];
2542 
2543 		spin_lock_bh(&reorder_buf->lock);
2544 		if (likely(!reorder_buf->num_stored)) {
2545 			spin_unlock_bh(&reorder_buf->lock);
2546 			continue;
2547 		}
2548 
2549 		/*
2550 		 * This shouldn't happen in regular DELBA since the internal
2551 		 * delBA notification should trigger a release of all frames in
2552 		 * the reorder buffer.
2553 		 */
2554 		WARN_ON(1);
2555 
2556 		for (j = 0; j < reorder_buf->buf_size; j++)
2557 			__skb_queue_purge(&entries[j].e.frames);
2558 		/*
2559 		 * Prevent timer re-arm. This prevents a very far-fetched case
2560 		 * where we timed out on the notification. There may be prior
2561 		 * RX frames pending in the RX queue before the notification
2562 		 * that might get processed between now and the actual deletion
2563 		 * and we would re-arm the timer although we are deleting the
2564 		 * reorder buffer.
2565 		 */
2566 		reorder_buf->removed = true;
2567 		spin_unlock_bh(&reorder_buf->lock);
2568 		del_timer_sync(&reorder_buf->reorder_timer);
2569 	}
2570 }
2571 
2572 static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
2573 					struct iwl_mvm_baid_data *data,
2574 					u16 ssn, u16 buf_size)
2575 {
2576 	int i;
2577 
2578 	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2579 		struct iwl_mvm_reorder_buffer *reorder_buf =
2580 			&data->reorder_buf[i];
2581 		struct iwl_mvm_reorder_buf_entry *entries =
2582 			&data->entries[i * data->entries_per_queue];
2583 		int j;
2584 
2585 		reorder_buf->num_stored = 0;
2586 		reorder_buf->head_sn = ssn;
2587 		reorder_buf->buf_size = buf_size;
2588 		/* rx reorder timer */
2589 		timer_setup(&reorder_buf->reorder_timer,
2590 			    iwl_mvm_reorder_timer_expired, 0);
2591 		spin_lock_init(&reorder_buf->lock);
2592 		reorder_buf->mvm = mvm;
2593 		reorder_buf->queue = i;
2594 		reorder_buf->valid = false;
2595 		for (j = 0; j < reorder_buf->buf_size; j++)
2596 			__skb_queue_head_init(&entries[j].e.frames);
2597 	}
2598 }
2599 
2600 int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2601 		       int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
2602 {
2603 	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2604 	struct iwl_mvm_add_sta_cmd cmd = {};
2605 	struct iwl_mvm_baid_data *baid_data = NULL;
2606 	int ret;
2607 	u32 status;
2608 
2609 	lockdep_assert_held(&mvm->mutex);
2610 
2611 	if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
2612 		IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
2613 		return -ENOSPC;
2614 	}
2615 
2616 	if (iwl_mvm_has_new_rx_api(mvm) && start) {
2617 		u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);
2618 
2619 		/* sparse doesn't like the __align() so don't check */
2620 #ifndef __CHECKER__
2621 		/*
2622 		 * The division below will be OK if either the cache line size
2623 		 * can be divided by the entry size (ALIGN will round up) or
2624 		 * if the entry size can be divided by the cache line size, in
2625 		 * which case the ALIGN() will do nothing.
2626 		 */
2627 		BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
2628 			     sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
2629 #endif
2630 
2631 		/*
2632 		 * Upward align the reorder buffer size to fill an entire cache
2633 		 * line for each queue, to avoid sharing cache lines between
2634 		 * different queues.
2635 		 */
2636 		reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);
2637 
2638 		/*
2639 		 * Allocate here so if allocation fails we can bail out early
2640 		 * before starting the BA session in the firmware
2641 		 */
2642 		baid_data = kzalloc(sizeof(*baid_data) +
2643 				    mvm->trans->num_rx_queues *
2644 				    reorder_buf_size,
2645 				    GFP_KERNEL);
2646 		if (!baid_data)
2647 			return -ENOMEM;
2648 
2649 		/*
2650 		 * This division is why we need the above BUILD_BUG_ON(),
2651 		 * if that doesn't hold then this will not be right.
2652 		 */
2653 		baid_data->entries_per_queue =
2654 			reorder_buf_size / sizeof(baid_data->entries[0]);
2655 	}
2656 
2657 	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2658 	cmd.sta_id = mvm_sta->sta_id;
2659 	cmd.add_modify = STA_MODE_MODIFY;
2660 	if (start) {
2661 		cmd.add_immediate_ba_tid = (u8) tid;
2662 		cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
2663 		cmd.rx_ba_window = cpu_to_le16(buf_size);
2664 	} else {
2665 		cmd.remove_immediate_ba_tid = (u8) tid;
2666 	}
2667 	cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
2668 				  STA_MODIFY_REMOVE_BA_TID;
2669 
2670 	status = ADD_STA_SUCCESS;
2671 	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2672 					  iwl_mvm_add_sta_cmd_size(mvm),
2673 					  &cmd, &status);
2674 	if (ret)
2675 		goto out_free;
2676 
2677 	switch (status & IWL_ADD_STA_STATUS_MASK) {
2678 	case ADD_STA_SUCCESS:
2679 		IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
2680 			     start ? "start" : "stopp");
2681 		break;
2682 	case ADD_STA_IMMEDIATE_BA_FAILURE:
2683 		IWL_WARN(mvm, "RX BA Session refused by fw\n");
2684 		ret = -ENOSPC;
2685 		break;
2686 	default:
2687 		ret = -EIO;
2688 		IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
2689 			start ? "start" : "stopp", status);
2690 		break;
2691 	}
2692 
2693 	if (ret)
2694 		goto out_free;
2695 
2696 	if (start) {
2697 		u8 baid;
2698 
2699 		mvm->rx_ba_sessions++;
2700 
2701 		if (!iwl_mvm_has_new_rx_api(mvm))
2702 			return 0;
2703 
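		/* the FW returns the BAID it allocated in the ADD_STA status */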
2704 		if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
2705 			ret = -EINVAL;
2706 			goto out_free;
2707 		}
2708 		baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
2709 			    IWL_ADD_STA_BAID_SHIFT);
2710 		baid_data->baid = baid;
2711 		baid_data->timeout = timeout;
2712 		baid_data->last_rx = jiffies;
2713 		baid_data->rcu_ptr = &mvm->baid_map[baid];
2714 		timer_setup(&baid_data->session_timer,
2715 			    iwl_mvm_rx_agg_session_expired, 0);
2716 		baid_data->mvm = mvm;
2717 		baid_data->tid = tid;
2718 		baid_data->sta_id = mvm_sta->sta_id;
2719 
2720 		mvm_sta->tid_to_baid[tid] = baid;
2721 		if (timeout)
2722 			mod_timer(&baid_data->session_timer,
2723 				  TU_TO_EXP_TIME(timeout * 2));
2724 
2725 		iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size);
2726 		/*
2727 		 * protect the BA data with RCU to cover a case where our
2728 		 * internal RX sync mechanism will timeout (not that it's
2729 		 * supposed to happen) and we will free the session data while
2730 		 * RX is being processed in parallel
2731 		 */
2732 		IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
2733 			     mvm_sta->sta_id, tid, baid);
2734 		WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
2735 		rcu_assign_pointer(mvm->baid_map[baid], baid_data);
2736 	} else  {
2737 		u8 baid = mvm_sta->tid_to_baid[tid];
2738 
2739 		if (mvm->rx_ba_sessions > 0)
2740 			/* check that restart flow didn't zero the counter */
2741 			mvm->rx_ba_sessions--;
2742 		if (!iwl_mvm_has_new_rx_api(mvm))
2743 			return 0;
2744 
2745 		if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
2746 			return -EINVAL;
2747 
2748 		baid_data = rcu_access_pointer(mvm->baid_map[baid]);
2749 		if (WARN_ON(!baid_data))
2750 			return -EINVAL;
2751 
2752 		/* synchronize all rx queues so we can safely delete */
2753 		iwl_mvm_free_reorder(mvm, baid_data);
2754 		del_timer_sync(&baid_data->session_timer);
2755 		RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
2756 		kfree_rcu(baid_data, rcu_head);
2757 		IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
2758 	}
2759 	return 0;
2760 
2761 out_free:
2762 	kfree(baid_data);
2763 	return ret;
2764 }
2765 
2766 int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2767 		       int tid, u8 queue, bool start)
2768 {
2769 	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2770 	struct iwl_mvm_add_sta_cmd cmd = {};
2771 	int ret;
2772 	u32 status;
2773 
2774 	lockdep_assert_held(&mvm->mutex);
2775 
2776 	if (start) {
2777 		mvm_sta->tfd_queue_msk |= BIT(queue);
2778 		mvm_sta->tid_disable_agg &= ~BIT(tid);
2779 	} else {
2780 		/* In DQA-mode the queue isn't removed on agg termination */
2781 		mvm_sta->tid_disable_agg |= BIT(tid);
2782 	}
2783 
2784 	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2785 	cmd.sta_id = mvm_sta->sta_id;
2786 	cmd.add_modify = STA_MODE_MODIFY;
2787 	if (!iwl_mvm_has_new_tx_api(mvm))
2788 		cmd.modify_mask = STA_MODIFY_QUEUES;
2789 	cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
2790 	cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
2791 	cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
2792 
2793 	status = ADD_STA_SUCCESS;
2794 	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2795 					  iwl_mvm_add_sta_cmd_size(mvm),
2796 					  &cmd, &status);
2797 	if (ret)
2798 		return ret;
2799 
2800 	switch (status & IWL_ADD_STA_STATUS_MASK) {
2801 	case ADD_STA_SUCCESS:
2802 		break;
2803 	default:
2804 		ret = -EIO;
2805 		IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
2806 			start ? "start" : "stopp", status);
2807 		break;
2808 	}
2809 
2810 	return ret;
2811 }
2812 
2813 const u8 tid_to_mac80211_ac[] = {
2814 	IEEE80211_AC_BE,
2815 	IEEE80211_AC_BK,
2816 	IEEE80211_AC_BK,
2817 	IEEE80211_AC_BE,
2818 	IEEE80211_AC_VI,
2819 	IEEE80211_AC_VI,
2820 	IEEE80211_AC_VO,
2821 	IEEE80211_AC_VO,
2822 	IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
2823 };
2824 
2825 static const u8 tid_to_ucode_ac[] = {
2826 	AC_BE,
2827 	AC_BK,
2828 	AC_BK,
2829 	AC_BE,
2830 	AC_VI,
2831 	AC_VI,
2832 	AC_VO,
2833 	AC_VO,
2834 };
2835 
2836 int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2837 			     struct ieee80211_sta *sta, u16 tid, u16 *ssn)
2838 {
2839 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2840 	struct iwl_mvm_tid_data *tid_data;
2841 	u16 normalized_ssn;
2842 	int txq_id;
2843 	int ret;
2844 
2845 	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
2846 		return -EINVAL;
2847 
2848 	if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
2849 	    mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
2850 		IWL_ERR(mvm,
2851 			"Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
2852 			mvmsta->tid_data[tid].state);
2853 		return -ENXIO;
2854 	}
2855 
2856 	lockdep_assert_held(&mvm->mutex);
2857 
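	/* with the new TX API, allocate the per-TID queue now if none exists */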
2858 	if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE &&
2859 	    iwl_mvm_has_new_tx_api(mvm)) {
2860 		u8 ac = tid_to_mac80211_ac[tid];
2861 
2862 		ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
2863 		if (ret)
2864 			return ret;
2865 	}
2866 
2867 	spin_lock_bh(&mvmsta->lock);
2868 
2869 	/* possible race condition - we entered D0i3 while starting agg */
2870 	if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
2871 		spin_unlock_bh(&mvmsta->lock);
2872 		IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
2873 		return -EIO;
2874 	}
2875 
2876 	spin_lock(&mvm->queue_info_lock);
2877 
2878 	/*
2879 	 * Note the possible cases:
2880 	 *  1. An enabled TXQ - TXQ needs to become agg'ed
2881 	 *  2. The TXQ hasn't yet been enabled, so find a free one and mark
2882 	 *	it as reserved
2883 	 */
2884 	txq_id = mvmsta->tid_data[tid].txq_id;
2885 	if (txq_id == IWL_MVM_INVALID_QUEUE) {
2886 		txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
2887 						 IWL_MVM_DQA_MIN_DATA_QUEUE,
2888 						 IWL_MVM_DQA_MAX_DATA_QUEUE);
2889 		if (txq_id < 0) {
2890 			ret = txq_id;
2891 			IWL_ERR(mvm, "Failed to allocate agg queue\n");
2892 			goto release_locks;
2893 		}
2894 
2895 		/* TXQ hasn't yet been enabled, so mark it only as reserved */
2896 		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
2897 	} else if (unlikely(mvm->queue_info[txq_id].status ==
2898 			    IWL_MVM_QUEUE_SHARED)) {
2899 		ret = -ENXIO;
2900 		IWL_DEBUG_TX_QUEUES(mvm,
2901 				    "Can't start tid %d agg on shared queue!\n",
2902 				    tid);
2903 		goto release_locks;
2904 	}
2905 
2906 	spin_unlock(&mvm->queue_info_lock);
2907 
2908 	IWL_DEBUG_TX_QUEUES(mvm,
2909 			    "AGG for tid %d will be on queue #%d\n",
2910 			    tid, txq_id);
2911 
2912 	tid_data = &mvmsta->tid_data[tid];
2913 	tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
2914 	tid_data->txq_id = txq_id;
2915 	*ssn = tid_data->ssn;
2916 
2917 	IWL_DEBUG_TX_QUEUES(mvm,
2918 			    "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
2919 			    mvmsta->sta_id, tid, txq_id, tid_data->ssn,
2920 			    tid_data->next_reclaimed);
2921 
2922 	/*
2923 	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
2924 	 * to align the wrap around of ssn so we compare relevant values.
2925 	 */
2926 	normalized_ssn = tid_data->ssn;
2927 	if (mvm->trans->cfg->gen2)
2928 		normalized_ssn &= 0xff;
2929 
2930 	if (normalized_ssn == tid_data->next_reclaimed) {
2931 		tid_data->state = IWL_AGG_STARTING;
2932 		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2933 	} else {
2934 		tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
2935 	}
2936 
2937 	ret = 0;
2938 	goto out;
2939 
2940 release_locks:
2941 	spin_unlock(&mvm->queue_info_lock);
2942 out:
2943 	spin_unlock_bh(&mvmsta->lock);
2944 
2945 	return ret;
2946 }
2947 
2948 int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2949 			    struct ieee80211_sta *sta, u16 tid, u16 buf_size,
2950 			    bool amsdu)
2951 {
2952 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2953 	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2954 	unsigned int wdg_timeout =
2955 		iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
2956 	int queue, ret;
2957 	bool alloc_queue = true;
2958 	enum iwl_mvm_queue_status queue_status;
2959 	u16 ssn;
2960 
2961 	struct iwl_trans_txq_scd_cfg cfg = {
2962 		.sta_id = mvmsta->sta_id,
2963 		.tid = tid,
2964 		.frame_limit = buf_size,
2965 		.aggregate = true,
2966 	};
2967 
2968 	/*
2969 	 * When FW supports TLC_OFFLOAD, it also implements Tx aggregation
2970 	 * manager, so this function should never be called in this case.
2971 	 */
2972 	if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
2973 		return -EINVAL;
2974 
2975 	BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
2976 		     != IWL_MAX_TID_COUNT);
2977 
2978 	spin_lock_bh(&mvmsta->lock);
2979 	ssn = tid_data->ssn;
2980 	queue = tid_data->txq_id;
2981 	tid_data->state = IWL_AGG_ON;
2982 	mvmsta->agg_tids |= BIT(tid);
2983 	tid_data->ssn = 0xffff;
2984 	tid_data->amsdu_in_ampdu_allowed = amsdu;
2985 	spin_unlock_bh(&mvmsta->lock);
2986 
2987 	if (iwl_mvm_has_new_tx_api(mvm)) {
2988 		/*
2989 		 * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
2990 		 * would have failed, so if we are here there is no need to
2991 		 * allocate a queue.
2992 		 * However, if aggregation size is different than the default
2993 		 * size, the scheduler should be reconfigured.
2994 		 * We cannot do this with the new TX API, so return unsupported
2995 		 * for now, until it will be offloaded to firmware..
2996 		 * for now, until it is offloaded to firmware.
2997 		 * should be updated as well.
2998 		 */
2999 		if (buf_size < IWL_FRAME_LIMIT)
3000 			return -ENOTSUPP;
3001 
3002 		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
3003 		if (ret)
3004 			return -EIO;
3005 		goto out;
3006 	}
3007 
3008 	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
3009 
3010 	spin_lock_bh(&mvm->queue_info_lock);
3011 	queue_status = mvm->queue_info[queue].status;
3012 	spin_unlock_bh(&mvm->queue_info_lock);
3013 
3014 	/* Maybe there is no need to even alloc a queue... */
3015 	if (queue_status == IWL_MVM_QUEUE_READY)
3016 		alloc_queue = false;
3017 
3018 	/*
3019 	 * Only reconfig the SCD for the queue if the window size has
3020 	 * changed from current (become smaller)
3021 	 */
3022 	if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
3023 		/*
3024 		 * If reconfiguring an existing queue, it first must be
3025 		 * drained
3026 		 */
3027 		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
3028 						     BIT(queue));
3029 		if (ret) {
3030 			IWL_ERR(mvm,
3031 				"Error draining queue before reconfig\n");
3032 			return ret;
3033 		}
3034 
3035 		ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
3036 					   mvmsta->sta_id, tid,
3037 					   buf_size, ssn);
3038 		if (ret) {
3039 			IWL_ERR(mvm,
3040 				"Error reconfiguring TXQ #%d\n", queue);
3041 			return ret;
3042 		}
3043 	}
3044 
3045 	if (alloc_queue)
3046 		iwl_mvm_enable_txq(mvm, queue,
3047 				   vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
3048 				   &cfg, wdg_timeout);
3049 
3050 	/* Send ADD_STA command to enable aggs only if the queue isn't shared */
3051 	if (queue_status != IWL_MVM_QUEUE_SHARED) {
3052 		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
3053 		if (ret)
3054 			return -EIO;
3055 	}
3056 
3057 	/* No need to mark as reserved */
3058 	spin_lock_bh(&mvm->queue_info_lock);
3059 	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
3060 	spin_unlock_bh(&mvm->queue_info_lock);
3061 
3062 out:
3063 	/*
3064 	 * Even though in theory the peer could have different
3065 	 * aggregation reorder buffer sizes for different sessions,
3066 	 * our ucode doesn't allow for that and has a global limit
3067 	 * for each station. Therefore, use the minimum of all the
3068 	 * aggregation sessions and our default value.
3069 	 */
3070 	mvmsta->max_agg_bufsize =
3071 		min(mvmsta->max_agg_bufsize, buf_size);
3072 	mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
3073 
3074 	IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
3075 		     sta->addr, tid);
3076 
3077 	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq, false);
3078 }
3079 
3080 static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
3081 					struct iwl_mvm_sta *mvmsta,
3082 					struct iwl_mvm_tid_data *tid_data)
3083 {
3084 	u16 txq_id = tid_data->txq_id;
3085 
3086 	if (iwl_mvm_has_new_tx_api(mvm))
3087 		return;
3088 
3089 	spin_lock_bh(&mvm->queue_info_lock);
3090 	/*
3091 	 * The TXQ is marked as reserved only if no traffic came through yet.
3092 	 * This means no traffic has been sent on this TID (agg'd or not), so
3093 	 * we no longer have use for the queue. Since it hasn't even been
3094 	 * allocated through iwl_mvm_enable_txq, we can just mark it back as
3095 	 * free.
3096 	 */
3097 	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
3098 		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
3099 		tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
3100 	}
3101 
3102 	spin_unlock_bh(&mvm->queue_info_lock);
3103 }
3104 
3105 int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3106 			    struct ieee80211_sta *sta, u16 tid)
3107 {
3108 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3109 	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3110 	u16 txq_id;
3111 	int err;
3112 
3113 	/*
3114 	 * If mac80211 is cleaning its state, then say that we finished since
3115 	 * our state has been cleared anyway.
3116 	 */
3117 	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
3118 		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3119 		return 0;
3120 	}
3121 
3122 	spin_lock_bh(&mvmsta->lock);
3123 
3124 	txq_id = tid_data->txq_id;
3125 
3126 	IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
3127 			    mvmsta->sta_id, tid, txq_id, tid_data->state);
3128 
3129 	mvmsta->agg_tids &= ~BIT(tid);
3130 
3131 	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
3132 
3133 	switch (tid_data->state) {
3134 	case IWL_AGG_ON:
3135 		tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
3136 
3137 		IWL_DEBUG_TX_QUEUES(mvm,
3138 				    "ssn = %d, next_recl = %d\n",
3139 				    tid_data->ssn, tid_data->next_reclaimed);
3140 
3141 		tid_data->ssn = 0xffff;
3142 		tid_data->state = IWL_AGG_OFF;
3143 		spin_unlock_bh(&mvmsta->lock);
3144 
3145 		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3146 
3147 		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
3148 		return 0;
3149 	case IWL_AGG_STARTING:
3150 	case IWL_EMPTYING_HW_QUEUE_ADDBA:
3151 		/*
3152 		 * The agg session has been stopped before it was set up. This
3153 		 * can happen when the AddBA timer times out for example.
3154 		 */
3155 
3156 		/* No barriers since we are under mutex */
3157 		lockdep_assert_held(&mvm->mutex);
3158 
3159 		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3160 		tid_data->state = IWL_AGG_OFF;
3161 		err = 0;
3162 		break;
3163 	default:
3164 		IWL_ERR(mvm,
3165 			"Stopping AGG while state not ON or starting for %d on %d (%d)\n",
3166 			mvmsta->sta_id, tid, tid_data->state);
3167 		IWL_ERR(mvm,
3168 			"\ttid_data->txq_id = %d\n", tid_data->txq_id);
3169 		err = -EINVAL;
3170 	}
3171 
3172 	spin_unlock_bh(&mvmsta->lock);
3173 
3174 	return err;
3175 }
3176 
3177 int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3178 			    struct ieee80211_sta *sta, u16 tid)
3179 {
3180 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3181 	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3182 	u16 txq_id;
3183 	enum iwl_mvm_agg_state old_state;
3184 
3185 	/*
3186 	 * First set the agg state to OFF to avoid calling
3187 	 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
3188 	 */
3189 	spin_lock_bh(&mvmsta->lock);
3190 	txq_id = tid_data->txq_id;
3191 	IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
3192 			    mvmsta->sta_id, tid, txq_id, tid_data->state);
3193 	old_state = tid_data->state;
3194 	tid_data->state = IWL_AGG_OFF;
3195 	mvmsta->agg_tids &= ~BIT(tid);
3196 	spin_unlock_bh(&mvmsta->lock);
3197 
3198 	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
3199 
3200 	if (old_state >= IWL_AGG_ON) {
3201 		iwl_mvm_drain_sta(mvm, mvmsta, true);
3202 
3203 		if (iwl_mvm_has_new_tx_api(mvm)) {
3204 			if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id,
3205 						   BIT(tid), 0))
3206 				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
3207 			iwl_trans_wait_txq_empty(mvm->trans, txq_id);
3208 		} else {
3209 			if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
3210 				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
3211 			iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
3212 		}
3213 
3214 		iwl_mvm_drain_sta(mvm, mvmsta, false);
3215 
3216 		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
3217 	}
3218 
3219 	return 0;
3220 }
3221 
3222 static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
3223 {
3224 	int i, max = -1, max_offs = -1;
3225 
3226 	lockdep_assert_held(&mvm->mutex);
3227 
3228 	/* Pick the unused key offset with the highest 'deleted'
3229 	 * counter. Every time a key is deleted, all the counters
3230 	 * are incremented and the one that was just deleted is
3231 	 * reset to zero. Thus, the highest counter is the one
3232 	 * that was deleted longest ago. Pick that one.
3233 	 */
3234 	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
3235 		if (test_bit(i, mvm->fw_key_table))
3236 			continue;
3237 		if (mvm->fw_key_deleted[i] > max) {
3238 			max = mvm->fw_key_deleted[i];
3239 			max_offs = i;
3240 		}
3241 	}
3242 
3243 	if (max_offs < 0)
3244 		return STA_KEY_IDX_INVALID;
3245 
3246 	return max_offs;
3247 }
3248 
3249 static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
3250 					       struct ieee80211_vif *vif,
3251 					       struct ieee80211_sta *sta)
3252 {
3253 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3254 
3255 	if (sta)
3256 		return iwl_mvm_sta_from_mac80211(sta);
3257 
3258 	/*
3259 	 * The device expects GTKs for station interfaces to be
3260 	 * installed as GTKs for the AP station. If we have no
3261 	 * station ID, then use the AP's station ID.
3262 	 */
3263 	if (vif->type == NL80211_IFTYPE_STATION &&
3264 	    mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
3265 		u8 sta_id = mvmvif->ap_sta_id;
3266 
3267 		sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
3268 					    lockdep_is_held(&mvm->mutex));
3269 
3270 		/*
3271 		 * It is possible that the 'sta' parameter is NULL,
3272 		 * for example when a GTK is removed - the sta_id will then
3273 		 * be the AP ID, and no station was passed by mac80211.
3274 		 */
3275 		if (IS_ERR_OR_NULL(sta))
3276 			return NULL;
3277 
3278 		return iwl_mvm_sta_from_mac80211(sta);
3279 	}
3280 
3281 	return NULL;
3282 }
3283 
3284 static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
3285 				u32 sta_id,
3286 				struct ieee80211_key_conf *key, bool mcast,
3287 				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
3288 				u8 key_offset, bool mfp)
3289 {
3290 	union {
3291 		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
3292 		struct iwl_mvm_add_sta_key_cmd cmd;
3293 	} u = {};
3294 	__le16 key_flags;
3295 	int ret;
3296 	u32 status;
3297 	u16 keyidx;
3298 	u64 pn = 0;
3299 	int i, size;
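	/* newer FW takes the TKIP MIC keys and the TX PN in the ADD_STA_KEY cmd */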
3300 	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
3301 				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
3302 
3303 	if (sta_id == IWL_MVM_INVALID_STA)
3304 		return -EINVAL;
3305 
3306 	keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
3307 		 STA_KEY_FLG_KEYID_MSK;
3308 	key_flags = cpu_to_le16(keyidx);
3309 	key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
3310 
3311 	switch (key->cipher) {
3312 	case WLAN_CIPHER_SUITE_TKIP:
3313 		key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
3314 		if (new_api) {
3315 			memcpy((void *)&u.cmd.tx_mic_key,
3316 			       &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
3317 			       IWL_MIC_KEY_SIZE);
3318 
3319 			memcpy((void *)&u.cmd.rx_mic_key,
3320 			       &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
3321 			       IWL_MIC_KEY_SIZE);
3322 			pn = atomic64_read(&key->tx_pn);
3323 
3324 		} else {
3325 			u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
3326 			for (i = 0; i < 5; i++)
3327 				u.cmd_v1.tkip_rx_ttak[i] =
3328 					cpu_to_le16(tkip_p1k[i]);
3329 		}
3330 		memcpy(u.cmd.common.key, key->key, key->keylen);
3331 		break;
3332 	case WLAN_CIPHER_SUITE_CCMP:
3333 		key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
3334 		memcpy(u.cmd.common.key, key->key, key->keylen);
3335 		if (new_api)
3336 			pn = atomic64_read(&key->tx_pn);
3337 		break;
3338 	case WLAN_CIPHER_SUITE_WEP104:
3339 		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
3340 		/* fall through */
3341 	case WLAN_CIPHER_SUITE_WEP40:
3342 		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
3343 		memcpy(u.cmd.common.key + 3, key->key, key->keylen);
3344 		break;
3345 	case WLAN_CIPHER_SUITE_GCMP_256:
3346 		key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
3347 		/* fall through */
3348 	case WLAN_CIPHER_SUITE_GCMP:
3349 		key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
3350 		memcpy(u.cmd.common.key, key->key, key->keylen);
3351 		if (new_api)
3352 			pn = atomic64_read(&key->tx_pn);
3353 		break;
3354 	default:
3355 		key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
3356 		memcpy(u.cmd.common.key, key->key, key->keylen);
3357 	}
3358 
3359 	if (mcast)
3360 		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
3361 	if (mfp)
3362 		key_flags |= cpu_to_le16(STA_KEY_MFP);
3363 
3364 	u.cmd.common.key_offset = key_offset;
3365 	u.cmd.common.key_flags = key_flags;
3366 	u.cmd.common.sta_id = sta_id;
3367 
3368 	if (new_api) {
3369 		u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
3370 		size = sizeof(u.cmd);
3371 	} else {
3372 		size = sizeof(u.cmd_v1);
3373 	}
3374 
3375 	status = ADD_STA_SUCCESS;
3376 	if (cmd_flags & CMD_ASYNC)
3377 		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
3378 					   &u.cmd);
3379 	else
3380 		ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
3381 						  &u.cmd, &status);
3382 
3383 	switch (status) {
3384 	case ADD_STA_SUCCESS:
3385 		IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
3386 		break;
3387 	default:
3388 		ret = -EIO;
3389 		IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
3390 		break;
3391 	}
3392 
3393 	return ret;
3394 }
3395 
3396 static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
3397 				 struct ieee80211_key_conf *keyconf,
3398 				 u8 sta_id, bool remove_key)
3399 {
3400 	struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
3401 
3402 	/* verify the key details match the required command's expectations */
3403 	if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
3404 		    (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
3405 		    (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
3406 		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
3407 		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
3408 		return -EINVAL;
3409 
3410 	if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
3411 		    keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
3412 		return -EINVAL;
3413 
3414 	igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
3415 	igtk_cmd.sta_id = cpu_to_le32(sta_id);
3416 
3417 	if (remove_key) {
3418 		igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
3419 	} else {
3420 		struct ieee80211_key_seq seq;
3421 		const u8 *pn;
3422 
3423 		switch (keyconf->cipher) {
3424 		case WLAN_CIPHER_SUITE_AES_CMAC:
3425 			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
3426 			break;
3427 		case WLAN_CIPHER_SUITE_BIP_GMAC_128:
3428 		case WLAN_CIPHER_SUITE_BIP_GMAC_256:
3429 			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
3430 			break;
3431 		default:
3432 			return -EINVAL;
3433 		}
3434 
3435 		memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
3436 		if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
3437 			igtk_cmd.ctrl_flags |=
3438 				cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
3439 		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3440 		pn = seq.aes_cmac.pn;
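		/*
		 * mac80211 keeps the PN big-endian (pn[0] is the most
		 * significant byte), so build the 48-bit counter accordingly.
		 */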
3441 		igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
3442 						       ((u64) pn[4] << 8) |
3443 						       ((u64) pn[3] << 16) |
3444 						       ((u64) pn[2] << 24) |
3445 						       ((u64) pn[1] << 32) |
3446 						       ((u64) pn[0] << 40));
3447 	}
3448 
	IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
		       remove_key ? "removing" : "installing",
		       le32_to_cpu(igtk_cmd.sta_id));
3452 
3453 	if (!iwl_mvm_has_new_rx_api(mvm)) {
3454 		struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
3455 			.ctrl_flags = igtk_cmd.ctrl_flags,
3456 			.key_id = igtk_cmd.key_id,
3457 			.sta_id = igtk_cmd.sta_id,
3458 			.receive_seq_cnt = igtk_cmd.receive_seq_cnt
3459 		};
3460 
3461 		memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
3462 		       ARRAY_SIZE(igtk_cmd_v1.igtk));
3463 		return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
3464 					    sizeof(igtk_cmd_v1), &igtk_cmd_v1);
3465 	}
3466 	return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
3467 				    sizeof(igtk_cmd), &igtk_cmd);
3468 }
3469 
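/*
 * Find the MAC address of the peer for this key: the station's own address
 * if one was given, otherwise the AP station's address on a station
 * interface; NULL if no suitable address is known.
 */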
3471 static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
3472 				       struct ieee80211_vif *vif,
3473 				       struct ieee80211_sta *sta)
3474 {
3475 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3476 
3477 	if (sta)
3478 		return sta->addr;
3479 
3480 	if (vif->type == NL80211_IFTYPE_STATION &&
3481 	    mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
3482 		u8 sta_id = mvmvif->ap_sta_id;
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
			return NULL;

		return sta->addr;
	}

	return NULL;
3490 }
3491 
3492 static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3493 				 struct ieee80211_vif *vif,
3494 				 struct ieee80211_sta *sta,
3495 				 struct ieee80211_key_conf *keyconf,
3496 				 u8 key_offset,
3497 				 bool mcast)
3498 {
3499 	int ret;
3500 	const u8 *addr;
3501 	struct ieee80211_key_seq seq;
3502 	u16 p1k[5];
3503 	u32 sta_id;
3504 	bool mfp = false;
3505 
3506 	if (sta) {
3507 		struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3508 
3509 		sta_id = mvm_sta->sta_id;
3510 		mfp = sta->mfp;
3511 	} else if (vif->type == NL80211_IFTYPE_AP &&
3512 		   !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
3513 		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3514 
3515 		sta_id = mvmvif->mcast_sta.sta_id;
3516 	} else {
3517 		IWL_ERR(mvm, "Failed to find station id\n");
3518 		return -EINVAL;
3519 	}
3520 
3521 	switch (keyconf->cipher) {
3522 	case WLAN_CIPHER_SUITE_TKIP:
		addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
		if (!addr) {
			IWL_ERR(mvm, "Failed to find mac address\n");
			return -EINVAL;
		}
		/* get phase 1 key from mac80211 */
3525 		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3526 		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
3527 		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3528 					   seq.tkip.iv32, p1k, 0, key_offset,
3529 					   mfp);
3530 		break;
3531 	case WLAN_CIPHER_SUITE_CCMP:
3532 	case WLAN_CIPHER_SUITE_WEP40:
3533 	case WLAN_CIPHER_SUITE_WEP104:
3534 	case WLAN_CIPHER_SUITE_GCMP:
3535 	case WLAN_CIPHER_SUITE_GCMP_256:
3536 		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3537 					   0, NULL, 0, key_offset, mfp);
3538 		break;
3539 	default:
3540 		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3541 					   0, NULL, 0, key_offset, mfp);
3542 	}
3543 
3544 	return ret;
3545 }
3546 
3547 static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
3548 				    struct ieee80211_key_conf *keyconf,
3549 				    bool mcast)
3550 {
3551 	union {
3552 		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
3553 		struct iwl_mvm_add_sta_key_cmd cmd;
3554 	} u = {};
3555 	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
3556 				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
3557 	__le16 key_flags;
3558 	int ret, size;
3559 	u32 status;
3560 
3561 	/* This is a valid situation for GTK removal */
3562 	if (sta_id == IWL_MVM_INVALID_STA)
3563 		return 0;
3564 
3565 	key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
3566 				 STA_KEY_FLG_KEYID_MSK);
3567 	key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
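	/* marking the key as not valid tells the firmware to remove it */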
3568 	key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
3569 
3570 	if (mcast)
3571 		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
3572 
3573 	/*
3574 	 * The fields assigned here are in the same location at the start
3575 	 * of the command, so we can do this union trick.
3576 	 */
3577 	u.cmd.common.key_flags = key_flags;
3578 	u.cmd.common.key_offset = keyconf->hw_key_idx;
3579 	u.cmd.common.sta_id = sta_id;
3580 
3581 	size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);
3582 
3583 	status = ADD_STA_SUCCESS;
3584 	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
3585 					  &status);
3586 
3587 	switch (status) {
3588 	case ADD_STA_SUCCESS:
3589 		IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
3590 		break;
3591 	default:
3592 		ret = -EIO;
3593 		IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
3594 		break;
3595 	}
3596 
3597 	return ret;
3598 }
3599 
3600 int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3601 			struct ieee80211_vif *vif,
3602 			struct ieee80211_sta *sta,
3603 			struct ieee80211_key_conf *keyconf,
3604 			u8 key_offset)
3605 {
3606 	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3607 	struct iwl_mvm_sta *mvm_sta;
3608 	u8 sta_id = IWL_MVM_INVALID_STA;
3609 	int ret;
3610 	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};
3611 
3612 	lockdep_assert_held(&mvm->mutex);
3613 
3614 	if (vif->type != NL80211_IFTYPE_AP ||
3615 	    keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
3616 		/* Get the station id from the mvm local station table */
3617 		mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3618 		if (!mvm_sta) {
3619 			IWL_ERR(mvm, "Failed to find station\n");
3620 			return -EINVAL;
3621 		}
3622 		sta_id = mvm_sta->sta_id;
3623 
3624 		/*
3625 		 * It is possible that the 'sta' parameter is NULL, and thus
3626 		 * there is a need to retrieve the sta from the local station
3627 		 * table.
3628 		 */
3629 		if (!sta) {
3630 			sta = rcu_dereference_protected(
3631 				mvm->fw_id_to_mac_id[sta_id],
3632 				lockdep_is_held(&mvm->mutex));
3633 			if (IS_ERR_OR_NULL(sta)) {
3634 				IWL_ERR(mvm, "Invalid station id\n");
3635 				return -EINVAL;
3636 			}
3637 		}
3638 
3639 		if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
3640 			return -EINVAL;
3641 	} else {
3642 		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3643 
3644 		sta_id = mvmvif->mcast_sta.sta_id;
3645 	}
3646 
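	/*
	 * IGTKs (BIP ciphers) are programmed with the dedicated
	 * MGMT_MCAST_KEY command rather than ADD_STA_KEY.
	 */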
3647 	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3648 	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3649 	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
3650 		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
3651 		goto end;
3652 	}
3653 
3654 	/* If the key_offset is not pre-assigned, we need to find a
3655 	 * new offset to use.  In normal cases, the offset is not
3656 	 * pre-assigned, but during HW_RESTART we want to reuse the
3657 	 * same indices, so we pass them when this function is called.
3658 	 *
	 * In D3 entry, we need to hard-code the indices (because the
3660 	 * firmware hardcodes the PTK offset to 0).  In this case, we
3661 	 * need to make sure we don't overwrite the hw_key_idx in the
3662 	 * keyconf structure, because otherwise we cannot configure
3663 	 * the original ones back when resuming.
3664 	 */
3665 	if (key_offset == STA_KEY_IDX_INVALID) {
3666 		key_offset  = iwl_mvm_set_fw_key_idx(mvm);
3667 		if (key_offset == STA_KEY_IDX_INVALID)
3668 			return -ENOSPC;
3669 		keyconf->hw_key_idx = key_offset;
3670 	}
3671 
3672 	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
3673 	if (ret)
3674 		goto end;
3675 
3676 	/*
3677 	 * For WEP, the same key is used for multicast and unicast. Upload it
3678 	 * again, using the same key offset, and now pointing the other one
3679 	 * to the same key slot (offset).
3680 	 * If this fails, remove the original as well.
3681 	 */
3682 	if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3683 	     keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
3684 	    sta) {
3685 		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
3686 					    key_offset, !mcast);
3687 		if (ret) {
3688 			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
3689 			goto end;
3690 		}
3691 	}
3692 
3693 	__set_bit(key_offset, mvm->fw_key_table);
3694 
3695 end:
3696 	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
3697 		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
3698 		      sta ? sta->addr : zero_addr, ret);
3699 	return ret;
3700 }
3701 
3702 int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
3703 			   struct ieee80211_vif *vif,
3704 			   struct ieee80211_sta *sta,
3705 			   struct ieee80211_key_conf *keyconf)
3706 {
3707 	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3708 	struct iwl_mvm_sta *mvm_sta;
3709 	u8 sta_id = IWL_MVM_INVALID_STA;
3710 	int ret, i;
3711 
3712 	lockdep_assert_held(&mvm->mutex);
3713 
3714 	/* Get the station from the mvm local station table */
3715 	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3716 	if (mvm_sta)
3717 		sta_id = mvm_sta->sta_id;
3718 	else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
3719 		sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id;
3720 
3722 	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
3723 		      keyconf->keyidx, sta_id);
3724 
3725 	if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3726 			keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3727 			keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256))
3728 		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
3729 
3730 	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
3731 		IWL_ERR(mvm, "offset %d not used in fw key table.\n",
3732 			keyconf->hw_key_idx);
3733 		return -ENOENT;
3734 	}
3735 
	/*
	 * Track which key offset was deleted last: age every counter and
	 * reset the freed one, so iwl_mvm_set_fw_key_idx() can prefer the
	 * offset that has been unused the longest.
	 */
3737 	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
3738 		if (mvm->fw_key_deleted[i] < U8_MAX)
3739 			mvm->fw_key_deleted[i]++;
3740 	}
3741 	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;
3742 
3743 	if (sta && !mvm_sta) {
3744 		IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
3745 		return 0;
3746 	}
3747 
3748 	ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
3749 	if (ret)
3750 		return ret;
3751 
3752 	/* delete WEP key twice to get rid of (now useless) offset */
3753 	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3754 	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
3755 		ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);
3756 
3757 	return ret;
3758 }
3759 
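/*
 * Called by mac80211 with a fresh TKIP phase 1 (RX) key when iv32 changes;
 * program it into the firmware asynchronously since we cannot sleep here.
 */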
3760 void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
3761 			     struct ieee80211_vif *vif,
3762 			     struct ieee80211_key_conf *keyconf,
3763 			     struct ieee80211_sta *sta, u32 iv32,
3764 			     u16 *phase1key)
3765 {
3766 	struct iwl_mvm_sta *mvm_sta;
3767 	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3768 	bool mfp = sta ? sta->mfp : false;
3769 
3770 	rcu_read_lock();
3771 
3772 	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3773 	if (WARN_ON_ONCE(!mvm_sta))
3774 		goto unlock;
3775 	iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast,
3776 			     iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx,
3777 			     mfp);
3778 
3779  unlock:
3780 	rcu_read_unlock();
3781 }
3782 
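/* Clear the PS flag for this station in the firmware, i.e. mark it awake */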
3783 void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
3784 				struct ieee80211_sta *sta)
3785 {
3786 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3787 	struct iwl_mvm_add_sta_cmd cmd = {
3788 		.add_modify = STA_MODE_MODIFY,
3789 		.sta_id = mvmsta->sta_id,
3790 		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
3791 		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3792 	};
3793 	int ret;
3794 
3795 	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
3796 				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3797 	if (ret)
3798 		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3799 }
3800 
3801 void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
3802 				       struct ieee80211_sta *sta,
3803 				       enum ieee80211_frame_release_type reason,
3804 				       u16 cnt, u16 tids, bool more_data,
3805 				       bool single_sta_queue)
3806 {
3807 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3808 	struct iwl_mvm_add_sta_cmd cmd = {
3809 		.add_modify = STA_MODE_MODIFY,
3810 		.sta_id = mvmsta->sta_id,
3811 		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
3812 		.sleep_tx_count = cpu_to_le16(cnt),
3813 		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3814 	};
3815 	int tid, ret;
3816 	unsigned long _tids = tids;
3817 
3818 	/* convert TIDs to ACs - we don't support TSPEC so that's OK
3819 	 * Note that this field is reserved and unused by firmware not
3820 	 * supporting GO uAPSD, so it's safe to always do this.
3821 	 */
3822 	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
3823 		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
3824 
	/* If we're releasing frames from aggregation or dqa queues then check
	 * if all the queues that we're releasing frames from, combined, have:
	 *  - more frames queued than can be released in this service period
	 *    ('cnt'), in which case more_data needs to be set
	 *  - fewer than 'cnt' frames, in which case we need to adjust the
	 *    sleep_tx_count in the firmware command (we do that unconditionally)
	 */
3832 	if (single_sta_queue) {
3833 		int remaining = cnt;
3834 		int sleep_tx_count;
3835 
3836 		spin_lock_bh(&mvmsta->lock);
3837 		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
3838 			struct iwl_mvm_tid_data *tid_data;
3839 			u16 n_queued;
3840 
3841 			tid_data = &mvmsta->tid_data[tid];
3842 
3843 			n_queued = iwl_mvm_tid_queued(mvm, tid_data);
3844 			if (n_queued > remaining) {
3845 				more_data = true;
3846 				remaining = 0;
3847 				break;
3848 			}
3849 			remaining -= n_queued;
3850 		}
3851 		sleep_tx_count = cnt - remaining;
3852 		if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
3853 			mvmsta->sleep_tx_count = sleep_tx_count;
3854 		spin_unlock_bh(&mvmsta->lock);
3855 
3856 		cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
3857 		if (WARN_ON(cnt - remaining == 0)) {
3858 			ieee80211_sta_eosp(sta);
3859 			return;
3860 		}
3861 	}
3862 
3863 	/* Note: this is ignored by firmware not supporting GO uAPSD */
3864 	if (more_data)
3865 		cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;
3866 
3867 	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
3868 		mvmsta->next_status_eosp = true;
3869 		cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
3870 	} else {
3871 		cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
3872 	}
3873 
3874 	/* block the Tx queues until the FW updated the sleep Tx count */
3875 	iwl_trans_block_txq_ptrs(mvm->trans, true);
3876 
3877 	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
3878 				   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
3879 				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3880 	if (ret)
3881 		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3882 }
3883 
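/*
 * The firmware tells us when it has finished releasing the frames for a
 * service period; forward the end-of-service-period indication to mac80211.
 */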
3884 void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
3885 			   struct iwl_rx_cmd_buffer *rxb)
3886 {
3887 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
3888 	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
3889 	struct ieee80211_sta *sta;
3890 	u32 sta_id = le32_to_cpu(notif->sta_id);
3891 
3892 	if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
3893 		return;
3894 
3895 	rcu_read_lock();
3896 	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
3897 	if (!IS_ERR_OR_NULL(sta))
3898 		ieee80211_sta_eosp(sta);
3899 	rcu_read_unlock();
3900 }
3901 
3902 void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
3903 				   struct iwl_mvm_sta *mvmsta, bool disable)
3904 {
3905 	struct iwl_mvm_add_sta_cmd cmd = {
3906 		.add_modify = STA_MODE_MODIFY,
3907 		.sta_id = mvmsta->sta_id,
3908 		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
3909 		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
3910 		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3911 	};
3912 	int ret;
3913 
3914 	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
3915 				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3916 	if (ret)
3917 		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3918 }
3919 
3920 void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
3921 				      struct ieee80211_sta *sta,
3922 				      bool disable)
3923 {
3924 	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3925 
3926 	spin_lock_bh(&mvm_sta->lock);
3927 
3928 	if (mvm_sta->disable_tx == disable) {
3929 		spin_unlock_bh(&mvm_sta->lock);
3930 		return;
3931 	}
3932 
3933 	mvm_sta->disable_tx = disable;
3934 
3935 	/* Tell mac80211 to start/stop queuing tx for this station */
3936 	ieee80211_sta_block_awake(mvm->hw, sta, disable);
3937 
3938 	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);
3939 
3940 	spin_unlock_bh(&mvm_sta->lock);
3941 }
3942 
3943 static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
3944 					      struct iwl_mvm_vif *mvmvif,
3945 					      struct iwl_mvm_int_sta *sta,
3946 					      bool disable)
3947 {
3948 	u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
3949 	struct iwl_mvm_add_sta_cmd cmd = {
3950 		.add_modify = STA_MODE_MODIFY,
3951 		.sta_id = sta->sta_id,
3952 		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
3953 		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
3954 		.mac_id_n_color = cpu_to_le32(id),
3955 	};
3956 	int ret;
3957 
3958 	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0,
3959 				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3960 	if (ret)
3961 		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3962 }
3963 
3964 void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
3965 				       struct iwl_mvm_vif *mvmvif,
3966 				       bool disable)
3967 {
3968 	struct ieee80211_sta *sta;
3969 	struct iwl_mvm_sta *mvm_sta;
3970 	int i;
3971 
3972 	lockdep_assert_held(&mvm->mutex);
3973 
3974 	/* Block/unblock all the stations of the given mvmvif */
3975 	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
3976 		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
3977 						lockdep_is_held(&mvm->mutex));
3978 		if (IS_ERR_OR_NULL(sta))
3979 			continue;
3980 
3981 		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3982 		if (mvm_sta->mac_id_n_color !=
3983 		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
3984 			continue;
3985 
3986 		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
3987 	}
3988 
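	/*
	 * Firmware without STA_TYPE support has no separate internal
	 * multicast/broadcast stations, so there is nothing more to do.
	 */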
3989 	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
3990 		return;
3991 
3992 	/* Need to block/unblock also multicast station */
3993 	if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA)
3994 		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
3995 						  &mvmvif->mcast_sta, disable);
3996 
3997 	/*
3998 	 * Only unblock the broadcast station (FW blocks it for immediate
3999 	 * quiet, not the driver)
4000 	 */
4001 	if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA)
4002 		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
4003 						  &mvmvif->bcast_sta, disable);
4004 }
4005 
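/*
 * Stop transmitting to the AP station while it is considered absent
 * during a channel switch.
 */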
4006 void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
4007 {
4008 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
4009 	struct iwl_mvm_sta *mvmsta;
4010 
4011 	rcu_read_lock();
4012 
4013 	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
4014 
4015 	if (!WARN_ON(!mvmsta))
4016 		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);
4017 
4018 	rcu_read_unlock();
4019 }
4020 
4021 u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
4022 {
4023 	u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
4024 
4025 	/*
4026 	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
4027 	 * to align the wrap around of ssn so we compare relevant values.
4028 	 */
4029 	if (mvm->trans->cfg->gen2)
4030 		sn &= 0xff;
4031 
4032 	return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
4033 }
4034