/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
 * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
 * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-prph.h"
#include "iwl-csr.h"
#include "mvm.h"
#include "fw/api/rs.h"

/*
 * Will return 0 even if the cmd failed when RFKILL is asserted unless
 * CMD_WANT_SKB is set in cmd->flags.
 */
int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd)
{
	int ret;

#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
	if (WARN_ON(mvm->d3_test_active))
		return -EIO;
#endif

	/*
	 * Synchronous commands from this op-mode must hold
	 * the mutex; this ensures we don't try to send two
	 * (or more) synchronous commands at a time.
	 */
	if (!(cmd->flags & CMD_ASYNC)) {
		lockdep_assert_held(&mvm->mutex);
		if (!(cmd->flags & CMD_SEND_IN_IDLE))
			iwl_mvm_ref(mvm, IWL_MVM_REF_SENDING_CMD);
	}

	ret = iwl_trans_send_cmd(mvm->trans, cmd);

	if (!(cmd->flags & (CMD_ASYNC | CMD_SEND_IN_IDLE)))
		iwl_mvm_unref(mvm, IWL_MVM_REF_SENDING_CMD);

	/*
	 * If the caller wants the SKB, then don't hide any problems, the
	 * caller might access the response buffer which will be NULL if
	 * the command failed.
	 */
	if (cmd->flags & CMD_WANT_SKB)
		return ret;

	/* Silently ignore failures if RFKILL is asserted */
	if (!ret || ret == -ERFKILL)
		return 0;
	return ret;
}

int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id,
			 u32 flags, u16 len, const void *data)
{
	struct iwl_host_cmd cmd = {
		.id = id,
		.len = { len, },
		.data = { data, },
		.flags = flags,
	};

	return iwl_mvm_send_cmd(mvm, &cmd);
}
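
/*
 * Usage sketch (illustrative only, not part of the driver): a typical
 * fire-and-forget caller builds the payload on the stack and passes
 * CMD_ASYNC when it cannot sleep; the command ID and payload type
 * below are placeholders rather than real API names:
 *
 *	struct iwl_some_cmd payload = {};
 *	int err = iwl_mvm_send_cmd_pdu(mvm, SOME_CMD, CMD_ASYNC,
 *				       sizeof(payload), &payload);
 *
 * Without CMD_ASYNC the caller must hold mvm->mutex, per the lockdep
 * assertion in iwl_mvm_send_cmd() above.
 */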

/*
 * We assume that the caller set the status to the success value
 */
int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
			    u32 *status)
{
	struct iwl_rx_packet *pkt;
	struct iwl_cmd_response *resp;
	int ret, resp_len;

	lockdep_assert_held(&mvm->mutex);

#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
	if (WARN_ON(mvm->d3_test_active))
		return -EIO;
#endif

	/*
	 * Only synchronous commands can wait for status;
	 * we use CMD_WANT_SKB internally, so the caller can't set it.
	 */
	if (WARN_ONCE(cmd->flags & (CMD_ASYNC | CMD_WANT_SKB),
		      "cmd flags %x", cmd->flags))
		return -EINVAL;

	cmd->flags |= CMD_WANT_SKB;

	ret = iwl_trans_send_cmd(mvm->trans, cmd);
	if (ret == -ERFKILL) {
		/*
		 * The command failed because of RFKILL, don't update
		 * the status, leave it as success and return 0.
		 */
		return 0;
	} else if (ret) {
		return ret;
	}

	pkt = cmd->resp_pkt;

	resp_len = iwl_rx_packet_payload_len(pkt);
	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
		ret = -EIO;
		goto out_free_resp;
	}

	resp = (void *)pkt->data;
	*status = le32_to_cpu(resp->status);
 out_free_resp:
	iwl_free_resp(cmd);
	return ret;
}

/*
 * We assume that the caller set the status to the success value
 */
int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len,
				const void *data, u32 *status)
{
	struct iwl_host_cmd cmd = {
		.id = id,
		.len = { len, },
		.data = { data, },
	};

	return iwl_mvm_send_cmd_status(mvm, &cmd, status);
}
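
/*
 * Example (illustrative sketch): callers pre-load *status with the
 * firmware's success value, so a command silently swallowed under
 * RFKILL still reads back as OK. This mirrors the pattern used by the
 * station code:
 *
 *	u32 status = ADD_STA_SUCCESS;
 *	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
 *					  &cmd, &status);
 *	if (!ret && status != ADD_STA_SUCCESS)
 *		ret = -EIO;
 */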

#define IWL_DECLARE_RATE_INFO(r) \
	[IWL_RATE_##r##M_INDEX] = IWL_RATE_##r##M_PLCP

/*
 * Translate from fw_rate_index (IWL_RATE_XXM_INDEX) to PLCP
 */
static const u8 fw_rate_idx_to_plcp[IWL_RATE_COUNT] = {
	IWL_DECLARE_RATE_INFO(1),
	IWL_DECLARE_RATE_INFO(2),
	IWL_DECLARE_RATE_INFO(5),
	IWL_DECLARE_RATE_INFO(11),
	IWL_DECLARE_RATE_INFO(6),
	IWL_DECLARE_RATE_INFO(9),
	IWL_DECLARE_RATE_INFO(12),
	IWL_DECLARE_RATE_INFO(18),
	IWL_DECLARE_RATE_INFO(24),
	IWL_DECLARE_RATE_INFO(36),
	IWL_DECLARE_RATE_INFO(48),
	IWL_DECLARE_RATE_INFO(54),
};

int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
					enum nl80211_band band)
{
	int rate = rate_n_flags & RATE_LEGACY_RATE_MSK;
	int idx;
	int band_offset = 0;

	/* Legacy rate format, search for match in table */
	if (band == NL80211_BAND_5GHZ)
		band_offset = IWL_FIRST_OFDM_RATE;
	for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
		if (fw_rate_idx_to_plcp[idx] == rate)
			return idx - band_offset;

	return -1;
}
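
/*
 * Note on the band offset (added for clarity): the table above lists
 * the four CCK rates (1/2/5.5/11 Mbps) before the OFDM rates. 5 GHz
 * has no CCK rates, so mac80211's rate index 0 on that band means
 * 6 Mbps; starting the search at IWL_FIRST_OFDM_RATE and subtracting
 * it again maps, e.g., the 6 Mbps PLCP value back to index 0.
 */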

u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx)
{
	/* Get PLCP rate for tx_cmd->rate_n_flags */
	return fw_rate_idx_to_plcp[rate_idx];
}

void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_error_resp *err_resp = (void *)pkt->data;

	IWL_ERR(mvm, "FW Error notification: type 0x%08X cmd_id 0x%02X\n",
		le32_to_cpu(err_resp->error_type), err_resp->cmd_id);
	IWL_ERR(mvm, "FW Error notification: seq 0x%04X service 0x%08X\n",
		le16_to_cpu(err_resp->bad_cmd_seq_num),
		le32_to_cpu(err_resp->error_service));
	IWL_ERR(mvm, "FW Error notification: timestamp 0x%16llX\n",
		le64_to_cpu(err_resp->timestamp));
}

/*
 * Returns the first antenna as ANT_[ABC], as defined in iwl-config.h.
 * The parameter should also be a combination of ANT_[ABC].
 */
u8 first_antenna(u8 mask)
{
	BUILD_BUG_ON(ANT_A != BIT(0)); /* using ffs is wrong if not */
	if (WARN_ON_ONCE(!mask)) /* ffs will return 0 if mask is zeroed */
		return BIT(0);
	return BIT(ffs(mask) - 1);
}
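
/*
 * Example (added for illustration): first_antenna(ANT_B | ANT_C)
 * returns ANT_B, since ANT_B is BIT(1) and ffs() finds the lowest
 * set bit. A zero mask is a caller bug and falls back to BIT(0)
 * (ANT_A) after the WARN.
 */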

/*
 * Toggles between TX antennas to send the probe request on.
 * Receives the bitmask of valid TX antennas and the *index* used
 * for the last TX, and returns the next valid *index* to use.
 * To set it in the tx_cmd, the caller must use BIT(idx).
 */
u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
{
	u8 ind = last_idx;
	int i;

	for (i = 0; i < MAX_RS_ANT_NUM; i++) {
		ind = (ind + 1) % MAX_RS_ANT_NUM;
		if (valid & BIT(ind))
			return ind;
	}

	WARN_ONCE(1, "Failed to toggle between antennas 0x%x", valid);
	return last_idx;
}
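
/*
 * Example (added for illustration): with valid = ANT_A | ANT_B (0x3)
 * and last_idx = 0, the loop returns 1; calling it again with
 * last_idx = 1 wraps past the unset BIT(2) back to 0, so successive
 * probe requests alternate between the two antennas.
 */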

static const struct {
	const char *name;
	u8 num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};

static const char *desc_lookup(u32 num)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(advanced_lookup) - 1; i++)
		if (advanced_lookup[i].num == num)
			return advanced_lookup[i].name;

	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
	return advanced_lookup[i].name;
}
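
/*
 * Example (added for illustration): desc_lookup(0x34) returns
 * "NMI_INTERRUPT_WDG", while an id missing from the table, e.g. 0x99,
 * falls through to the catch-all last entry and returns
 * "ADVANCED_SYSASSERT".
 */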

/*
 * Note: This structure is read from the device with IO accesses,
 * and the reading already does the endian conversion. As it is
 * read with u32-sized accesses, any members with a different size
 * need to be ordered correctly though!
 */
struct iwl_error_event_table_v1 {
	u32 valid;		/* (nonzero) valid, (0) log is empty */
	u32 error_id;		/* type of error */
	u32 pc;			/* program counter */
	u32 blink1;		/* branch link */
	u32 blink2;		/* branch link */
	u32 ilink1;		/* interrupt link */
	u32 ilink2;		/* interrupt link */
	u32 data1;		/* error-specific data */
	u32 data2;		/* error-specific data */
	u32 data3;		/* error-specific data */
	u32 bcon_time;		/* beacon timer */
	u32 tsf_low;		/* network timestamp function timer */
	u32 tsf_hi;		/* network timestamp function timer */
	u32 gp1;		/* GP1 timer register */
	u32 gp2;		/* GP2 timer register */
	u32 gp3;		/* GP3 timer register */
	u32 ucode_ver;		/* uCode version */
	u32 hw_ver;		/* HW Silicon version */
	u32 brd_ver;		/* HW board version */
	u32 log_pc;		/* log program counter */
	u32 frame_ptr;		/* frame pointer */
	u32 stack_ptr;		/* stack pointer */
	u32 hcmd;		/* last host command header */
	u32 isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	u32 isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	u32 isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	u32 isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	u32 isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	u32 isr_pref;		/* isr status register LMPM_NIC_PREF_STAT */
	u32 wait_event;		/* wait event() caller address */
	u32 l2p_control;	/* L2pControlField */
	u32 l2p_duration;	/* L2pDurationField */
	u32 l2p_mhvalid;	/* L2pMhValidBits */
	u32 l2p_addr_match;	/* L2pAddrMatchStat */
	u32 lmpm_pmg_sel;	/* indicates which clocks are turned on
				 * (LMPM_PMG_SEL) */
	u32 u_timestamp;	/* indicates the date and time of
				 * compilation */
	u32 flow_handler;	/* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_1 */;

struct iwl_error_event_table {
	u32 valid;		/* (nonzero) valid, (0) log is empty */
	u32 error_id;		/* type of error */
	u32 trm_hw_status0;	/* TRM HW status */
	u32 trm_hw_status1;	/* TRM HW status */
	u32 blink2;		/* branch link */
	u32 ilink1;		/* interrupt link */
	u32 ilink2;		/* interrupt link */
	u32 data1;		/* error-specific data */
	u32 data2;		/* error-specific data */
	u32 data3;		/* error-specific data */
	u32 bcon_time;		/* beacon timer */
	u32 tsf_low;		/* network timestamp function timer */
	u32 tsf_hi;		/* network timestamp function timer */
	u32 gp1;		/* GP1 timer register */
	u32 gp2;		/* GP2 timer register */
	u32 fw_rev_type;	/* firmware revision type */
	u32 major;		/* uCode version major */
	u32 minor;		/* uCode version minor */
	u32 hw_ver;		/* HW Silicon version */
	u32 brd_ver;		/* HW board version */
	u32 log_pc;		/* log program counter */
	u32 frame_ptr;		/* frame pointer */
	u32 stack_ptr;		/* stack pointer */
	u32 hcmd;		/* last host command header */
	u32 isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	u32 isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	u32 isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	u32 isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	u32 isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	u32 last_cmd_id;	/* last HCMD id handled by the firmware */
	u32 wait_event;		/* wait event() caller address */
	u32 l2p_control;	/* L2pControlField */
	u32 l2p_duration;	/* L2pDurationField */
	u32 l2p_mhvalid;	/* L2pMhValidBits */
	u32 l2p_addr_match;	/* L2pAddrMatchStat */
	u32 lmpm_pmg_sel;	/* indicates which clocks are turned on
				 * (LMPM_PMG_SEL) */
	u32 u_timestamp;	/* indicates the date and time of
				 * compilation */
	u32 flow_handler;	/* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;

/*
 * UMAC error struct - relevant starting from the 8000 chip family.
 * Note: This structure is read from the device with IO accesses,
 * and the reading already does the endian conversion. As it is
 * read with u32-sized accesses, any members with a different size
 * need to be ordered correctly though!
 */
struct iwl_umac_error_event_table {
	u32 valid;		/* (nonzero) valid, (0) log is empty */
	u32 error_id;		/* type of error */
	u32 blink1;		/* branch link */
	u32 blink2;		/* branch link */
	u32 ilink1;		/* interrupt link */
	u32 ilink2;		/* interrupt link */
	u32 data1;		/* error-specific data */
	u32 data2;		/* error-specific data */
	u32 data3;		/* error-specific data */
	u32 umac_major;
	u32 umac_minor;
	u32 frame_pointer;	/* core register 27 */
	u32 stack_pointer;	/* core register 28 */
	u32 cmd_header;		/* latest host cmd sent to UMAC */
	u32 nic_isr_pref;	/* ISR status register */
} __packed;

#define ERROR_START_OFFSET  (1 * sizeof(u32))
#define ERROR_ELEM_SIZE     (7 * sizeof(u32))
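
/*
 * Note (added for clarity): these constants describe the on-device
 * error log layout - one u32 header followed by entries of seven u32s
 * each. The dump routines below use them only in the check
 * "ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE", which holds
 * whenever table.valid (the entry count) is at least one.
 */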

static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
{
	struct iwl_trans *trans = mvm->trans;
	struct iwl_umac_error_event_table table;

	if (!mvm->support_umac_log)
		return;

	iwl_trans_read_mem_bytes(trans, mvm->umac_error_event_table, &table,
				 sizeof(table));

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		IWL_ERR(trans, "Start IWL Error Log Dump:\n");
		IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
			mvm->status, table.valid);
	}

	IWL_ERR(mvm, "0x%08X | %s\n", table.error_id,
		desc_lookup(table.error_id));
	IWL_ERR(mvm, "0x%08X | umac branchlink1\n", table.blink1);
	IWL_ERR(mvm, "0x%08X | umac branchlink2\n", table.blink2);
	IWL_ERR(mvm, "0x%08X | umac interruptlink1\n", table.ilink1);
	IWL_ERR(mvm, "0x%08X | umac interruptlink2\n", table.ilink2);
	IWL_ERR(mvm, "0x%08X | umac data1\n", table.data1);
	IWL_ERR(mvm, "0x%08X | umac data2\n", table.data2);
	IWL_ERR(mvm, "0x%08X | umac data3\n", table.data3);
	IWL_ERR(mvm, "0x%08X | umac major\n", table.umac_major);
	IWL_ERR(mvm, "0x%08X | umac minor\n", table.umac_minor);
	IWL_ERR(mvm, "0x%08X | frame pointer\n", table.frame_pointer);
	IWL_ERR(mvm, "0x%08X | stack pointer\n", table.stack_pointer);
	IWL_ERR(mvm, "0x%08X | last host cmd\n", table.cmd_header);
	IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref);
}

static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base)
{
	struct iwl_trans *trans = mvm->trans;
	struct iwl_error_event_table table;
	u32 val;

	if (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) {
		if (!base)
			base = mvm->fw->init_errlog_ptr;
	} else {
		if (!base)
			base = mvm->fw->inst_errlog_ptr;
	}

	if (base < 0x400000) {
		IWL_ERR(mvm,
			"Not valid error log pointer 0x%08X for %s uCode\n",
			base,
			(mvm->fwrt.cur_fw_img == IWL_UCODE_INIT)
			? "Init" : "RT");
		return;
	}

	/* check if there is a HW error */
	val = iwl_trans_read_mem32(trans, base);
	if (((val & ~0xf) == 0xa5a5a5a0) || ((val & ~0xf) == 0x5a5a5a50)) {
		int err;

		IWL_ERR(trans, "HW error, resetting before reading\n");

		/* reset the device */
		iwl_trans_sw_reset(trans);

		/* set INIT_DONE flag */
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

		/* and wait for clock stabilization */
		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
			udelay(2);

		err = iwl_poll_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   25000);
		if (err < 0) {
			IWL_DEBUG_INFO(trans,
				       "Failed to reset the card for the dump\n");
			return;
		}
	}

	iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		IWL_ERR(trans, "Start IWL Error Log Dump:\n");
		IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
			mvm->status, table.valid);
	}

	/* Do not change this output - scripts rely on it */

	IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);

	trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
				      table.data1, table.data2, table.data3,
				      table.blink2, table.ilink1,
				      table.ilink2, table.bcon_time, table.gp1,
				      table.gp2, table.fw_rev_type, table.major,
				      table.minor, table.hw_ver, table.brd_ver);
	IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
		desc_lookup(table.error_id));
	IWL_ERR(mvm, "0x%08X | trm_hw_status0\n", table.trm_hw_status0);
	IWL_ERR(mvm, "0x%08X | trm_hw_status1\n", table.trm_hw_status1);
	IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2);
	IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1);
	IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2);
	IWL_ERR(mvm, "0x%08X | data1\n", table.data1);
	IWL_ERR(mvm, "0x%08X | data2\n", table.data2);
	IWL_ERR(mvm, "0x%08X | data3\n", table.data3);
	IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time);
	IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low);
	IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi);
	IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1);
	IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2);
	IWL_ERR(mvm, "0x%08X | uCode revision type\n", table.fw_rev_type);
	IWL_ERR(mvm, "0x%08X | uCode version major\n", table.major);
	IWL_ERR(mvm, "0x%08X | uCode version minor\n", table.minor);
	IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver);
	IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver);
	IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd);
	IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0);
	IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1);
	IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2);
	IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3);
	IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4);
	IWL_ERR(mvm, "0x%08X | last cmd Id\n", table.last_cmd_id);
	IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event);
	IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control);
	IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration);
	IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
	IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
	IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
	IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
	IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);
}

void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
{
	if (!test_bit(STATUS_DEVICE_ENABLED, &mvm->trans->status)) {
		IWL_ERR(mvm,
			"DEVICE_ENABLED bit is not set. Aborting dump.\n");
		return;
	}

	iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[0]);

	if (mvm->error_event_table[1])
		iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[1]);

	iwl_mvm_dump_umac_error_log(mvm);
}

int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq)
{
	int i;

	lockdep_assert_held(&mvm->queue_info_lock);

	/* This should not be hit with new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -ENOSPC;

	/* Start by looking for a free queue */
	for (i = minq; i <= maxq; i++)
		if (mvm->queue_info[i].hw_queue_refcount == 0 &&
		    mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
			return i;

	/*
	 * If no free queue was found - settle for an inactive one to
	 * reconfigure. Make sure that the inactive queue either already
	 * belongs to this STA, or that if it belongs to another one -
	 * it isn't the reserved queue.
	 */
	for (i = minq; i <= maxq; i++)
		if (mvm->queue_info[i].status == IWL_MVM_QUEUE_INACTIVE &&
		    (sta_id == mvm->queue_info[i].ra_sta_id ||
		     !mvm->queue_info[i].reserved))
			return i;

	return -ENOSPC;
}
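
/*
 * Calling sketch (illustrative; the real callers live in sta.c): the
 * queue_info_lock must be held across both the lookup and the
 * subsequent claim of the returned queue so the two cannot race:
 *
 *	spin_lock_bh(&mvm->queue_info_lock);
 *	queue = iwl_mvm_find_free_queue(mvm, sta_id,
 *					IWL_MVM_DQA_MIN_DATA_QUEUE,
 *					IWL_MVM_DQA_MAX_DATA_QUEUE);
 *	spin_unlock_bh(&mvm->queue_info_lock);
 */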

int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
			 int tid, int frame_limit, u16 ssn)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_ENABLE_QUEUE,
		.window = frame_limit,
		.sta_id = sta_id,
		.ssn = cpu_to_le16(ssn),
		.tx_fifo = fifo,
		.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			      queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE),
		.tid = tid,
	};
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	if (WARN(mvm->queue_info[queue].hw_queue_refcount == 0,
		 "Trying to reconfig unallocated queue %d\n", queue)) {
		spin_unlock_bh(&mvm->queue_info_lock);
		return -ENXIO;
	}
	spin_unlock_bh(&mvm->queue_info_lock);

	IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n",
		  queue, fifo, ret);

	return ret;
}

static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
				       int mac80211_queue, u8 sta_id, u8 tid)
{
	bool enable_queue = true;

	spin_lock_bh(&mvm->queue_info_lock);

	/* Make sure this TID isn't already enabled */
	if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
		spin_unlock_bh(&mvm->queue_info_lock);
		IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
			queue, tid);
		return false;
	}

	/* Update mappings and refcounts */
	if (mvm->queue_info[queue].hw_queue_refcount > 0)
		enable_queue = false;

	if (mac80211_queue != IEEE80211_INVAL_HW_QUEUE) {
		WARN(mac80211_queue >=
		     BITS_PER_BYTE * sizeof(mvm->hw_queue_to_mac80211[0]),
		     "cannot track mac80211 queue %d (queue %d, sta %d, tid %d)\n",
		     mac80211_queue, queue, sta_id, tid);
		mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
	}

	mvm->queue_info[queue].hw_queue_refcount++;
	mvm->queue_info[queue].tid_bitmap |= BIT(tid);
	mvm->queue_info[queue].ra_sta_id = sta_id;

	if (enable_queue) {
		if (tid != IWL_MAX_TID_COUNT)
			mvm->queue_info[queue].mac80211_ac =
				tid_to_mac80211_ac[tid];
		else
			mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;

		mvm->queue_info[queue].txq_tid = tid;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Enabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
			    queue, mvm->queue_info[queue].hw_queue_refcount,
			    mvm->hw_queue_to_mac80211[queue]);

	spin_unlock_bh(&mvm->queue_info_lock);

	return enable_queue;
}

int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
			    u8 sta_id, u8 tid, unsigned int timeout)
{
	struct iwl_tx_queue_cfg_cmd cmd = {
		.flags = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
		.sta_id = sta_id,
		.tid = tid,
	};
	int queue;

	if (cmd.tid == IWL_MAX_TID_COUNT)
		cmd.tid = IWL_MGMT_TID;
	queue = iwl_trans_txq_alloc(mvm->trans, (void *)&cmd,
				    SCD_QUEUE_CFG, timeout);

	if (queue < 0) {
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Failed allocating TXQ for sta %d tid %d, ret: %d\n",
				    sta_id, tid, queue);
		return queue;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
			    queue, sta_id, tid);

	mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
	IWL_DEBUG_TX_QUEUES(mvm,
			    "Enabling TXQ #%d (mac80211 map:0x%x)\n",
			    queue, mvm->hw_queue_to_mac80211[queue]);

	return queue;
}

bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
			u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
			unsigned int wdg_timeout)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_ENABLE_QUEUE,
		.window = cfg->frame_limit,
		.sta_id = cfg->sta_id,
		.ssn = cpu_to_le16(ssn),
		.tx_fifo = cfg->fifo,
		.aggregate = cfg->aggregate,
		.tid = cfg->tid,
	};
	bool inc_ssn;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	/* Send the enabling command if we need to */
	if (!iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue,
					cfg->sta_id, cfg->tid))
		return false;

	inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
					   NULL, wdg_timeout);
	if (inc_ssn)
		le16_add_cpu(&cmd.ssn, 1);

	WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
	     "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);

	return inc_ssn;
}
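
/*
 * Note (added for clarity): a true return value means the hardware
 * scheduler could not start exactly at the requested SSN and the queue
 * was programmed one frame ahead. A caller that tracks the SSN is
 * expected to advance its own copy to match, along the lines of:
 *
 *	if (iwl_mvm_enable_txq(mvm, queue, mac_queue, ssn, &cfg, wdg))
 *		ssn = (ssn + 1) & 0xfff;
 */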

int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
			u8 tid, u8 flags)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool remove_mac_queue = true;
	int ret;

	if (iwl_mvm_has_new_tx_api(mvm)) {
		spin_lock_bh(&mvm->queue_info_lock);
		mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac80211_queue);
		spin_unlock_bh(&mvm->queue_info_lock);

		iwl_trans_txq_free(mvm->trans, queue);

		return 0;
	}

	spin_lock_bh(&mvm->queue_info_lock);

	if (WARN_ON(mvm->queue_info[queue].hw_queue_refcount == 0)) {
		spin_unlock_bh(&mvm->queue_info_lock);
		return 0;
	}

	mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

	/*
	 * If there is another TID with the same AC - don't remove the MAC queue
	 * from the mapping
	 */
	if (tid < IWL_MAX_TID_COUNT) {
		unsigned long tid_bitmap =
			mvm->queue_info[queue].tid_bitmap;
		int ac = tid_to_mac80211_ac[tid];
		int i;

		for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) {
			if (tid_to_mac80211_ac[i] == ac)
				remove_mac_queue = false;
		}
	}

	if (remove_mac_queue)
		mvm->hw_queue_to_mac80211[queue] &=
			~BIT(mac80211_queue);
	mvm->queue_info[queue].hw_queue_refcount--;

	cmd.action = mvm->queue_info[queue].hw_queue_refcount ?
		SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
	if (cmd.action == SCD_CFG_DISABLE_QUEUE)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Disabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
			    queue,
			    mvm->queue_info[queue].hw_queue_refcount,
			    mvm->hw_queue_to_mac80211[queue]);

	/* If the queue is still enabled - nothing left to do in this function */
	if (cmd.action == SCD_CFG_ENABLE_QUEUE) {
		spin_unlock_bh(&mvm->queue_info_lock);
		return 0;
	}

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tid = mvm->queue_info[queue].txq_tid;

	/* Make sure queue info is correct even though we overwrite it */
	WARN(mvm->queue_info[queue].hw_queue_refcount ||
	     mvm->queue_info[queue].tid_bitmap ||
	     mvm->hw_queue_to_mac80211[queue],
	     "TXQ #%d info out-of-sync - refcount=%d, mac map=0x%x, tid=0x%x\n",
	     queue, mvm->queue_info[queue].hw_queue_refcount,
	     mvm->hw_queue_to_mac80211[queue],
	     mvm->queue_info[queue].tid_bitmap);

	/* If we are here - the queue is freed and we can zero out these vals */
	mvm->queue_info[queue].hw_queue_refcount = 0;
	mvm->queue_info[queue].tid_bitmap = 0;
	mvm->hw_queue_to_mac80211[queue] = 0;

	/*
	 * Regardless of whether this was a reserved TXQ for a STA - mark
	 * it as no longer reserved
	 */
	mvm->queue_info[queue].reserved = false;

	spin_unlock_bh(&mvm->queue_info_lock);

	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
				   sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);

	if (ret)
		IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
			queue, ret);
	return ret;
}

/**
 * iwl_mvm_send_lq_cmd() - Send link quality command
 * @init: This command is sent as part of station initialization right
 *        after station has been added.
 *
 * The link quality command is sent as the last step of station creation.
 * This is the special case in which init is set, and we call a callback
 * in that case to clear the state indicating that station creation is
 * in progress.
 */
int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init)
{
	struct iwl_host_cmd cmd = {
		.id = LQ_CMD,
		.len = { sizeof(struct iwl_lq_cmd), },
		.flags = init ? 0 : CMD_ASYNC,
		.data = { lq, },
	};

	if (WARN_ON(lq->sta_id == IWL_MVM_INVALID_STA ||
		    iwl_mvm_has_tlc_offload(mvm)))
		return -EINVAL;

	return iwl_mvm_send_cmd(mvm, &cmd);
}

/**
 * iwl_mvm_update_smps - Get a request to change the SMPS mode
 * @req_type: The part of the driver that called for a change.
 * @smps_request: The request to change the SMPS mode.
 *
 * Get a request to change the SMPS mode,
 * and change it according to all other requests in the driver.
 */
void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			 enum iwl_mvm_smps_type_request req_type,
			 enum ieee80211_smps_mode smps_request)
{
	struct iwl_mvm_vif *mvmvif;
	enum ieee80211_smps_mode smps_mode;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* SMPS is irrelevant for NICs that don't have at least 2 RX antennas */
	if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
		return;

	if (vif->type == NL80211_IFTYPE_AP)
		smps_mode = IEEE80211_SMPS_OFF;
	else
		smps_mode = IEEE80211_SMPS_AUTOMATIC;

	mvmvif = iwl_mvm_vif_from_mac80211(vif);
	mvmvif->smps_requests[req_type] = smps_request;
	for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
		if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC) {
			smps_mode = IEEE80211_SMPS_STATIC;
			break;
		}
		if (mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC)
			smps_mode = IEEE80211_SMPS_DYNAMIC;
	}

	ieee80211_request_smps(vif, smps_mode);
}
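
/*
 * Example (added for clarity): the loop above resolves competing
 * requests by strictness. If, say, the BT coex code asked for DYNAMIC
 * while thermal throttling asked for STATIC, STATIC wins; a DYNAMIC
 * request beats the AUTOMATIC/OFF default derived from the interface
 * type.
 */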

int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear)
{
	struct iwl_statistics_cmd scmd = {
		.flags = clear ? cpu_to_le32(IWL_STATISTICS_FLG_CLEAR) : 0,
	};
	struct iwl_host_cmd cmd = {
		.id = STATISTICS_CMD,
		.len[0] = sizeof(scmd),
		.data[0] = &scmd,
		.flags = CMD_WANT_SKB,
	};
	int ret;

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret)
		return ret;

	iwl_mvm_handle_rx_statistics(mvm, cmd.resp_pkt);
	iwl_free_resp(&cmd);

	if (clear)
		iwl_mvm_accu_radio_stats(mvm);

	return 0;
}

void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm)
{
	mvm->accu_radio_stats.rx_time += mvm->radio_stats.rx_time;
	mvm->accu_radio_stats.tx_time += mvm->radio_stats.tx_time;
	mvm->accu_radio_stats.on_time_rf += mvm->radio_stats.on_time_rf;
	mvm->accu_radio_stats.on_time_scan += mvm->radio_stats.on_time_scan;
}

static void iwl_mvm_diversity_iter(void *_data, u8 *mac,
				   struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	bool *result = _data;
	int i;

	for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
		if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC ||
		    mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC)
			*result = false;
	}
}

bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
{
	bool result = true;

	lockdep_assert_held(&mvm->mutex);

	if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
		return false;

	if (mvm->cfg->rx_with_siso_diversity)
		return false;

	ieee80211_iterate_active_interfaces_atomic(
			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
			iwl_mvm_diversity_iter, &result);

	return result;
}

int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			       bool prev)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int res;
	bool low_latency;

	lockdep_assert_held(&mvm->mutex);

	low_latency = iwl_mvm_vif_low_latency(mvmvif);

	if (low_latency == prev)
		return 0;

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) {
		struct iwl_mac_low_latency_cmd cmd = {
			.mac_id = cpu_to_le32(mvmvif->id)
		};

		if (low_latency) {
			/* currently we don't care about the direction */
			cmd.low_latency_rx = 1;
			cmd.low_latency_tx = 1;
		}
		res = iwl_mvm_send_cmd_pdu(mvm,
					   iwl_cmd_id(LOW_LATENCY_CMD,
						      MAC_CONF_GROUP, 0),
					   0, sizeof(cmd), &cmd);
		if (res)
			IWL_ERR(mvm, "Failed to send low latency command\n");
	}

	res = iwl_mvm_update_quotas(mvm, false, NULL);
	if (res)
		return res;

	iwl_mvm_bt_coex_vif_change(mvm);

	return iwl_mvm_power_update_mac(mvm);
}

static void iwl_mvm_ll_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
{
	bool *result = _data;

	if (iwl_mvm_vif_low_latency(iwl_mvm_vif_from_mac80211(vif)))
		*result = true;
}

bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
{
	bool result = false;

	ieee80211_iterate_active_interfaces_atomic(
			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
			iwl_mvm_ll_iter, &result);

	return result;
}

struct iwl_bss_iter_data {
	struct ieee80211_vif *vif;
	bool error;
};

static void iwl_mvm_bss_iface_iterator(void *_data, u8 *mac,
				       struct ieee80211_vif *vif)
{
	struct iwl_bss_iter_data *data = _data;

	if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
		return;

	if (data->vif) {
		data->error = true;
		return;
	}

	data->vif = vif;
}

struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm)
{
	struct iwl_bss_iter_data bss_iter_data = {};

	ieee80211_iterate_active_interfaces_atomic(
		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
		iwl_mvm_bss_iface_iterator, &bss_iter_data);

	if (bss_iter_data.error) {
		IWL_ERR(mvm, "More than one managed interface active!\n");
		return ERR_PTR(-EINVAL);
	}

	return bss_iter_data.vif;
}

struct iwl_sta_iter_data {
	bool assoc;
};

static void iwl_mvm_sta_iface_iterator(void *_data, u8 *mac,
				       struct ieee80211_vif *vif)
{
	struct iwl_sta_iter_data *data = _data;

	if (vif->type != NL80211_IFTYPE_STATION)
		return;

	if (vif->bss_conf.assoc)
		data->assoc = true;
}

bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm)
{
	struct iwl_sta_iter_data data = {
		.assoc = false,
	};

	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   iwl_mvm_sta_iface_iterator,
						   &data);
	return data.assoc;
}

unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
				    struct ieee80211_vif *vif,
				    bool tdls, bool cmd_q)
{
	struct iwl_fw_dbg_trigger_tlv *trigger;
	struct iwl_fw_dbg_trigger_txq_timer *txq_timer;
	unsigned int default_timeout =
		cmd_q ? IWL_DEF_WD_TIMEOUT : mvm->cfg->base_params->wd_timeout;

	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS)) {
		/*
		 * We can't know when the station is asleep or awake, so we
		 * must disable the queue hang detection.
		 */
		if (fw_has_capa(&mvm->fw->ucode_capa,
				IWL_UCODE_TLV_CAPA_STA_PM_NOTIF) &&
		    vif && vif->type == NL80211_IFTYPE_AP)
			return IWL_WATCHDOG_DISABLED;
		return iwlmvm_mod_params.tfd_q_hang_detect ?
			default_timeout : IWL_WATCHDOG_DISABLED;
	}

	trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS);
	txq_timer = (void *)trigger->data;

	if (tdls)
		return le32_to_cpu(txq_timer->tdls);

	if (cmd_q)
		return le32_to_cpu(txq_timer->command_queue);

	if (WARN_ON(!vif))
		return default_timeout;

	switch (ieee80211_vif_type_p2p(vif)) {
	case NL80211_IFTYPE_ADHOC:
		return le32_to_cpu(txq_timer->ibss);
	case NL80211_IFTYPE_STATION:
		return le32_to_cpu(txq_timer->bss);
	case NL80211_IFTYPE_AP:
		return le32_to_cpu(txq_timer->softap);
	case NL80211_IFTYPE_P2P_CLIENT:
		return le32_to_cpu(txq_timer->p2p_client);
	case NL80211_IFTYPE_P2P_GO:
		return le32_to_cpu(txq_timer->p2p_go);
	case NL80211_IFTYPE_P2P_DEVICE:
		return le32_to_cpu(txq_timer->p2p_device);
	case NL80211_IFTYPE_MONITOR:
		return default_timeout;
	default:
		WARN_ON(1);
		return mvm->cfg->base_params->wd_timeout;
	}
}

void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     const char *errmsg)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_mlme *trig_mlme;

	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
		goto out;

	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
	trig_mlme = (void *)trig->data;
	if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
					   ieee80211_vif_to_wdev(vif), trig))
		goto out;

	if (trig_mlme->stop_connection_loss &&
	    --trig_mlme->stop_connection_loss)
		goto out;

	iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "%s", errmsg);

out:
	ieee80211_connection_loss(vif);
}

/*
 * Remove inactive TIDs of a given queue.
 * If all queue TIDs are inactive - mark the queue as inactive.
 * If only some of the queue's TIDs are inactive - unmap them from the queue.
 */
static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
					 struct iwl_mvm_sta *mvmsta, int queue,
					 unsigned long tid_bitmap)
{
	int tid;

	lockdep_assert_held(&mvmsta->lock);
	lockdep_assert_held(&mvm->queue_info_lock);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	/* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		/* If some TFDs are still queued - don't mark TID as inactive */
		if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
			tid_bitmap &= ~BIT(tid);

		/* Don't mark as inactive any TID that has an active BA */
		if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
			tid_bitmap &= ~BIT(tid);
	}

	/* If all TIDs in the queue are inactive - mark queue as inactive. */
	if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;

		for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1)
			mvmsta->tid_data[tid].is_tid_active = false;

		IWL_DEBUG_TX_QUEUES(mvm, "Queue %d marked as inactive\n",
				    queue);
		return;
	}

	/*
	 * If we are here, this is a shared queue and not all TIDs timed-out.
	 * Remove the ones that did.
	 */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]];

		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
		mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue);
		mvm->queue_info[queue].hw_queue_refcount--;
		mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
		mvmsta->tid_data[tid].is_tid_active = false;

		IWL_DEBUG_TX_QUEUES(mvm,
				    "Removing inactive TID %d from shared Q:%d\n",
				    tid, queue);
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "TXQ #%d left with tid bitmap 0x%x\n", queue,
			    mvm->queue_info[queue].tid_bitmap);

	/*
	 * There may be different TIDs with the same mac queues, so make
	 * sure all TIDs have existing corresponding mac queues enabled
	 */
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		mvm->hw_queue_to_mac80211[queue] |=
			BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]);
	}

	/* If the queue is marked as shared - "unshare" it */
	if (mvm->queue_info[queue].hw_queue_refcount == 1 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_RECONFIGURING;
		IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
				    queue);
	}
}

void iwl_mvm_inactivity_check(struct iwl_mvm *mvm)
{
	unsigned long timeout_queues_map = 0;
	unsigned long now = jiffies;
	int i;

	if (iwl_mvm_has_new_tx_api(mvm))
		return;

	spin_lock_bh(&mvm->queue_info_lock);
	for (i = 0; i < IWL_MAX_HW_QUEUES; i++)
		if (mvm->queue_info[i].hw_queue_refcount > 0)
			timeout_queues_map |= BIT(i);
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	/*
	 * If a queue times out - mark it as INACTIVE (don't remove it right
	 * away if we don't have to). This is an optimization in case traffic
	 * comes later, and we don't HAVE to use a currently-inactive queue.
	 */
	for_each_set_bit(i, &timeout_queues_map, IWL_MAX_HW_QUEUES) {
		struct ieee80211_sta *sta;
		struct iwl_mvm_sta *mvmsta;
		u8 sta_id;
		int tid;
		unsigned long inactive_tid_bitmap = 0;
		unsigned long queue_tid_bitmap;

		spin_lock_bh(&mvm->queue_info_lock);
		queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;

		/* If TXQ isn't in active use anyway - nothing to do here... */
		if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
		    mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED) {
			spin_unlock_bh(&mvm->queue_info_lock);
			continue;
		}

		/* Check to see if there are inactive TIDs on this queue */
		for_each_set_bit(tid, &queue_tid_bitmap,
				 IWL_MAX_TID_COUNT + 1) {
			if (time_after(mvm->queue_info[i].last_frame_time[tid] +
				       IWL_MVM_DQA_QUEUE_TIMEOUT, now))
				continue;

			inactive_tid_bitmap |= BIT(tid);
		}
		spin_unlock_bh(&mvm->queue_info_lock);

		/* If all TIDs are active - finish check on this queue */
		if (!inactive_tid_bitmap)
			continue;

		/*
		 * If we are here - the queue hadn't been served recently and
		 * is in use
		 */

		sta_id = mvm->queue_info[i].ra_sta_id;
		sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

		/*
		 * If the STA doesn't exist anymore, it isn't an error. It could
		 * be that it was removed since getting the queues, and in this
		 * case it should've inactivated its queues anyway.
		 */
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);

		spin_lock_bh(&mvmsta->lock);
		spin_lock(&mvm->queue_info_lock);
		iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
					     inactive_tid_bitmap);
		spin_unlock(&mvm->queue_info_lock);
		spin_unlock_bh(&mvmsta->lock);
	}

	rcu_read_unlock();
}

void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
					  struct ieee80211_vif *vif,
					  const struct ieee80211_sta *sta,
					  u16 tid)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_ba *ba_trig;

	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
		return;

	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
	ba_trig = (void *)trig->data;
	if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
					   ieee80211_vif_to_wdev(vif), trig))
		return;

	if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(tid)))
		return;

	iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
				"Frame from %pM timed out, tid %d",
				sta->addr, tid);
}

void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime)
{
	bool ps_disabled;

	lockdep_assert_held(&mvm->mutex);

	/* Disable power save when reading GP2 */
	ps_disabled = mvm->ps_disabled;
	if (!ps_disabled) {
		mvm->ps_disabled = true;
		iwl_mvm_power_update_device(mvm);
	}

	*gp2 = iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);
	*boottime = ktime_get_boot_ns();

	if (!ps_disabled) {
		mvm->ps_disabled = ps_disabled;
		iwl_mvm_power_update_device(mvm);
	}
}
1447