1 /******************************************************************************
2  *
3  * This file is provided under a dual BSD/GPLv2 license.  When using or
4  * redistributing this file, you may do so under either license.
5  *
6  * GPL LICENSE SUMMARY
7  *
8  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10  * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
11  * Copyright(c) 2018 Intel Corporation
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of version 2 of the GNU General Public License as
15  * published by the Free Software Foundation.
16  *
17  * This program is distributed in the hope that it will be useful, but
18  * WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
20  * General Public License for more details.
21  *
22  * The full GNU General Public License is included in this distribution
23  * in the file called COPYING.
24  *
25  * Contact Information:
26  *  Intel Linux Wireless <linuxwifi@intel.com>
27  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
28  *
29  * BSD LICENSE
30  *
31  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
32  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
33  * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
34  * Copyright(c) 2018 Intel Corporation
35  * All rights reserved.
36  *
37  * Redistribution and use in source and binary forms, with or without
38  * modification, are permitted provided that the following conditions
39  * are met:
40  *
41  *  * Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  *  * Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in
45  *    the documentation and/or other materials provided with the
46  *    distribution.
47  *  * Neither the name Intel Corporation nor the names of its
48  *    contributors may be used to endorse or promote products derived
49  *    from this software without specific prior written permission.
50  *
51  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
52  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
53  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
54  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
55  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
56  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
57  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
58  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
59  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
60  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
61  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
62  *
63  *****************************************************************************/
64 #include <net/mac80211.h>
65 
66 #include "iwl-debug.h"
67 #include "iwl-io.h"
68 #include "iwl-prph.h"
69 #include "iwl-csr.h"
70 #include "mvm.h"
71 #include "fw/api/rs.h"
72 
73 /*
74  * Will return 0 even if the cmd failed when RFKILL is asserted unless
75  * CMD_WANT_SKB is set in cmd->flags.
76  */
77 int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd)
78 {
79 	int ret;
80 
81 #if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
82 	if (WARN_ON(mvm->d3_test_active))
83 		return -EIO;
84 #endif
85 
86 	/*
87 	 * Synchronous commands from this op-mode must hold
88 	 * the mutex, this ensures we don't try to send two
89 	 * (or more) synchronous commands at a time.
90 	 */
91 	if (!(cmd->flags & CMD_ASYNC))
92 		lockdep_assert_held(&mvm->mutex);
93 
94 	ret = iwl_trans_send_cmd(mvm->trans, cmd);
95 
96 	/*
97 	 * If the caller wants the SKB, then don't hide any problems, the
98 	 * caller might access the response buffer which will be NULL if
99 	 * the command failed.
100 	 */
101 	if (cmd->flags & CMD_WANT_SKB)
102 		return ret;
103 
104 	/* Silently ignore failures if RFKILL is asserted */
105 	if (!ret || ret == -ERFKILL)
106 		return 0;
107 	return ret;
108 }
109 
110 int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id,
111 			 u32 flags, u16 len, const void *data)
112 {
113 	struct iwl_host_cmd cmd = {
114 		.id = id,
115 		.len = { len, },
116 		.data = { data, },
117 		.flags = flags,
118 	};
119 
120 	return iwl_mvm_send_cmd(mvm, &cmd);
121 }
122 
123 /*
124  * We assume that the caller set the status to the success value
125  */
126 int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
127 			    u32 *status)
128 {
129 	struct iwl_rx_packet *pkt;
130 	struct iwl_cmd_response *resp;
131 	int ret, resp_len;
132 
133 	lockdep_assert_held(&mvm->mutex);
134 
135 #if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
136 	if (WARN_ON(mvm->d3_test_active))
137 		return -EIO;
138 #endif
139 
140 	/*
141 	 * Only synchronous commands can wait for status,
142 	 * we use WANT_SKB so the caller can't.
143 	 */
144 	if (WARN_ONCE(cmd->flags & (CMD_ASYNC | CMD_WANT_SKB),
145 		      "cmd flags %x", cmd->flags))
146 		return -EINVAL;
147 
148 	cmd->flags |= CMD_WANT_SKB;
149 
150 	ret = iwl_trans_send_cmd(mvm->trans, cmd);
151 	if (ret == -ERFKILL) {
152 		/*
153 		 * The command failed because of RFKILL, don't update
154 		 * the status, leave it as success and return 0.
155 		 */
156 		return 0;
157 	} else if (ret) {
158 		return ret;
159 	}
160 
161 	pkt = cmd->resp_pkt;
162 
163 	resp_len = iwl_rx_packet_payload_len(pkt);
164 	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
165 		ret = -EIO;
166 		goto out_free_resp;
167 	}
168 
169 	resp = (void *)pkt->data;
170 	*status = le32_to_cpu(resp->status);
171  out_free_resp:
172 	iwl_free_resp(cmd);
173 	return ret;
174 }
175 
176 /*
177  * We assume that the caller set the status to the sucess value
178  */
179 int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len,
180 				const void *data, u32 *status)
181 {
182 	struct iwl_host_cmd cmd = {
183 		.id = id,
184 		.len = { len, },
185 		.data = { data, },
186 	};
187 
188 	return iwl_mvm_send_cmd_status(mvm, &cmd, status);
189 }
190 
191 #define IWL_DECLARE_RATE_INFO(r) \
192 	[IWL_RATE_##r##M_INDEX] = IWL_RATE_##r##M_PLCP
193 
194 /*
195  * Translate from fw_rate_index (IWL_RATE_XXM_INDEX) to PLCP
196  */
197 static const u8 fw_rate_idx_to_plcp[IWL_RATE_COUNT] = {
198 	IWL_DECLARE_RATE_INFO(1),
199 	IWL_DECLARE_RATE_INFO(2),
200 	IWL_DECLARE_RATE_INFO(5),
201 	IWL_DECLARE_RATE_INFO(11),
202 	IWL_DECLARE_RATE_INFO(6),
203 	IWL_DECLARE_RATE_INFO(9),
204 	IWL_DECLARE_RATE_INFO(12),
205 	IWL_DECLARE_RATE_INFO(18),
206 	IWL_DECLARE_RATE_INFO(24),
207 	IWL_DECLARE_RATE_INFO(36),
208 	IWL_DECLARE_RATE_INFO(48),
209 	IWL_DECLARE_RATE_INFO(54),
210 };
211 
212 int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
213 					enum nl80211_band band)
214 {
215 	int rate = rate_n_flags & RATE_LEGACY_RATE_MSK;
216 	int idx;
217 	int band_offset = 0;
218 
219 	/* Legacy rate format, search for match in table */
220 	if (band != NL80211_BAND_2GHZ)
221 		band_offset = IWL_FIRST_OFDM_RATE;
222 	for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
223 		if (fw_rate_idx_to_plcp[idx] == rate)
224 			return idx - band_offset;
225 
226 	return -1;
227 }
228 
229 u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx)
230 {
231 	/* Get PLCP rate for tx_cmd->rate_n_flags */
232 	return fw_rate_idx_to_plcp[rate_idx];
233 }
234 
235 u8 iwl_mvm_mac80211_ac_to_ucode_ac(enum ieee80211_ac_numbers ac)
236 {
237 	static const u8 mac80211_ac_to_ucode_ac[] = {
238 		AC_VO,
239 		AC_VI,
240 		AC_BE,
241 		AC_BK
242 	};
243 
244 	return mac80211_ac_to_ucode_ac[ac];
245 }
246 
247 void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
248 {
249 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
250 	struct iwl_error_resp *err_resp = (void *)pkt->data;
251 
252 	IWL_ERR(mvm, "FW Error notification: type 0x%08X cmd_id 0x%02X\n",
253 		le32_to_cpu(err_resp->error_type), err_resp->cmd_id);
254 	IWL_ERR(mvm, "FW Error notification: seq 0x%04X service 0x%08X\n",
255 		le16_to_cpu(err_resp->bad_cmd_seq_num),
256 		le32_to_cpu(err_resp->error_service));
257 	IWL_ERR(mvm, "FW Error notification: timestamp 0x%016llX\n",
258 		le64_to_cpu(err_resp->timestamp));
259 }
260 
261 /*
262  * Returns the first antenna as ANT_[ABC], as defined in iwl-config.h.
263  * The parameter should also be a combination of ANT_[ABC].
264  */
265 u8 first_antenna(u8 mask)
266 {
267 	BUILD_BUG_ON(ANT_A != BIT(0)); /* using ffs is wrong if not */
268 	if (WARN_ON_ONCE(!mask)) /* ffs will return 0 if mask is zeroed */
269 		return BIT(0);
270 	return BIT(ffs(mask) - 1);
271 }
272 
273 /*
274  * Toggles between TX antennas to send the probe request on.
275  * Receives the bitmask of valid TX antennas and the *index* used
276  * for the last TX, and returns the next valid *index* to use.
277  * In order to set it in the tx_cmd, must do BIT(idx).
278  */
279 u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
280 {
281 	u8 ind = last_idx;
282 	int i;
283 
284 	for (i = 0; i < MAX_ANT_NUM; i++) {
285 		ind = (ind + 1) % MAX_ANT_NUM;
286 		if (valid & BIT(ind))
287 			return ind;
288 	}
289 
290 	WARN_ONCE(1, "Failed to toggle between antennas 0x%x", valid);
291 	return last_idx;
292 }
293 
294 #define FW_SYSASSERT_CPU_MASK 0xf0000000
295 static const struct {
296 	const char *name;
297 	u8 num;
298 } advanced_lookup[] = {
299 	{ "NMI_INTERRUPT_WDG", 0x34 },
300 	{ "SYSASSERT", 0x35 },
301 	{ "UCODE_VERSION_MISMATCH", 0x37 },
302 	{ "BAD_COMMAND", 0x38 },
303 	{ "BAD_COMMAND", 0x39 },
304 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
305 	{ "FATAL_ERROR", 0x3D },
306 	{ "NMI_TRM_HW_ERR", 0x46 },
307 	{ "NMI_INTERRUPT_TRM", 0x4C },
308 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
309 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
310 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
311 	{ "NMI_INTERRUPT_HOST", 0x66 },
312 	{ "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
313 	{ "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
314 	{ "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
315 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
316 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
317 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
318 	{ "ADVANCED_SYSASSERT", 0 },
319 };
320 
321 static const char *desc_lookup(u32 num)
322 {
323 	int i;
324 
325 	for (i = 0; i < ARRAY_SIZE(advanced_lookup) - 1; i++)
326 		if (advanced_lookup[i].num == (num & ~FW_SYSASSERT_CPU_MASK))
327 			return advanced_lookup[i].name;
328 
329 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
330 	return advanced_lookup[i].name;
331 }
332 
333 /*
334  * Note: This structure is read from the device with IO accesses,
335  * and the reading already does the endian conversion. As it is
336  * read with u32-sized accesses, any members with a different size
337  * need to be ordered correctly though!
338  */
339 struct iwl_error_event_table_v1 {
340 	u32 valid;		/* (nonzero) valid, (0) log is empty */
341 	u32 error_id;		/* type of error */
342 	u32 pc;			/* program counter */
343 	u32 blink1;		/* branch link */
344 	u32 blink2;		/* branch link */
345 	u32 ilink1;		/* interrupt link */
346 	u32 ilink2;		/* interrupt link */
347 	u32 data1;		/* error-specific data */
348 	u32 data2;		/* error-specific data */
349 	u32 data3;		/* error-specific data */
350 	u32 bcon_time;		/* beacon timer */
351 	u32 tsf_low;		/* network timestamp function timer */
352 	u32 tsf_hi;		/* network timestamp function timer */
353 	u32 gp1;		/* GP1 timer register */
354 	u32 gp2;		/* GP2 timer register */
355 	u32 gp3;		/* GP3 timer register */
356 	u32 ucode_ver;		/* uCode version */
357 	u32 hw_ver;		/* HW Silicon version */
358 	u32 brd_ver;		/* HW board version */
359 	u32 log_pc;		/* log program counter */
360 	u32 frame_ptr;		/* frame pointer */
361 	u32 stack_ptr;		/* stack pointer */
362 	u32 hcmd;		/* last host command header */
363 	u32 isr0;		/* isr status register LMPM_NIC_ISR0:
364 				 * rxtx_flag */
365 	u32 isr1;		/* isr status register LMPM_NIC_ISR1:
366 				 * host_flag */
367 	u32 isr2;		/* isr status register LMPM_NIC_ISR2:
368 				 * enc_flag */
369 	u32 isr3;		/* isr status register LMPM_NIC_ISR3:
370 				 * time_flag */
371 	u32 isr4;		/* isr status register LMPM_NIC_ISR4:
372 				 * wico interrupt */
373 	u32 isr_pref;		/* isr status register LMPM_NIC_PREF_STAT */
374 	u32 wait_event;		/* wait event() caller address */
375 	u32 l2p_control;	/* L2pControlField */
376 	u32 l2p_duration;	/* L2pDurationField */
377 	u32 l2p_mhvalid;	/* L2pMhValidBits */
378 	u32 l2p_addr_match;	/* L2pAddrMatchStat */
	u32 lmpm_pmg_sel;	/* indicates which clocks are turned on
				 * (LMPM_PMG_SEL) */
	u32 u_timestamp;	/* indicates the date and time of the
				 * compilation */
383 	u32 flow_handler;	/* FH read/write pointers, RX credit */
384 } __packed /* LOG_ERROR_TABLE_API_S_VER_1 */;
385 
386 struct iwl_error_event_table {
387 	u32 valid;		/* (nonzero) valid, (0) log is empty */
388 	u32 error_id;		/* type of error */
389 	u32 trm_hw_status0;	/* TRM HW status */
390 	u32 trm_hw_status1;	/* TRM HW status */
391 	u32 blink2;		/* branch link */
392 	u32 ilink1;		/* interrupt link */
393 	u32 ilink2;		/* interrupt link */
394 	u32 data1;		/* error-specific data */
395 	u32 data2;		/* error-specific data */
396 	u32 data3;		/* error-specific data */
397 	u32 bcon_time;		/* beacon timer */
398 	u32 tsf_low;		/* network timestamp function timer */
399 	u32 tsf_hi;		/* network timestamp function timer */
400 	u32 gp1;		/* GP1 timer register */
401 	u32 gp2;		/* GP2 timer register */
402 	u32 fw_rev_type;	/* firmware revision type */
403 	u32 major;		/* uCode version major */
404 	u32 minor;		/* uCode version minor */
405 	u32 hw_ver;		/* HW Silicon version */
406 	u32 brd_ver;		/* HW board version */
407 	u32 log_pc;		/* log program counter */
408 	u32 frame_ptr;		/* frame pointer */
409 	u32 stack_ptr;		/* stack pointer */
410 	u32 hcmd;		/* last host command header */
411 	u32 isr0;		/* isr status register LMPM_NIC_ISR0:
412 				 * rxtx_flag */
413 	u32 isr1;		/* isr status register LMPM_NIC_ISR1:
414 				 * host_flag */
415 	u32 isr2;		/* isr status register LMPM_NIC_ISR2:
416 				 * enc_flag */
417 	u32 isr3;		/* isr status register LMPM_NIC_ISR3:
418 				 * time_flag */
419 	u32 isr4;		/* isr status register LMPM_NIC_ISR4:
420 				 * wico interrupt */
421 	u32 last_cmd_id;	/* last HCMD id handled by the firmware */
422 	u32 wait_event;		/* wait event() caller address */
423 	u32 l2p_control;	/* L2pControlField */
424 	u32 l2p_duration;	/* L2pDurationField */
425 	u32 l2p_mhvalid;	/* L2pMhValidBits */
426 	u32 l2p_addr_match;	/* L2pAddrMatchStat */
	u32 lmpm_pmg_sel;	/* indicates which clocks are turned on
				 * (LMPM_PMG_SEL) */
	u32 u_timestamp;	/* indicates the date and time of the
				 * compilation */
431 	u32 flow_handler;	/* FH read/write pointers, RX credit */
432 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
433 
434 /*
435  * UMAC error struct - relevant starting from family 8000 chip.
436  * Note: This structure is read from the device with IO accesses,
437  * and the reading already does the endian conversion. As it is
438  * read with u32-sized accesses, any members with a different size
439  * need to be ordered correctly though!
440  */
441 struct iwl_umac_error_event_table {
442 	u32 valid;		/* (nonzero) valid, (0) log is empty */
443 	u32 error_id;		/* type of error */
444 	u32 blink1;		/* branch link */
445 	u32 blink2;		/* branch link */
446 	u32 ilink1;		/* interrupt link */
447 	u32 ilink2;		/* interrupt link */
448 	u32 data1;		/* error-specific data */
449 	u32 data2;		/* error-specific data */
450 	u32 data3;		/* error-specific data */
451 	u32 umac_major;
452 	u32 umac_minor;
453 	u32 frame_pointer;	/* core register 27*/
454 	u32 stack_pointer;	/* core register 28 */
455 	u32 cmd_header;		/* latest host cmd sent to UMAC */
456 	u32 nic_isr_pref;	/* ISR status register */
457 } __packed;
458 
459 #define ERROR_START_OFFSET  (1 * sizeof(u32))
460 #define ERROR_ELEM_SIZE     (7 * sizeof(u32))
461 
462 static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
463 {
464 	struct iwl_trans *trans = mvm->trans;
465 	struct iwl_umac_error_event_table table;
466 	u32 base = mvm->trans->dbg.umac_error_event_table;
467 
468 	if (!mvm->support_umac_log &&
469 	    !(mvm->trans->dbg.error_event_table_tlv_status &
470 	      IWL_ERROR_EVENT_TABLE_UMAC))
471 		return;
472 
473 	iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
474 
475 	if (table.valid)
476 		mvm->fwrt.dump.umac_err_id = table.error_id;
477 
478 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
479 		IWL_ERR(trans, "Start IWL Error Log Dump:\n");
480 		IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
481 			mvm->status, table.valid);
482 	}
483 
484 	IWL_ERR(mvm, "0x%08X | %s\n", table.error_id,
485 		desc_lookup(table.error_id));
486 	IWL_ERR(mvm, "0x%08X | umac branchlink1\n", table.blink1);
487 	IWL_ERR(mvm, "0x%08X | umac branchlink2\n", table.blink2);
488 	IWL_ERR(mvm, "0x%08X | umac interruptlink1\n", table.ilink1);
489 	IWL_ERR(mvm, "0x%08X | umac interruptlink2\n", table.ilink2);
490 	IWL_ERR(mvm, "0x%08X | umac data1\n", table.data1);
491 	IWL_ERR(mvm, "0x%08X | umac data2\n", table.data2);
492 	IWL_ERR(mvm, "0x%08X | umac data3\n", table.data3);
493 	IWL_ERR(mvm, "0x%08X | umac major\n", table.umac_major);
494 	IWL_ERR(mvm, "0x%08X | umac minor\n", table.umac_minor);
495 	IWL_ERR(mvm, "0x%08X | frame pointer\n", table.frame_pointer);
496 	IWL_ERR(mvm, "0x%08X | stack pointer\n", table.stack_pointer);
497 	IWL_ERR(mvm, "0x%08X | last host cmd\n", table.cmd_header);
498 	IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref);
499 }
500 
501 static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u8 lmac_num)
502 {
503 	struct iwl_trans *trans = mvm->trans;
504 	struct iwl_error_event_table table;
505 	u32 val, base = mvm->trans->dbg.lmac_error_event_table[lmac_num];
506 
507 	if (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) {
508 		if (!base)
509 			base = mvm->fw->init_errlog_ptr;
510 	} else {
511 		if (!base)
512 			base = mvm->fw->inst_errlog_ptr;
513 	}
514 
515 	if (base < 0x400000) {
516 		IWL_ERR(mvm,
517 			"Not valid error log pointer 0x%08X for %s uCode\n",
518 			base,
519 			(mvm->fwrt.cur_fw_img == IWL_UCODE_INIT)
520 			? "Init" : "RT");
521 		return;
522 	}
523 
524 	/* check if there is a HW error */
525 	val = iwl_trans_read_mem32(trans, base);
526 	if (((val & ~0xf) == 0xa5a5a5a0) || ((val & ~0xf) == 0x5a5a5a50)) {
527 		int err;
528 
529 		IWL_ERR(trans, "HW error, resetting before reading\n");
530 
531 		/* reset the device */
532 		iwl_trans_sw_reset(trans);
533 
534 		err = iwl_finish_nic_init(trans, trans->trans_cfg);
535 		if (err)
536 			return;
537 	}
538 
539 	iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
540 
541 	if (table.valid)
542 		mvm->fwrt.dump.lmac_err_id[lmac_num] = table.error_id;
543 
544 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
545 		IWL_ERR(trans, "Start IWL Error Log Dump:\n");
546 		IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
547 			mvm->status, table.valid);
548 	}
549 
550 	/* Do not change this output - scripts rely on it */
551 
552 	IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
553 
554 	IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
555 		desc_lookup(table.error_id));
556 	IWL_ERR(mvm, "0x%08X | trm_hw_status0\n", table.trm_hw_status0);
557 	IWL_ERR(mvm, "0x%08X | trm_hw_status1\n", table.trm_hw_status1);
558 	IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2);
559 	IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1);
560 	IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2);
561 	IWL_ERR(mvm, "0x%08X | data1\n", table.data1);
562 	IWL_ERR(mvm, "0x%08X | data2\n", table.data2);
563 	IWL_ERR(mvm, "0x%08X | data3\n", table.data3);
564 	IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time);
565 	IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low);
566 	IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi);
567 	IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1);
568 	IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2);
569 	IWL_ERR(mvm, "0x%08X | uCode revision type\n", table.fw_rev_type);
570 	IWL_ERR(mvm, "0x%08X | uCode version major\n", table.major);
571 	IWL_ERR(mvm, "0x%08X | uCode version minor\n", table.minor);
572 	IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver);
573 	IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver);
574 	IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd);
575 	IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0);
576 	IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1);
577 	IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2);
578 	IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3);
579 	IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4);
580 	IWL_ERR(mvm, "0x%08X | last cmd Id\n", table.last_cmd_id);
581 	IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event);
582 	IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control);
583 	IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration);
584 	IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
585 	IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
586 	IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
587 	IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
588 	IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);
589 }
590 
591 void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
592 {
593 	if (!test_bit(STATUS_DEVICE_ENABLED, &mvm->trans->status)) {
594 		IWL_ERR(mvm,
595 			"DEVICE_ENABLED bit is not set. Aborting dump.\n");
596 		return;
597 	}
598 
599 	iwl_mvm_dump_lmac_error_log(mvm, 0);
600 
601 	if (mvm->trans->dbg.lmac_error_event_table[1])
602 		iwl_mvm_dump_lmac_error_log(mvm, 1);
603 
604 	iwl_mvm_dump_umac_error_log(mvm);
605 
606 	iwl_fw_error_print_fseq_regs(&mvm->fwrt);
607 }
608 
609 int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
610 			 int tid, int frame_limit, u16 ssn)
611 {
612 	struct iwl_scd_txq_cfg_cmd cmd = {
613 		.scd_queue = queue,
614 		.action = SCD_CFG_ENABLE_QUEUE,
615 		.window = frame_limit,
616 		.sta_id = sta_id,
617 		.ssn = cpu_to_le16(ssn),
618 		.tx_fifo = fifo,
619 		.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
620 			      queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE),
621 		.tid = tid,
622 	};
623 	int ret;
624 
625 	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
626 		return -EINVAL;
627 
628 	if (WARN(mvm->queue_info[queue].tid_bitmap == 0,
629 		 "Trying to reconfig unallocated queue %d\n", queue))
630 		return -ENXIO;
631 
632 	IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);
633 
634 	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
635 	WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n",
636 		  queue, fifo, ret);
637 
638 	return ret;
639 }
640 
641 /**
642  * iwl_mvm_send_lq_cmd() - Send link quality command
643  * @sync: This command can be sent synchronously.
644  *
645  * The link quality command is sent as the last step of station creation.
646  * This is the special case in which init is set and we call a callback in
647  * this case to clear the state indicating that station creation is in
648  * progress.
649  */
650 int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq)
651 {
652 	struct iwl_host_cmd cmd = {
653 		.id = LQ_CMD,
654 		.len = { sizeof(struct iwl_lq_cmd), },
655 		.flags = CMD_ASYNC,
656 		.data = { lq, },
657 	};
658 
659 	if (WARN_ON(lq->sta_id == IWL_MVM_INVALID_STA ||
660 		    iwl_mvm_has_tlc_offload(mvm)))
661 		return -EINVAL;
662 
663 	return iwl_mvm_send_cmd(mvm, &cmd);
664 }
665 
666 /**
667  * iwl_mvm_update_smps - Get a request to change the SMPS mode
668  * @req_type: The part of the driver who call for a change.
669  * @smps_requests: The request to change the SMPS mode.
670  *
671  * Get a requst to change the SMPS mode,
672  * and change it according to all other requests in the driver.
673  */
674 void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
675 			 enum iwl_mvm_smps_type_request req_type,
676 			 enum ieee80211_smps_mode smps_request)
677 {
678 	struct iwl_mvm_vif *mvmvif;
679 	enum ieee80211_smps_mode smps_mode;
680 	int i;
681 
682 	lockdep_assert_held(&mvm->mutex);
683 
	/* SMPS is irrelevant for NICs that don't have at least 2 RX antennas */
685 	if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
686 		return;
687 
688 	if (vif->type == NL80211_IFTYPE_AP)
689 		smps_mode = IEEE80211_SMPS_OFF;
690 	else
691 		smps_mode = IEEE80211_SMPS_AUTOMATIC;
692 
693 	mvmvif = iwl_mvm_vif_from_mac80211(vif);
694 	mvmvif->smps_requests[req_type] = smps_request;
695 	for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
696 		if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC) {
697 			smps_mode = IEEE80211_SMPS_STATIC;
698 			break;
699 		}
700 		if (mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC)
701 			smps_mode = IEEE80211_SMPS_DYNAMIC;
702 	}
703 
704 	ieee80211_request_smps(vif, smps_mode);
705 }
706 
707 int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear)
708 {
709 	struct iwl_statistics_cmd scmd = {
710 		.flags = clear ? cpu_to_le32(IWL_STATISTICS_FLG_CLEAR) : 0,
711 	};
712 	struct iwl_host_cmd cmd = {
713 		.id = STATISTICS_CMD,
714 		.len[0] = sizeof(scmd),
715 		.data[0] = &scmd,
716 		.flags = CMD_WANT_SKB,
717 	};
718 	int ret;
719 
720 	ret = iwl_mvm_send_cmd(mvm, &cmd);
721 	if (ret)
722 		return ret;
723 
724 	iwl_mvm_handle_rx_statistics(mvm, cmd.resp_pkt);
725 	iwl_free_resp(&cmd);
726 
727 	if (clear)
728 		iwl_mvm_accu_radio_stats(mvm);
729 
730 	return 0;
731 }
732 
733 void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm)
734 {
735 	mvm->accu_radio_stats.rx_time += mvm->radio_stats.rx_time;
736 	mvm->accu_radio_stats.tx_time += mvm->radio_stats.tx_time;
737 	mvm->accu_radio_stats.on_time_rf += mvm->radio_stats.on_time_rf;
738 	mvm->accu_radio_stats.on_time_scan += mvm->radio_stats.on_time_scan;
739 }
740 
741 static void iwl_mvm_diversity_iter(void *_data, u8 *mac,
742 				   struct ieee80211_vif *vif)
743 {
744 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
745 	bool *result = _data;
746 	int i;
747 
748 	for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
749 		if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC ||
750 		    mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC)
751 			*result = false;
752 	}
753 }
754 
755 bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
756 {
757 	bool result = true;
758 
759 	lockdep_assert_held(&mvm->mutex);
760 
761 	if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
762 		return false;
763 
764 	if (mvm->cfg->rx_with_siso_diversity)
765 		return false;
766 
767 	ieee80211_iterate_active_interfaces_atomic(
768 			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
769 			iwl_mvm_diversity_iter, &result);
770 
771 	return result;
772 }
773 
774 void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm,
775 				  bool low_latency, u16 mac_id)
776 {
777 	struct iwl_mac_low_latency_cmd cmd = {
778 		.mac_id = cpu_to_le32(mac_id)
779 	};
780 
781 	if (!fw_has_capa(&mvm->fw->ucode_capa,
782 			 IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA))
783 		return;
784 
785 	if (low_latency) {
786 		/* currently we don't care about the direction */
787 		cmd.low_latency_rx = 1;
788 		cmd.low_latency_tx = 1;
789 	}
790 
791 	if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(LOW_LATENCY_CMD,
792 						 MAC_CONF_GROUP, 0),
793 				 0, sizeof(cmd), &cmd))
794 		IWL_ERR(mvm, "Failed to send low latency command\n");
795 }
796 
797 int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
798 			       bool low_latency,
799 			       enum iwl_mvm_low_latency_cause cause)
800 {
801 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
802 	int res;
803 	bool prev;
804 
805 	lockdep_assert_held(&mvm->mutex);
806 
807 	prev = iwl_mvm_vif_low_latency(mvmvif);
808 	iwl_mvm_vif_set_low_latency(mvmvif, low_latency, cause);
809 
810 	low_latency = iwl_mvm_vif_low_latency(mvmvif);
811 
812 	if (low_latency == prev)
813 		return 0;
814 
815 	iwl_mvm_send_low_latency_cmd(mvm, low_latency, mvmvif->id);
816 
817 	res = iwl_mvm_update_quotas(mvm, false, NULL);
818 	if (res)
819 		return res;
820 
821 	iwl_mvm_bt_coex_vif_change(mvm);
822 
823 	return iwl_mvm_power_update_mac(mvm);
824 }
825 
826 struct iwl_mvm_low_latency_iter {
827 	bool result;
828 	bool result_per_band[NUM_NL80211_BANDS];
829 };
830 
831 static void iwl_mvm_ll_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
832 {
833 	struct iwl_mvm_low_latency_iter *result = _data;
834 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
835 	enum nl80211_band band;
836 
837 	if (iwl_mvm_vif_low_latency(mvmvif)) {
838 		result->result = true;
839 
840 		if (!mvmvif->phy_ctxt)
841 			return;
842 
843 		band = mvmvif->phy_ctxt->channel->band;
844 		result->result_per_band[band] = true;
845 	}
846 }
847 
848 bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
849 {
850 	struct iwl_mvm_low_latency_iter data = {};
851 
852 	ieee80211_iterate_active_interfaces_atomic(
853 			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
854 			iwl_mvm_ll_iter, &data);
855 
856 	return data.result;
857 }
858 
859 bool iwl_mvm_low_latency_band(struct iwl_mvm *mvm, enum nl80211_band band)
860 {
861 	struct iwl_mvm_low_latency_iter data = {};
862 
863 	ieee80211_iterate_active_interfaces_atomic(
864 			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
865 			iwl_mvm_ll_iter, &data);
866 
867 	return data.result_per_band[band];
868 }
869 
870 struct iwl_bss_iter_data {
871 	struct ieee80211_vif *vif;
872 	bool error;
873 };
874 
875 static void iwl_mvm_bss_iface_iterator(void *_data, u8 *mac,
876 				       struct ieee80211_vif *vif)
877 {
878 	struct iwl_bss_iter_data *data = _data;
879 
880 	if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
881 		return;
882 
883 	if (data->vif) {
884 		data->error = true;
885 		return;
886 	}
887 
888 	data->vif = vif;
889 }
890 
891 struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm)
892 {
893 	struct iwl_bss_iter_data bss_iter_data = {};
894 
895 	ieee80211_iterate_active_interfaces_atomic(
896 		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
897 		iwl_mvm_bss_iface_iterator, &bss_iter_data);
898 
899 	if (bss_iter_data.error) {
900 		IWL_ERR(mvm, "More than one managed interface active!\n");
901 		return ERR_PTR(-EINVAL);
902 	}
903 
904 	return bss_iter_data.vif;
905 }
906 
907 struct iwl_sta_iter_data {
908 	bool assoc;
909 };
910 
911 static void iwl_mvm_sta_iface_iterator(void *_data, u8 *mac,
912 				       struct ieee80211_vif *vif)
913 {
914 	struct iwl_sta_iter_data *data = _data;
915 
916 	if (vif->type != NL80211_IFTYPE_STATION)
917 		return;
918 
919 	if (vif->bss_conf.assoc)
920 		data->assoc = true;
921 }
922 
923 bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm)
924 {
925 	struct iwl_sta_iter_data data = {
926 		.assoc = false,
927 	};
928 
929 	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
930 						   IEEE80211_IFACE_ITER_NORMAL,
931 						   iwl_mvm_sta_iface_iterator,
932 						   &data);
933 	return data.assoc;
934 }
935 
936 unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
937 				    struct ieee80211_vif *vif,
938 				    bool tdls, bool cmd_q)
939 {
940 	struct iwl_fw_dbg_trigger_tlv *trigger;
941 	struct iwl_fw_dbg_trigger_txq_timer *txq_timer;
942 	unsigned int default_timeout = cmd_q ?
943 		IWL_DEF_WD_TIMEOUT :
944 		mvm->trans->trans_cfg->base_params->wd_timeout;
945 
946 	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS)) {
947 		/*
948 		 * We can't know when the station is asleep or awake, so we
949 		 * must disable the queue hang detection.
950 		 */
951 		if (fw_has_capa(&mvm->fw->ucode_capa,
952 				IWL_UCODE_TLV_CAPA_STA_PM_NOTIF) &&
953 		    vif && vif->type == NL80211_IFTYPE_AP)
954 			return IWL_WATCHDOG_DISABLED;
955 		return iwlmvm_mod_params.tfd_q_hang_detect ?
956 			default_timeout : IWL_WATCHDOG_DISABLED;
957 	}
958 
959 	trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS);
960 	txq_timer = (void *)trigger->data;
961 
962 	if (tdls)
963 		return le32_to_cpu(txq_timer->tdls);
964 
965 	if (cmd_q)
966 		return le32_to_cpu(txq_timer->command_queue);
967 
968 	if (WARN_ON(!vif))
969 		return default_timeout;
970 
971 	switch (ieee80211_vif_type_p2p(vif)) {
972 	case NL80211_IFTYPE_ADHOC:
973 		return le32_to_cpu(txq_timer->ibss);
974 	case NL80211_IFTYPE_STATION:
975 		return le32_to_cpu(txq_timer->bss);
976 	case NL80211_IFTYPE_AP:
977 		return le32_to_cpu(txq_timer->softap);
978 	case NL80211_IFTYPE_P2P_CLIENT:
979 		return le32_to_cpu(txq_timer->p2p_client);
980 	case NL80211_IFTYPE_P2P_GO:
981 		return le32_to_cpu(txq_timer->p2p_go);
982 	case NL80211_IFTYPE_P2P_DEVICE:
983 		return le32_to_cpu(txq_timer->p2p_device);
984 	case NL80211_IFTYPE_MONITOR:
985 		return default_timeout;
986 	default:
987 		WARN_ON(1);
988 		return mvm->trans->trans_cfg->base_params->wd_timeout;
989 	}
990 }
991 
992 void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
993 			     const char *errmsg)
994 {
995 	struct iwl_fw_dbg_trigger_tlv *trig;
996 	struct iwl_fw_dbg_trigger_mlme *trig_mlme;
997 
998 	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
999 				     FW_DBG_TRIGGER_MLME);
1000 	if (!trig)
1001 		goto out;
1002 
1003 	trig_mlme = (void *)trig->data;
1004 
1005 	if (trig_mlme->stop_connection_loss &&
1006 	    --trig_mlme->stop_connection_loss)
1007 		goto out;
1008 
1009 	iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "%s", errmsg);
1010 
1011 out:
1012 	ieee80211_connection_loss(vif);
1013 }
1014 
1015 void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
1016 					  struct ieee80211_vif *vif,
1017 					  const struct ieee80211_sta *sta,
1018 					  u16 tid)
1019 {
1020 	struct iwl_fw_dbg_trigger_tlv *trig;
1021 	struct iwl_fw_dbg_trigger_ba *ba_trig;
1022 
1023 	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
1024 				     FW_DBG_TRIGGER_BA);
1025 	if (!trig)
1026 		return;
1027 
1028 	ba_trig = (void *)trig->data;
1029 
1030 	if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(tid)))
1031 		return;
1032 
1033 	iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
1034 				"Frame from %pM timed out, tid %d",
1035 				sta->addr, tid);
1036 }
1037 
1038 u8 iwl_mvm_tcm_load_percentage(u32 airtime, u32 elapsed)
1039 {
1040 	if (!elapsed)
1041 		return 0;
1042 
1043 	return (100 * airtime / elapsed) / USEC_PER_MSEC;
1044 }
1045 
1046 static enum iwl_mvm_traffic_load
1047 iwl_mvm_tcm_load(struct iwl_mvm *mvm, u32 airtime, unsigned long elapsed)
1048 {
1049 	u8 load = iwl_mvm_tcm_load_percentage(airtime, elapsed);
1050 
1051 	if (load > IWL_MVM_TCM_LOAD_HIGH_THRESH)
1052 		return IWL_MVM_TRAFFIC_HIGH;
1053 	if (load > IWL_MVM_TCM_LOAD_MEDIUM_THRESH)
1054 		return IWL_MVM_TRAFFIC_MEDIUM;
1055 
1056 	return IWL_MVM_TRAFFIC_LOW;
1057 }
1058 
1059 struct iwl_mvm_tcm_iter_data {
1060 	struct iwl_mvm *mvm;
1061 	bool any_sent;
1062 };
1063 
1064 static void iwl_mvm_tcm_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
1065 {
1066 	struct iwl_mvm_tcm_iter_data *data = _data;
1067 	struct iwl_mvm *mvm = data->mvm;
1068 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1069 	bool low_latency, prev = mvmvif->low_latency & LOW_LATENCY_TRAFFIC;
1070 
1071 	if (mvmvif->id >= NUM_MAC_INDEX_DRIVER)
1072 		return;
1073 
1074 	low_latency = mvm->tcm.result.low_latency[mvmvif->id];
1075 
1076 	if (!mvm->tcm.result.change[mvmvif->id] &&
1077 	    prev == low_latency) {
1078 		iwl_mvm_update_quotas(mvm, false, NULL);
1079 		return;
1080 	}
1081 
1082 	if (prev != low_latency) {
1083 		/* this sends traffic load and updates quota as well */
1084 		iwl_mvm_update_low_latency(mvm, vif, low_latency,
1085 					   LOW_LATENCY_TRAFFIC);
1086 	} else {
1087 		iwl_mvm_update_quotas(mvm, false, NULL);
1088 	}
1089 
1090 	data->any_sent = true;
1091 }
1092 
1093 static void iwl_mvm_tcm_results(struct iwl_mvm *mvm)
1094 {
1095 	struct iwl_mvm_tcm_iter_data data = {
1096 		.mvm = mvm,
1097 		.any_sent = false,
1098 	};
1099 
1100 	mutex_lock(&mvm->mutex);
1101 
1102 	ieee80211_iterate_active_interfaces(
1103 		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1104 		iwl_mvm_tcm_iter, &data);
1105 
1106 	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
1107 		iwl_mvm_config_scan(mvm);
1108 
1109 	mutex_unlock(&mvm->mutex);
1110 }
1111 
1112 static void iwl_mvm_tcm_uapsd_nonagg_detected_wk(struct work_struct *wk)
1113 {
1114 	struct iwl_mvm *mvm;
1115 	struct iwl_mvm_vif *mvmvif;
1116 	struct ieee80211_vif *vif;
1117 
1118 	mvmvif = container_of(wk, struct iwl_mvm_vif,
1119 			      uapsd_nonagg_detected_wk.work);
1120 	vif = container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
1121 	mvm = mvmvif->mvm;
1122 
1123 	if (mvm->tcm.data[mvmvif->id].opened_rx_ba_sessions)
1124 		return;
1125 
1126 	/* remember that this AP is broken */
1127 	memcpy(mvm->uapsd_noagg_bssids[mvm->uapsd_noagg_bssid_write_idx].addr,
1128 	       vif->bss_conf.bssid, ETH_ALEN);
1129 	mvm->uapsd_noagg_bssid_write_idx++;
1130 	if (mvm->uapsd_noagg_bssid_write_idx >= IWL_MVM_UAPSD_NOAGG_LIST_LEN)
1131 		mvm->uapsd_noagg_bssid_write_idx = 0;
1132 
1133 	iwl_mvm_connection_loss(mvm, vif,
1134 				"AP isn't using AMPDU with uAPSD enabled");
1135 }
1136 
1137 static void iwl_mvm_uapsd_agg_disconnect(struct iwl_mvm *mvm,
1138 					 struct ieee80211_vif *vif)
1139 {
1140 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1141 
1142 	if (vif->type != NL80211_IFTYPE_STATION)
1143 		return;
1144 
1145 	if (!vif->bss_conf.assoc)
1146 		return;
1147 
1148 	if (!mvmvif->queue_params[IEEE80211_AC_VO].uapsd &&
1149 	    !mvmvif->queue_params[IEEE80211_AC_VI].uapsd &&
1150 	    !mvmvif->queue_params[IEEE80211_AC_BE].uapsd &&
1151 	    !mvmvif->queue_params[IEEE80211_AC_BK].uapsd)
1152 		return;
1153 
1154 	if (mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected)
1155 		return;
1156 
1157 	mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected = true;
1158 	IWL_INFO(mvm,
1159 		 "detected AP should do aggregation but isn't, likely due to U-APSD\n");
1160 	schedule_delayed_work(&mvmvif->uapsd_nonagg_detected_wk, 15 * HZ);
1161 }
1162 
1163 static void iwl_mvm_check_uapsd_agg_expected_tpt(struct iwl_mvm *mvm,
1164 						 unsigned int elapsed,
1165 						 int mac)
1166 {
1167 	u64 bytes = mvm->tcm.data[mac].uapsd_nonagg_detect.rx_bytes;
1168 	u64 tpt;
1169 	unsigned long rate;
1170 	struct ieee80211_vif *vif;
1171 
1172 	rate = ewma_rate_read(&mvm->tcm.data[mac].uapsd_nonagg_detect.rate);
1173 
1174 	if (!rate || mvm->tcm.data[mac].opened_rx_ba_sessions ||
1175 	    mvm->tcm.data[mac].uapsd_nonagg_detect.detected)
1176 		return;
1177 
1178 	if (iwl_mvm_has_new_rx_api(mvm)) {
1179 		tpt = 8 * bytes; /* kbps */
1180 		do_div(tpt, elapsed);
1181 		rate *= 1000; /* kbps */
1182 		if (tpt < 22 * rate / 100)
1183 			return;
1184 	} else {
1185 		/*
1186 		 * the rate here is actually the threshold, in 100Kbps units,
1187 		 * so do the needed conversion from bytes to 100Kbps:
1188 		 * 100kb = bits / (100 * 1000),
1189 		 * 100kbps = 100kb / (msecs / 1000) ==
1190 		 *           (bits / (100 * 1000)) / (msecs / 1000) ==
1191 		 *           bits / (100 * msecs)
1192 		 */
1193 		tpt = (8 * bytes);
1194 		do_div(tpt, elapsed * 100);
1195 		if (tpt < rate)
1196 			return;
1197 	}
1198 
1199 	rcu_read_lock();
1200 	vif = rcu_dereference(mvm->vif_id_to_mac[mac]);
1201 	if (vif)
1202 		iwl_mvm_uapsd_agg_disconnect(mvm, vif);
1203 	rcu_read_unlock();
1204 }
1205 
1206 static void iwl_mvm_tcm_iterator(void *_data, u8 *mac,
1207 				 struct ieee80211_vif *vif)
1208 {
1209 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1210 	u32 *band = _data;
1211 
1212 	if (!mvmvif->phy_ctxt)
1213 		return;
1214 
1215 	band[mvmvif->id] = mvmvif->phy_ctxt->channel->band;
1216 }
1217 
1218 static unsigned long iwl_mvm_calc_tcm_stats(struct iwl_mvm *mvm,
1219 					    unsigned long ts,
1220 					    bool handle_uapsd)
1221 {
1222 	unsigned int elapsed = jiffies_to_msecs(ts - mvm->tcm.ts);
1223 	unsigned int uapsd_elapsed =
1224 		jiffies_to_msecs(ts - mvm->tcm.uapsd_nonagg_ts);
1225 	u32 total_airtime = 0;
1226 	u32 band_airtime[NUM_NL80211_BANDS] = {0};
1227 	u32 band[NUM_MAC_INDEX_DRIVER] = {0};
1228 	int ac, mac, i;
1229 	bool low_latency = false;
1230 	enum iwl_mvm_traffic_load load, band_load;
1231 	bool handle_ll = time_after(ts, mvm->tcm.ll_ts + MVM_LL_PERIOD);
1232 
1233 	if (handle_ll)
1234 		mvm->tcm.ll_ts = ts;
1235 	if (handle_uapsd)
1236 		mvm->tcm.uapsd_nonagg_ts = ts;
1237 
1238 	mvm->tcm.result.elapsed = elapsed;
1239 
1240 	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
1241 						   IEEE80211_IFACE_ITER_NORMAL,
1242 						   iwl_mvm_tcm_iterator,
1243 						   &band);
1244 
1245 	for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) {
1246 		struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
1247 		u32 vo_vi_pkts = 0;
1248 		u32 airtime = mdata->rx.airtime + mdata->tx.airtime;
1249 
1250 		total_airtime += airtime;
1251 		band_airtime[band[mac]] += airtime;
1252 
1253 		load = iwl_mvm_tcm_load(mvm, airtime, elapsed);
1254 		mvm->tcm.result.change[mac] = load != mvm->tcm.result.load[mac];
1255 		mvm->tcm.result.load[mac] = load;
1256 		mvm->tcm.result.airtime[mac] = airtime;
1257 
1258 		for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_VI; ac++)
1259 			vo_vi_pkts += mdata->rx.pkts[ac] +
1260 				      mdata->tx.pkts[ac];
1261 
1262 		/* enable immediately with enough packets but defer disabling */
1263 		if (vo_vi_pkts > IWL_MVM_TCM_LOWLAT_ENABLE_THRESH)
1264 			mvm->tcm.result.low_latency[mac] = true;
1265 		else if (handle_ll)
1266 			mvm->tcm.result.low_latency[mac] = false;
1267 
1268 		if (handle_ll) {
1269 			/* clear old data */
1270 			memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts));
1271 			memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts));
1272 		}
1273 		low_latency |= mvm->tcm.result.low_latency[mac];
1274 
1275 		if (!mvm->tcm.result.low_latency[mac] && handle_uapsd)
1276 			iwl_mvm_check_uapsd_agg_expected_tpt(mvm, uapsd_elapsed,
1277 							     mac);
1278 		/* clear old data */
1279 		if (handle_uapsd)
1280 			mdata->uapsd_nonagg_detect.rx_bytes = 0;
1281 		memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime));
1282 		memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime));
1283 	}
1284 
1285 	load = iwl_mvm_tcm_load(mvm, total_airtime, elapsed);
1286 	mvm->tcm.result.global_change = load != mvm->tcm.result.global_load;
1287 	mvm->tcm.result.global_load = load;
1288 
1289 	for (i = 0; i < NUM_NL80211_BANDS; i++) {
1290 		band_load = iwl_mvm_tcm_load(mvm, band_airtime[i], elapsed);
1291 		mvm->tcm.result.band_load[i] = band_load;
1292 	}
1293 
1294 	/*
1295 	 * If the current load isn't low we need to force re-evaluation
1296 	 * in the TCM period, so that we can return to low load if there
1297 	 * was no traffic at all (and thus iwl_mvm_recalc_tcm didn't get
1298 	 * triggered by traffic).
1299 	 */
1300 	if (load != IWL_MVM_TRAFFIC_LOW)
1301 		return MVM_TCM_PERIOD;
1302 	/*
1303 	 * If low-latency is active we need to force re-evaluation after
1304 	 * (the longer) MVM_LL_PERIOD, so that we can disable low-latency
1305 	 * when there's no traffic at all.
1306 	 */
1307 	if (low_latency)
1308 		return MVM_LL_PERIOD;
1309 	/*
1310 	 * Otherwise, we don't need to run the work struct because we're
1311 	 * in the default "idle" state - traffic indication is low (which
1312 	 * also covers the "no traffic" case) and low-latency is disabled
1313 	 * so there's no state that may need to be disabled when there's
1314 	 * no traffic at all.
1315 	 *
1316 	 * Note that this has no impact on the regular scheduling of the
1317 	 * updates triggered by traffic - those happen whenever one of the
1318 	 * two timeouts expire (if there's traffic at all.)
1319 	 */
1320 	return 0;
1321 }
1322 
1323 void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm)
1324 {
1325 	unsigned long ts = jiffies;
1326 	bool handle_uapsd =
1327 		time_after(ts, mvm->tcm.uapsd_nonagg_ts +
1328 			       msecs_to_jiffies(IWL_MVM_UAPSD_NONAGG_PERIOD));
1329 
1330 	spin_lock(&mvm->tcm.lock);
1331 	if (mvm->tcm.paused || !time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) {
1332 		spin_unlock(&mvm->tcm.lock);
1333 		return;
1334 	}
1335 	spin_unlock(&mvm->tcm.lock);
1336 
1337 	if (handle_uapsd && iwl_mvm_has_new_rx_api(mvm)) {
1338 		mutex_lock(&mvm->mutex);
1339 		if (iwl_mvm_request_statistics(mvm, true))
1340 			handle_uapsd = false;
1341 		mutex_unlock(&mvm->mutex);
1342 	}
1343 
1344 	spin_lock(&mvm->tcm.lock);
1345 	/* re-check if somebody else won the recheck race */
1346 	if (!mvm->tcm.paused && time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) {
1347 		/* calculate statistics */
1348 		unsigned long work_delay = iwl_mvm_calc_tcm_stats(mvm, ts,
1349 								  handle_uapsd);
1350 
1351 		/* the memset needs to be visible before the timestamp */
1352 		smp_mb();
1353 		mvm->tcm.ts = ts;
1354 		if (work_delay)
1355 			schedule_delayed_work(&mvm->tcm.work, work_delay);
1356 	}
1357 	spin_unlock(&mvm->tcm.lock);
1358 
1359 	iwl_mvm_tcm_results(mvm);
1360 }
1361 
1362 void iwl_mvm_tcm_work(struct work_struct *work)
1363 {
1364 	struct delayed_work *delayed_work = to_delayed_work(work);
1365 	struct iwl_mvm *mvm = container_of(delayed_work, struct iwl_mvm,
1366 					   tcm.work);
1367 
1368 	iwl_mvm_recalc_tcm(mvm);
1369 }
1370 
1371 void iwl_mvm_pause_tcm(struct iwl_mvm *mvm, bool with_cancel)
1372 {
1373 	spin_lock_bh(&mvm->tcm.lock);
1374 	mvm->tcm.paused = true;
1375 	spin_unlock_bh(&mvm->tcm.lock);
1376 	if (with_cancel)
1377 		cancel_delayed_work_sync(&mvm->tcm.work);
1378 }
1379 
1380 void iwl_mvm_resume_tcm(struct iwl_mvm *mvm)
1381 {
1382 	int mac;
1383 	bool low_latency = false;
1384 
1385 	spin_lock_bh(&mvm->tcm.lock);
1386 	mvm->tcm.ts = jiffies;
1387 	mvm->tcm.ll_ts = jiffies;
1388 	for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) {
1389 		struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
1390 
1391 		memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts));
1392 		memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts));
1393 		memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime));
1394 		memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime));
1395 
1396 		if (mvm->tcm.result.low_latency[mac])
1397 			low_latency = true;
1398 	}
1399 	/* The TCM data needs to be reset before "paused" flag changes */
1400 	smp_mb();
1401 	mvm->tcm.paused = false;
1402 
1403 	/*
1404 	 * if the current load is not low or low latency is active, force
1405 	 * re-evaluation to cover the case of no traffic.
1406 	 */
1407 	if (mvm->tcm.result.global_load > IWL_MVM_TRAFFIC_LOW)
1408 		schedule_delayed_work(&mvm->tcm.work, MVM_TCM_PERIOD);
1409 	else if (low_latency)
1410 		schedule_delayed_work(&mvm->tcm.work, MVM_LL_PERIOD);
1411 
1412 	spin_unlock_bh(&mvm->tcm.lock);
1413 }
1414 
1415 void iwl_mvm_tcm_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1416 {
1417 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1418 
1419 	INIT_DELAYED_WORK(&mvmvif->uapsd_nonagg_detected_wk,
1420 			  iwl_mvm_tcm_uapsd_nonagg_detected_wk);
1421 }
1422 
1423 void iwl_mvm_tcm_rm_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1424 {
1425 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1426 
1427 	cancel_delayed_work_sync(&mvmvif->uapsd_nonagg_detected_wk);
1428 }
1429 
1430 u32 iwl_mvm_get_systime(struct iwl_mvm *mvm)
1431 {
1432 	u32 reg_addr = DEVICE_SYSTEM_TIME_REG;
1433 
1434 	if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000 &&
1435 	    mvm->trans->cfg->gp2_reg_addr)
1436 		reg_addr = mvm->trans->cfg->gp2_reg_addr;
1437 
1438 	return iwl_read_prph(mvm->trans, reg_addr);
1439 }
1440 
1441 void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime)
1442 {
1443 	bool ps_disabled;
1444 
1445 	lockdep_assert_held(&mvm->mutex);
1446 
1447 	/* Disable power save when reading GP2 */
1448 	ps_disabled = mvm->ps_disabled;
1449 	if (!ps_disabled) {
1450 		mvm->ps_disabled = true;
1451 		iwl_mvm_power_update_device(mvm);
1452 	}
1453 
1454 	*gp2 = iwl_mvm_get_systime(mvm);
1455 	*boottime = ktime_get_boottime_ns();
1456 
1457 	if (!ps_disabled) {
1458 		mvm->ps_disabled = ps_disabled;
1459 		iwl_mvm_power_update_device(mvm);
1460 	}
1461 }
1462