1 /******************************************************************************
2  *
3  * This file is provided under a dual BSD/GPLv2 license.  When using or
4  * redistributing this file, you may do so under either license.
5  *
6  * GPL LICENSE SUMMARY
7  *
8  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of version 2 of the GNU General Public License as
14  * published by the Free Software Foundation.
15  *
16  * This program is distributed in the hope that it will be useful, but
17  * WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19  * General Public License for more details.
20  *
21  * You should have received a copy of the GNU General Public License
22  * along with this program; if not, write to the Free Software
23  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
24  * USA
25  *
26  * The full GNU General Public License is included in this distribution
27  * in the file called COPYING.
28  *
29  * Contact Information:
30  *  Intel Linux Wireless <linuxwifi@intel.com>
31  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
32  *
33  * BSD LICENSE
34  *
35  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
36  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
37  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
38  * All rights reserved.
39  *
40  * Redistribution and use in source and binary forms, with or without
41  * modification, are permitted provided that the following conditions
42  * are met:
43  *
44  *  * Redistributions of source code must retain the above copyright
45  *    notice, this list of conditions and the following disclaimer.
46  *  * Redistributions in binary form must reproduce the above copyright
47  *    notice, this list of conditions and the following disclaimer in
48  *    the documentation and/or other materials provided with the
49  *    distribution.
50  *  * Neither the name Intel Corporation nor the names of its
51  *    contributors may be used to endorse or promote products derived
52  *    from this software without specific prior written permission.
53  *
54  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
55  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
56  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
57  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
58  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
59  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
60  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
64  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65  *
66  *****************************************************************************/
67 #include <linux/module.h>
68 #include <linux/vmalloc.h>
69 #include <net/mac80211.h>
70 
71 #include "fw/notif-wait.h"
72 #include "iwl-trans.h"
73 #include "iwl-op-mode.h"
74 #include "fw/img.h"
75 #include "iwl-debug.h"
76 #include "iwl-drv.h"
77 #include "iwl-modparams.h"
78 #include "mvm.h"
79 #include "iwl-phy-db.h"
80 #include "iwl-eeprom-parse.h"
81 #include "iwl-csr.h"
82 #include "iwl-io.h"
83 #include "iwl-prph.h"
84 #include "rs.h"
85 #include "fw-api-scan.h"
86 #include "time-event.h"
87 #include "fw-dbg.h"
88 #include "fw-api.h"
90 
91 #define DRV_DESCRIPTION	"The new Intel(R) wireless AGN driver for Linux"
92 MODULE_DESCRIPTION(DRV_DESCRIPTION);
93 MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
94 MODULE_LICENSE("GPL");
95 
96 static const struct iwl_op_mode_ops iwl_mvm_ops;
97 static const struct iwl_op_mode_ops iwl_mvm_ops_mq;
98 
99 struct iwl_mvm_mod_params iwlmvm_mod_params = {
100 	.power_scheme = IWL_POWER_SCHEME_BPS,
101 	.tfd_q_hang_detect = true
102 	/* rest of fields are 0 by default */
103 };
104 
105 module_param_named(init_dbg, iwlmvm_mod_params.init_dbg, bool, S_IRUGO);
106 MODULE_PARM_DESC(init_dbg,
		 "set to true to debug an ASSERT in INIT fw (default: false)");
108 module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, S_IRUGO);
109 MODULE_PARM_DESC(power_scheme,
110 		 "power management scheme: 1-active, 2-balanced, 3-low power, default: 2");
111 module_param_named(tfd_q_hang_detect, iwlmvm_mod_params.tfd_q_hang_detect,
112 		   bool, S_IRUGO);
113 MODULE_PARM_DESC(tfd_q_hang_detect,
		 "TFD queues hang detection (default: true)");
115 
116 /*
117  * module init and exit functions
118  */
119 static int __init iwl_mvm_init(void)
120 {
121 	int ret;
122 
123 	ret = iwl_mvm_rate_control_register();
124 	if (ret) {
125 		pr_err("Unable to register rate control algorithm: %d\n", ret);
126 		return ret;
127 	}
128 
129 	ret = iwl_opmode_register("iwlmvm", &iwl_mvm_ops);
130 
131 	if (ret) {
132 		pr_err("Unable to register MVM op_mode: %d\n", ret);
133 		iwl_mvm_rate_control_unregister();
134 	}
135 
136 	return ret;
137 }
138 module_init(iwl_mvm_init);
139 
140 static void __exit iwl_mvm_exit(void)
141 {
142 	iwl_opmode_deregister("iwlmvm");
143 	iwl_mvm_rate_control_unregister();
144 }
145 module_exit(iwl_mvm_exit);
146 
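/*
 * op_mode .nic_config callback: program CSR_HW_IF_CONFIG_REG with the MAC
 * step/dash taken from the HW revision and the radio type/step/dash taken
 * from the firmware's PHY configuration.
 */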
147 static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
148 {
149 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
150 	u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash;
151 	u32 reg_val = 0;
152 	u32 phy_config = iwl_mvm_get_phy_config(mvm);
153 
154 	radio_cfg_type = (phy_config & FW_PHY_CFG_RADIO_TYPE) >>
155 			 FW_PHY_CFG_RADIO_TYPE_POS;
156 	radio_cfg_step = (phy_config & FW_PHY_CFG_RADIO_STEP) >>
157 			 FW_PHY_CFG_RADIO_STEP_POS;
158 	radio_cfg_dash = (phy_config & FW_PHY_CFG_RADIO_DASH) >>
159 			 FW_PHY_CFG_RADIO_DASH_POS;
160 
161 	/* SKU control */
162 	reg_val |= CSR_HW_REV_STEP(mvm->trans->hw_rev) <<
163 				CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
164 	reg_val |= CSR_HW_REV_DASH(mvm->trans->hw_rev) <<
165 				CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
166 
167 	/* radio configuration */
168 	reg_val |= radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
169 	reg_val |= radio_cfg_step << CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
170 	reg_val |= radio_cfg_dash << CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
171 
172 	WARN_ON((radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE) &
173 		 ~CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE);
174 
	/*
	 * TODO: Bits 7-8 of CSR in 8000 HW family and higher set the ADC
	 * sampling, and shouldn't be set to any non-zero value.
	 * The same is supposed to be true of the other HW, but clearing
	 * them on earlier devices (such as the 7260) causes automatic
	 * tests to fail on seemingly unrelated errors. This needs further
	 * investigation, but for now keep the two cases separate.
	 */
183 	if (mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_8000)
184 		reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI;
185 
186 	iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG,
187 				CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
188 				CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
189 				CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
190 				CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
191 				CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
192 				CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
193 				CSR_HW_IF_CONFIG_REG_BIT_MAC_SI,
194 				reg_val);
195 
196 	IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
197 		       radio_cfg_step, radio_cfg_dash);
198 
	/*
	 * W/A: the NIC gets stuck in a reset state after an early PCIe
	 * power off (PCIe power is lost before PERST# is asserted),
	 * causing the ME FW to lose ownership and be unable to regain it.
	 */
204 	if (!mvm->trans->cfg->apmg_not_supported)
205 		iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
206 				       APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
207 				       ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
208 }
209 
/**
 * enum iwl_rx_handler_context - context for Rx handler
 * @RX_HANDLER_SYNC: the handler runs directly in the Rx path, which
 *	can't acquire mvm->mutex.
 * @RX_HANDLER_ASYNC_LOCKED: if the handler needs to hold mvm->mutex
 *	(and only in this case!), it should be set as ASYNC. In that case,
 *	it will be called from a worker with mvm->mutex held.
 * @RX_HANDLER_ASYNC_UNLOCKED: in case the handler needs to take the
 *	mutex itself, it will be called from a worker without mvm->mutex held.
 */
220 enum iwl_rx_handler_context {
221 	RX_HANDLER_SYNC,
222 	RX_HANDLER_ASYNC_LOCKED,
223 	RX_HANDLER_ASYNC_UNLOCKED,
224 };
225 
/**
 * struct iwl_rx_handlers - handler for FW notification
 * @cmd_id: command id
 * @context: see &iwl_rx_handler_context
 * @fn: the function called when the notification is received
 */
232 struct iwl_rx_handlers {
233 	u16 cmd_id;
234 	enum iwl_rx_handler_context context;
235 	void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
236 };
237 
238 #define RX_HANDLER(_cmd_id, _fn, _context)	\
239 	{ .cmd_id = _cmd_id, .fn = _fn, .context = _context }
240 #define RX_HANDLER_GRP(_grp, _cmd, _fn, _context)	\
241 	{ .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .context = _context }
242 
/*
 * Handlers for fw notifications
 * Convention: RX_HANDLER(CMD_NAME, iwl_mvm_rx_CMD_NAME, <context>)
 * This list should be in order of frequency for performance purposes.
 *
 * The handler can run in one of three contexts, see &iwl_rx_handler_context
 */
250 static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
251 	RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, RX_HANDLER_SYNC),
252 	RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, RX_HANDLER_SYNC),
253 
254 	RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif,
255 		   RX_HANDLER_ASYNC_LOCKED),
256 	RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif,
257 		   RX_HANDLER_ASYNC_LOCKED),
258 	RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics,
259 		   RX_HANDLER_ASYNC_LOCKED),
260 	RX_HANDLER(ANTENNA_COUPLING_NOTIFICATION,
261 		   iwl_mvm_rx_ant_coupling_notif, RX_HANDLER_ASYNC_LOCKED),
262 
263 	RX_HANDLER(BA_WINDOW_STATUS_NOTIFICATION_ID,
264 		   iwl_mvm_window_status_notif, RX_HANDLER_SYNC),
265 
266 	RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif,
267 		   RX_HANDLER_SYNC),
268 	RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc,
269 		   RX_HANDLER_ASYNC_LOCKED),
270 
271 	RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, RX_HANDLER_SYNC),
272 
273 	RX_HANDLER(SCAN_ITERATION_COMPLETE,
274 		   iwl_mvm_rx_lmac_scan_iter_complete_notif, RX_HANDLER_SYNC),
275 	RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
276 		   iwl_mvm_rx_lmac_scan_complete_notif,
277 		   RX_HANDLER_ASYNC_LOCKED),
278 	RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_match_found,
279 		   RX_HANDLER_SYNC),
280 	RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif,
281 		   RX_HANDLER_ASYNC_LOCKED),
282 	RX_HANDLER(SCAN_ITERATION_COMPLETE_UMAC,
283 		   iwl_mvm_rx_umac_scan_iter_complete_notif, RX_HANDLER_SYNC),
284 
285 	RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif,
286 		   RX_HANDLER_SYNC),
287 
288 	RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif,
289 		   RX_HANDLER_SYNC),
290 
291 	RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, RX_HANDLER_SYNC),
292 	RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION,
293 		   iwl_mvm_power_uapsd_misbehaving_ap_notif, RX_HANDLER_SYNC),
294 	RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif,
295 		   RX_HANDLER_ASYNC_LOCKED),
296 	RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
297 		       iwl_mvm_temp_notif, RX_HANDLER_ASYNC_UNLOCKED),
298 	RX_HANDLER_GRP(PHY_OPS_GROUP, CT_KILL_NOTIFICATION,
299 		       iwl_mvm_ct_kill_notif, RX_HANDLER_SYNC),
300 
301 	RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif,
302 		   RX_HANDLER_ASYNC_LOCKED),
303 	RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif,
304 		   RX_HANDLER_SYNC),
305 	RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler,
306 		   RX_HANDLER_ASYNC_LOCKED),
307 	RX_HANDLER_GRP(DEBUG_GROUP, MFU_ASSERT_DUMP_NTF,
308 		       iwl_mvm_mfu_assert_dump_notif, RX_HANDLER_SYNC),
309 	RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF,
310 		       iwl_mvm_rx_stored_beacon_notif, RX_HANDLER_SYNC),
311 	RX_HANDLER_GRP(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF,
312 		       iwl_mvm_mu_mimo_grp_notif, RX_HANDLER_SYNC),
313 	RX_HANDLER_GRP(DATA_PATH_GROUP, STA_PM_NOTIF,
314 		       iwl_mvm_sta_pm_notif, RX_HANDLER_SYNC),
315 };
316 #undef RX_HANDLER
317 #undef RX_HANDLER_GRP
318 
319 /* Please keep this array *SORTED* by hex value.
320  * Access is done through binary search
321  */
322 static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
323 	HCMD_NAME(MVM_ALIVE),
324 	HCMD_NAME(REPLY_ERROR),
325 	HCMD_NAME(ECHO_CMD),
326 	HCMD_NAME(INIT_COMPLETE_NOTIF),
327 	HCMD_NAME(PHY_CONTEXT_CMD),
328 	HCMD_NAME(DBG_CFG),
329 	HCMD_NAME(ANTENNA_COUPLING_NOTIFICATION),
330 	HCMD_NAME(SCAN_CFG_CMD),
331 	HCMD_NAME(SCAN_REQ_UMAC),
332 	HCMD_NAME(SCAN_ABORT_UMAC),
333 	HCMD_NAME(SCAN_COMPLETE_UMAC),
334 	HCMD_NAME(TOF_CMD),
335 	HCMD_NAME(TOF_NOTIFICATION),
336 	HCMD_NAME(BA_WINDOW_STATUS_NOTIFICATION_ID),
337 	HCMD_NAME(ADD_STA_KEY),
338 	HCMD_NAME(ADD_STA),
339 	HCMD_NAME(REMOVE_STA),
340 	HCMD_NAME(FW_GET_ITEM_CMD),
341 	HCMD_NAME(TX_CMD),
342 	HCMD_NAME(SCD_QUEUE_CFG),
343 	HCMD_NAME(TXPATH_FLUSH),
344 	HCMD_NAME(MGMT_MCAST_KEY),
345 	HCMD_NAME(WEP_KEY),
346 	HCMD_NAME(SHARED_MEM_CFG),
347 	HCMD_NAME(TDLS_CHANNEL_SWITCH_CMD),
348 	HCMD_NAME(MAC_CONTEXT_CMD),
349 	HCMD_NAME(TIME_EVENT_CMD),
350 	HCMD_NAME(TIME_EVENT_NOTIFICATION),
351 	HCMD_NAME(BINDING_CONTEXT_CMD),
352 	HCMD_NAME(TIME_QUOTA_CMD),
353 	HCMD_NAME(NON_QOS_TX_COUNTER_CMD),
354 	HCMD_NAME(LQ_CMD),
355 	HCMD_NAME(FW_PAGING_BLOCK_CMD),
356 	HCMD_NAME(SCAN_OFFLOAD_REQUEST_CMD),
357 	HCMD_NAME(SCAN_OFFLOAD_ABORT_CMD),
358 	HCMD_NAME(HOT_SPOT_CMD),
359 	HCMD_NAME(SCAN_OFFLOAD_PROFILES_QUERY_CMD),
360 	HCMD_NAME(BT_COEX_UPDATE_CORUN_LUT),
361 	HCMD_NAME(BT_COEX_UPDATE_REDUCED_TXP),
362 	HCMD_NAME(BT_COEX_CI),
363 	HCMD_NAME(PHY_CONFIGURATION_CMD),
364 	HCMD_NAME(CALIB_RES_NOTIF_PHY_DB),
365 	HCMD_NAME(PHY_DB_CMD),
366 	HCMD_NAME(SCAN_OFFLOAD_COMPLETE),
367 	HCMD_NAME(SCAN_OFFLOAD_UPDATE_PROFILES_CMD),
368 	HCMD_NAME(POWER_TABLE_CMD),
369 	HCMD_NAME(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
370 	HCMD_NAME(REPLY_THERMAL_MNG_BACKOFF),
371 	HCMD_NAME(DC2DC_CONFIG_CMD),
372 	HCMD_NAME(NVM_ACCESS_CMD),
373 	HCMD_NAME(BEACON_NOTIFICATION),
374 	HCMD_NAME(BEACON_TEMPLATE_CMD),
375 	HCMD_NAME(TX_ANT_CONFIGURATION_CMD),
376 	HCMD_NAME(BT_CONFIG),
377 	HCMD_NAME(STATISTICS_CMD),
378 	HCMD_NAME(STATISTICS_NOTIFICATION),
379 	HCMD_NAME(EOSP_NOTIFICATION),
380 	HCMD_NAME(REDUCE_TX_POWER_CMD),
381 	HCMD_NAME(CARD_STATE_NOTIFICATION),
382 	HCMD_NAME(MISSED_BEACONS_NOTIFICATION),
383 	HCMD_NAME(TDLS_CONFIG_CMD),
384 	HCMD_NAME(MAC_PM_POWER_TABLE),
385 	HCMD_NAME(TDLS_CHANNEL_SWITCH_NOTIFICATION),
386 	HCMD_NAME(MFUART_LOAD_NOTIFICATION),
387 	HCMD_NAME(RSS_CONFIG_CMD),
388 	HCMD_NAME(SCAN_ITERATION_COMPLETE_UMAC),
389 	HCMD_NAME(REPLY_RX_PHY_CMD),
390 	HCMD_NAME(REPLY_RX_MPDU_CMD),
391 	HCMD_NAME(BA_NOTIF),
392 	HCMD_NAME(MCC_UPDATE_CMD),
393 	HCMD_NAME(MCC_CHUB_UPDATE_CMD),
394 	HCMD_NAME(MARKER_CMD),
395 	HCMD_NAME(BT_PROFILE_NOTIFICATION),
396 	HCMD_NAME(BCAST_FILTER_CMD),
397 	HCMD_NAME(MCAST_FILTER_CMD),
398 	HCMD_NAME(REPLY_SF_CFG_CMD),
399 	HCMD_NAME(REPLY_BEACON_FILTERING_CMD),
400 	HCMD_NAME(D3_CONFIG_CMD),
401 	HCMD_NAME(PROT_OFFLOAD_CONFIG_CMD),
402 	HCMD_NAME(OFFLOADS_QUERY_CMD),
403 	HCMD_NAME(REMOTE_WAKE_CONFIG_CMD),
404 	HCMD_NAME(MATCH_FOUND_NOTIFICATION),
405 	HCMD_NAME(DTS_MEASUREMENT_NOTIFICATION),
406 	HCMD_NAME(WOWLAN_PATTERNS),
407 	HCMD_NAME(WOWLAN_CONFIGURATION),
408 	HCMD_NAME(WOWLAN_TSC_RSC_PARAM),
409 	HCMD_NAME(WOWLAN_TKIP_PARAM),
410 	HCMD_NAME(WOWLAN_KEK_KCK_MATERIAL),
411 	HCMD_NAME(WOWLAN_GET_STATUSES),
412 	HCMD_NAME(SCAN_ITERATION_COMPLETE),
413 	HCMD_NAME(D0I3_END_CMD),
414 	HCMD_NAME(LTR_CONFIG),
415 };
416 
417 /* Please keep this array *SORTED* by hex value.
418  * Access is done through binary search
419  */
420 static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
421 	HCMD_NAME(SHARED_MEM_CFG_CMD),
422 	HCMD_NAME(INIT_EXTENDED_CFG_CMD),
423 };
424 
425 /* Please keep this array *SORTED* by hex value.
426  * Access is done through binary search
427  */
428 static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = {
429 	HCMD_NAME(LINK_QUALITY_MEASUREMENT_CMD),
430 	HCMD_NAME(LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF),
431 	HCMD_NAME(CHANNEL_SWITCH_NOA_NOTIF),
432 };
433 
434 /* Please keep this array *SORTED* by hex value.
435  * Access is done through binary search
436  */
437 static const struct iwl_hcmd_names iwl_mvm_phy_names[] = {
438 	HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
439 	HCMD_NAME(CTDP_CONFIG_CMD),
440 	HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD),
441 	HCMD_NAME(GEO_TX_POWER_LIMIT),
442 	HCMD_NAME(CT_KILL_NOTIFICATION),
443 	HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE),
444 };
445 
446 /* Please keep this array *SORTED* by hex value.
447  * Access is done through binary search
448  */
449 static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
450 	HCMD_NAME(DQA_ENABLE_CMD),
451 	HCMD_NAME(UPDATE_MU_GROUPS_CMD),
452 	HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
453 	HCMD_NAME(STA_PM_NOTIF),
454 	HCMD_NAME(MU_GROUP_MGMT_NOTIF),
455 	HCMD_NAME(RX_QUEUES_NOTIFICATION),
456 };
457 
458 /* Please keep this array *SORTED* by hex value.
459  * Access is done through binary search
460  */
461 static const struct iwl_hcmd_names iwl_mvm_debug_names[] = {
462 	HCMD_NAME(MFU_ASSERT_DUMP_NTF),
463 };
464 
465 /* Please keep this array *SORTED* by hex value.
466  * Access is done through binary search
467  */
468 static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = {
469 	HCMD_NAME(STORED_BEACON_NTF),
470 };
471 
472 /* Please keep this array *SORTED* by hex value.
473  * Access is done through binary search
474  */
475 static const struct iwl_hcmd_names iwl_mvm_regulatory_and_nvm_names[] = {
476 	HCMD_NAME(NVM_ACCESS_COMPLETE),
477 	HCMD_NAME(NVM_GET_INFO),
478 };
479 
480 static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
481 	[LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
482 	[LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
483 	[SYSTEM_GROUP] = HCMD_ARR(iwl_mvm_system_names),
484 	[MAC_CONF_GROUP] = HCMD_ARR(iwl_mvm_mac_conf_names),
485 	[PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names),
486 	[DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names),
487 	[PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
488 	[REGULATORY_AND_NVM_GROUP] =
489 		HCMD_ARR(iwl_mvm_regulatory_and_nvm_names),
490 };
491 
/* this forward declaration avoids having to export the function */
493 static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
494 static void iwl_mvm_d0i3_exit_work(struct work_struct *wk);
495 
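/*
 * Walk the per-device Tx power backoff table and return the backoff that
 * applies to the platform's default power limit (0 if none applies).
 */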
496 static u32 calc_min_backoff(struct iwl_trans *trans, const struct iwl_cfg *cfg)
497 {
498 	const struct iwl_pwr_tx_backoff *pwr_tx_backoff = cfg->pwr_tx_backoffs;
499 
500 	if (!pwr_tx_backoff)
501 		return 0;
502 
503 	while (pwr_tx_backoff->pwr) {
504 		if (trans->dflt_pwr_limit >= pwr_tx_backoff->pwr)
505 			return pwr_tx_backoff->backoff;
506 
507 		pwr_tx_backoff++;
508 	}
509 
510 	return 0;
511 }
512 
513 static void iwl_mvm_fw_error_dump_wk(struct work_struct *work);
514 
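/*
 * Delayed work: re-enable Tx for all stations of the vif that was blocked
 * around a channel switch, then clear the csa_tx_blocked_vif pointer.
 */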
515 static void iwl_mvm_tx_unblock_dwork(struct work_struct *work)
516 {
517 	struct iwl_mvm *mvm =
518 		container_of(work, struct iwl_mvm, cs_tx_unblock_dwork.work);
519 	struct ieee80211_vif *tx_blocked_vif;
520 	struct iwl_mvm_vif *mvmvif;
521 
522 	mutex_lock(&mvm->mutex);
523 
524 	tx_blocked_vif =
525 		rcu_dereference_protected(mvm->csa_tx_blocked_vif,
526 					  lockdep_is_held(&mvm->mutex));
527 
528 	if (!tx_blocked_vif)
529 		goto unlock;
530 
531 	mvmvif = iwl_mvm_vif_from_mac80211(tx_blocked_vif);
532 	iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false);
533 	RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
534 unlock:
535 	mutex_unlock(&mvm->mutex);
536 }
537 
538 static struct iwl_op_mode *
539 iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
540 		      const struct iwl_fw *fw, struct dentry *dbgfs_dir)
541 {
542 	struct ieee80211_hw *hw;
543 	struct iwl_op_mode *op_mode;
544 	struct iwl_mvm *mvm;
545 	struct iwl_trans_config trans_cfg = {};
546 	static const u8 no_reclaim_cmds[] = {
547 		TX_CMD,
548 	};
549 	int err, scan_size;
550 	u32 min_backoff;
551 
552 	/*
553 	 * We use IWL_MVM_STATION_COUNT to check the validity of the station
554 	 * index all over the driver - check that its value corresponds to the
555 	 * array size.
556 	 */
557 	BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) != IWL_MVM_STATION_COUNT);
558 
559 	/********************************
560 	 * 1. Allocating and configuring HW data
561 	 ********************************/
562 	hw = ieee80211_alloc_hw(sizeof(struct iwl_op_mode) +
563 				sizeof(struct iwl_mvm),
564 				&iwl_mvm_hw_ops);
565 	if (!hw)
566 		return NULL;
567 
568 	if (cfg->max_rx_agg_size)
569 		hw->max_rx_aggregation_subframes = cfg->max_rx_agg_size;
570 
571 	if (cfg->max_tx_agg_size)
572 		hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size;
573 
574 	op_mode = hw->priv;
575 
576 	mvm = IWL_OP_MODE_GET_MVM(op_mode);
577 	mvm->dev = trans->dev;
578 	mvm->trans = trans;
579 	mvm->cfg = cfg;
580 	mvm->fw = fw;
581 	mvm->hw = hw;
582 
583 	mvm->init_status = 0;
584 
585 	if (iwl_mvm_has_new_rx_api(mvm)) {
586 		op_mode->ops = &iwl_mvm_ops_mq;
587 		trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_desc);
588 	} else {
589 		op_mode->ops = &iwl_mvm_ops;
590 		trans->rx_mpdu_cmd_hdr_size =
591 			sizeof(struct iwl_rx_mpdu_res_start);
592 
593 		if (WARN_ON(trans->num_rx_queues > 1))
594 			goto out_free;
595 	}
596 
597 	mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0;
598 
599 	if (!iwl_mvm_is_dqa_supported(mvm)) {
600 		mvm->last_agg_queue = mvm->cfg->base_params->num_of_queues - 1;
601 
602 		if (mvm->cfg->base_params->num_of_queues == 16) {
603 			mvm->aux_queue = 11;
604 			mvm->first_agg_queue = 12;
605 			BUILD_BUG_ON(BITS_PER_BYTE *
606 				     sizeof(mvm->hw_queue_to_mac80211[0]) < 12);
607 		} else {
608 			mvm->aux_queue = 15;
609 			mvm->first_agg_queue = 16;
610 			BUILD_BUG_ON(BITS_PER_BYTE *
611 				     sizeof(mvm->hw_queue_to_mac80211[0]) < 16);
612 		}
613 	} else {
614 		mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE;
615 		mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
616 		mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
617 		mvm->first_agg_queue = IWL_MVM_DQA_MIN_DATA_QUEUE;
618 		mvm->last_agg_queue = IWL_MVM_DQA_MAX_DATA_QUEUE;
619 	}
620 	mvm->sf_state = SF_UNINIT;
621 	if (iwl_mvm_has_new_tx_api(mvm))
622 		mvm->cur_ucode = IWL_UCODE_REGULAR;
623 	else
624 		mvm->cur_ucode = IWL_UCODE_INIT;
625 	mvm->drop_bcn_ap_mode = true;
626 
627 	mutex_init(&mvm->mutex);
628 	mutex_init(&mvm->d0i3_suspend_mutex);
629 	spin_lock_init(&mvm->async_handlers_lock);
630 	INIT_LIST_HEAD(&mvm->time_event_list);
631 	INIT_LIST_HEAD(&mvm->aux_roc_te_list);
632 	INIT_LIST_HEAD(&mvm->async_handlers_list);
633 	spin_lock_init(&mvm->time_event_lock);
634 	spin_lock_init(&mvm->queue_info_lock);
635 
636 	INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
637 	INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
638 	INIT_WORK(&mvm->sta_drained_wk, iwl_mvm_sta_drained_wk);
639 	INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
640 	INIT_DELAYED_WORK(&mvm->fw_dump_wk, iwl_mvm_fw_error_dump_wk);
641 	INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
642 	INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk);
643 	INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
644 
645 	spin_lock_init(&mvm->d0i3_tx_lock);
646 	spin_lock_init(&mvm->refs_lock);
647 	skb_queue_head_init(&mvm->d0i3_tx);
648 	init_waitqueue_head(&mvm->d0i3_exit_waitq);
649 	init_waitqueue_head(&mvm->rx_sync_waitq);
650 
651 	atomic_set(&mvm->queue_sync_counter, 0);
652 
653 	SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev);
654 
655 	INIT_DELAYED_WORK(&mvm->cs_tx_unblock_dwork, iwl_mvm_tx_unblock_dwork);
656 
657 	/*
658 	 * Populate the state variables that the transport layer needs
659 	 * to know about.
660 	 */
661 	trans_cfg.op_mode = op_mode;
662 	trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
663 	trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
664 	switch (iwlwifi_mod_params.amsdu_size) {
665 	case IWL_AMSDU_DEF:
666 	case IWL_AMSDU_4K:
667 		trans_cfg.rx_buf_size = IWL_AMSDU_4K;
668 		break;
669 	case IWL_AMSDU_8K:
670 		trans_cfg.rx_buf_size = IWL_AMSDU_8K;
671 		break;
672 	case IWL_AMSDU_12K:
673 		trans_cfg.rx_buf_size = IWL_AMSDU_12K;
674 		break;
675 	default:
676 		pr_err("%s: Unsupported amsdu_size: %d\n", KBUILD_MODNAME,
677 		       iwlwifi_mod_params.amsdu_size);
678 		trans_cfg.rx_buf_size = IWL_AMSDU_4K;
679 	}
680 
681 	/* the hardware splits the A-MSDU */
682 	if (mvm->cfg->mq_rx_supported)
683 		trans_cfg.rx_buf_size = IWL_AMSDU_4K;
684 
685 	trans->wide_cmd_header = true;
686 	trans_cfg.bc_table_dword = true;
687 
688 	trans_cfg.command_groups = iwl_mvm_groups;
689 	trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);
690 
691 	if (iwl_mvm_is_dqa_supported(mvm))
692 		trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE;
693 	else
694 		trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE;
695 	trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD;
696 	trans_cfg.scd_set_active = true;
697 
698 	trans_cfg.cb_data_offs = offsetof(struct ieee80211_tx_info,
699 					  driver_data[2]);
700 
701 	trans_cfg.sdio_adma_addr = fw->sdio_adma_addr;
702 	trans_cfg.sw_csum_tx = IWL_MVM_SW_TX_CSUM_OFFLOAD;
703 
704 	/* Set a short watchdog for the command queue */
705 	trans_cfg.cmd_q_wdg_timeout =
706 		iwl_mvm_get_wd_timeout(mvm, NULL, false, true);
707 
708 	snprintf(mvm->hw->wiphy->fw_version,
709 		 sizeof(mvm->hw->wiphy->fw_version),
710 		 "%s", fw->fw_version);
711 
712 	/* Configure transport layer */
713 	iwl_trans_configure(mvm->trans, &trans_cfg);
714 
715 	trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
716 	trans->dbg_dest_tlv = mvm->fw->dbg_dest_tlv;
717 	trans->dbg_dest_reg_num = mvm->fw->dbg_dest_reg_num;
718 	memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv,
719 	       sizeof(trans->dbg_conf_tlv));
720 	trans->dbg_trigger_tlv = mvm->fw->dbg_trigger_tlv;
721 
722 	/* set up notification wait support */
723 	iwl_notification_wait_init(&mvm->notif_wait);
724 
725 	/* Init phy db */
726 	mvm->phy_db = iwl_phy_db_init(trans);
727 	if (!mvm->phy_db) {
728 		IWL_ERR(mvm, "Cannot init phy_db\n");
729 		goto out_free;
730 	}
731 
732 	IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
733 		 mvm->cfg->name, mvm->trans->hw_rev);
734 
735 	if (iwlwifi_mod_params.nvm_file)
736 		mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
737 	else
738 		IWL_DEBUG_EEPROM(mvm->trans->dev,
739 				 "working without external nvm file\n");
740 
741 	err = iwl_trans_start_hw(mvm->trans);
742 	if (err)
743 		goto out_free;
744 
745 	mutex_lock(&mvm->mutex);
746 	iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
747 	err = iwl_run_init_mvm_ucode(mvm, true);
748 	if (!iwlmvm_mod_params.init_dbg)
749 		iwl_mvm_stop_device(mvm);
750 	iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);
751 	mutex_unlock(&mvm->mutex);
752 	/* returns 0 if successful, 1 if success but in rfkill */
753 	if (err < 0) {
754 		IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
755 		goto out_free;
756 	}
757 
758 	scan_size = iwl_mvm_scan_size(mvm);
759 
760 	mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL);
761 	if (!mvm->scan_cmd)
762 		goto out_free;
763 
764 	/* Set EBS as successful as long as not stated otherwise by the FW. */
765 	mvm->last_ebs_successful = true;
766 
767 	err = iwl_mvm_mac_setup_register(mvm);
768 	if (err)
769 		goto out_free;
770 	mvm->hw_registered = true;
771 
772 	min_backoff = calc_min_backoff(trans, cfg);
773 	iwl_mvm_thermal_initialize(mvm, min_backoff);
774 
775 	err = iwl_mvm_dbgfs_register(mvm, dbgfs_dir);
776 	if (err)
777 		goto out_unregister;
778 
779 	if (!iwl_mvm_has_new_rx_stats_api(mvm))
780 		memset(&mvm->rx_stats_v3, 0,
781 		       sizeof(struct mvm_statistics_rx_v3));
782 	else
783 		memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx));
784 
785 	/* The transport always starts with a taken reference, we can
786 	 * release it now if d0i3 is supported */
787 	if (iwl_mvm_is_d0i3_supported(mvm))
788 		iwl_trans_unref(mvm->trans);
789 
790 	iwl_mvm_tof_init(mvm);
791 
792 	return op_mode;
793 
794  out_unregister:
795 	if (iwlmvm_mod_params.init_dbg)
796 		return op_mode;
797 
798 	ieee80211_unregister_hw(mvm->hw);
799 	mvm->hw_registered = false;
800 	iwl_mvm_leds_exit(mvm);
801 	iwl_mvm_thermal_exit(mvm);
802  out_free:
803 	flush_delayed_work(&mvm->fw_dump_wk);
804 
805 	if (iwlmvm_mod_params.init_dbg)
806 		return op_mode;
807 	iwl_phy_db_free(mvm->phy_db);
808 	kfree(mvm->scan_cmd);
809 	iwl_trans_op_mode_leave(trans);
810 
811 	ieee80211_free_hw(mvm->hw);
812 	return NULL;
813 }
814 
815 static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
816 {
817 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
818 	int i;
819 
820 	/* If d0i3 is supported, we have released the reference that
821 	 * the transport started with, so we should take it back now
822 	 * that we are leaving.
823 	 */
824 	if (iwl_mvm_is_d0i3_supported(mvm))
825 		iwl_trans_ref(mvm->trans);
826 
827 	iwl_mvm_leds_exit(mvm);
828 
829 	iwl_mvm_thermal_exit(mvm);
830 
831 	if (mvm->init_status & IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE) {
832 		ieee80211_unregister_hw(mvm->hw);
833 		mvm->init_status &= ~IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE;
834 	}
835 
836 	kfree(mvm->scan_cmd);
837 	kfree(mvm->mcast_filter_cmd);
838 	mvm->mcast_filter_cmd = NULL;
839 
840 #if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS)
841 	kfree(mvm->d3_resume_sram);
842 #endif
843 
844 	iwl_trans_op_mode_leave(mvm->trans);
845 
846 	iwl_phy_db_free(mvm->phy_db);
847 	mvm->phy_db = NULL;
848 
849 	kfree(mvm->nvm_data);
850 	for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
851 		kfree(mvm->nvm_sections[i].data);
852 
853 	iwl_mvm_tof_clean(mvm);
854 
855 	mutex_destroy(&mvm->mutex);
856 	mutex_destroy(&mvm->d0i3_suspend_mutex);
857 
858 	ieee80211_free_hw(mvm->hw);
859 }
860 
861 struct iwl_async_handler_entry {
862 	struct list_head list;
863 	struct iwl_rx_cmd_buffer rxb;
864 	enum iwl_rx_handler_context context;
865 	void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
866 };
867 
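/* Drop all pending async notifications without running their handlers. */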
868 void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm)
869 {
870 	struct iwl_async_handler_entry *entry, *tmp;
871 
872 	spin_lock_bh(&mvm->async_handlers_lock);
873 	list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) {
874 		iwl_free_rxb(&entry->rxb);
875 		list_del(&entry->list);
876 		kfree(entry);
877 	}
878 	spin_unlock_bh(&mvm->async_handlers_lock);
879 }
880 
881 static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
882 {
883 	struct iwl_mvm *mvm =
884 		container_of(wk, struct iwl_mvm, async_handlers_wk);
885 	struct iwl_async_handler_entry *entry, *tmp;
886 	LIST_HEAD(local_list);
887 
888 	/* Ensure that we are not in stop flow (check iwl_mvm_mac_stop) */
889 
	/*
	 * Synchronize with the Rx path under the lock: move all entries from
	 * the shared list to a local one (handled lock-free), then run them.
	 */
894 	spin_lock_bh(&mvm->async_handlers_lock);
895 	list_splice_init(&mvm->async_handlers_list, &local_list);
896 	spin_unlock_bh(&mvm->async_handlers_lock);
897 
898 	list_for_each_entry_safe(entry, tmp, &local_list, list) {
899 		if (entry->context == RX_HANDLER_ASYNC_LOCKED)
900 			mutex_lock(&mvm->mutex);
901 		entry->fn(mvm, &entry->rxb);
902 		iwl_free_rxb(&entry->rxb);
903 		list_del(&entry->list);
904 		if (entry->context == RX_HANDLER_ASYNC_LOCKED)
905 			mutex_unlock(&mvm->mutex);
906 		kfree(entry);
907 	}
908 }
909 
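/*
 * If a FW_DBG_TRIGGER_FW_NOTIF debug trigger matches the received command,
 * kick off firmware debug data collection.
 */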
910 static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
911 					    struct iwl_rx_packet *pkt)
912 {
913 	struct iwl_fw_dbg_trigger_tlv *trig;
914 	struct iwl_fw_dbg_trigger_cmd *cmds_trig;
915 	int i;
916 
917 	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF))
918 		return;
919 
920 	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF);
921 	cmds_trig = (void *)trig->data;
922 
923 	if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
924 		return;
925 
926 	for (i = 0; i < ARRAY_SIZE(cmds_trig->cmds); i++) {
927 		/* don't collect on CMD 0 */
928 		if (!cmds_trig->cmds[i].cmd_id)
929 			break;
930 
931 		if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd ||
932 		    cmds_trig->cmds[i].group_id != pkt->hdr.group_id)
933 			continue;
934 
935 		iwl_mvm_fw_dbg_collect_trig(mvm, trig,
936 					    "CMD 0x%02x.%02x received",
937 					    pkt->hdr.group_id, pkt->hdr.cmd);
938 		break;
939 	}
940 }
941 
942 static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
943 			      struct iwl_rx_cmd_buffer *rxb,
944 			      struct iwl_rx_packet *pkt)
945 {
946 	int i;
947 
948 	iwl_mvm_rx_check_trigger(mvm, pkt);
949 
950 	/*
951 	 * Do the notification wait before RX handlers so
952 	 * even if the RX handler consumes the RXB we have
953 	 * access to it in the notification wait entry.
954 	 */
955 	iwl_notification_wait_notify(&mvm->notif_wait, pkt);
956 
957 	for (i = 0; i < ARRAY_SIZE(iwl_mvm_rx_handlers); i++) {
958 		const struct iwl_rx_handlers *rx_h = &iwl_mvm_rx_handlers[i];
959 		struct iwl_async_handler_entry *entry;
960 
961 		if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd))
962 			continue;
963 
964 		if (rx_h->context == RX_HANDLER_SYNC) {
965 			rx_h->fn(mvm, rxb);
966 			return;
967 		}
968 
969 		entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
970 		/* we can't do much... */
971 		if (!entry)
972 			return;
973 
974 		entry->rxb._page = rxb_steal_page(rxb);
975 		entry->rxb._offset = rxb->_offset;
976 		entry->rxb._rx_page_order = rxb->_rx_page_order;
977 		entry->fn = rx_h->fn;
978 		entry->context = rx_h->context;
979 		spin_lock(&mvm->async_handlers_lock);
980 		list_add_tail(&entry->list, &mvm->async_handlers_list);
981 		spin_unlock(&mvm->async_handlers_lock);
982 		schedule_work(&mvm->async_handlers_wk);
983 		break;
984 	}
985 }
986 
987 static void iwl_mvm_rx(struct iwl_op_mode *op_mode,
988 		       struct napi_struct *napi,
989 		       struct iwl_rx_cmd_buffer *rxb)
990 {
991 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
992 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
993 	u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
994 
995 	if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
996 		iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
997 	else if (cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_PHY_CMD))
998 		iwl_mvm_rx_rx_phy_cmd(mvm, rxb);
999 	else
1000 		iwl_mvm_rx_common(mvm, rxb, pkt);
1001 }
1002 
1003 static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
1004 			  struct napi_struct *napi,
1005 			  struct iwl_rx_cmd_buffer *rxb)
1006 {
1007 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
1008 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1009 	u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
1010 
1011 	if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
1012 		iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0);
1013 	else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP,
1014 					 RX_QUEUES_NOTIFICATION)))
1015 		iwl_mvm_rx_queue_notif(mvm, rxb, 0);
1016 	else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))
1017 		iwl_mvm_rx_frame_release(mvm, napi, rxb, 0);
1018 	else
1019 		iwl_mvm_rx_common(mvm, rxb, pkt);
1020 }
1021 
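/*
 * Stop every mac80211 queue set in @mq; a per-queue counter tracks nested
 * stop requests so a queue is only woken once the last stop is undone.
 */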
1022 void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
1023 {
1024 	int q;
1025 
1026 	if (WARN_ON_ONCE(!mq))
1027 		return;
1028 
1029 	for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
1030 		if (atomic_inc_return(&mvm->mac80211_queue_stop_count[q]) > 1) {
1031 			IWL_DEBUG_TX_QUEUES(mvm,
1032 					    "mac80211 %d already stopped\n", q);
1033 			continue;
1034 		}
1035 
1036 		ieee80211_stop_queue(mvm->hw, q);
1037 	}
1038 }
1039 
1040 static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode,
1041 			     const struct iwl_device_cmd *cmd)
1042 {
1043 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1044 
1045 	/*
1046 	 * For now, we only set the CMD_WANT_ASYNC_CALLBACK for ADD_STA
1047 	 * commands that need to block the Tx queues.
1048 	 */
1049 	iwl_trans_block_txq_ptrs(mvm->trans, false);
1050 }
1051 
1052 static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
1053 {
1054 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1055 	unsigned long mq;
1056 
1057 	spin_lock_bh(&mvm->queue_info_lock);
1058 	mq = mvm->hw_queue_to_mac80211[hw_queue];
1059 	spin_unlock_bh(&mvm->queue_info_lock);
1060 
1061 	iwl_mvm_stop_mac_queues(mvm, mq);
1062 }
1063 
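/*
 * Counterpart of iwl_mvm_stop_mac_queues(): wake the queues in @mq whose
 * stop count drops back to zero.
 */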
1064 void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
1065 {
1066 	int q;
1067 
1068 	if (WARN_ON_ONCE(!mq))
1069 		return;
1070 
1071 	for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
1072 		if (atomic_dec_return(&mvm->mac80211_queue_stop_count[q]) > 0) {
1073 			IWL_DEBUG_TX_QUEUES(mvm,
1074 					    "mac80211 %d still stopped\n", q);
1075 			continue;
1076 		}
1077 
1078 		ieee80211_wake_queue(mvm->hw, q);
1079 	}
1080 }
1081 
1082 static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
1083 {
1084 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1085 	unsigned long mq;
1086 
1087 	spin_lock_bh(&mvm->queue_info_lock);
1088 	mq = mvm->hw_queue_to_mac80211[hw_queue];
1089 	spin_unlock_bh(&mvm->queue_info_lock);
1090 
1091 	iwl_mvm_start_mac_queues(mvm, mq);
1092 }
1093 
1094 static void iwl_mvm_set_rfkill_state(struct iwl_mvm *mvm)
1095 {
1096 	bool state = iwl_mvm_is_radio_killed(mvm);
1097 
1098 	if (state)
1099 		wake_up(&mvm->rx_sync_waitq);
1100 
1101 	wiphy_rfkill_set_hw_state(mvm->hw->wiphy, state);
1102 }
1103 
1104 void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
1105 {
1106 	if (state)
1107 		set_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
1108 	else
1109 		clear_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
1110 
1111 	iwl_mvm_set_rfkill_state(mvm);
1112 }
1113 
1114 static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
1115 {
1116 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1117 	bool calibrating = ACCESS_ONCE(mvm->calibrating);
1118 
1119 	if (state)
1120 		set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
1121 	else
1122 		clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
1123 
1124 	iwl_mvm_set_rfkill_state(mvm);
1125 
1126 	/* iwl_run_init_mvm_ucode is waiting for results, abort it */
1127 	if (calibrating)
1128 		iwl_abort_notification_waits(&mvm->notif_wait);
1129 
	/*
	 * Stop the device if we run OPERATIONAL firmware or if we are in
	 * the middle of calibration.
	 */
1134 	return state && (mvm->cur_ucode != IWL_UCODE_INIT || calibrating);
1135 }
1136 
1137 static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
1138 {
1139 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1140 	struct ieee80211_tx_info *info;
1141 
1142 	info = IEEE80211_SKB_CB(skb);
1143 	iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
1144 	ieee80211_free_txskb(mvm->hw, skb);
1145 }
1146 
1147 struct iwl_mvm_reprobe {
1148 	struct device *dev;
1149 	struct work_struct work;
1150 };
1151 
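/* Worker: re-probe the device after a fatal error during reconfiguration. */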
1152 static void iwl_mvm_reprobe_wk(struct work_struct *wk)
1153 {
1154 	struct iwl_mvm_reprobe *reprobe;
1155 
1156 	reprobe = container_of(wk, struct iwl_mvm_reprobe, work);
1157 	if (device_reprobe(reprobe->dev))
1158 		dev_err(reprobe->dev, "reprobe failed!\n");
1159 	kfree(reprobe);
1160 	module_put(THIS_MODULE);
1161 }
1162 
1163 static void iwl_mvm_fw_error_dump_wk(struct work_struct *work)
1164 {
1165 	struct iwl_mvm *mvm =
1166 		container_of(work, struct iwl_mvm, fw_dump_wk.work);
1167 
1168 	if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_FW_DBG_COLLECT))
1169 		return;
1170 
1171 	mutex_lock(&mvm->mutex);
1172 
1173 	if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
1174 		/* stop recording */
1175 		iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
1176 
1177 		iwl_mvm_fw_error_dump(mvm);
1178 
1179 		/* start recording again if the firmware is not crashed */
1180 		if (!test_bit(STATUS_FW_ERROR, &mvm->trans->status) &&
1181 		    mvm->fw->dbg_dest_tlv) {
1182 			iwl_clear_bits_prph(mvm->trans,
1183 					    MON_BUFF_SAMPLE_CTL, 0x100);
1184 			iwl_clear_bits_prph(mvm->trans,
1185 					    MON_BUFF_SAMPLE_CTL, 0x1);
1186 			iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x1);
1187 		}
1188 	} else {
1189 		u32 in_sample = iwl_read_prph(mvm->trans, DBGC_IN_SAMPLE);
1190 		u32 out_ctrl = iwl_read_prph(mvm->trans, DBGC_OUT_CTRL);
1191 
1192 		/* stop recording */
1193 		iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0);
1194 		udelay(100);
1195 		iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, 0);
		/* wait for the DBGC to stop before we collect the data */
1197 		udelay(500);
1198 
1199 		iwl_mvm_fw_error_dump(mvm);
1200 
1201 		/* start recording again if the firmware is not crashed */
1202 		if (!test_bit(STATUS_FW_ERROR, &mvm->trans->status) &&
1203 		    mvm->fw->dbg_dest_tlv) {
1204 			iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, in_sample);
1205 			iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, out_ctrl);
1206 		}
1207 	}
1208 
1209 	mutex_unlock(&mvm->mutex);
1210 
1211 	iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
1212 }
1213 
1214 void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
1215 {
1216 	iwl_abort_notification_waits(&mvm->notif_wait);
1217 
1218 	/*
1219 	 * This is a bit racy, but worst case we tell mac80211 about
1220 	 * a stopped/aborted scan when that was already done which
1221 	 * is not a problem. It is necessary to abort any os scan
1222 	 * here because mac80211 requires having the scan cleared
1223 	 * before restarting.
1224 	 * We'll reset the scan_status to NONE in restart cleanup in
1225 	 * the next start() call from mac80211. If restart isn't called
1226 	 * (no fw restart) scan status will stay busy.
1227 	 */
1228 	iwl_mvm_report_scan_aborted(mvm);
1229 
1230 	/*
1231 	 * If we're restarting already, don't cycle restarts.
1232 	 * If INIT fw asserted, it will likely fail again.
1233 	 * If WoWLAN fw asserted, don't restart either, mac80211
1234 	 * can't recover this since we're already half suspended.
1235 	 */
1236 	if (!mvm->fw_restart && fw_error) {
1237 		iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert,
1238 					NULL);
1239 	} else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1240 		struct iwl_mvm_reprobe *reprobe;
1241 
1242 		IWL_ERR(mvm,
1243 			"Firmware error during reconfiguration - reprobe!\n");
1244 
		/*
		 * Take a module reference to avoid doing this while the
		 * module is being unloaded, and to avoid scheduling a work
		 * item whose code is about to be removed.
		 */
1250 		if (!try_module_get(THIS_MODULE)) {
1251 			IWL_ERR(mvm, "Module is being unloaded - abort\n");
1252 			return;
1253 		}
1254 
1255 		reprobe = kzalloc(sizeof(*reprobe), GFP_ATOMIC);
1256 		if (!reprobe) {
1257 			module_put(THIS_MODULE);
1258 			return;
1259 		}
1260 		reprobe->dev = mvm->trans->dev;
1261 		INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
1262 		schedule_work(&reprobe->work);
1263 	} else if (mvm->cur_ucode == IWL_UCODE_REGULAR &&
1264 		   mvm->hw_registered) {
1265 		/* don't let the transport/FW power down */
1266 		iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
1267 
1268 		if (fw_error && mvm->fw_restart > 0)
1269 			mvm->fw_restart--;
1270 		set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
1271 		ieee80211_restart_hw(mvm->hw);
1272 	}
1273 }
1274 
1275 static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
1276 {
1277 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1278 
1279 	iwl_mvm_dump_nic_error_log(mvm);
1280 
1281 	iwl_mvm_nic_restart(mvm, true);
1282 }
1283 
1284 static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
1285 {
1286 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1287 
1288 	WARN_ON(1);
1289 	iwl_mvm_nic_restart(mvm, true);
1290 }
1291 
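/* Data collected while iterating over the active interfaces for D0i3 entry. */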
1292 struct iwl_d0i3_iter_data {
1293 	struct iwl_mvm *mvm;
1294 	struct ieee80211_vif *connected_vif;
1295 	u8 ap_sta_id;
1296 	u8 vif_count;
1297 	u8 offloading_tid;
1298 	bool disable_offloading;
1299 };
1300 
1301 static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm,
1302 					struct ieee80211_vif *vif,
1303 					struct iwl_d0i3_iter_data *iter_data)
1304 {
1305 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1306 	struct iwl_mvm_sta *mvmsta;
1307 	u32 available_tids = 0;
1308 	u8 tid;
1309 
1310 	if (WARN_ON(vif->type != NL80211_IFTYPE_STATION ||
1311 		    mvmvif->ap_sta_id == IWL_MVM_INVALID_STA))
1312 		return false;
1313 
1314 	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
1315 	if (!mvmsta)
1316 		return false;
1317 
1318 	spin_lock_bh(&mvmsta->lock);
1319 	for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
1320 		struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
1321 
1322 		/*
1323 		 * in case of pending tx packets, don't use this tid
1324 		 * for offloading in order to prevent reuse of the same
1325 		 * qos seq counters.
1326 		 */
1327 		if (iwl_mvm_tid_queued(mvm, tid_data))
1328 			continue;
1329 
1330 		if (tid_data->state != IWL_AGG_OFF)
1331 			continue;
1332 
1333 		available_tids |= BIT(tid);
1334 	}
1335 	spin_unlock_bh(&mvmsta->lock);
1336 
1337 	/*
1338 	 * disallow protocol offloading if we have no available tid
1339 	 * (with no pending frames and no active aggregation,
1340 	 * as we don't handle "holes" properly - the scheduler needs the
1341 	 * frame's seq number and TFD index to match)
1342 	 */
1343 	if (!available_tids)
1344 		return true;
1345 
1346 	/* for simplicity, just use the first available tid */
1347 	iter_data->offloading_tid = ffs(available_tids) - 1;
1348 	return false;
1349 }
1350 
1351 static void iwl_mvm_enter_d0i3_iterator(void *_data, u8 *mac,
1352 					struct ieee80211_vif *vif)
1353 {
1354 	struct iwl_d0i3_iter_data *data = _data;
1355 	struct iwl_mvm *mvm = data->mvm;
1356 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1357 	u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;
1358 
1359 	IWL_DEBUG_RPM(mvm, "entering D0i3 - vif %pM\n", vif->addr);
1360 	if (vif->type != NL80211_IFTYPE_STATION ||
1361 	    !vif->bss_conf.assoc)
1362 		return;
1363 
1364 	/*
1365 	 * in case of pending tx packets or active aggregations,
1366 	 * avoid offloading features in order to prevent reuse of
1367 	 * the same qos seq counters.
1368 	 */
1369 	if (iwl_mvm_disallow_offloading(mvm, vif, data))
1370 		data->disable_offloading = true;
1371 
1372 	iwl_mvm_update_d0i3_power_mode(mvm, vif, true, flags);
1373 	iwl_mvm_send_proto_offload(mvm, vif, data->disable_offloading,
1374 				   false, flags);
1375 
1376 	/*
1377 	 * on init/association, mvm already configures POWER_TABLE_CMD
1378 	 * and REPLY_MCAST_FILTER_CMD, so currently don't
1379 	 * reconfigure them (we might want to use different
1380 	 * params later on, though).
1381 	 */
1382 	data->ap_sta_id = mvmvif->ap_sta_id;
1383 	data->vif_count++;
1384 
1385 	/*
1386 	 * no new commands can be sent at this stage, so it's safe
1387 	 * to save the vif pointer during d0i3 entrance.
1388 	 */
1389 	data->connected_vif = vif;
1390 }
1391 
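/*
 * Fill the WoWLAN configuration used for D0i3 from the AP station's state:
 * 11n connection flag, offloading TID, filtering flags and QoS seq numbers.
 */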
1392 static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm,
1393 				    struct iwl_wowlan_config_cmd *cmd,
1394 				    struct iwl_d0i3_iter_data *iter_data)
1395 {
1396 	struct ieee80211_sta *ap_sta;
1397 	struct iwl_mvm_sta *mvm_ap_sta;
1398 
1399 	if (iter_data->ap_sta_id == IWL_MVM_INVALID_STA)
1400 		return;
1401 
1402 	rcu_read_lock();
1403 
1404 	ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[iter_data->ap_sta_id]);
1405 	if (IS_ERR_OR_NULL(ap_sta))
1406 		goto out;
1407 
1408 	mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
1409 	cmd->is_11n_connection = ap_sta->ht_cap.ht_supported;
1410 	cmd->offloading_tid = iter_data->offloading_tid;
1411 	cmd->flags = ENABLE_L3_FILTERING | ENABLE_NBNS_FILTERING |
1412 		ENABLE_DHCP_FILTERING | ENABLE_STORE_BEACON;
1413 	/*
1414 	 * The d0i3 uCode takes care of the nonqos counters,
1415 	 * so configure only the qos seq ones.
1416 	 */
1417 	iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, cmd);
1418 out:
1419 	rcu_read_unlock();
1420 }
1421 
1422 int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
1423 {
1424 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1425 	u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;
1426 	int ret;
1427 	struct iwl_d0i3_iter_data d0i3_iter_data = {
1428 		.mvm = mvm,
1429 	};
1430 	struct iwl_wowlan_config_cmd wowlan_config_cmd = {
1431 		.wakeup_filter = cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME |
1432 					     IWL_WOWLAN_WAKEUP_BEACON_MISS |
1433 					     IWL_WOWLAN_WAKEUP_LINK_CHANGE),
1434 	};
1435 	struct iwl_d3_manager_config d3_cfg_cmd = {
1436 		.min_sleep_time = cpu_to_le32(1000),
1437 		.wakeup_flags = cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR),
1438 	};
1439 
1440 	IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n");
1441 
1442 	if (WARN_ON_ONCE(mvm->cur_ucode != IWL_UCODE_REGULAR))
1443 		return -EINVAL;
1444 
1445 	set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
1446 
	/*
	 * iwl_mvm_ref_sync takes a reference before checking the flag.
	 * So by checking that no reference is held we prevent a state in
	 * which iwl_mvm_ref_sync continues successfully while we
	 * configure the firmware to enter d0i3.
	 */
1453 	if (iwl_mvm_ref_taken(mvm)) {
1454 		IWL_DEBUG_RPM(mvm->trans, "abort d0i3 due to taken ref\n");
1455 		clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
1456 		wake_up(&mvm->d0i3_exit_waitq);
1457 		return 1;
1458 	}
1459 
1460 	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
1461 						   IEEE80211_IFACE_ITER_NORMAL,
1462 						   iwl_mvm_enter_d0i3_iterator,
1463 						   &d0i3_iter_data);
1464 	if (d0i3_iter_data.vif_count == 1) {
1465 		mvm->d0i3_ap_sta_id = d0i3_iter_data.ap_sta_id;
1466 		mvm->d0i3_offloading = !d0i3_iter_data.disable_offloading;
1467 	} else {
1468 		WARN_ON_ONCE(d0i3_iter_data.vif_count > 1);
1469 		mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
1470 		mvm->d0i3_offloading = false;
1471 	}
1472 
1473 	/* make sure we have no running tx while configuring the seqno */
1474 	synchronize_net();
1475 
1476 	/* Flush the hw queues, in case something got queued during entry */
1477 	/* TODO new tx api */
1478 	if (iwl_mvm_has_new_tx_api(mvm)) {
1479 		WARN_ONCE(1, "d0i3: Need to implement flush TX queue\n");
1480 	} else {
1481 		ret = iwl_mvm_flush_tx_path(mvm, iwl_mvm_flushable_queues(mvm),
1482 					    flags);
1483 		if (ret)
1484 			return ret;
1485 	}
1486 
1487 	/* configure wowlan configuration only if needed */
1488 	if (mvm->d0i3_ap_sta_id != IWL_MVM_INVALID_STA) {
1489 		/* wake on beacons only if beacon storing isn't supported */
1490 		if (!fw_has_capa(&mvm->fw->ucode_capa,
1491 				 IWL_UCODE_TLV_CAPA_BEACON_STORING))
1492 			wowlan_config_cmd.wakeup_filter |=
1493 				cpu_to_le32(IWL_WOWLAN_WAKEUP_BCN_FILTERING);
1494 
1495 		iwl_mvm_wowlan_config_key_params(mvm,
1496 						 d0i3_iter_data.connected_vif,
1497 						 true, flags);
1498 
1499 		iwl_mvm_set_wowlan_data(mvm, &wowlan_config_cmd,
1500 					&d0i3_iter_data);
1501 
1502 		ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, flags,
1503 					   sizeof(wowlan_config_cmd),
1504 					   &wowlan_config_cmd);
1505 		if (ret)
1506 			return ret;
1507 	}
1508 
1509 	return iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD,
1510 				    flags | CMD_MAKE_TRANS_IDLE,
1511 				    sizeof(d3_cfg_cmd), &d3_cfg_cmd);
1512 }
1513 
1514 static void iwl_mvm_exit_d0i3_iterator(void *_data, u8 *mac,
1515 				       struct ieee80211_vif *vif)
1516 {
1517 	struct iwl_mvm *mvm = _data;
1518 	u32 flags = CMD_ASYNC | CMD_HIGH_PRIO;
1519 
1520 	IWL_DEBUG_RPM(mvm, "exiting D0i3 - vif %pM\n", vif->addr);
1521 	if (vif->type != NL80211_IFTYPE_STATION ||
1522 	    !vif->bss_conf.assoc)
1523 		return;
1524 
1525 	iwl_mvm_update_d0i3_power_mode(mvm, vif, false, flags);
1526 }
1527 
1528 struct iwl_mvm_d0i3_exit_work_iter_data {
1529 	struct iwl_mvm *mvm;
1530 	struct iwl_wowlan_status *status;
1531 	u32 wakeup_reasons;
1532 };
1533 
1534 static void iwl_mvm_d0i3_exit_work_iter(void *_data, u8 *mac,
1535 					struct ieee80211_vif *vif)
1536 {
1537 	struct iwl_mvm_d0i3_exit_work_iter_data *data = _data;
1538 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1539 	u32 reasons = data->wakeup_reasons;
1540 
1541 	/* consider only the relevant station interface */
1542 	if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc ||
1543 	    data->mvm->d0i3_ap_sta_id != mvmvif->ap_sta_id)
1544 		return;
1545 
1546 	if (reasons & IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH)
1547 		iwl_mvm_connection_loss(data->mvm, vif, "D0i3");
1548 	else if (reasons & IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON)
1549 		ieee80211_beacon_loss(vif);
1550 	else
1551 		iwl_mvm_d0i3_update_keys(data->mvm, vif, data->status);
1552 }
1553 
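/*
 * Called on D0i3 exit: update the QoS sequence numbers reported by the
 * firmware (if offloading was enabled) and re-enqueue, or drop, the frames
 * that were held in the d0i3_tx queue while the device was in D0i3.
 */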
1554 void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq)
1555 {
1556 	struct ieee80211_sta *sta = NULL;
1557 	struct iwl_mvm_sta *mvm_ap_sta;
1558 	int i;
1559 	bool wake_queues = false;
1560 
1561 	lockdep_assert_held(&mvm->mutex);
1562 
1563 	spin_lock_bh(&mvm->d0i3_tx_lock);
1564 
1565 	if (mvm->d0i3_ap_sta_id == IWL_MVM_INVALID_STA)
1566 		goto out;
1567 
1568 	IWL_DEBUG_RPM(mvm, "re-enqueue packets\n");
1569 
1570 	/* get the sta in order to update seq numbers and re-enqueue skbs */
1571 	sta = rcu_dereference_protected(
1572 			mvm->fw_id_to_mac_id[mvm->d0i3_ap_sta_id],
1573 			lockdep_is_held(&mvm->mutex));
1574 
1575 	if (IS_ERR_OR_NULL(sta)) {
1576 		sta = NULL;
1577 		goto out;
1578 	}
1579 
1580 	if (mvm->d0i3_offloading && qos_seq) {
1581 		/* update qos seq numbers if offloading was enabled */
1582 		mvm_ap_sta = iwl_mvm_sta_from_mac80211(sta);
1583 		for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
1584 			u16 seq = le16_to_cpu(qos_seq[i]);
1585 			/* firmware stores last-used one, we store next one */
1586 			seq += 0x10;
1587 			mvm_ap_sta->tid_data[i].seq_number = seq;
1588 		}
1589 	}
1590 out:
1591 	/* re-enqueue (or drop) all packets */
1592 	while (!skb_queue_empty(&mvm->d0i3_tx)) {
1593 		struct sk_buff *skb = __skb_dequeue(&mvm->d0i3_tx);
1594 
1595 		if (!sta || iwl_mvm_tx_skb(mvm, skb, sta))
1596 			ieee80211_free_txskb(mvm->hw, skb);
1597 
1598 		/* if the skb_queue is not empty, we need to wake queues */
1599 		wake_queues = true;
1600 	}
1601 	clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
1602 	wake_up(&mvm->d0i3_exit_waitq);
1603 	mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
1604 	if (wake_queues)
1605 		ieee80211_wake_queues(mvm->hw);
1606 
1607 	spin_unlock_bh(&mvm->d0i3_tx_lock);
1608 }
1609 
1610 static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
1611 {
1612 	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, d0i3_exit_work);
1613 	struct iwl_host_cmd get_status_cmd = {
1614 		.id = WOWLAN_GET_STATUSES,
1615 		.flags = CMD_HIGH_PRIO | CMD_WANT_SKB,
1616 	};
1617 	struct iwl_mvm_d0i3_exit_work_iter_data iter_data = {
1618 		.mvm = mvm,
1619 	};
1620 
1621 	struct iwl_wowlan_status *status;
1622 	int ret;
1623 	u32 wakeup_reasons = 0;
1624 	__le16 *qos_seq = NULL;
1625 
1626 	mutex_lock(&mvm->mutex);
1627 	ret = iwl_mvm_send_cmd(mvm, &get_status_cmd);
1628 	if (ret)
1629 		goto out;
1630 
1631 	status = (void *)get_status_cmd.resp_pkt->data;
1632 	wakeup_reasons = le32_to_cpu(status->wakeup_reasons);
1633 	qos_seq = status->qos_seq_ctr;
1634 
1635 	IWL_DEBUG_RPM(mvm, "wakeup reasons: 0x%x\n", wakeup_reasons);
1636 
1637 	iter_data.wakeup_reasons = wakeup_reasons;
1638 	iter_data.status = status;
1639 	ieee80211_iterate_active_interfaces(mvm->hw,
1640 					    IEEE80211_IFACE_ITER_NORMAL,
1641 					    iwl_mvm_d0i3_exit_work_iter,
1642 					    &iter_data);
1643 out:
1644 	iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
1645 
1646 	IWL_DEBUG_INFO(mvm, "d0i3 exit completed (wakeup reasons: 0x%x)\n",
1647 		       wakeup_reasons);
1648 
1649 	/* qos_seq might point inside resp_pkt, so free it only now */
1650 	if (get_status_cmd.resp_pkt)
1651 		iwl_free_resp(&get_status_cmd);
1652 
1653 	/* the FW might have updated the regdomain */
1654 	iwl_mvm_update_changed_regdom(mvm);
1655 
1656 	iwl_mvm_unref(mvm, IWL_MVM_REF_EXIT_WORK);
1657 	mutex_unlock(&mvm->mutex);
1658 }
1659 
1660 int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm)
1661 {
1662 	u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE |
1663 		    CMD_WAKE_UP_TRANS;
1664 	int ret;
1665 
1666 	IWL_DEBUG_RPM(mvm, "MVM exiting D0i3\n");
1667 
1668 	if (WARN_ON_ONCE(mvm->cur_ucode != IWL_UCODE_REGULAR))
1669 		return -EINVAL;
1670 
1671 	mutex_lock(&mvm->d0i3_suspend_mutex);
1672 	if (test_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags)) {
1673 		IWL_DEBUG_RPM(mvm, "Deferring d0i3 exit until resume\n");
1674 		__set_bit(D0I3_PENDING_WAKEUP, &mvm->d0i3_suspend_flags);
1675 		mutex_unlock(&mvm->d0i3_suspend_mutex);
1676 		return 0;
1677 	}
1678 	mutex_unlock(&mvm->d0i3_suspend_mutex);
1679 
1680 	ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL);
1681 	if (ret)
1682 		goto out;
1683 
1684 	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
1685 						   IEEE80211_IFACE_ITER_NORMAL,
1686 						   iwl_mvm_exit_d0i3_iterator,
1687 						   mvm);
1688 out:
1689 	schedule_work(&mvm->d0i3_exit_work);
1690 	return ret;
1691 }
1692 
1693 int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
1694 {
1695 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1696 
1697 	iwl_mvm_ref(mvm, IWL_MVM_REF_EXIT_WORK);
1698 	return _iwl_mvm_exit_d0i3(mvm);
1699 }
1700 
1701 #define IWL_MVM_COMMON_OPS					\
1702 	/* these could be differentiated */			\
1703 	.async_cb = iwl_mvm_async_cb,				\
1704 	.queue_full = iwl_mvm_stop_sw_queue,			\
1705 	.queue_not_full = iwl_mvm_wake_sw_queue,		\
1706 	.hw_rf_kill = iwl_mvm_set_hw_rfkill_state,		\
1707 	.free_skb = iwl_mvm_free_skb,				\
1708 	.nic_error = iwl_mvm_nic_error,				\
1709 	.cmd_queue_full = iwl_mvm_cmd_queue_full,		\
1710 	.nic_config = iwl_mvm_nic_config,			\
1711 	.enter_d0i3 = iwl_mvm_enter_d0i3,			\
1712 	.exit_d0i3 = iwl_mvm_exit_d0i3,				\
1713 	/* as we only register one, these MUST be common! */	\
1714 	.start = iwl_op_mode_mvm_start,				\
1715 	.stop = iwl_op_mode_mvm_stop
1716 
1717 static const struct iwl_op_mode_ops iwl_mvm_ops = {
1718 	IWL_MVM_COMMON_OPS,
1719 	.rx = iwl_mvm_rx,
1720 };
1721 
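/* RSS Rx path: handle frames delivered on the non-default Rx queues. */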
1722 static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode,
1723 			      struct napi_struct *napi,
1724 			      struct iwl_rx_cmd_buffer *rxb,
1725 			      unsigned int queue)
1726 {
1727 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1728 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
1729 	u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
1730 
1731 	if (unlikely(cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE)))
1732 		iwl_mvm_rx_frame_release(mvm, napi, rxb, queue);
1733 	else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP,
1734 					 RX_QUEUES_NOTIFICATION)))
1735 		iwl_mvm_rx_queue_notif(mvm, rxb, queue);
1736 	else if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
1737 		iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue);
1738 }
1739 
1740 static const struct iwl_op_mode_ops iwl_mvm_ops_mq = {
1741 	IWL_MVM_COMMON_OPS,
1742 	.rx = iwl_mvm_rx_mq,
1743 	.rx_rss = iwl_mvm_rx_mq_rss,
1744 };
1745