xref: /openbmc/linux/drivers/net/wireless/ath/ath10k/wmi.c (revision e2f1cf25)
1 /*
2  * Copyright (c) 2005-2011 Atheros Communications Inc.
3  * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include <linux/skbuff.h>
19 #include <linux/ctype.h>
20 
21 #include "core.h"
22 #include "htc.h"
23 #include "debug.h"
24 #include "wmi.h"
25 #include "wmi-tlv.h"
26 #include "mac.h"
27 #include "testmode.h"
28 #include "wmi-ops.h"
29 #include "p2p.h"
30 #include "hw.h"
31 
32 /* MAIN WMI cmd track */
33 static struct wmi_cmd_map wmi_cmd_map = {
34 	.init_cmdid = WMI_INIT_CMDID,
35 	.start_scan_cmdid = WMI_START_SCAN_CMDID,
36 	.stop_scan_cmdid = WMI_STOP_SCAN_CMDID,
37 	.scan_chan_list_cmdid = WMI_SCAN_CHAN_LIST_CMDID,
38 	.scan_sch_prio_tbl_cmdid = WMI_SCAN_SCH_PRIO_TBL_CMDID,
39 	.pdev_set_regdomain_cmdid = WMI_PDEV_SET_REGDOMAIN_CMDID,
40 	.pdev_set_channel_cmdid = WMI_PDEV_SET_CHANNEL_CMDID,
41 	.pdev_set_param_cmdid = WMI_PDEV_SET_PARAM_CMDID,
42 	.pdev_pktlog_enable_cmdid = WMI_PDEV_PKTLOG_ENABLE_CMDID,
43 	.pdev_pktlog_disable_cmdid = WMI_PDEV_PKTLOG_DISABLE_CMDID,
44 	.pdev_set_wmm_params_cmdid = WMI_PDEV_SET_WMM_PARAMS_CMDID,
45 	.pdev_set_ht_cap_ie_cmdid = WMI_PDEV_SET_HT_CAP_IE_CMDID,
46 	.pdev_set_vht_cap_ie_cmdid = WMI_PDEV_SET_VHT_CAP_IE_CMDID,
47 	.pdev_set_dscp_tid_map_cmdid = WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
48 	.pdev_set_quiet_mode_cmdid = WMI_PDEV_SET_QUIET_MODE_CMDID,
49 	.pdev_green_ap_ps_enable_cmdid = WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
50 	.pdev_get_tpc_config_cmdid = WMI_PDEV_GET_TPC_CONFIG_CMDID,
51 	.pdev_set_base_macaddr_cmdid = WMI_PDEV_SET_BASE_MACADDR_CMDID,
52 	.vdev_create_cmdid = WMI_VDEV_CREATE_CMDID,
53 	.vdev_delete_cmdid = WMI_VDEV_DELETE_CMDID,
54 	.vdev_start_request_cmdid = WMI_VDEV_START_REQUEST_CMDID,
55 	.vdev_restart_request_cmdid = WMI_VDEV_RESTART_REQUEST_CMDID,
56 	.vdev_up_cmdid = WMI_VDEV_UP_CMDID,
57 	.vdev_stop_cmdid = WMI_VDEV_STOP_CMDID,
58 	.vdev_down_cmdid = WMI_VDEV_DOWN_CMDID,
59 	.vdev_set_param_cmdid = WMI_VDEV_SET_PARAM_CMDID,
60 	.vdev_install_key_cmdid = WMI_VDEV_INSTALL_KEY_CMDID,
61 	.peer_create_cmdid = WMI_PEER_CREATE_CMDID,
62 	.peer_delete_cmdid = WMI_PEER_DELETE_CMDID,
63 	.peer_flush_tids_cmdid = WMI_PEER_FLUSH_TIDS_CMDID,
64 	.peer_set_param_cmdid = WMI_PEER_SET_PARAM_CMDID,
65 	.peer_assoc_cmdid = WMI_PEER_ASSOC_CMDID,
66 	.peer_add_wds_entry_cmdid = WMI_PEER_ADD_WDS_ENTRY_CMDID,
67 	.peer_remove_wds_entry_cmdid = WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
68 	.peer_mcast_group_cmdid = WMI_PEER_MCAST_GROUP_CMDID,
69 	.bcn_tx_cmdid = WMI_BCN_TX_CMDID,
70 	.pdev_send_bcn_cmdid = WMI_PDEV_SEND_BCN_CMDID,
71 	.bcn_tmpl_cmdid = WMI_BCN_TMPL_CMDID,
72 	.bcn_filter_rx_cmdid = WMI_BCN_FILTER_RX_CMDID,
73 	.prb_req_filter_rx_cmdid = WMI_PRB_REQ_FILTER_RX_CMDID,
74 	.mgmt_tx_cmdid = WMI_MGMT_TX_CMDID,
75 	.prb_tmpl_cmdid = WMI_PRB_TMPL_CMDID,
76 	.addba_clear_resp_cmdid = WMI_ADDBA_CLEAR_RESP_CMDID,
77 	.addba_send_cmdid = WMI_ADDBA_SEND_CMDID,
78 	.addba_status_cmdid = WMI_ADDBA_STATUS_CMDID,
79 	.delba_send_cmdid = WMI_DELBA_SEND_CMDID,
80 	.addba_set_resp_cmdid = WMI_ADDBA_SET_RESP_CMDID,
81 	.send_singleamsdu_cmdid = WMI_SEND_SINGLEAMSDU_CMDID,
82 	.sta_powersave_mode_cmdid = WMI_STA_POWERSAVE_MODE_CMDID,
83 	.sta_powersave_param_cmdid = WMI_STA_POWERSAVE_PARAM_CMDID,
84 	.sta_mimo_ps_mode_cmdid = WMI_STA_MIMO_PS_MODE_CMDID,
85 	.pdev_dfs_enable_cmdid = WMI_PDEV_DFS_ENABLE_CMDID,
86 	.pdev_dfs_disable_cmdid = WMI_PDEV_DFS_DISABLE_CMDID,
87 	.roam_scan_mode = WMI_ROAM_SCAN_MODE,
88 	.roam_scan_rssi_threshold = WMI_ROAM_SCAN_RSSI_THRESHOLD,
89 	.roam_scan_period = WMI_ROAM_SCAN_PERIOD,
90 	.roam_scan_rssi_change_threshold = WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
91 	.roam_ap_profile = WMI_ROAM_AP_PROFILE,
92 	.ofl_scan_add_ap_profile = WMI_ROAM_AP_PROFILE,
93 	.ofl_scan_remove_ap_profile = WMI_OFL_SCAN_REMOVE_AP_PROFILE,
94 	.ofl_scan_period = WMI_OFL_SCAN_PERIOD,
95 	.p2p_dev_set_device_info = WMI_P2P_DEV_SET_DEVICE_INFO,
96 	.p2p_dev_set_discoverability = WMI_P2P_DEV_SET_DISCOVERABILITY,
97 	.p2p_go_set_beacon_ie = WMI_P2P_GO_SET_BEACON_IE,
98 	.p2p_go_set_probe_resp_ie = WMI_P2P_GO_SET_PROBE_RESP_IE,
99 	.p2p_set_vendor_ie_data_cmdid = WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
100 	.ap_ps_peer_param_cmdid = WMI_AP_PS_PEER_PARAM_CMDID,
101 	.ap_ps_peer_uapsd_coex_cmdid = WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
102 	.peer_rate_retry_sched_cmdid = WMI_PEER_RATE_RETRY_SCHED_CMDID,
103 	.wlan_profile_trigger_cmdid = WMI_WLAN_PROFILE_TRIGGER_CMDID,
104 	.wlan_profile_set_hist_intvl_cmdid =
105 				WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
106 	.wlan_profile_get_profile_data_cmdid =
107 				WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
108 	.wlan_profile_enable_profile_id_cmdid =
109 				WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
110 	.wlan_profile_list_profile_id_cmdid =
111 				WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
112 	.pdev_suspend_cmdid = WMI_PDEV_SUSPEND_CMDID,
113 	.pdev_resume_cmdid = WMI_PDEV_RESUME_CMDID,
114 	.add_bcn_filter_cmdid = WMI_ADD_BCN_FILTER_CMDID,
115 	.rmv_bcn_filter_cmdid = WMI_RMV_BCN_FILTER_CMDID,
116 	.wow_add_wake_pattern_cmdid = WMI_WOW_ADD_WAKE_PATTERN_CMDID,
117 	.wow_del_wake_pattern_cmdid = WMI_WOW_DEL_WAKE_PATTERN_CMDID,
118 	.wow_enable_disable_wake_event_cmdid =
119 				WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
120 	.wow_enable_cmdid = WMI_WOW_ENABLE_CMDID,
121 	.wow_hostwakeup_from_sleep_cmdid = WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
122 	.rtt_measreq_cmdid = WMI_RTT_MEASREQ_CMDID,
123 	.rtt_tsf_cmdid = WMI_RTT_TSF_CMDID,
124 	.vdev_spectral_scan_configure_cmdid =
125 				WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
126 	.vdev_spectral_scan_enable_cmdid = WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
127 	.request_stats_cmdid = WMI_REQUEST_STATS_CMDID,
128 	.set_arp_ns_offload_cmdid = WMI_SET_ARP_NS_OFFLOAD_CMDID,
129 	.network_list_offload_config_cmdid =
130 				WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
131 	.gtk_offload_cmdid = WMI_GTK_OFFLOAD_CMDID,
132 	.csa_offload_enable_cmdid = WMI_CSA_OFFLOAD_ENABLE_CMDID,
133 	.csa_offload_chanswitch_cmdid = WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
134 	.chatter_set_mode_cmdid = WMI_CHATTER_SET_MODE_CMDID,
135 	.peer_tid_addba_cmdid = WMI_PEER_TID_ADDBA_CMDID,
136 	.peer_tid_delba_cmdid = WMI_PEER_TID_DELBA_CMDID,
137 	.sta_dtim_ps_method_cmdid = WMI_STA_DTIM_PS_METHOD_CMDID,
138 	.sta_uapsd_auto_trig_cmdid = WMI_STA_UAPSD_AUTO_TRIG_CMDID,
139 	.sta_keepalive_cmd = WMI_STA_KEEPALIVE_CMD,
140 	.echo_cmdid = WMI_ECHO_CMDID,
141 	.pdev_utf_cmdid = WMI_PDEV_UTF_CMDID,
142 	.dbglog_cfg_cmdid = WMI_DBGLOG_CFG_CMDID,
143 	.pdev_qvit_cmdid = WMI_PDEV_QVIT_CMDID,
144 	.pdev_ftm_intg_cmdid = WMI_PDEV_FTM_INTG_CMDID,
145 	.vdev_set_keepalive_cmdid = WMI_VDEV_SET_KEEPALIVE_CMDID,
146 	.vdev_get_keepalive_cmdid = WMI_VDEV_GET_KEEPALIVE_CMDID,
147 	.force_fw_hang_cmdid = WMI_FORCE_FW_HANG_CMDID,
148 	.gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID,
149 	.gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID,
150 	.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
151 };
152 
153 /* 10.X WMI cmd track */
/* 10.X WMI cmd track
 *
 * Command-ID translation table for 10.X firmware.  Same slot layout as
 * wmi_cmd_map above; slots the 10.X firmware does not implement (beacon
 * templates, offload commands, keepalive, etc.) are WMI_CMD_UNSUPPORTED.
 */
static struct wmi_cmd_map wmi_10x_cmd_map = {
	.init_cmdid = WMI_10X_INIT_CMDID,
	.start_scan_cmdid = WMI_10X_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_10X_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_10X_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_regdomain_cmdid = WMI_10X_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_10X_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_10X_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_10X_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_10X_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_10X_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_10X_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_10X_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_dscp_tid_map_cmdid = WMI_10X_PDEV_SET_DSCP_TID_MAP_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_10X_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_10X_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_10X_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_10X_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_10X_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_10X_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_10X_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_10X_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_10X_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_10X_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_10X_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_10X_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_10X_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_10X_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_10X_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_10X_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_10X_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_10X_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_10X_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_10X_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_10X_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_10X_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_10X_PDEV_SEND_BCN_CMDID,
	.bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.bcn_filter_rx_cmdid = WMI_10X_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_10X_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_10X_MGMT_TX_CMDID,
	.prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.addba_clear_resp_cmdid = WMI_10X_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_10X_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_10X_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_10X_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_10X_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_10X_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_10X_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_10X_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_10X_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_10X_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_10X_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_10X_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_10X_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_10X_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold =
				WMI_10X_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_10X_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_10X_OFL_SCAN_ADD_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_10X_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_10X_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_10X_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_10X_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_10X_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_10X_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
	.ap_ps_peer_param_cmdid = WMI_10X_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_rate_retry_sched_cmdid = WMI_10X_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_10X_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
				WMI_10X_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
				WMI_10X_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
				WMI_10X_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
				WMI_10X_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_10X_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_10X_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_10X_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_10X_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_10X_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_10X_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
				WMI_10X_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_10X_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid =
				WMI_10X_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_10X_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_10X_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid =
				WMI_10X_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
	.vdev_spectral_scan_enable_cmdid =
				WMI_10X_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_10X_REQUEST_STATS_CMDID,
	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
	.gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
	.echo_cmdid = WMI_10X_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_10X_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_10X_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_10X_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
	.gpio_config_cmdid = WMI_10X_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_10X_GPIO_OUTPUT_CMDID,
	.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
};
275 
276 /* 10.2.4 WMI cmd track */
/* 10.2.4 WMI cmd track
 *
 * Command-ID translation table for 10.2.4 firmware, using the
 * WMI_10_2_* command-ID namespace.  Unlike 10.X this firmware supports
 * reading the chip temperature (see the last entry).
 *
 * NOTE(review): unlike the main and 10.X maps, no
 * .pdev_set_dscp_tid_map_cmdid entry is set here, so it defaults to 0
 * rather than WMI_CMD_UNSUPPORTED — confirm this is intentional for
 * 10.2.4 firmware.
 */
static struct wmi_cmd_map wmi_10_2_4_cmd_map = {
	.init_cmdid = WMI_10_2_INIT_CMDID,
	.start_scan_cmdid = WMI_10_2_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_10_2_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_10_2_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_10_2_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_10_2_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_10_2_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_10_2_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_10_2_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_10_2_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_10_2_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_10_2_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_10_2_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_10_2_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_10_2_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_10_2_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_10_2_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_10_2_PDEV_SEND_BCN_CMDID,
	.bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.bcn_filter_rx_cmdid = WMI_10_2_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_10_2_MGMT_TX_CMDID,
	.prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.addba_clear_resp_cmdid = WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_10_2_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_10_2_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_10_2_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_10_2_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_10_2_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_10_2_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_10_2_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_10_2_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_10_2_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_10_2_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_10_2_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold =
				WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_10_2_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_10_2_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_10_2_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
	.ap_ps_peer_param_cmdid = WMI_10_2_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_rate_retry_sched_cmdid = WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
				WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
				WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
				WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
				WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_10_2_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_10_2_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_10_2_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_10_2_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
				WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_10_2_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid =
				WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_10_2_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_10_2_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid =
				WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
	.vdev_spectral_scan_enable_cmdid =
				WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_10_2_REQUEST_STATS_CMDID,
	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
	.gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
	.echo_cmdid = WMI_10_2_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_10_2_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_10_2_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_10_2_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
	.gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
	.pdev_get_temperature_cmdid = WMI_10_2_PDEV_GET_TEMPERATURE_CMDID,
};
397 
398 /* MAIN WMI VDEV param map */
/* MAIN WMI VDEV param map
 *
 * Maps the driver's abstract per-vdev parameter slots to the numeric
 * WMI_VDEV_PARAM_* IDs used by "main" firmware.  Parameters the firmware
 * does not implement are WMI_VDEV_PARAM_UNSUPPORTED.
 */
static struct wmi_vdev_param_map wmi_vdev_param_map = {
	.rts_threshold = WMI_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_VDEV_PARAM_SLOT_TIME,
	.preamble = WMI_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
					WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_VDEV_PARAM_WDS,
	.atim_window = WMI_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_VDEV_PARAM_BMISS_COUNT_MAX,
	.bmiss_first_bcnt = WMI_VDEV_PARAM_BMISS_FIRST_BCNT,
	.bmiss_final_bcnt = WMI_VDEV_PARAM_BMISS_FINAL_BCNT,
	.feature_wmm = WMI_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection =	WMI_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_VDEV_PARAM_SGI,
	.ldpc = WMI_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
			WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
			WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
			WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_VDEV_PARAM_AP_ENABLE_NAWDS,
	/* mcast-to-ucast conversion is a 10.X-only feature */
	.mcast2ucast_set = WMI_VDEV_PARAM_UNSUPPORTED,
	.enable_rtscts = WMI_VDEV_PARAM_ENABLE_RTSCTS,
	.txbf = WMI_VDEV_PARAM_TXBF,
	.packet_powersave = WMI_VDEV_PARAM_PACKET_POWERSAVE,
	.drop_unencry = WMI_VDEV_PARAM_DROP_UNENCRY,
	.tx_encap_type = WMI_VDEV_PARAM_TX_ENCAP_TYPE,
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
					WMI_VDEV_PARAM_UNSUPPORTED,
};
456 
457 /* 10.X WMI VDEV param map */
/* 10.X WMI VDEV param map
 *
 * Per-vdev parameter translation for 10.X firmware.  Parameters only
 * available on main firmware (txbf, packet powersave, bmiss split
 * counters, etc.) are WMI_VDEV_PARAM_UNSUPPORTED here.
 */
static struct wmi_vdev_param_map wmi_10x_vdev_param_map = {
	.rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME,
	.preamble = WMI_10X_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
				WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_10X_VDEV_PARAM_WDS,
	.atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
	.bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
	.bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
	.feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_10X_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_10X_VDEV_PARAM_SGI,
	.ldpc = WMI_10X_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_10X_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
	.mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
	.enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
	.txbf = WMI_VDEV_PARAM_UNSUPPORTED,
	.packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED,
	.drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED,
	.tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
		WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
};
515 
/* 10.2.4 WMI VDEV param map
 *
 * Per-vdev parameter translation for 10.2.4 firmware.  10.2.4 reuses the
 * 10.X vdev-parameter numbering, so every entry here intentionally uses
 * the WMI_10X_VDEV_PARAM_* constants and the table mirrors
 * wmi_10x_vdev_param_map above; it is kept separate so the maps can
 * diverge per firmware branch.
 */
static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
	.rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME,
	.preamble = WMI_10X_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
				WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_10X_VDEV_PARAM_WDS,
	.atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
	.bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
	.bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
	.feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_10X_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_10X_VDEV_PARAM_SGI,
	.ldpc = WMI_10X_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_10X_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
	.mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
	.enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
	.txbf = WMI_VDEV_PARAM_UNSUPPORTED,
	.packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED,
	.drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED,
	.tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
		WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
};
573 
574 static struct wmi_pdev_param_map wmi_pdev_param_map = {
575 	.tx_chain_mask = WMI_PDEV_PARAM_TX_CHAIN_MASK,
576 	.rx_chain_mask = WMI_PDEV_PARAM_RX_CHAIN_MASK,
577 	.txpower_limit2g = WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
578 	.txpower_limit5g = WMI_PDEV_PARAM_TXPOWER_LIMIT5G,
579 	.txpower_scale = WMI_PDEV_PARAM_TXPOWER_SCALE,
580 	.beacon_gen_mode = WMI_PDEV_PARAM_BEACON_GEN_MODE,
581 	.beacon_tx_mode = WMI_PDEV_PARAM_BEACON_TX_MODE,
582 	.resmgr_offchan_mode = WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
583 	.protection_mode = WMI_PDEV_PARAM_PROTECTION_MODE,
584 	.dynamic_bw = WMI_PDEV_PARAM_DYNAMIC_BW,
585 	.non_agg_sw_retry_th = WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
586 	.agg_sw_retry_th = WMI_PDEV_PARAM_AGG_SW_RETRY_TH,
587 	.sta_kickout_th = WMI_PDEV_PARAM_STA_KICKOUT_TH,
588 	.ac_aggrsize_scaling = WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING,
589 	.ltr_enable = WMI_PDEV_PARAM_LTR_ENABLE,
590 	.ltr_ac_latency_be = WMI_PDEV_PARAM_LTR_AC_LATENCY_BE,
591 	.ltr_ac_latency_bk = WMI_PDEV_PARAM_LTR_AC_LATENCY_BK,
592 	.ltr_ac_latency_vi = WMI_PDEV_PARAM_LTR_AC_LATENCY_VI,
593 	.ltr_ac_latency_vo = WMI_PDEV_PARAM_LTR_AC_LATENCY_VO,
594 	.ltr_ac_latency_timeout = WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
595 	.ltr_sleep_override = WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
596 	.ltr_rx_override = WMI_PDEV_PARAM_LTR_RX_OVERRIDE,
597 	.ltr_tx_activity_timeout = WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
598 	.l1ss_enable = WMI_PDEV_PARAM_L1SS_ENABLE,
599 	.dsleep_enable = WMI_PDEV_PARAM_DSLEEP_ENABLE,
600 	.pcielp_txbuf_flush = WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
601 	.pcielp_txbuf_watermark = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
602 	.pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
603 	.pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
604 	.pdev_stats_update_period = WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
605 	.vdev_stats_update_period = WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
606 	.peer_stats_update_period = WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
607 	.bcnflt_stats_update_period = WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
608 	.pmf_qos = WMI_PDEV_PARAM_PMF_QOS,
609 	.arp_ac_override = WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
610 	.dcs = WMI_PDEV_PARAM_DCS,
611 	.ani_enable = WMI_PDEV_PARAM_ANI_ENABLE,
612 	.ani_poll_period = WMI_PDEV_PARAM_ANI_POLL_PERIOD,
613 	.ani_listen_period = WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
614 	.ani_ofdm_level = WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
615 	.ani_cck_level = WMI_PDEV_PARAM_ANI_CCK_LEVEL,
616 	.dyntxchain = WMI_PDEV_PARAM_DYNTXCHAIN,
617 	.proxy_sta = WMI_PDEV_PARAM_PROXY_STA,
618 	.idle_ps_config = WMI_PDEV_PARAM_IDLE_PS_CONFIG,
619 	.power_gating_sleep = WMI_PDEV_PARAM_POWER_GATING_SLEEP,
620 	.fast_channel_reset = WMI_PDEV_PARAM_UNSUPPORTED,
621 	.burst_dur = WMI_PDEV_PARAM_UNSUPPORTED,
622 	.burst_enable = WMI_PDEV_PARAM_UNSUPPORTED,
623 	.cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
624 };
625 
/* 10.X firmware pdev parameter mapping.
 *
 * Same structure as wmi_pdev_param_map but with the WMI_10X_* param IDs.
 * Params the 10.x branch does not implement (PCIe LP buffer tuning,
 * proxy-sta, idle PS, power gating) are marked WMI_PDEV_PARAM_UNSUPPORTED.
 */
static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
	.tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
	.rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
	.txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
	.txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
	.txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
	.beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
	.beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
	.resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
	.protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE,
	.dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW,
	.non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
	.agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
	.sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
	.ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
	.ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE,
	.ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
	.ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
	.ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
	.ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
	.ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
	.ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
	.ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
	.ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
	.l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE,
	.dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
	.pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED,
	.pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED,
	.pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED,
	.pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED,
	.pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
	.vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
	.peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
	.bcnflt_stats_update_period =
				WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
	.pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
	/* 10.x exposes a combined ARP+DHCP AC override param */
	.arp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
	.dcs = WMI_10X_PDEV_PARAM_DCS,
	.ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
	.ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
	.ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
	.ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
	.ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
	.dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN,
	.proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED,
	.idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED,
	.power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED,
	.fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
	.burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
	.burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
	.cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
};
678 
/* 10.2.4 firmware pdev parameter mapping.
 *
 * Currently entry-for-entry identical to wmi_10x_pdev_param_map; kept as a
 * separate table, presumably so the two firmware branches can diverge
 * independently.
 */
static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = {
	.tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
	.rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
	.txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
	.txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
	.txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
	.beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
	.beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
	.resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
	.protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE,
	.dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW,
	.non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
	.agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
	.sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
	.ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
	.ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE,
	.ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
	.ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
	.ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
	.ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
	.ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
	.ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
	.ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
	.ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
	.l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE,
	.dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
	.pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED,
	.pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED,
	.pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED,
	.pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED,
	.pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
	.vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
	.peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
	.bcnflt_stats_update_period =
				WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
	.pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
	.arp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
	.dcs = WMI_10X_PDEV_PARAM_DCS,
	.ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
	.ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
	.ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
	.ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
	.ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
	.dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN,
	.proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED,
	.idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED,
	.power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED,
	.fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
	.burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
	.burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
	.cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
};
731 
/* firmware 10.2 specific mappings
 *
 * Abstract command handles -> WMI_10_2_* command IDs. Entries set to
 * WMI_CMD_UNSUPPORTED are rejected by ath10k_wmi_cmd_send() before any
 * buffer is queued to the firmware.
 */
static struct wmi_cmd_map wmi_10_2_cmd_map = {
	.init_cmdid = WMI_10_2_INIT_CMDID,
	.start_scan_cmdid = WMI_10_2_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_10_2_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_10_2_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_10_2_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_10_2_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_10_2_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_10_2_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_10_2_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_10_2_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_10_2_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_10_2_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_10_2_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_10_2_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_10_2_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_10_2_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_10_2_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_10_2_PDEV_SEND_BCN_CMDID,
	/* 10.2 uses host-generated beacons; no template support */
	.bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.bcn_filter_rx_cmdid = WMI_10_2_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_10_2_MGMT_TX_CMDID,
	.prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.addba_clear_resp_cmdid = WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_10_2_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_10_2_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_10_2_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_10_2_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_10_2_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_10_2_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_10_2_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_10_2_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_10_2_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_10_2_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_10_2_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold =
				WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_10_2_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_10_2_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_10_2_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
	.ap_ps_peer_param_cmdid = WMI_10_2_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_rate_retry_sched_cmdid = WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
				WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
				WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
				WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
				WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_10_2_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_10_2_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_10_2_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_10_2_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
				WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_10_2_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid =
				WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_10_2_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_10_2_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid =
				WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
	.vdev_spectral_scan_enable_cmdid =
				WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_10_2_REQUEST_STATS_CMDID,
	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
	.gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
	.echo_cmdid = WMI_10_2_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_10_2_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_10_2_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_10_2_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
	.gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
	.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
};
853 
854 void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
855 				const struct wmi_channel_arg *arg)
856 {
857 	u32 flags = 0;
858 
859 	memset(ch, 0, sizeof(*ch));
860 
861 	if (arg->passive)
862 		flags |= WMI_CHAN_FLAG_PASSIVE;
863 	if (arg->allow_ibss)
864 		flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED;
865 	if (arg->allow_ht)
866 		flags |= WMI_CHAN_FLAG_ALLOW_HT;
867 	if (arg->allow_vht)
868 		flags |= WMI_CHAN_FLAG_ALLOW_VHT;
869 	if (arg->ht40plus)
870 		flags |= WMI_CHAN_FLAG_HT40_PLUS;
871 	if (arg->chan_radar)
872 		flags |= WMI_CHAN_FLAG_DFS;
873 
874 	ch->mhz = __cpu_to_le32(arg->freq);
875 	ch->band_center_freq1 = __cpu_to_le32(arg->band_center_freq1);
876 	ch->band_center_freq2 = 0;
877 	ch->min_power = arg->min_power;
878 	ch->max_power = arg->max_power;
879 	ch->reg_power = arg->max_reg_power;
880 	ch->antenna_max = arg->max_antenna_gain;
881 
882 	/* mode & flags share storage */
883 	ch->mode = arg->mode;
884 	ch->flags |= __cpu_to_le32(flags);
885 }
886 
887 int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
888 {
889 	unsigned long time_left;
890 
891 	time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
892 						WMI_SERVICE_READY_TIMEOUT_HZ);
893 	if (!time_left)
894 		return -ETIMEDOUT;
895 	return 0;
896 }
897 
898 int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
899 {
900 	unsigned long time_left;
901 
902 	time_left = wait_for_completion_timeout(&ar->wmi.unified_ready,
903 						WMI_UNIFIED_READY_TIMEOUT_HZ);
904 	if (!time_left)
905 		return -ETIMEDOUT;
906 	return 0;
907 }
908 
909 struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len)
910 {
911 	struct sk_buff *skb;
912 	u32 round_len = roundup(len, 4);
913 
914 	skb = ath10k_htc_alloc_skb(ar, WMI_SKB_HEADROOM + round_len);
915 	if (!skb)
916 		return NULL;
917 
918 	skb_reserve(skb, WMI_SKB_HEADROOM);
919 	if (!IS_ALIGNED((unsigned long)skb->data, 4))
920 		ath10k_warn(ar, "Unaligned WMI skb\n");
921 
922 	skb_put(skb, round_len);
923 	memset(skb->data, 0, round_len);
924 
925 	return skb;
926 }
927 
/* HTC tx-completion callback for the WMI endpoint: WMI command buffers
 * need no per-skb bookkeeping, so simply release the buffer.
 */
static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb(skb);
}
932 
933 int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
934 			       u32 cmd_id)
935 {
936 	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
937 	struct wmi_cmd_hdr *cmd_hdr;
938 	int ret;
939 	u32 cmd = 0;
940 
941 	if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
942 		return -ENOMEM;
943 
944 	cmd |= SM(cmd_id, WMI_CMD_HDR_CMD_ID);
945 
946 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
947 	cmd_hdr->cmd_id = __cpu_to_le32(cmd);
948 
949 	memset(skb_cb, 0, sizeof(*skb_cb));
950 	ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
951 	trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len, ret);
952 
953 	if (ret)
954 		goto err_pull;
955 
956 	return 0;
957 
958 err_pull:
959 	skb_pull(skb, sizeof(struct wmi_cmd_hdr));
960 	return ret;
961 }
962 
/* Push the vif's scheduled beacon to firmware without sleeping.
 *
 * The beacon skb and beacon_state are protected by ar->data_lock. The lock
 * is dropped around the WMI call and re-taken to record the outcome; the
 * SENDING state marks the window in which the lock is not held.
 */
static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;
	struct ath10k_skb_cb *cb;
	struct sk_buff *bcn;
	int ret;

	spin_lock_bh(&ar->data_lock);

	bcn = arvif->beacon;

	/* nothing queued for this vif */
	if (!bcn)
		goto unlock;

	cb = ATH10K_SKB_CB(bcn);

	switch (arvif->beacon_state) {
	case ATH10K_BEACON_SENDING:
	case ATH10K_BEACON_SENT:
		/* already in flight or done - nothing to do */
		break;
	case ATH10K_BEACON_SCHEDULED:
		arvif->beacon_state = ATH10K_BEACON_SENDING;
		spin_unlock_bh(&ar->data_lock);

		/* firmware gets a reference to the DMA-mapped beacon buffer;
		 * the skb itself is not consumed here
		 */
		ret = ath10k_wmi_beacon_send_ref_nowait(arvif->ar,
							arvif->vdev_id,
							bcn->data, bcn->len,
							cb->paddr,
							cb->bcn.dtim_zero,
							cb->bcn.deliver_cab);

		spin_lock_bh(&ar->data_lock);

		/* on failure revert to SCHEDULED so a later credit report
		 * retries the send
		 */
		if (ret == 0)
			arvif->beacon_state = ATH10K_BEACON_SENT;
		else
			arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
	}

unlock:
	spin_unlock_bh(&ar->data_lock);
}
1005 
1006 static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
1007 				       struct ieee80211_vif *vif)
1008 {
1009 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
1010 
1011 	ath10k_wmi_tx_beacon_nowait(arvif);
1012 }
1013 
1014 static void ath10k_wmi_tx_beacons_nowait(struct ath10k *ar)
1015 {
1016 	ieee80211_iterate_active_interfaces_atomic(ar->hw,
1017 						   IEEE80211_IFACE_ITER_NORMAL,
1018 						   ath10k_wmi_tx_beacons_iter,
1019 						   NULL);
1020 }
1021 
1022 static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar)
1023 {
1024 	/* try to send pending beacons first. they take priority */
1025 	ath10k_wmi_tx_beacons_nowait(ar);
1026 
1027 	wake_up(&ar->wmi.tx_credits_wq);
1028 }
1029 
/* Send a WMI command, sleeping while HTC tx credits are exhausted.
 *
 * Takes ownership of @skb: it is freed here on any failure (and by the HTC
 * completion path on success). Returns 0 on success, -EOPNOTSUPP for
 * commands unsupported by the firmware, -ESHUTDOWN if the device crashed,
 * or the last send error if credits never became available within 3s.
 */
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
{
	int ret = -EOPNOTSUPP;

	might_sleep();

	/* per-firmware cmd maps mark unimplemented commands with this id */
	if (cmd_id == WMI_CMD_UNSUPPORTED) {
		ath10k_warn(ar, "wmi command %d is not supported by firmware\n",
			    cmd_id);
		return ret;
	}

	/* The wait condition is a statement expression: each wakeup retries
	 * the send and the wait terminates once the result is anything but
	 * -EAGAIN (i.e. success or a hard error) or after the 3s timeout.
	 */
	wait_event_timeout(ar->wmi.tx_credits_wq, ({
		/* try to send pending beacons first. they take priority */
		ath10k_wmi_tx_beacons_nowait(ar);

		ret = ath10k_wmi_cmd_send_nowait(ar, skb, cmd_id);

		/* don't keep retrying against dead firmware */
		if (ret && test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
			ret = -ESHUTDOWN;

		(ret != -EAGAIN);
	}), 3*HZ);

	if (ret)
		dev_kfree_skb_any(skb);

	return ret;
}
1059 
/* Build a WMI_MGMT_TX command skb wrapping the management frame @msdu.
 *
 * Returns a newly allocated command skb (caller owns it) or an ERR_PTR on
 * non-management input or allocation failure. @msdu itself is only copied,
 * not consumed.
 */
static struct sk_buff *
ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
	struct wmi_mgmt_tx_cmd *cmd;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb;
	int len;
	u32 buf_len = msdu->len;
	u16 fc;

	hdr = (struct ieee80211_hdr *)msdu->data;
	fc = le16_to_cpu(hdr->frame_control);

	if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control)))
		return ERR_PTR(-EINVAL);

	len = sizeof(cmd->hdr) + msdu->len;

	/* For protected robust mgmt frames reserve extra room for the CCMP
	 * MIC - presumably appended when the frame is encrypted in the tx
	 * path (NOTE(review): confirm against firmware behavior).
	 */
	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		len += IEEE80211_CCMP_MIC_LEN;
		buf_len += IEEE80211_CCMP_MIC_LEN;
	}

	/* WMI buffers are 4-byte aligned */
	len = round_up(len, 4);

	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	cmd = (struct wmi_mgmt_tx_cmd *)skb->data;

	cmd->hdr.vdev_id = __cpu_to_le32(ATH10K_SKB_CB(msdu)->vdev_id);
	cmd->hdr.tx_rate = 0;
	cmd->hdr.tx_power = 0;
	cmd->hdr.buf_len = __cpu_to_le32(buf_len);

	ether_addr_copy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr));
	memcpy(cmd->buf, msdu->data, msdu->len);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n",
		   msdu, skb->len, fc & IEEE80211_FCTL_FTYPE,
		   fc & IEEE80211_FCTL_STYPE);
	trace_ath10k_tx_hdr(ar, skb->data, skb->len);
	trace_ath10k_tx_payload(ar, skb->data, skb->len);

	return skb;
}
1110 
/* Firmware confirmed the scan started: advance the host scan state machine
 * from STARTING to RUNNING. Caller must hold ar->data_lock. The switch
 * deliberately enumerates every state without a default so the compiler
 * flags new enum values.
 */
static void ath10k_wmi_event_scan_started(struct ath10k *ar)
{
	lockdep_assert_held(&ar->data_lock);

	switch (ar->scan.state) {
	case ATH10K_SCAN_IDLE:
	case ATH10K_SCAN_RUNNING:
	case ATH10K_SCAN_ABORTING:
		/* not expecting a start confirmation in these states */
		ath10k_warn(ar, "received scan started event in an invalid scan state: %s (%d)\n",
			    ath10k_scan_state_str(ar->scan.state),
			    ar->scan.state);
		break;
	case ATH10K_SCAN_STARTING:
		ar->scan.state = ATH10K_SCAN_RUNNING;

		/* remain-on-channel requests report readiness to mac80211 */
		if (ar->scan.is_roc)
			ieee80211_ready_on_channel(ar->hw);

		complete(&ar->scan.started);
		break;
	}
}
1133 
/* Firmware rejected the scan start: complete the start waiter and tear the
 * scan down. Caller must hold ar->data_lock.
 */
static void ath10k_wmi_event_scan_start_failed(struct ath10k *ar)
{
	lockdep_assert_held(&ar->data_lock);

	switch (ar->scan.state) {
	case ATH10K_SCAN_IDLE:
	case ATH10K_SCAN_RUNNING:
	case ATH10K_SCAN_ABORTING:
		/* a start-failed event only makes sense while STARTING */
		ath10k_warn(ar, "received scan start failed event in an invalid scan state: %s (%d)\n",
			    ath10k_scan_state_str(ar->scan.state),
			    ar->scan.state);
		break;
	case ATH10K_SCAN_STARTING:
		complete(&ar->scan.started);
		__ath10k_scan_finish(ar);
		break;
	}
}
1152 
/* Firmware reported scan completion: finish the host-side scan if one is
 * actually running/aborting. Caller must hold ar->data_lock.
 */
static void ath10k_wmi_event_scan_completed(struct ath10k *ar)
{
	lockdep_assert_held(&ar->data_lock);

	switch (ar->scan.state) {
	case ATH10K_SCAN_IDLE:
	case ATH10K_SCAN_STARTING:
		/* One suspected reason scan can be completed while starting is
		 * if firmware fails to deliver all scan events to the host,
		 * e.g. when transport pipe is full. This has been observed
		 * with spectral scan phyerr events starving wmi transport
		 * pipe. In such case the "scan completed" event should be (and
		 * is) ignored by the host as it may be just firmware's scan
		 * state machine recovering.
		 */
		ath10k_warn(ar, "received scan completed event in an invalid scan state: %s (%d)\n",
			    ath10k_scan_state_str(ar->scan.state),
			    ar->scan.state);
		break;
	case ATH10K_SCAN_RUNNING:
	case ATH10K_SCAN_ABORTING:
		__ath10k_scan_finish(ar);
		break;
	}
}
1178 
/* Firmware returned to the BSS (home) channel mid-scan: clear the cached
 * off-channel pointer. Caller must hold ar->data_lock.
 */
static void ath10k_wmi_event_scan_bss_chan(struct ath10k *ar)
{
	lockdep_assert_held(&ar->data_lock);

	switch (ar->scan.state) {
	case ATH10K_SCAN_IDLE:
	case ATH10K_SCAN_STARTING:
		ath10k_warn(ar, "received scan bss chan event in an invalid scan state: %s (%d)\n",
			    ath10k_scan_state_str(ar->scan.state),
			    ar->scan.state);
		break;
	case ATH10K_SCAN_RUNNING:
	case ATH10K_SCAN_ABORTING:
		/* no longer on a foreign channel */
		ar->scan_channel = NULL;
		break;
	}
}
1196 
/* Firmware tuned to a foreign (off-home) channel at @freq MHz: record it and
 * satisfy any pending remain-on-channel waiter. Caller must hold
 * ar->data_lock.
 */
static void ath10k_wmi_event_scan_foreign_chan(struct ath10k *ar, u32 freq)
{
	lockdep_assert_held(&ar->data_lock);

	switch (ar->scan.state) {
	case ATH10K_SCAN_IDLE:
	case ATH10K_SCAN_STARTING:
		ath10k_warn(ar, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
			    ath10k_scan_state_str(ar->scan.state),
			    ar->scan.state);
		break;
	case ATH10K_SCAN_RUNNING:
	case ATH10K_SCAN_ABORTING:
		ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);

		/* a remain-on-channel request is fulfilled once firmware is
		 * parked on the requested frequency
		 */
		if (ar->scan.is_roc && ar->scan.roc_freq == freq)
			complete(&ar->scan.on_channel);
		break;
	}
}
1217 
1218 static const char *
1219 ath10k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
1220 			       enum wmi_scan_completion_reason reason)
1221 {
1222 	switch (type) {
1223 	case WMI_SCAN_EVENT_STARTED:
1224 		return "started";
1225 	case WMI_SCAN_EVENT_COMPLETED:
1226 		switch (reason) {
1227 		case WMI_SCAN_REASON_COMPLETED:
1228 			return "completed";
1229 		case WMI_SCAN_REASON_CANCELLED:
1230 			return "completed [cancelled]";
1231 		case WMI_SCAN_REASON_PREEMPTED:
1232 			return "completed [preempted]";
1233 		case WMI_SCAN_REASON_TIMEDOUT:
1234 			return "completed [timedout]";
1235 		case WMI_SCAN_REASON_MAX:
1236 			break;
1237 		}
1238 		return "completed [unknown]";
1239 	case WMI_SCAN_EVENT_BSS_CHANNEL:
1240 		return "bss channel";
1241 	case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
1242 		return "foreign channel";
1243 	case WMI_SCAN_EVENT_DEQUEUED:
1244 		return "dequeued";
1245 	case WMI_SCAN_EVENT_PREEMPTED:
1246 		return "preempted";
1247 	case WMI_SCAN_EVENT_START_FAILED:
1248 		return "start failed";
1249 	default:
1250 		return "unknown";
1251 	}
1252 }
1253 
1254 static int ath10k_wmi_op_pull_scan_ev(struct ath10k *ar, struct sk_buff *skb,
1255 				      struct wmi_scan_ev_arg *arg)
1256 {
1257 	struct wmi_scan_event *ev = (void *)skb->data;
1258 
1259 	if (skb->len < sizeof(*ev))
1260 		return -EPROTO;
1261 
1262 	skb_pull(skb, sizeof(*ev));
1263 	arg->event_type = ev->event_type;
1264 	arg->reason = ev->reason;
1265 	arg->channel_freq = ev->channel_freq;
1266 	arg->scan_req_id = ev->scan_req_id;
1267 	arg->scan_id = ev->scan_id;
1268 	arg->vdev_id = ev->vdev_id;
1269 
1270 	return 0;
1271 }
1272 
/* Top-level WMI scan event handler: parse the event via the fw-specific
 * pull op, then dispatch to the per-event scan state handlers under
 * ar->data_lock. Returns 0 or a parse error.
 */
int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_scan_ev_arg arg = {};
	enum wmi_scan_event_type event_type;
	enum wmi_scan_completion_reason reason;
	u32 freq;
	u32 req_id;
	u32 scan_id;
	u32 vdev_id;
	int ret;

	ret = ath10k_wmi_pull_scan(ar, skb, &arg);
	if (ret) {
		ath10k_warn(ar, "failed to parse scan event: %d\n", ret);
		return ret;
	}

	/* arg fields are left little-endian by the pull op */
	event_type = __le32_to_cpu(arg.event_type);
	reason = __le32_to_cpu(arg.reason);
	freq = __le32_to_cpu(arg.channel_freq);
	req_id = __le32_to_cpu(arg.scan_req_id);
	scan_id = __le32_to_cpu(arg.scan_id);
	vdev_id = __le32_to_cpu(arg.vdev_id);

	spin_lock_bh(&ar->data_lock);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
		   ath10k_wmi_event_scan_type_str(event_type, reason),
		   event_type, reason, freq, req_id, scan_id, vdev_id,
		   ath10k_scan_state_str(ar->scan.state), ar->scan.state);

	switch (event_type) {
	case WMI_SCAN_EVENT_STARTED:
		ath10k_wmi_event_scan_started(ar);
		break;
	case WMI_SCAN_EVENT_COMPLETED:
		ath10k_wmi_event_scan_completed(ar);
		break;
	case WMI_SCAN_EVENT_BSS_CHANNEL:
		ath10k_wmi_event_scan_bss_chan(ar);
		break;
	case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
		ath10k_wmi_event_scan_foreign_chan(ar, freq);
		break;
	case WMI_SCAN_EVENT_START_FAILED:
		ath10k_warn(ar, "received scan start failure event\n");
		ath10k_wmi_event_scan_start_failed(ar);
		break;
	case WMI_SCAN_EVENT_DEQUEUED:
	case WMI_SCAN_EVENT_PREEMPTED:
	default:
		/* informational only; no host state change needed */
		break;
	}

	spin_unlock_bh(&ar->data_lock);
	return 0;
}
1331 
1332 static inline enum ieee80211_band phy_mode_to_band(u32 phy_mode)
1333 {
1334 	enum ieee80211_band band;
1335 
1336 	switch (phy_mode) {
1337 	case MODE_11A:
1338 	case MODE_11NA_HT20:
1339 	case MODE_11NA_HT40:
1340 	case MODE_11AC_VHT20:
1341 	case MODE_11AC_VHT40:
1342 	case MODE_11AC_VHT80:
1343 		band = IEEE80211_BAND_5GHZ;
1344 		break;
1345 	case MODE_11G:
1346 	case MODE_11B:
1347 	case MODE_11GONLY:
1348 	case MODE_11NG_HT20:
1349 	case MODE_11NG_HT40:
1350 	case MODE_11AC_VHT20_2G:
1351 	case MODE_11AC_VHT40_2G:
1352 	case MODE_11AC_VHT80_2G:
1353 	default:
1354 		band = IEEE80211_BAND_2GHZ;
1355 	}
1356 
1357 	return band;
1358 }
1359 
/* If keys are configured, HW decrypts all frames with the protected bit
 * set. Mark such (WEP auth) frames as decrypted so mac80211 does not try
 * to decrypt them again.
 */
static void ath10k_wmi_handle_wep_reauth(struct ath10k *ar,
					 struct sk_buff *skb,
					 struct ieee80211_rx_status *status)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	unsigned int hdrlen;
	bool peer_key;
	u8 *addr, keyidx;

	/* only protected authentication frames are of interest */
	if (!ieee80211_is_auth(hdr->frame_control) ||
	    !ieee80211_has_protected(hdr->frame_control))
		return;

	/* frame must be long enough to carry the WEP IV */
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	if (skb->len < (hdrlen + IEEE80211_WEP_IV_LEN))
		return;

	/* key index lives in the top bits of the IV's last byte */
	keyidx = skb->data[hdrlen + (IEEE80211_WEP_IV_LEN - 1)] >> WEP_KEYID_SHIFT;
	addr = ieee80211_get_SA(hdr);

	spin_lock_bh(&ar->data_lock);
	peer_key = ath10k_mac_is_peer_wep_key_set(ar, addr, keyidx);
	spin_unlock_bh(&ar->data_lock);

	if (peer_key) {
		ath10k_dbg(ar, ATH10K_DBG_MAC,
			   "mac wep key present for peer %pM\n", addr);
		status->flag |= RX_FLAG_DECRYPTED;
	}
}
1393 
/* Parse a management rx event header out of @skb into @arg, leaving @skb
 * holding just the 802.11 frame. Handles both the v1 and the extended v2
 * event layout (selected by the EXT_WMI_MGMT_RX firmware feature). Fields
 * are copied as raw little-endian values. Returns 0 or -EPROTO on a short
 * buffer.
 */
static int ath10k_wmi_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb,
					 struct wmi_mgmt_rx_ev_arg *arg)
{
	struct wmi_mgmt_rx_event_v1 *ev_v1;
	struct wmi_mgmt_rx_event_v2 *ev_v2;
	struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
	size_t pull_len;
	u32 msdu_len;

	if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features)) {
		/* v2 embeds the v1 header; only the pull length differs */
		ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
		ev_hdr = &ev_v2->hdr.v1;
		pull_len = sizeof(*ev_v2);
	} else {
		ev_v1 = (struct wmi_mgmt_rx_event_v1 *)skb->data;
		ev_hdr = &ev_v1->hdr;
		pull_len = sizeof(*ev_v1);
	}

	if (skb->len < pull_len)
		return -EPROTO;

	skb_pull(skb, pull_len);
	arg->channel = ev_hdr->channel;
	arg->buf_len = ev_hdr->buf_len;
	arg->status = ev_hdr->status;
	arg->snr = ev_hdr->snr;
	arg->phy_mode = ev_hdr->phy_mode;
	arg->rate = ev_hdr->rate;

	/* the declared frame length must fit in the remaining buffer */
	msdu_len = __le32_to_cpu(arg->buf_len);
	if (skb->len < msdu_len)
		return -EPROTO;

	/* the WMI buffer might've ended up being padded to 4 bytes due to HTC
	 * trailer with credit update. Trim the excess garbage.
	 */
	skb_trim(skb, msdu_len);

	return 0;
}
1435 
/* Handle a WMI management frame rx event: parse the WMI header, fill
 * in an ieee80211_rx_status for the frame and deliver it to mac80211.
 *
 * Takes ownership of @skb: it is either freed here (parse error or
 * dropped frame) or handed off via ieee80211_rx().
 *
 * Returns 0 for both delivered and dropped frames; only a parse
 * failure is reported to the caller.
 */
int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_mgmt_rx_ev_arg arg = {};
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr;
	struct ieee80211_supported_band *sband;
	u32 rx_status;
	u32 channel;
	u32 phy_mode;
	u32 snr;
	u32 rate;
	u32 buf_len;
	u16 fc;
	int ret;

	ret = ath10k_wmi_pull_mgmt_rx(ar, skb, &arg);
	if (ret) {
		ath10k_warn(ar, "failed to parse mgmt rx event: %d\n", ret);
		dev_kfree_skb(skb);
		return ret;
	}

	/* arg fields are in wire (little-endian) order; convert once here */
	channel = __le32_to_cpu(arg.channel);
	buf_len = __le32_to_cpu(arg.buf_len);
	rx_status = __le32_to_cpu(arg.status);
	snr = __le32_to_cpu(arg.snr);
	phy_mode = __le32_to_cpu(arg.phy_mode);
	rate = __le32_to_cpu(arg.rate);

	memset(status, 0, sizeof(*status));

	ath10k_dbg(ar, ATH10K_DBG_MGMT,
		   "event mgmt rx status %08x\n", rx_status);

	/* drop all frames while the channel availability check is running */
	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
		dev_kfree_skb(skb);
		return 0;
	}

	if (rx_status & WMI_RX_STATUS_ERR_DECRYPT) {
		dev_kfree_skb(skb);
		return 0;
	}

	if (rx_status & WMI_RX_STATUS_ERR_KEY_CACHE_MISS) {
		dev_kfree_skb(skb);
		return 0;
	}

	if (rx_status & WMI_RX_STATUS_ERR_CRC) {
		dev_kfree_skb(skb);
		return 0;
	}

	/* MIC failures are flagged rather than dropped so mac80211 sees them */
	if (rx_status & WMI_RX_STATUS_ERR_MIC)
		status->flag |= RX_FLAG_MMIC_ERROR;

	/* Hardware can Rx CCK rates on 5GHz. In that case phy_mode is set to
	 * MODE_11B. This means phy_mode is not a reliable source for the band
	 * of mgmt rx.
	 */
	if (channel >= 1 && channel <= 14) {
		status->band = IEEE80211_BAND_2GHZ;
	} else if (channel >= 36 && channel <= 165) {
		status->band = IEEE80211_BAND_5GHZ;
	} else {
		/* Shouldn't happen unless list of advertised channels to
		 * mac80211 has been changed.
		 */
		WARN_ON_ONCE(1);
		dev_kfree_skb(skb);
		return 0;
	}

	if (phy_mode == MODE_11B && status->band == IEEE80211_BAND_5GHZ)
		ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");

	sband = &ar->mac.sbands[status->band];

	status->freq = ieee80211_channel_to_frequency(channel, status->band);
	/* snr is relative; add the default noise floor to get dBm */
	status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
	/* rate appears to be in kbps while mac80211 bitrates are in 100kbps
	 * units, hence the /100 -- TODO confirm against firmware interface */
	status->rate_idx = ath10k_mac_bitrate_to_idx(sband, rate / 100);

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = le16_to_cpu(hdr->frame_control);

	ath10k_wmi_handle_wep_reauth(ar, skb, status);

	/* FW delivers WEP Shared Auth frame with Protected Bit set and
	 * encrypted payload. However in case of PMF it delivers decrypted
	 * frames with Protected Bit set. */
	if (ieee80211_has_protected(hdr->frame_control) &&
	    !ieee80211_is_auth(hdr->frame_control)) {
		status->flag |= RX_FLAG_DECRYPTED;

		if (!ieee80211_is_action(hdr->frame_control) &&
		    !ieee80211_is_deauth(hdr->frame_control) &&
		    !ieee80211_is_disassoc(hdr->frame_control)) {
			status->flag |= RX_FLAG_IV_STRIPPED |
					RX_FLAG_MMIC_STRIPPED;
			hdr->frame_control = __cpu_to_le16(fc &
					~IEEE80211_FCTL_PROTECTED);
		}
	}

	if (ieee80211_is_beacon(hdr->frame_control))
		ath10k_mac_handle_beacon(ar, skb);

	ath10k_dbg(ar, ATH10K_DBG_MGMT,
		   "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
		   skb, skb->len,
		   fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);

	ath10k_dbg(ar, ATH10K_DBG_MGMT,
		   "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
		   status->freq, status->band, status->signal,
		   status->rate_idx);

	ieee80211_rx(ar->hw, skb);
	return 0;
}
1557 
1558 static int freq_to_idx(struct ath10k *ar, int freq)
1559 {
1560 	struct ieee80211_supported_band *sband;
1561 	int band, ch, idx = 0;
1562 
1563 	for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
1564 		sband = ar->hw->wiphy->bands[band];
1565 		if (!sband)
1566 			continue;
1567 
1568 		for (ch = 0; ch < sband->n_channels; ch++, idx++)
1569 			if (sband->channels[ch].center_freq == freq)
1570 				goto exit;
1571 	}
1572 
1573 exit:
1574 	return idx;
1575 }
1576 
1577 static int ath10k_wmi_op_pull_ch_info_ev(struct ath10k *ar, struct sk_buff *skb,
1578 					 struct wmi_ch_info_ev_arg *arg)
1579 {
1580 	struct wmi_chan_info_event *ev = (void *)skb->data;
1581 
1582 	if (skb->len < sizeof(*ev))
1583 		return -EPROTO;
1584 
1585 	skb_pull(skb, sizeof(*ev));
1586 	arg->err_code = ev->err_code;
1587 	arg->freq = ev->freq;
1588 	arg->cmd_flags = ev->cmd_flags;
1589 	arg->noise_floor = ev->noise_floor;
1590 	arg->rx_clear_count = ev->rx_clear_count;
1591 	arg->cycle_count = ev->cycle_count;
1592 
1593 	return 0;
1594 }
1595 
/* Handle a WMI channel info event.  These arrive during scans and feed
 * the per-channel survey data (noise floor, channel busy/cycle
 * counters) exposed to userspace.
 *
 * A survey entry is only filled in on a "complete" event that was
 * preceded by a non-complete one for the same dwell
 * (ch_info_can_report_survey), so lone complete events are ignored.
 */
void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_ch_info_ev_arg arg = {};
	struct survey_info *survey;
	u32 err_code, freq, cmd_flags, noise_floor, rx_clear_count, cycle_count;
	int idx, ret;

	ret = ath10k_wmi_pull_ch_info(ar, skb, &arg);
	if (ret) {
		ath10k_warn(ar, "failed to parse chan info event: %d\n", ret);
		return;
	}

	err_code = __le32_to_cpu(arg.err_code);
	freq = __le32_to_cpu(arg.freq);
	cmd_flags = __le32_to_cpu(arg.cmd_flags);
	noise_floor = __le32_to_cpu(arg.noise_floor);
	rx_clear_count = __le32_to_cpu(arg.rx_clear_count);
	cycle_count = __le32_to_cpu(arg.cycle_count);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n",
		   err_code, freq, cmd_flags, noise_floor, rx_clear_count,
		   cycle_count);

	/* scan state and survey data are both protected by data_lock */
	spin_lock_bh(&ar->data_lock);

	switch (ar->scan.state) {
	case ATH10K_SCAN_IDLE:
	case ATH10K_SCAN_STARTING:
		ath10k_warn(ar, "received chan info event without a scan request, ignoring\n");
		goto exit;
	case ATH10K_SCAN_RUNNING:
	case ATH10K_SCAN_ABORTING:
		break;
	}

	idx = freq_to_idx(ar, freq);
	if (idx >= ARRAY_SIZE(ar->survey)) {
		ath10k_warn(ar, "chan info: invalid frequency %d (idx %d out of bounds)\n",
			    freq, idx);
		goto exit;
	}

	if (cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) {
		if (ar->ch_info_can_report_survey) {
			survey = &ar->survey[idx];
			survey->noise = noise_floor;
			survey->filled = SURVEY_INFO_NOISE_DBM;

			/* convert the raw counter deltas into survey time */
			ath10k_hw_fill_survey_time(ar,
						   survey,
						   cycle_count,
						   rx_clear_count,
						   ar->survey_last_cycle_count,
						   ar->survey_last_rx_clear_count);
		}

		ar->ch_info_can_report_survey = false;
	} else {
		/* a non-complete event arms the next complete one */
		ar->ch_info_can_report_survey = true;
	}

	/* remember the counters so the next event can compute deltas */
	ar->survey_last_rx_clear_count = rx_clear_count;
	ar->survey_last_cycle_count = cycle_count;

exit:
	spin_unlock_bh(&ar->data_lock);
}
1665 
/* WMI echo event handler - the payload is ignored, just log arrival. */
void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n");
}
1670 
/* Handle a firmware debug message event by forwarding the raw payload
 * to the ath10k dbglog tracepoint.  Always returns 0.
 */
int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event debug mesg len %d\n",
		   skb->len);

	trace_ath10k_wmi_dbglog(ar, skb->data, skb->len);

	return 0;
}
1680 
/* Copy the firmware's little-endian base pdev stats into the
 * host-endian ath10k_fw_stats_pdev representation.
 */
void ath10k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src,
				     struct ath10k_fw_stats_pdev *dst)
{
	dst->ch_noise_floor = __le32_to_cpu(src->chan_nf);
	dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count);
	dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count);
	dst->rx_clear_count = __le32_to_cpu(src->rx_clear_count);
	dst->cycle_count = __le32_to_cpu(src->cycle_count);
	dst->phy_err_count = __le32_to_cpu(src->phy_err_count);
	dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr);
}
1692 
/* Copy the firmware's little-endian pdev TX path stats into the
 * host-endian ath10k_fw_stats_pdev representation.
 */
void ath10k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
				   struct ath10k_fw_stats_pdev *dst)
{
	dst->comp_queued = __le32_to_cpu(src->comp_queued);
	dst->comp_delivered = __le32_to_cpu(src->comp_delivered);
	dst->msdu_enqued = __le32_to_cpu(src->msdu_enqued);
	dst->mpdu_enqued = __le32_to_cpu(src->mpdu_enqued);
	dst->wmm_drop = __le32_to_cpu(src->wmm_drop);
	dst->local_enqued = __le32_to_cpu(src->local_enqued);
	dst->local_freed = __le32_to_cpu(src->local_freed);
	dst->hw_queued = __le32_to_cpu(src->hw_queued);
	dst->hw_reaped = __le32_to_cpu(src->hw_reaped);
	dst->underrun = __le32_to_cpu(src->underrun);
	dst->tx_abort = __le32_to_cpu(src->tx_abort);
	dst->mpdus_requed = __le32_to_cpu(src->mpdus_requed);
	dst->tx_ko = __le32_to_cpu(src->tx_ko);
	dst->data_rc = __le32_to_cpu(src->data_rc);
	dst->self_triggers = __le32_to_cpu(src->self_triggers);
	dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure);
	dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err);
	dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry);
	dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout);
	dst->pdev_resets = __le32_to_cpu(src->pdev_resets);
	dst->phy_underrun = __le32_to_cpu(src->phy_underrun);
	dst->txop_ovf = __le32_to_cpu(src->txop_ovf);
}
1719 
/* Copy the firmware's little-endian pdev RX path stats into the
 * host-endian ath10k_fw_stats_pdev representation.
 */
void ath10k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
				   struct ath10k_fw_stats_pdev *dst)
{
	dst->mid_ppdu_route_change = __le32_to_cpu(src->mid_ppdu_route_change);
	dst->status_rcvd = __le32_to_cpu(src->status_rcvd);
	dst->r0_frags = __le32_to_cpu(src->r0_frags);
	dst->r1_frags = __le32_to_cpu(src->r1_frags);
	dst->r2_frags = __le32_to_cpu(src->r2_frags);
	dst->r3_frags = __le32_to_cpu(src->r3_frags);
	dst->htt_msdus = __le32_to_cpu(src->htt_msdus);
	dst->htt_mpdus = __le32_to_cpu(src->htt_mpdus);
	dst->loc_msdus = __le32_to_cpu(src->loc_msdus);
	dst->loc_mpdus = __le32_to_cpu(src->loc_mpdus);
	dst->oversize_amsdu = __le32_to_cpu(src->oversize_amsdu);
	dst->phy_errs = __le32_to_cpu(src->phy_errs);
	dst->phy_err_drop = __le32_to_cpu(src->phy_err_drop);
	dst->mpdu_errs = __le32_to_cpu(src->mpdu_errs);
}
1738 
/* Copy the extra pdev stats (present in 10.x firmware layouts) into
 * the host-endian ath10k_fw_stats_pdev representation.
 */
void ath10k_wmi_pull_pdev_stats_extra(const struct wmi_pdev_stats_extra *src,
				      struct ath10k_fw_stats_pdev *dst)
{
	dst->ack_rx_bad = __le32_to_cpu(src->ack_rx_bad);
	dst->rts_bad = __le32_to_cpu(src->rts_bad);
	dst->rts_good = __le32_to_cpu(src->rts_good);
	dst->fcs_bad = __le32_to_cpu(src->fcs_bad);
	dst->no_beacons = __le32_to_cpu(src->no_beacons);
	dst->mib_int_count = __le32_to_cpu(src->mib_int_count);
}
1749 
/* Copy the common (v1) per-peer stats fields into the host-endian
 * ath10k_fw_stats_peer representation.
 */
void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src,
				struct ath10k_fw_stats_peer *dst)
{
	ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
	dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
	dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
}
1757 
/* Parse a WMI stats event (main firmware layout) into @stats.
 *
 * Each pdev/peer record is copied into a freshly allocated host-endian
 * struct (GFP_ATOMIC) and appended to the matching list in @stats; the
 * caller owns and frees the list entries.
 *
 * skb_pull() returns NULL when the buffer is shorter than the record
 * being consumed, which aborts parsing with -EPROTO.  An allocation
 * failure only skips that record: the pull already advanced the
 * buffer, so subsequent records stay aligned.
 */
static int ath10k_wmi_main_op_pull_fw_stats(struct ath10k *ar,
					    struct sk_buff *skb,
					    struct ath10k_fw_stats *stats)
{
	const struct wmi_stats_event *ev = (void *)skb->data;
	u32 num_pdev_stats, num_vdev_stats, num_peer_stats;
	int i;

	if (!skb_pull(skb, sizeof(*ev)))
		return -EPROTO;

	/* num_vdev_stats is parsed but unused; see comment below */
	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
	num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);

	for (i = 0; i < num_pdev_stats; i++) {
		const struct wmi_pdev_stats *src;
		struct ath10k_fw_stats_pdev *dst;

		src = (void *)skb->data;
		if (!skb_pull(skb, sizeof(*src)))
			return -EPROTO;

		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
		if (!dst)
			continue;

		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
		ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);

		list_add_tail(&dst->list, &stats->pdevs);
	}

	/* fw doesn't implement vdev stats */

	for (i = 0; i < num_peer_stats; i++) {
		const struct wmi_peer_stats *src;
		struct ath10k_fw_stats_peer *dst;

		src = (void *)skb->data;
		if (!skb_pull(skb, sizeof(*src)))
			return -EPROTO;

		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
		if (!dst)
			continue;

		ath10k_wmi_pull_peer_stats(src, dst);
		list_add_tail(&dst->list, &stats->peers);
	}

	return 0;
}
1812 
/* Parse a WMI stats event (10.x firmware layout) into @stats.
 *
 * Same contract as ath10k_wmi_main_op_pull_fw_stats() but uses the
 * larger 10.x record layouts, which additionally carry the "extra"
 * pdev stats and a per-peer rx rate.
 */
static int ath10k_wmi_10x_op_pull_fw_stats(struct ath10k *ar,
					   struct sk_buff *skb,
					   struct ath10k_fw_stats *stats)
{
	const struct wmi_stats_event *ev = (void *)skb->data;
	u32 num_pdev_stats, num_vdev_stats, num_peer_stats;
	int i;

	if (!skb_pull(skb, sizeof(*ev)))
		return -EPROTO;

	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
	num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);

	for (i = 0; i < num_pdev_stats; i++) {
		const struct wmi_10x_pdev_stats *src;
		struct ath10k_fw_stats_pdev *dst;

		src = (void *)skb->data;
		if (!skb_pull(skb, sizeof(*src)))
			return -EPROTO;

		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
		if (!dst)
			continue;

		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
		ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
		ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);

		list_add_tail(&dst->list, &stats->pdevs);
	}

	/* fw doesn't implement vdev stats */

	for (i = 0; i < num_peer_stats; i++) {
		const struct wmi_10x_peer_stats *src;
		struct ath10k_fw_stats_peer *dst;

		src = (void *)skb->data;
		if (!skb_pull(skb, sizeof(*src)))
			return -EPROTO;

		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
		if (!dst)
			continue;

		/* the 10.x peer record embeds the common (v1) layout */
		ath10k_wmi_pull_peer_stats(&src->old, dst);

		dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);

		list_add_tail(&dst->list, &stats->peers);
	}

	return 0;
}
1871 
/* Parse a WMI stats event (10.2 firmware layout) into @stats.
 *
 * Same contract as ath10k_wmi_main_op_pull_fw_stats().  The 10.2
 * event adds pdev "ext" records which are currently skipped but must
 * still be consumed to keep the buffer offset correct.
 */
static int ath10k_wmi_10_2_op_pull_fw_stats(struct ath10k *ar,
					    struct sk_buff *skb,
					    struct ath10k_fw_stats *stats)
{
	const struct wmi_10_2_stats_event *ev = (void *)skb->data;
	u32 num_pdev_stats;
	u32 num_pdev_ext_stats;
	u32 num_vdev_stats;
	u32 num_peer_stats;
	int i;

	if (!skb_pull(skb, sizeof(*ev)))
		return -EPROTO;

	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
	num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
	num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);

	for (i = 0; i < num_pdev_stats; i++) {
		const struct wmi_10_2_pdev_stats *src;
		struct ath10k_fw_stats_pdev *dst;

		src = (void *)skb->data;
		if (!skb_pull(skb, sizeof(*src)))
			return -EPROTO;

		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
		if (!dst)
			continue;

		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
		ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
		ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
		/* FIXME: expose 10.2 specific values */

		list_add_tail(&dst->list, &stats->pdevs);
	}

	for (i = 0; i < num_pdev_ext_stats; i++) {
		const struct wmi_10_2_pdev_ext_stats *src;

		src = (void *)skb->data;
		if (!skb_pull(skb, sizeof(*src)))
			return -EPROTO;

		/* FIXME: expose values to userspace
		 *
		 * Note: Even though this loop seems to do nothing it is
		 * required to parse following sub-structures properly.
		 */
	}

	/* fw doesn't implement vdev stats */

	for (i = 0; i < num_peer_stats; i++) {
		const struct wmi_10_2_peer_stats *src;
		struct ath10k_fw_stats_peer *dst;

		src = (void *)skb->data;
		if (!skb_pull(skb, sizeof(*src)))
			return -EPROTO;

		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
		if (!dst)
			continue;

		ath10k_wmi_pull_peer_stats(&src->old, dst);

		dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
		/* FIXME: expose 10.2 specific values */

		list_add_tail(&dst->list, &stats->peers);
	}

	return 0;
}
1950 
/* Parse a WMI stats event (10.2.4 firmware layout) into @stats.
 *
 * Identical to the 10.2 parser except that the per-peer record is the
 * 10.2.4 variant, whose common fields are nested one level deeper
 * (src->common).
 */
static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar,
					      struct sk_buff *skb,
					      struct ath10k_fw_stats *stats)
{
	const struct wmi_10_2_stats_event *ev = (void *)skb->data;
	u32 num_pdev_stats;
	u32 num_pdev_ext_stats;
	u32 num_vdev_stats;
	u32 num_peer_stats;
	int i;

	if (!skb_pull(skb, sizeof(*ev)))
		return -EPROTO;

	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
	num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
	num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);

	for (i = 0; i < num_pdev_stats; i++) {
		const struct wmi_10_2_pdev_stats *src;
		struct ath10k_fw_stats_pdev *dst;

		src = (void *)skb->data;
		if (!skb_pull(skb, sizeof(*src)))
			return -EPROTO;

		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
		if (!dst)
			continue;

		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
		ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
		ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
		/* FIXME: expose 10.2 specific values */

		list_add_tail(&dst->list, &stats->pdevs);
	}

	for (i = 0; i < num_pdev_ext_stats; i++) {
		const struct wmi_10_2_pdev_ext_stats *src;

		src = (void *)skb->data;
		if (!skb_pull(skb, sizeof(*src)))
			return -EPROTO;

		/* FIXME: expose values to userspace
		 *
		 * Note: Even though this loop seems to do nothing it is
		 * required to parse following sub-structures properly.
		 */
	}

	/* fw doesn't implement vdev stats */

	for (i = 0; i < num_peer_stats; i++) {
		const struct wmi_10_2_4_peer_stats *src;
		struct ath10k_fw_stats_peer *dst;

		src = (void *)skb->data;
		if (!skb_pull(skb, sizeof(*src)))
			return -EPROTO;

		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
		if (!dst)
			continue;

		ath10k_wmi_pull_peer_stats(&src->common.old, dst);

		dst->peer_rx_rate = __le32_to_cpu(src->common.peer_rx_rate);
		/* FIXME: expose 10.2 specific values */

		list_add_tail(&dst->list, &stats->peers);
	}

	return 0;
}
2029 
/* WMI stats update event handler - hand the raw event to the debugfs
 * fw-stats machinery, which does the actual parsing.
 */
void ath10k_wmi_event_update_stats(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n");
	ath10k_debug_fw_stats_process(ar, skb);
}
2035 
2036 static int
2037 ath10k_wmi_op_pull_vdev_start_ev(struct ath10k *ar, struct sk_buff *skb,
2038 				 struct wmi_vdev_start_ev_arg *arg)
2039 {
2040 	struct wmi_vdev_start_response_event *ev = (void *)skb->data;
2041 
2042 	if (skb->len < sizeof(*ev))
2043 		return -EPROTO;
2044 
2045 	skb_pull(skb, sizeof(*ev));
2046 	arg->vdev_id = ev->vdev_id;
2047 	arg->req_id = ev->req_id;
2048 	arg->resp_type = ev->resp_type;
2049 	arg->status = ev->status;
2050 
2051 	return 0;
2052 }
2053 
/* Handle a vdev start response.  Signals vdev_setup_done so the
 * thread that issued the vdev start command can proceed.  A non-zero
 * status is unexpected: it WARNs and deliberately leaves the
 * completion unsignalled.
 */
void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_vdev_start_ev_arg arg = {};
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n");

	ret = ath10k_wmi_pull_vdev_start(ar, skb, &arg);
	if (ret) {
		ath10k_warn(ar, "failed to parse vdev start event: %d\n", ret);
		return;
	}

	if (WARN_ON(__le32_to_cpu(arg.status)))
		return;

	complete(&ar->vdev_setup_done);
}
2072 
/* Handle a vdev stopped event by signalling vdev_setup_done, waking
 * the thread waiting for the vdev stop to finish.
 */
void ath10k_wmi_event_vdev_stopped(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n");
	complete(&ar->vdev_setup_done);
}
2078 
2079 static int
2080 ath10k_wmi_op_pull_peer_kick_ev(struct ath10k *ar, struct sk_buff *skb,
2081 				struct wmi_peer_kick_ev_arg *arg)
2082 {
2083 	struct wmi_peer_sta_kickout_event *ev = (void *)skb->data;
2084 
2085 	if (skb->len < sizeof(*ev))
2086 		return -EPROTO;
2087 
2088 	skb_pull(skb, sizeof(*ev));
2089 	arg->mac_addr = ev->peer_macaddr.addr;
2090 
2091 	return 0;
2092 }
2093 
/* Handle a peer STA kickout event: firmware has given up on a peer.
 * Look the station up under RCU and report low ack to mac80211.
 */
void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_peer_kick_ev_arg arg = {};
	struct ieee80211_sta *sta;
	int ret;

	ret = ath10k_wmi_pull_peer_kick(ar, skb, &arg);
	if (ret) {
		ath10k_warn(ar, "failed to parse peer kickout event: %d\n",
			    ret);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event peer sta kickout %pM\n",
		   arg.mac_addr);

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(ar->hw, arg.mac_addr, NULL);
	if (!sta) {
		ath10k_warn(ar, "Spurious quick kickout for STA %pM\n",
			    arg.mac_addr);
		goto exit;
	}

	/* nominal lost-frame count; the event carries no real number */
	ieee80211_report_low_ack(sta, 10);

exit:
	rcu_read_unlock();
}
2124 
2125 /*
2126  * FIXME
2127  *
2128  * We don't report to mac80211 sleep state of connected
2129  * stations. Due to this mac80211 can't fill in TIM IE
2130  * correctly.
2131  *
2132  * I know of no way of getting nullfunc frames that contain
2133  * sleep transition from connected stations - these do not
2134  * seem to be sent from the target to the host. There also
2135  * doesn't seem to be a dedicated event for that. So the
2136  * only way left to do this would be to read tim_bitmap
2137  * during SWBA.
2138  *
2139  * We could probably try using tim_bitmap from SWBA to tell
2140  * mac80211 which stations are asleep and which are not. The
2141  * problem here is calling mac80211 functions so many times
2142  * could take too long and make us miss the time to submit
2143  * the beacon to the target.
2144  *
2145  * So as a workaround we try to extend the TIM IE if there
2146  * is unicast buffered for stations with aid > 7 and fill it
2147  * in ourselves.
2148  */
/* Patch the TIM IE of a host-generated beacon using the tim_bitmap
 * from the firmware's SWBA event, so stations with buffered unicast
 * frames (including aid > 7, see FIXME above) appear in the partial
 * virtual bitmap.  Expands the IE in place when the cached bitmap no
 * longer fits.
 */
static void ath10k_wmi_update_tim(struct ath10k *ar,
				  struct ath10k_vif *arvif,
				  struct sk_buff *bcn,
				  const struct wmi_tim_info *tim_info)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data;
	struct ieee80211_tim_ie *tim;
	u8 *ies, *ie;
	u8 ie_len, pvm_len;
	__le32 t;
	u32 v;

	/* if next SWBA has no tim_changed the tim_bitmap is garbage.
	 * we must copy the bitmap upon change and reuse it later */
	if (__le32_to_cpu(tim_info->tim_changed)) {
		int i;

		BUILD_BUG_ON(sizeof(arvif->u.ap.tim_bitmap) !=
			     sizeof(tim_info->tim_bitmap));

		/* unpack the __le32 bitmap words into a byte array */
		for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++) {
			t = tim_info->tim_bitmap[i / 4];
			v = __le32_to_cpu(t);
			arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF;
		}

		/* FW reports either length 0 or 16
		 * so we calculate this on our own */
		arvif->u.ap.tim_len = 0;
		for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++)
			if (arvif->u.ap.tim_bitmap[i])
				arvif->u.ap.tim_len = i;

		arvif->u.ap.tim_len++;
	}

	/* locate the IEs: skip 802.11 header plus the fixed beacon fields
	 * (timestamp 8 + beacon interval 2 + capability 2 = 12 bytes) */
	ies = bcn->data;
	ies += ieee80211_hdrlen(hdr->frame_control);
	ies += 12; /* fixed parameters */

	ie = (u8 *)cfg80211_find_ie(WLAN_EID_TIM, ies,
				    (u8 *)skb_tail_pointer(bcn) - ies);
	if (!ie) {
		/* IBSS beacons legitimately have no TIM IE */
		if (arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
			ath10k_warn(ar, "no tim ie found;\n");
		return;
	}

	tim = (void *)ie + 2;
	ie_len = ie[1];
	pvm_len = ie_len - 3; /* exclude dtim count, dtim period, bmap ctl */

	/* grow the IE if the firmware bitmap doesn't fit the current PVM */
	if (pvm_len < arvif->u.ap.tim_len) {
		int expand_size = sizeof(arvif->u.ap.tim_bitmap) - pvm_len;
		int move_size = skb_tail_pointer(bcn) - (ie + 2 + ie_len);
		void *next_ie = ie + 2 + ie_len;

		/* NOTE(review): skb_put() panics rather than returning NULL
		 * on tailroom overrun, so this presumably relies on the
		 * beacon skb having enough tailroom - confirm */
		if (skb_put(bcn, expand_size)) {
			memmove(next_ie + expand_size, next_ie, move_size);

			ie[1] += expand_size;
			ie_len += expand_size;
			pvm_len += expand_size;
		} else {
			ath10k_warn(ar, "tim expansion failed\n");
		}
	}

	if (pvm_len > sizeof(arvif->u.ap.tim_bitmap)) {
		ath10k_warn(ar, "tim pvm length is too great (%d)\n", pvm_len);
		return;
	}

	tim->bitmap_ctrl = !!__le32_to_cpu(tim_info->tim_mcast);
	memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len);

	if (tim->dtim_count == 0) {
		ATH10K_SKB_CB(bcn)->bcn.dtim_zero = true;

		if (__le32_to_cpu(tim_info->tim_mcast) == 1)
			ATH10K_SKB_CB(bcn)->bcn.deliver_cab = true;
	}

	ath10k_dbg(ar, ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n",
		   tim->dtim_count, tim->dtim_period,
		   tim->bitmap_ctrl, pvm_len);
}
2236 
2237 static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif,
2238 				  struct sk_buff *bcn,
2239 				  const struct wmi_p2p_noa_info *noa)
2240 {
2241 	if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
2242 		return;
2243 
2244 	ath10k_dbg(ar, ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed);
2245 
2246 	if (noa->changed & WMI_P2P_NOA_CHANGED_BIT)
2247 		ath10k_p2p_noa_update(arvif, noa);
2248 
2249 	if (arvif->u.ap.noa_data)
2250 		if (!pskb_expand_head(bcn, 0, arvif->u.ap.noa_len, GFP_ATOMIC))
2251 			memcpy(skb_put(bcn, arvif->u.ap.noa_len),
2252 			       arvif->u.ap.noa_data,
2253 			       arvif->u.ap.noa_len);
2254 
2255 	return;
2256 }
2257 
/* Parse a host SWBA (software beacon alert) event.  Walks the vdev
 * bitmap and records a tim_info/noa_info pointer pair (into the skb
 * data) for each set bit.  Warns once and stops early if firmware
 * reports more vdevs than the arg arrays can hold.
 *
 * Returns -EPROTO if the skb is shorter than the event structure.
 */
static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb,
				      struct wmi_swba_ev_arg *arg)
{
	struct wmi_host_swba_event *ev = (void *)skb->data;
	u32 map;
	size_t i;

	if (skb->len < sizeof(*ev))
		return -EPROTO;

	skb_pull(skb, sizeof(*ev));
	arg->vdev_map = ev->vdev_map;

	/* i counts set bits (bcn_info slots); map is shifted per vdev */
	for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
		if (!(map & BIT(0)))
			continue;

		/* If this happens there were some changes in firmware and
		 * ath10k should update the max size of tim_info array.
		 */
		if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
			break;

		arg->tim_info[i] = &ev->bcn_info[i].tim_info;
		arg->noa_info[i] = &ev->bcn_info[i].p2p_noa_info;
		i++;
	}

	return 0;
}
2288 
/* Handle a host SWBA event: for every vdev flagged in the event's
 * vdev map, fetch a fresh beacon from mac80211, patch in the TIM and
 * P2P NoA data and schedule it for transmission.  The beacon is
 * either DMA-mapped per send or copied into the vdev's preallocated
 * beacon_buf.
 */
void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_swba_ev_arg arg = {};
	u32 map;
	int i = -1;
	const struct wmi_tim_info *tim_info;
	const struct wmi_p2p_noa_info *noa_info;
	struct ath10k_vif *arvif;
	struct sk_buff *bcn;
	dma_addr_t paddr;
	int ret, vdev_id = 0;

	ret = ath10k_wmi_pull_swba(ar, skb, &arg);
	if (ret) {
		ath10k_warn(ar, "failed to parse swba event: %d\n", ret);
		return;
	}

	map = __le32_to_cpu(arg.vdev_map);

	ath10k_dbg(ar, ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n",
		   map);

	/* vdev_id tracks the bit position; i counts set bits and indexes
	 * the tim_info/noa_info arrays filled by the pull op */
	for (; map; map >>= 1, vdev_id++) {
		if (!(map & 0x1))
			continue;

		i++;

		if (i >= WMI_MAX_AP_VDEV) {
			ath10k_warn(ar, "swba has corrupted vdev map\n");
			break;
		}

		tim_info = arg.tim_info[i];
		noa_info = arg.noa_info[i];

		ath10k_dbg(ar, ATH10K_DBG_MGMT,
			   "mgmt event bcn_info %d tim_len %d mcast %d changed %d num_ps_pending %d bitmap 0x%08x%08x%08x%08x\n",
			   i,
			   __le32_to_cpu(tim_info->tim_len),
			   __le32_to_cpu(tim_info->tim_mcast),
			   __le32_to_cpu(tim_info->tim_changed),
			   __le32_to_cpu(tim_info->tim_num_ps_pending),
			   __le32_to_cpu(tim_info->tim_bitmap[3]),
			   __le32_to_cpu(tim_info->tim_bitmap[2]),
			   __le32_to_cpu(tim_info->tim_bitmap[1]),
			   __le32_to_cpu(tim_info->tim_bitmap[0]));

		arvif = ath10k_get_arvif(ar, vdev_id);
		if (arvif == NULL) {
			ath10k_warn(ar, "no vif for vdev_id %d found\n",
				    vdev_id);
			continue;
		}

		/* There are no completions for beacons so wait for next SWBA
		 * before telling mac80211 to decrement CSA counter
		 *
		 * Once CSA counter is completed stop sending beacons until
		 * actual channel switch is done */
		if (arvif->vif->csa_active &&
		    ieee80211_csa_is_complete(arvif->vif)) {
			ieee80211_csa_finish(arvif->vif);
			continue;
		}

		bcn = ieee80211_beacon_get(ar->hw, arvif->vif);
		if (!bcn) {
			ath10k_warn(ar, "could not get mac80211 beacon\n");
			continue;
		}

		ath10k_tx_h_seq_no(arvif->vif, bcn);
		ath10k_wmi_update_tim(ar, arvif, bcn, tim_info);
		ath10k_wmi_update_noa(ar, arvif, bcn, noa_info);

		/* arvif->beacon and beacon_state are protected by data_lock */
		spin_lock_bh(&ar->data_lock);

		if (arvif->beacon) {
			switch (arvif->beacon_state) {
			case ATH10K_BEACON_SENT:
				break;
			case ATH10K_BEACON_SCHEDULED:
				ath10k_warn(ar, "SWBA overrun on vdev %d, skipped old beacon\n",
					    arvif->vdev_id);
				break;
			case ATH10K_BEACON_SENDING:
				ath10k_warn(ar, "SWBA overrun on vdev %d, skipped new beacon\n",
					    arvif->vdev_id);
				dev_kfree_skb(bcn);
				goto skip;
			}

			ath10k_mac_vif_beacon_free(arvif);
		}

		if (!arvif->beacon_buf) {
			/* per-beacon DMA mapping path */
			paddr = dma_map_single(arvif->ar->dev, bcn->data,
					       bcn->len, DMA_TO_DEVICE);
			ret = dma_mapping_error(arvif->ar->dev, paddr);
			if (ret) {
				ath10k_warn(ar, "failed to map beacon: %d\n",
					    ret);
				dev_kfree_skb_any(bcn);
				goto skip;
			}

			ATH10K_SKB_CB(bcn)->paddr = paddr;
		} else {
			/* preallocated buffer path; oversized beacons are
			 * truncated with a warning */
			if (bcn->len > IEEE80211_MAX_FRAME_LEN) {
				ath10k_warn(ar, "trimming beacon %d -> %d bytes!\n",
					    bcn->len, IEEE80211_MAX_FRAME_LEN);
				skb_trim(bcn, IEEE80211_MAX_FRAME_LEN);
			}
			memcpy(arvif->beacon_buf, bcn->data, bcn->len);
			ATH10K_SKB_CB(bcn)->paddr = arvif->beacon_paddr;
		}

		arvif->beacon = bcn;
		arvif->beacon_state = ATH10K_BEACON_SCHEDULED;

		trace_ath10k_tx_hdr(ar, bcn->data, bcn->len);
		trace_ath10k_tx_payload(ar, bcn->data, bcn->len);

skip:
		spin_unlock_bh(&ar->data_lock);
	}

	ath10k_wmi_tx_beacons_nowait(ar);
}
2420 
/* TBTT offset update event - currently unused, just logged. */
void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n");
}
2425 
2426 static void ath10k_dfs_radar_report(struct ath10k *ar,
2427 				    const struct wmi_phyerr *phyerr,
2428 				    const struct phyerr_radar_report *rr,
2429 				    u64 tsf)
2430 {
2431 	u32 reg0, reg1, tsf32l;
2432 	struct ieee80211_channel *ch;
2433 	struct pulse_event pe;
2434 	u64 tsf64;
2435 	u8 rssi, width;
2436 
2437 	reg0 = __le32_to_cpu(rr->reg0);
2438 	reg1 = __le32_to_cpu(rr->reg1);
2439 
2440 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
2441 		   "wmi phyerr radar report chirp %d max_width %d agc_total_gain %d pulse_delta_diff %d\n",
2442 		   MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP),
2443 		   MS(reg0, RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH),
2444 		   MS(reg0, RADAR_REPORT_REG0_AGC_TOTAL_GAIN),
2445 		   MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_DIFF));
2446 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
2447 		   "wmi phyerr radar report pulse_delta_pean %d pulse_sidx %d fft_valid %d agc_mb_gain %d subchan_mask %d\n",
2448 		   MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_PEAK),
2449 		   MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX),
2450 		   MS(reg1, RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID),
2451 		   MS(reg1, RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN),
2452 		   MS(reg1, RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK));
2453 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
2454 		   "wmi phyerr radar report pulse_tsf_offset 0x%X pulse_dur: %d\n",
2455 		   MS(reg1, RADAR_REPORT_REG1_PULSE_TSF_OFFSET),
2456 		   MS(reg1, RADAR_REPORT_REG1_PULSE_DUR));
2457 
2458 	if (!ar->dfs_detector)
2459 		return;
2460 
2461 	spin_lock_bh(&ar->data_lock);
2462 	ch = ar->rx_channel;
2463 	spin_unlock_bh(&ar->data_lock);
2464 
2465 	if (!ch) {
2466 		ath10k_warn(ar, "failed to derive channel for radar pulse, treating as radar\n");
2467 		goto radar_detected;
2468 	}
2469 
2470 	/* report event to DFS pattern detector */
2471 	tsf32l = __le32_to_cpu(phyerr->tsf_timestamp);
2472 	tsf64 = tsf & (~0xFFFFFFFFULL);
2473 	tsf64 |= tsf32l;
2474 
2475 	width = MS(reg1, RADAR_REPORT_REG1_PULSE_DUR);
2476 	rssi = phyerr->rssi_combined;
2477 
2478 	/* hardware store this as 8 bit signed value,
2479 	 * set to zero if negative number
2480 	 */
2481 	if (rssi & 0x80)
2482 		rssi = 0;
2483 
2484 	pe.ts = tsf64;
2485 	pe.freq = ch->center_freq;
2486 	pe.width = width;
2487 	pe.rssi = rssi;
2488 	pe.chirp = (MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP) != 0);
2489 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
2490 		   "dfs add pulse freq: %d, width: %d, rssi %d, tsf: %llX\n",
2491 		   pe.freq, pe.width, pe.rssi, pe.ts);
2492 
2493 	ATH10K_DFS_STAT_INC(ar, pulses_detected);
2494 
2495 	if (!ar->dfs_detector->add_pulse(ar->dfs_detector, &pe)) {
2496 		ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
2497 			   "dfs no pulse pattern detected, yet\n");
2498 		return;
2499 	}
2500 
2501 radar_detected:
2502 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs radar detected\n");
2503 	ATH10K_DFS_STAT_INC(ar, radar_detected);
2504 
2505 	/* Control radar events reporting in debugfs file
2506 	   dfs_block_radar_events */
2507 	if (ar->dfs_block_radar_events) {
2508 		ath10k_info(ar, "DFS Radar detected, but ignored as requested\n");
2509 		return;
2510 	}
2511 
2512 	ieee80211_radar_detected(ar->hw);
2513 }
2514 
2515 static int ath10k_dfs_fft_report(struct ath10k *ar,
2516 				 const struct wmi_phyerr *phyerr,
2517 				 const struct phyerr_fft_report *fftr,
2518 				 u64 tsf)
2519 {
2520 	u32 reg0, reg1;
2521 	u8 rssi, peak_mag;
2522 
2523 	reg0 = __le32_to_cpu(fftr->reg0);
2524 	reg1 = __le32_to_cpu(fftr->reg1);
2525 	rssi = phyerr->rssi_combined;
2526 
2527 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
2528 		   "wmi phyerr fft report total_gain_db %d base_pwr_db %d fft_chn_idx %d peak_sidx %d\n",
2529 		   MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB),
2530 		   MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB),
2531 		   MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX),
2532 		   MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX));
2533 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
2534 		   "wmi phyerr fft report rel_pwr_db %d avgpwr_db %d peak_mag %d num_store_bin %d\n",
2535 		   MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB),
2536 		   MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB),
2537 		   MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG),
2538 		   MS(reg1, SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB));
2539 
2540 	peak_mag = MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG);
2541 
2542 	/* false event detection */
2543 	if (rssi == DFS_RSSI_POSSIBLY_FALSE &&
2544 	    peak_mag < 2 * DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE) {
2545 		ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs false pulse detected\n");
2546 		ATH10K_DFS_STAT_INC(ar, pulses_discarded);
2547 		return -EINVAL;
2548 	}
2549 
2550 	return 0;
2551 }
2552 
/* Walk the TLV payload of a radar PHY error event and dispatch radar pulse
 * summaries to the DFS detector and FFT reports to the false-pulse filter.
 * Each bounds check is done against the firmware-reported buf_len before
 * the corresponding structure is touched; a short buffer aborts the walk.
 */
void ath10k_wmi_event_dfs(struct ath10k *ar,
			  const struct wmi_phyerr *phyerr,
			  u64 tsf)
{
	int buf_len, tlv_len, res, i = 0;
	const struct phyerr_tlv *tlv;
	const struct phyerr_radar_report *rr;
	const struct phyerr_fft_report *fftr;
	const u8 *tlv_buf;

	buf_len = __le32_to_cpu(phyerr->buf_len);
	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
		   "wmi event dfs err_code %d rssi %d tsfl 0x%X tsf64 0x%llX len %d\n",
		   phyerr->phy_err_code, phyerr->rssi_combined,
		   __le32_to_cpu(phyerr->tsf_timestamp), tsf, buf_len);

	/* Skip event if DFS disabled */
	if (!config_enabled(CONFIG_ATH10K_DFS_CERTIFIED))
		return;

	ATH10K_DFS_STAT_INC(ar, pulses_total);

	while (i < buf_len) {
		/* need a full TLV header before reading tag/len */
		if (i + sizeof(*tlv) > buf_len) {
			ath10k_warn(ar, "too short buf for tlv header (%d)\n",
				    i);
			return;
		}

		tlv = (struct phyerr_tlv *)&phyerr->buf[i];
		tlv_len = __le16_to_cpu(tlv->len);
		tlv_buf = &phyerr->buf[i + sizeof(*tlv)];
		ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
			   "wmi event dfs tlv_len %d tlv_tag 0x%02X tlv_sig 0x%02X\n",
			   tlv_len, tlv->tag, tlv->sig);

		switch (tlv->tag) {
		case PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY:
			if (i + sizeof(*tlv) + sizeof(*rr) > buf_len) {
				ath10k_warn(ar, "too short radar pulse summary (%d)\n",
					    i);
				return;
			}

			rr = (struct phyerr_radar_report *)tlv_buf;
			ath10k_dfs_radar_report(ar, phyerr, rr, tsf);
			break;
		case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
			if (i + sizeof(*tlv) + sizeof(*fftr) > buf_len) {
				ath10k_warn(ar, "too short fft report (%d)\n",
					    i);
				return;
			}

			fftr = (struct phyerr_fft_report *)tlv_buf;
			res = ath10k_dfs_fft_report(ar, phyerr, fftr, tsf);
			/* a rejected (false) pulse invalidates the rest */
			if (res)
				return;
			break;
		}

		/* unknown tags are skipped by advancing over their payload */
		i += sizeof(*tlv) + tlv_len;
	}
}
2617 
/* Walk the TLV payload of a spectral scan PHY error event and hand each
 * search FFT report (header plus trailing bins) to the spectral module.
 * Unlike the DFS walker this validates the full TLV payload length up
 * front, since the trailing FFT bin data length is variable.
 */
void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
				    const struct wmi_phyerr *phyerr,
				    u64 tsf)
{
	int buf_len, tlv_len, res, i = 0;
	struct phyerr_tlv *tlv;
	const void *tlv_buf;
	const struct phyerr_fft_report *fftr;
	size_t fftr_len;

	buf_len = __le32_to_cpu(phyerr->buf_len);

	while (i < buf_len) {
		/* need a full TLV header before reading tag/len */
		if (i + sizeof(*tlv) > buf_len) {
			ath10k_warn(ar, "failed to parse phyerr tlv header at byte %d\n",
				    i);
			return;
		}

		tlv = (struct phyerr_tlv *)&phyerr->buf[i];
		tlv_len = __le16_to_cpu(tlv->len);
		tlv_buf = &phyerr->buf[i + sizeof(*tlv)];

		/* the whole declared payload must fit in the buffer */
		if (i + sizeof(*tlv) + tlv_len > buf_len) {
			ath10k_warn(ar, "failed to parse phyerr tlv payload at byte %d\n",
				    i);
			return;
		}

		switch (tlv->tag) {
		case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
			if (sizeof(*fftr) > tlv_len) {
				ath10k_warn(ar, "failed to parse fft report at byte %d\n",
					    i);
				return;
			}

			/* bytes beyond the fixed header are FFT bins */
			fftr_len = tlv_len - sizeof(*fftr);
			fftr = tlv_buf;
			res = ath10k_spectral_process_fft(ar, phyerr,
							  fftr, fftr_len,
							  tsf);
			if (res < 0) {
				ath10k_warn(ar, "failed to process fft report: %d\n",
					    res);
				return;
			}
			break;
		}

		i += sizeof(*tlv) + tlv_len;
	}
}
2671 
/* Pull the main-firmware PHY error event header into @arg. Fields are left
 * in wire (little-endian) byte order; buf_len is re-encoded to LE so the
 * caller can treat all pull-op variants uniformly. Returns -EPROTO when
 * the skb is too short for the fixed header.
 */
static int ath10k_wmi_op_pull_phyerr_ev(struct ath10k *ar, struct sk_buff *skb,
					struct wmi_phyerr_ev_arg *arg)
{
	struct wmi_phyerr_event *ev = (void *)skb->data;

	if (skb->len < sizeof(*ev))
		return -EPROTO;

	arg->num_phyerrs = ev->num_phyerrs;
	arg->tsf_l32 = ev->tsf_l32;
	arg->tsf_u32 = ev->tsf_u32;
	/* remaining bytes after the header are the packed phyerr records */
	arg->buf_len = __cpu_to_le32(skb->len - sizeof(*ev));
	arg->phyerrs = ev->phyerrs;

	return 0;
}
2688 
/* Handle a WMI PHY error event: parse the header, then iterate over the
 * packed per-error records, length-checking each one before dispatching
 * it to the DFS and/or spectral scan handlers based on its error code.
 * A malformed record aborts processing of the remaining records.
 */
void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_phyerr_ev_arg arg = {};
	const struct wmi_phyerr *phyerr;
	u32 count, i, buf_len, phy_err_code;
	u64 tsf;
	int left_len, ret;

	ATH10K_DFS_STAT_INC(ar, phy_errors);

	ret = ath10k_wmi_pull_phyerr(ar, skb, &arg);
	if (ret) {
		ath10k_warn(ar, "failed to parse phyerr event: %d\n", ret);
		return;
	}

	left_len = __le32_to_cpu(arg.buf_len);

	/* Check number of included events */
	count = __le32_to_cpu(arg.num_phyerrs);

	/* reassemble the 64 bit TSF from the two 32 bit halves */
	tsf = __le32_to_cpu(arg.tsf_u32);
	tsf <<= 32;
	tsf |= __le32_to_cpu(arg.tsf_l32);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi event phyerr count %d tsf64 0x%llX\n",
		   count, tsf);

	phyerr = arg.phyerrs;
	for (i = 0; i < count; i++) {
		/* Check if we can read event header */
		if (left_len < sizeof(*phyerr)) {
			ath10k_warn(ar, "single event (%d) wrong head len\n",
				    i);
			return;
		}

		left_len -= sizeof(*phyerr);

		buf_len = __le32_to_cpu(phyerr->buf_len);
		phy_err_code = phyerr->phy_err_code;

		/* the variable-length payload must also fit */
		if (left_len < buf_len) {
			ath10k_warn(ar, "single event (%d) wrong buf len\n", i);
			return;
		}

		left_len -= buf_len;

		switch (phy_err_code) {
		case PHY_ERROR_RADAR:
			ath10k_wmi_event_dfs(ar, phyerr, tsf);
			break;
		case PHY_ERROR_SPECTRAL_SCAN:
			ath10k_wmi_event_spectral_scan(ar, phyerr, tsf);
			break;
		case PHY_ERROR_FALSE_RADAR_EXT:
			/* may be either, so feed both consumers */
			ath10k_wmi_event_dfs(ar, phyerr, tsf);
			ath10k_wmi_event_spectral_scan(ar, phyerr, tsf);
			break;
		default:
			break;
		}

		phyerr = (void *)phyerr + sizeof(*phyerr) + buf_len;
	}
}
2757 
/* Handle a WMI roam event. Currently only beacon-miss is acted upon; the
 * other roam reasons are logged as unimplemented.
 * NOTE(review): the main-WMI pull op visible in this file does not
 * populate arg.rssi, so rssi here is 0 plus the noise floor offset for
 * that firmware branch — confirm against the TLV pull op.
 */
void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_roam_ev_arg arg = {};
	int ret;
	u32 vdev_id;
	u32 reason;
	s32 rssi;

	ret = ath10k_wmi_pull_roam_ev(ar, skb, &arg);
	if (ret) {
		ath10k_warn(ar, "failed to parse roam event: %d\n", ret);
		return;
	}

	vdev_id = __le32_to_cpu(arg.vdev_id);
	reason = __le32_to_cpu(arg.reason);
	rssi = __le32_to_cpu(arg.rssi);
	rssi += WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT;

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi roam event vdev %u reason 0x%08x rssi %d\n",
		   vdev_id, reason, rssi);

	/* warn, but still fall through so nothing is silently dropped */
	if (reason >= WMI_ROAM_REASON_MAX)
		ath10k_warn(ar, "ignoring unknown roam event reason %d on vdev %i\n",
			    reason, vdev_id);

	switch (reason) {
	case WMI_ROAM_REASON_BEACON_MISS:
		ath10k_mac_handle_beacon_miss(ar, vdev_id);
		break;
	case WMI_ROAM_REASON_BETTER_AP:
	case WMI_ROAM_REASON_LOW_RSSI:
	case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
	case WMI_ROAM_REASON_HO_FAILED:
		ath10k_warn(ar, "ignoring not implemented roam event reason %d on vdev %i\n",
			    reason, vdev_id);
		break;
	}
}
2798 
/* Profile match events are not handled; only log their arrival. */
void ath10k_wmi_event_profile_match(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n");
}
2803 
2804 void ath10k_wmi_event_debug_print(struct ath10k *ar, struct sk_buff *skb)
2805 {
2806 	char buf[101], c;
2807 	int i;
2808 
2809 	for (i = 0; i < sizeof(buf) - 1; i++) {
2810 		if (i >= skb->len)
2811 			break;
2812 
2813 		c = skb->data[i];
2814 
2815 		if (c == '\0')
2816 			break;
2817 
2818 		if (isascii(c) && isprint(c))
2819 			buf[i] = c;
2820 		else
2821 			buf[i] = '.';
2822 	}
2823 
2824 	if (i == sizeof(buf) - 1)
2825 		ath10k_warn(ar, "wmi debug print truncated: %d\n", skb->len);
2826 
2827 	/* for some reason the debug prints end with \n, remove that */
2828 	if (skb->data[i - 1] == '\n')
2829 		i--;
2830 
2831 	/* the last byte is always reserved for the null character */
2832 	buf[i] = '\0';
2833 
2834 	ath10k_dbg(ar, ATH10K_DBG_WMI_PRINT, "wmi print '%s'\n", buf);
2835 }
2836 
/* QVIT events are not handled; only log their arrival. */
void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n");
}
2841 
/* WLAN profile data events are not handled; only log their arrival. */
void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n");
}
2846 
/* RTT measurement reports are not handled; only log their arrival. */
void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar,
					     struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n");
}
2852 
/* TSF measurement reports are not handled; only log their arrival. */
void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar,
					     struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n");
}
2858 
/* RTT error reports are not handled; only log their arrival. */
void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n");
}
2863 
/* Handle a WoW wakeup event. The completion is signalled before parsing so
 * a waiter in the resume path is unblocked even if the event payload turns
 * out to be malformed; the wake reason itself is only logged.
 */
void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_wow_ev_arg ev = {};
	int ret;

	complete(&ar->wow.wakeup_completed);

	ret = ath10k_wmi_pull_wow_event(ar, skb, &ev);
	if (ret) {
		ath10k_warn(ar, "failed to parse wow wakeup event: %d\n", ret);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wow wakeup host reason %s\n",
		   wow_reason(ev.wake_reason));
}
2880 
/* DCS interference events are not handled; only log their arrival. */
void ath10k_wmi_event_dcs_interference(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n");
}
2885 
/* TPC config events are not handled; only log their arrival. */
void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_TPC_CONFIG_EVENTID\n");
}
2890 
/* FTM integration events are not handled; only log their arrival. */
void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n");
}
2895 
/* GTK offload status events are not handled; only log their arrival. */
void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n");
}
2900 
/* GTK rekey failure events are not handled; only log their arrival. */
void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n");
}
2905 
/* TX DELBA completion events are not handled; only log their arrival. */
void ath10k_wmi_event_delba_complete(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n");
}
2910 
/* TX ADDBA completion events are not handled; only log their arrival. */
void ath10k_wmi_event_addba_complete(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n");
}
2915 
/* Key install completion events are not handled; only log their arrival. */
void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
						struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n");
}
2921 
/* Instantaneous RSSI stats events are not handled; only log their arrival. */
void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n");
}
2926 
/* VDEV standby requests are not handled; only log their arrival. */
void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n");
}
2931 
/* VDEV resume requests are not handled; only log their arrival. */
void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n");
}
2936 
2937 static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
2938 				     u32 num_units, u32 unit_len)
2939 {
2940 	dma_addr_t paddr;
2941 	u32 pool_size;
2942 	int idx = ar->wmi.num_mem_chunks;
2943 
2944 	pool_size = num_units * round_up(unit_len, 4);
2945 
2946 	if (!pool_size)
2947 		return -EINVAL;
2948 
2949 	ar->wmi.mem_chunks[idx].vaddr = dma_alloc_coherent(ar->dev,
2950 							   pool_size,
2951 							   &paddr,
2952 							   GFP_ATOMIC);
2953 	if (!ar->wmi.mem_chunks[idx].vaddr) {
2954 		ath10k_warn(ar, "failed to allocate memory chunk\n");
2955 		return -ENOMEM;
2956 	}
2957 
2958 	memset(ar->wmi.mem_chunks[idx].vaddr, 0, pool_size);
2959 
2960 	ar->wmi.mem_chunks[idx].paddr = paddr;
2961 	ar->wmi.mem_chunks[idx].len = pool_size;
2962 	ar->wmi.mem_chunks[idx].req_id = req_id;
2963 	ar->wmi.num_mem_chunks++;
2964 
2965 	return 0;
2966 }
2967 
2968 static int
2969 ath10k_wmi_main_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
2970 				   struct wmi_svc_rdy_ev_arg *arg)
2971 {
2972 	struct wmi_service_ready_event *ev;
2973 	size_t i, n;
2974 
2975 	if (skb->len < sizeof(*ev))
2976 		return -EPROTO;
2977 
2978 	ev = (void *)skb->data;
2979 	skb_pull(skb, sizeof(*ev));
2980 	arg->min_tx_power = ev->hw_min_tx_power;
2981 	arg->max_tx_power = ev->hw_max_tx_power;
2982 	arg->ht_cap = ev->ht_cap_info;
2983 	arg->vht_cap = ev->vht_cap_info;
2984 	arg->sw_ver0 = ev->sw_version;
2985 	arg->sw_ver1 = ev->sw_version_1;
2986 	arg->phy_capab = ev->phy_capability;
2987 	arg->num_rf_chains = ev->num_rf_chains;
2988 	arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
2989 	arg->num_mem_reqs = ev->num_mem_reqs;
2990 	arg->service_map = ev->wmi_service_bitmap;
2991 	arg->service_map_len = sizeof(ev->wmi_service_bitmap);
2992 
2993 	n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
2994 		  ARRAY_SIZE(arg->mem_reqs));
2995 	for (i = 0; i < n; i++)
2996 		arg->mem_reqs[i] = &ev->mem_reqs[i];
2997 
2998 	if (skb->len <
2999 	    __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0]))
3000 		return -EPROTO;
3001 
3002 	return 0;
3003 }
3004 
3005 static int
3006 ath10k_wmi_10x_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
3007 				  struct wmi_svc_rdy_ev_arg *arg)
3008 {
3009 	struct wmi_10x_service_ready_event *ev;
3010 	int i, n;
3011 
3012 	if (skb->len < sizeof(*ev))
3013 		return -EPROTO;
3014 
3015 	ev = (void *)skb->data;
3016 	skb_pull(skb, sizeof(*ev));
3017 	arg->min_tx_power = ev->hw_min_tx_power;
3018 	arg->max_tx_power = ev->hw_max_tx_power;
3019 	arg->ht_cap = ev->ht_cap_info;
3020 	arg->vht_cap = ev->vht_cap_info;
3021 	arg->sw_ver0 = ev->sw_version;
3022 	arg->phy_capab = ev->phy_capability;
3023 	arg->num_rf_chains = ev->num_rf_chains;
3024 	arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
3025 	arg->num_mem_reqs = ev->num_mem_reqs;
3026 	arg->service_map = ev->wmi_service_bitmap;
3027 	arg->service_map_len = sizeof(ev->wmi_service_bitmap);
3028 
3029 	n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
3030 		  ARRAY_SIZE(arg->mem_reqs));
3031 	for (i = 0; i < n; i++)
3032 		arg->mem_reqs[i] = &ev->mem_reqs[i];
3033 
3034 	if (skb->len <
3035 	    __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0]))
3036 		return -EPROTO;
3037 
3038 	return 0;
3039 }
3040 
/* Handle the WMI service ready event: capture firmware capabilities and
 * version info into @ar, honor the firmware's host memory requests, and
 * finally signal the service_ready completion that the boot path waits on.
 * Any parse or allocation failure returns early WITHOUT completing, so
 * boot will time out rather than proceed with partial state.
 */
void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_svc_rdy_ev_arg arg = {};
	u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
	int ret;

	ret = ath10k_wmi_pull_svc_rdy(ar, skb, &arg);
	if (ret) {
		ath10k_warn(ar, "failed to parse service ready: %d\n", ret);
		return;
	}

	memset(&ar->wmi.svc_map, 0, sizeof(ar->wmi.svc_map));
	ath10k_wmi_map_svc(ar, arg.service_map, ar->wmi.svc_map,
			   arg.service_map_len);

	ar->hw_min_tx_power = __le32_to_cpu(arg.min_tx_power);
	ar->hw_max_tx_power = __le32_to_cpu(arg.max_tx_power);
	ar->ht_cap_info = __le32_to_cpu(arg.ht_cap);
	ar->vht_cap_info = __le32_to_cpu(arg.vht_cap);
	/* sw_ver0 packs major.minor, sw_ver1 packs release.build */
	ar->fw_version_major =
		(__le32_to_cpu(arg.sw_ver0) & 0xff000000) >> 24;
	ar->fw_version_minor = (__le32_to_cpu(arg.sw_ver0) & 0x00ffffff);
	ar->fw_version_release =
		(__le32_to_cpu(arg.sw_ver1) & 0xffff0000) >> 16;
	ar->fw_version_build = (__le32_to_cpu(arg.sw_ver1) & 0x0000ffff);
	ar->phy_capability = __le32_to_cpu(arg.phy_capab);
	ar->num_rf_chains = __le32_to_cpu(arg.num_rf_chains);
	ar->ath_common.regulatory.current_rd = __le32_to_cpu(arg.eeprom_rd);

	ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
			arg.service_map, arg.service_map_len);

	/* only manually set fw features when not using FW IE format */
	if (ar->fw_api == 1 && ar->fw_version_build > 636)
		set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features);

	/* clamp bogus chain counts advertised by firmware */
	if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
		ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n",
			    ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
		ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
	}

	ar->supp_tx_chainmask = (1 << ar->num_rf_chains) - 1;
	ar->supp_rx_chainmask = (1 << ar->num_rf_chains) - 1;

	/* keep a version string set by an earlier boot if present */
	if (strlen(ar->hw->wiphy->fw_version) == 0) {
		snprintf(ar->hw->wiphy->fw_version,
			 sizeof(ar->hw->wiphy->fw_version),
			 "%u.%u.%u.%u",
			 ar->fw_version_major,
			 ar->fw_version_minor,
			 ar->fw_version_release,
			 ar->fw_version_build);
	}

	num_mem_reqs = __le32_to_cpu(arg.num_mem_reqs);
	if (num_mem_reqs > WMI_MAX_MEM_REQS) {
		ath10k_warn(ar, "requested memory chunks number (%d) exceeds the limit\n",
			    num_mem_reqs);
		return;
	}

	for (i = 0; i < num_mem_reqs; ++i) {
		req_id = __le32_to_cpu(arg.mem_reqs[i]->req_id);
		num_units = __le32_to_cpu(arg.mem_reqs[i]->num_units);
		unit_size = __le32_to_cpu(arg.mem_reqs[i]->unit_size);
		num_unit_info = __le32_to_cpu(arg.mem_reqs[i]->num_unit_info);

		if (num_unit_info & NUM_UNITS_IS_NUM_PEERS)
			/* number of units to allocate is number of
			 * peers, 1 extra for self peer on target */
			/* this needs to be tied, host and target
			 * can get out of sync */
			num_units = TARGET_10X_NUM_PEERS + 1;
		else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS)
			num_units = TARGET_10X_NUM_VDEVS + 1;

		ath10k_dbg(ar, ATH10K_DBG_WMI,
			   "wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n",
			   req_id,
			   __le32_to_cpu(arg.mem_reqs[i]->num_units),
			   num_unit_info,
			   unit_size,
			   num_units);

		ret = ath10k_wmi_alloc_host_mem(ar, req_id, num_units,
						unit_size);
		if (ret)
			return;
	}

	/* NOTE(review): arg.fw_build is not populated by the main/10x pull
	 * ops visible in this file and prints 0 for them — confirm against
	 * the TLV pull op.
	 */
	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi event service ready min_tx_power 0x%08x max_tx_power 0x%08x ht_cap 0x%08x vht_cap 0x%08x sw_ver0 0x%08x sw_ver1 0x%08x fw_build 0x%08x phy_capab 0x%08x num_rf_chains 0x%08x eeprom_rd 0x%08x num_mem_reqs 0x%08x\n",
		   __le32_to_cpu(arg.min_tx_power),
		   __le32_to_cpu(arg.max_tx_power),
		   __le32_to_cpu(arg.ht_cap),
		   __le32_to_cpu(arg.vht_cap),
		   __le32_to_cpu(arg.sw_ver0),
		   __le32_to_cpu(arg.sw_ver1),
		   __le32_to_cpu(arg.fw_build),
		   __le32_to_cpu(arg.phy_capab),
		   __le32_to_cpu(arg.num_rf_chains),
		   __le32_to_cpu(arg.eeprom_rd),
		   __le32_to_cpu(arg.num_mem_reqs));

	complete(&ar->wmi.service_ready);
}
3149 
/* Pull the WMI ready event into @arg, leaving fields in wire
 * (little-endian) byte order; mac_addr points into the skb data.
 * Returns -EPROTO when the skb is too short.
 */
static int ath10k_wmi_op_pull_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
				     struct wmi_rdy_ev_arg *arg)
{
	struct wmi_ready_event *ev = (void *)skb->data;

	if (skb->len < sizeof(*ev))
		return -EPROTO;

	skb_pull(skb, sizeof(*ev));
	arg->sw_version = ev->sw_version;
	arg->abi_version = ev->abi_version;
	arg->status = ev->status;
	arg->mac_addr = ev->mac_addr.addr;

	return 0;
}
3166 
/* Pull the main-WMI roam event into @arg (fields stay little-endian).
 * NOTE(review): arg->rssi is not set here — the caller relies on its
 * zero-initialized arg struct; confirm the main roam event carries no
 * rssi field.
 */
static int ath10k_wmi_op_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
				      struct wmi_roam_ev_arg *arg)
{
	struct wmi_roam_ev *ev = (void *)skb->data;

	if (skb->len < sizeof(*ev))
		return -EPROTO;

	skb_pull(skb, sizeof(*ev));
	arg->vdev_id = ev->vdev_id;
	arg->reason = ev->reason;

	return 0;
}
3181 
/* Handle the WMI ready event: record the firmware-provided MAC address
 * and signal the unified_ready completion the boot path waits on.
 * Returns 0 on success or a negative errno if the event fails to parse.
 */
int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_rdy_ev_arg arg = {};
	int ret;

	ret = ath10k_wmi_pull_rdy(ar, skb, &arg);
	if (ret) {
		ath10k_warn(ar, "failed to parse ready event: %d\n", ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d\n",
		   __le32_to_cpu(arg.sw_version),
		   __le32_to_cpu(arg.abi_version),
		   arg.mac_addr,
		   __le32_to_cpu(arg.status));

	ether_addr_copy(ar->mac_addr, arg.mac_addr);
	complete(&ar->wmi.unified_ready);
	return 0;
}
3204 
3205 static int ath10k_wmi_event_temperature(struct ath10k *ar, struct sk_buff *skb)
3206 {
3207 	const struct wmi_pdev_temperature_event *ev;
3208 
3209 	ev = (struct wmi_pdev_temperature_event *)skb->data;
3210 	if (WARN_ON(skb->len < sizeof(*ev)))
3211 		return -EPROTO;
3212 
3213 	ath10k_thermal_event_temperature(ar, __le32_to_cpu(ev->temperature));
3214 	return 0;
3215 }
3216 
/* Main-firmware WMI receive dispatcher: strip the WMI command header,
 * trace the event, and route it to the matching handler by event id.
 * The skb is freed here on every path EXCEPT management rx, whose handler
 * takes over ownership of the skb.
 */
static void ath10k_wmi_op_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_cmd_hdr *cmd_hdr;
	enum wmi_event_id id;

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);

	/* skb shorter than the WMI header: drop it */
	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
		goto out;

	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);

	switch (id) {
	case WMI_MGMT_RX_EVENTID:
		ath10k_wmi_event_mgmt_rx(ar, skb);
		/* mgmt_rx() owns the skb now! */
		return;
	case WMI_SCAN_EVENTID:
		ath10k_wmi_event_scan(ar, skb);
		break;
	case WMI_CHAN_INFO_EVENTID:
		ath10k_wmi_event_chan_info(ar, skb);
		break;
	case WMI_ECHO_EVENTID:
		ath10k_wmi_event_echo(ar, skb);
		break;
	case WMI_DEBUG_MESG_EVENTID:
		ath10k_wmi_event_debug_mesg(ar, skb);
		break;
	case WMI_UPDATE_STATS_EVENTID:
		ath10k_wmi_event_update_stats(ar, skb);
		break;
	case WMI_VDEV_START_RESP_EVENTID:
		ath10k_wmi_event_vdev_start_resp(ar, skb);
		break;
	case WMI_VDEV_STOPPED_EVENTID:
		ath10k_wmi_event_vdev_stopped(ar, skb);
		break;
	case WMI_PEER_STA_KICKOUT_EVENTID:
		ath10k_wmi_event_peer_sta_kickout(ar, skb);
		break;
	case WMI_HOST_SWBA_EVENTID:
		ath10k_wmi_event_host_swba(ar, skb);
		break;
	case WMI_TBTTOFFSET_UPDATE_EVENTID:
		ath10k_wmi_event_tbttoffset_update(ar, skb);
		break;
	case WMI_PHYERR_EVENTID:
		ath10k_wmi_event_phyerr(ar, skb);
		break;
	case WMI_ROAM_EVENTID:
		ath10k_wmi_event_roam(ar, skb);
		break;
	case WMI_PROFILE_MATCH:
		ath10k_wmi_event_profile_match(ar, skb);
		break;
	case WMI_DEBUG_PRINT_EVENTID:
		ath10k_wmi_event_debug_print(ar, skb);
		break;
	case WMI_PDEV_QVIT_EVENTID:
		ath10k_wmi_event_pdev_qvit(ar, skb);
		break;
	case WMI_WLAN_PROFILE_DATA_EVENTID:
		ath10k_wmi_event_wlan_profile_data(ar, skb);
		break;
	case WMI_RTT_MEASUREMENT_REPORT_EVENTID:
		ath10k_wmi_event_rtt_measurement_report(ar, skb);
		break;
	case WMI_TSF_MEASUREMENT_REPORT_EVENTID:
		ath10k_wmi_event_tsf_measurement_report(ar, skb);
		break;
	case WMI_RTT_ERROR_REPORT_EVENTID:
		ath10k_wmi_event_rtt_error_report(ar, skb);
		break;
	case WMI_WOW_WAKEUP_HOST_EVENTID:
		ath10k_wmi_event_wow_wakeup_host(ar, skb);
		break;
	case WMI_DCS_INTERFERENCE_EVENTID:
		ath10k_wmi_event_dcs_interference(ar, skb);
		break;
	case WMI_PDEV_TPC_CONFIG_EVENTID:
		ath10k_wmi_event_pdev_tpc_config(ar, skb);
		break;
	case WMI_PDEV_FTM_INTG_EVENTID:
		ath10k_wmi_event_pdev_ftm_intg(ar, skb);
		break;
	case WMI_GTK_OFFLOAD_STATUS_EVENTID:
		ath10k_wmi_event_gtk_offload_status(ar, skb);
		break;
	case WMI_GTK_REKEY_FAIL_EVENTID:
		ath10k_wmi_event_gtk_rekey_fail(ar, skb);
		break;
	case WMI_TX_DELBA_COMPLETE_EVENTID:
		ath10k_wmi_event_delba_complete(ar, skb);
		break;
	case WMI_TX_ADDBA_COMPLETE_EVENTID:
		ath10k_wmi_event_addba_complete(ar, skb);
		break;
	case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
		ath10k_wmi_event_vdev_install_key_complete(ar, skb);
		break;
	case WMI_SERVICE_READY_EVENTID:
		ath10k_wmi_event_service_ready(ar, skb);
		break;
	case WMI_READY_EVENTID:
		ath10k_wmi_event_ready(ar, skb);
		break;
	default:
		ath10k_warn(ar, "Unknown eventid: %d\n", id);
		break;
	}

out:
	dev_kfree_skb(skb);
}
3333 
/* 10.1-firmware WMI receive dispatcher. Like the main dispatcher, but the
 * event is first offered to the testmode (UTF) hook, which may consume it;
 * the ready event is always processed normally so boot completion works in
 * UTF mode too. The skb is freed here on every path EXCEPT management rx,
 * whose handler takes over ownership of the skb.
 */
static void ath10k_wmi_10_1_op_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_cmd_hdr *cmd_hdr;
	enum wmi_10x_event_id id;
	bool consumed;

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);

	/* skb shorter than the WMI header: drop it */
	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
		goto out;

	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);

	consumed = ath10k_tm_event_wmi(ar, id, skb);

	/* Ready event must be handled normally also in UTF mode so that we
	 * know the UTF firmware has booted, others we are just bypass WMI
	 * events to testmode.
	 */
	if (consumed && id != WMI_10X_READY_EVENTID) {
		ath10k_dbg(ar, ATH10K_DBG_WMI,
			   "wmi testmode consumed 0x%x\n", id);
		goto out;
	}

	switch (id) {
	case WMI_10X_MGMT_RX_EVENTID:
		ath10k_wmi_event_mgmt_rx(ar, skb);
		/* mgmt_rx() owns the skb now! */
		return;
	case WMI_10X_SCAN_EVENTID:
		ath10k_wmi_event_scan(ar, skb);
		break;
	case WMI_10X_CHAN_INFO_EVENTID:
		ath10k_wmi_event_chan_info(ar, skb);
		break;
	case WMI_10X_ECHO_EVENTID:
		ath10k_wmi_event_echo(ar, skb);
		break;
	case WMI_10X_DEBUG_MESG_EVENTID:
		ath10k_wmi_event_debug_mesg(ar, skb);
		break;
	case WMI_10X_UPDATE_STATS_EVENTID:
		ath10k_wmi_event_update_stats(ar, skb);
		break;
	case WMI_10X_VDEV_START_RESP_EVENTID:
		ath10k_wmi_event_vdev_start_resp(ar, skb);
		break;
	case WMI_10X_VDEV_STOPPED_EVENTID:
		ath10k_wmi_event_vdev_stopped(ar, skb);
		break;
	case WMI_10X_PEER_STA_KICKOUT_EVENTID:
		ath10k_wmi_event_peer_sta_kickout(ar, skb);
		break;
	case WMI_10X_HOST_SWBA_EVENTID:
		ath10k_wmi_event_host_swba(ar, skb);
		break;
	case WMI_10X_TBTTOFFSET_UPDATE_EVENTID:
		ath10k_wmi_event_tbttoffset_update(ar, skb);
		break;
	case WMI_10X_PHYERR_EVENTID:
		ath10k_wmi_event_phyerr(ar, skb);
		break;
	case WMI_10X_ROAM_EVENTID:
		ath10k_wmi_event_roam(ar, skb);
		break;
	case WMI_10X_PROFILE_MATCH:
		ath10k_wmi_event_profile_match(ar, skb);
		break;
	case WMI_10X_DEBUG_PRINT_EVENTID:
		ath10k_wmi_event_debug_print(ar, skb);
		break;
	case WMI_10X_PDEV_QVIT_EVENTID:
		ath10k_wmi_event_pdev_qvit(ar, skb);
		break;
	case WMI_10X_WLAN_PROFILE_DATA_EVENTID:
		ath10k_wmi_event_wlan_profile_data(ar, skb);
		break;
	case WMI_10X_RTT_MEASUREMENT_REPORT_EVENTID:
		ath10k_wmi_event_rtt_measurement_report(ar, skb);
		break;
	case WMI_10X_TSF_MEASUREMENT_REPORT_EVENTID:
		ath10k_wmi_event_tsf_measurement_report(ar, skb);
		break;
	case WMI_10X_RTT_ERROR_REPORT_EVENTID:
		ath10k_wmi_event_rtt_error_report(ar, skb);
		break;
	case WMI_10X_WOW_WAKEUP_HOST_EVENTID:
		ath10k_wmi_event_wow_wakeup_host(ar, skb);
		break;
	case WMI_10X_DCS_INTERFERENCE_EVENTID:
		ath10k_wmi_event_dcs_interference(ar, skb);
		break;
	case WMI_10X_PDEV_TPC_CONFIG_EVENTID:
		ath10k_wmi_event_pdev_tpc_config(ar, skb);
		break;
	case WMI_10X_INST_RSSI_STATS_EVENTID:
		ath10k_wmi_event_inst_rssi_stats(ar, skb);
		break;
	case WMI_10X_VDEV_STANDBY_REQ_EVENTID:
		ath10k_wmi_event_vdev_standby_req(ar, skb);
		break;
	case WMI_10X_VDEV_RESUME_REQ_EVENTID:
		ath10k_wmi_event_vdev_resume_req(ar, skb);
		break;
	case WMI_10X_SERVICE_READY_EVENTID:
		ath10k_wmi_event_service_ready(ar, skb);
		break;
	case WMI_10X_READY_EVENTID:
		ath10k_wmi_event_ready(ar, skb);
		break;
	case WMI_10X_PDEV_UTF_EVENTID:
		/* ignore utf events */
		break;
	default:
		ath10k_warn(ar, "Unknown eventid: %d\n", id);
		break;
	}

out:
	dev_kfree_skb(skb);
}
3457 
/* Receive and dispatch one WMI event from 10.2 firmware.
 *
 * The event id is read from the wmi_cmd_hdr at the head of @skb, the
 * header is stripped, and the matching per-event handler is called.
 * The skb is consumed in all cases: freed here on exit, except for
 * mgmt rx events whose handler takes ownership of the buffer.
 */
static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_cmd_hdr *cmd_hdr;
	enum wmi_10_2_event_id id;

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);

	/* strip the WMI header; skb_pull() fails (returns NULL) when the
	 * frame is shorter than the header, in which case just free it */
	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
		goto out;

	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);

	switch (id) {
	case WMI_10_2_MGMT_RX_EVENTID:
		ath10k_wmi_event_mgmt_rx(ar, skb);
		/* mgmt_rx() owns the skb now! */
		return;
	case WMI_10_2_SCAN_EVENTID:
		ath10k_wmi_event_scan(ar, skb);
		break;
	case WMI_10_2_CHAN_INFO_EVENTID:
		ath10k_wmi_event_chan_info(ar, skb);
		break;
	case WMI_10_2_ECHO_EVENTID:
		ath10k_wmi_event_echo(ar, skb);
		break;
	case WMI_10_2_DEBUG_MESG_EVENTID:
		ath10k_wmi_event_debug_mesg(ar, skb);
		break;
	case WMI_10_2_UPDATE_STATS_EVENTID:
		ath10k_wmi_event_update_stats(ar, skb);
		break;
	case WMI_10_2_VDEV_START_RESP_EVENTID:
		ath10k_wmi_event_vdev_start_resp(ar, skb);
		break;
	case WMI_10_2_VDEV_STOPPED_EVENTID:
		ath10k_wmi_event_vdev_stopped(ar, skb);
		break;
	case WMI_10_2_PEER_STA_KICKOUT_EVENTID:
		ath10k_wmi_event_peer_sta_kickout(ar, skb);
		break;
	case WMI_10_2_HOST_SWBA_EVENTID:
		ath10k_wmi_event_host_swba(ar, skb);
		break;
	case WMI_10_2_TBTTOFFSET_UPDATE_EVENTID:
		ath10k_wmi_event_tbttoffset_update(ar, skb);
		break;
	case WMI_10_2_PHYERR_EVENTID:
		ath10k_wmi_event_phyerr(ar, skb);
		break;
	case WMI_10_2_ROAM_EVENTID:
		ath10k_wmi_event_roam(ar, skb);
		break;
	case WMI_10_2_PROFILE_MATCH:
		ath10k_wmi_event_profile_match(ar, skb);
		break;
	case WMI_10_2_DEBUG_PRINT_EVENTID:
		ath10k_wmi_event_debug_print(ar, skb);
		break;
	case WMI_10_2_PDEV_QVIT_EVENTID:
		ath10k_wmi_event_pdev_qvit(ar, skb);
		break;
	case WMI_10_2_WLAN_PROFILE_DATA_EVENTID:
		ath10k_wmi_event_wlan_profile_data(ar, skb);
		break;
	case WMI_10_2_RTT_MEASUREMENT_REPORT_EVENTID:
		ath10k_wmi_event_rtt_measurement_report(ar, skb);
		break;
	case WMI_10_2_TSF_MEASUREMENT_REPORT_EVENTID:
		ath10k_wmi_event_tsf_measurement_report(ar, skb);
		break;
	case WMI_10_2_RTT_ERROR_REPORT_EVENTID:
		ath10k_wmi_event_rtt_error_report(ar, skb);
		break;
	case WMI_10_2_WOW_WAKEUP_HOST_EVENTID:
		ath10k_wmi_event_wow_wakeup_host(ar, skb);
		break;
	case WMI_10_2_DCS_INTERFERENCE_EVENTID:
		ath10k_wmi_event_dcs_interference(ar, skb);
		break;
	case WMI_10_2_PDEV_TPC_CONFIG_EVENTID:
		ath10k_wmi_event_pdev_tpc_config(ar, skb);
		break;
	case WMI_10_2_INST_RSSI_STATS_EVENTID:
		ath10k_wmi_event_inst_rssi_stats(ar, skb);
		break;
	case WMI_10_2_VDEV_STANDBY_REQ_EVENTID:
		ath10k_wmi_event_vdev_standby_req(ar, skb);
		break;
	case WMI_10_2_VDEV_RESUME_REQ_EVENTID:
		ath10k_wmi_event_vdev_resume_req(ar, skb);
		break;
	case WMI_10_2_SERVICE_READY_EVENTID:
		ath10k_wmi_event_service_ready(ar, skb);
		break;
	case WMI_10_2_READY_EVENTID:
		ath10k_wmi_event_ready(ar, skb);
		break;
	case WMI_10_2_PDEV_TEMPERATURE_EVENTID:
		ath10k_wmi_event_temperature(ar, skb);
		break;
	/* events the firmware may send but the host has no handler for yet */
	case WMI_10_2_RTT_KEEPALIVE_EVENTID:
	case WMI_10_2_GPIO_INPUT_EVENTID:
	case WMI_10_2_PEER_RATECODE_LIST_EVENTID:
	case WMI_10_2_GENERIC_BUFFER_EVENTID:
	case WMI_10_2_MCAST_BUF_RELEASE_EVENTID:
	case WMI_10_2_MCAST_LIST_AGEOUT_EVENTID:
	case WMI_10_2_WDS_PEER_EVENTID:
		ath10k_dbg(ar, ATH10K_DBG_WMI,
			   "received event id %d not implemented\n", id);
		break;
	default:
		ath10k_warn(ar, "Unknown eventid: %d\n", id);
		break;
	}

out:
	dev_kfree_skb(skb);
}
3578 
/* HTC RX completion callback for the WMI control endpoint; forwards the
 * frame to the firmware-specific WMI RX op and logs any failure.
 */
static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
	int status = ath10k_wmi_rx(ar, skb);

	if (status != 0)
		ath10k_warn(ar, "failed to process wmi rx: %d\n", status);
}
3587 
3588 int ath10k_wmi_connect(struct ath10k *ar)
3589 {
3590 	int status;
3591 	struct ath10k_htc_svc_conn_req conn_req;
3592 	struct ath10k_htc_svc_conn_resp conn_resp;
3593 
3594 	memset(&conn_req, 0, sizeof(conn_req));
3595 	memset(&conn_resp, 0, sizeof(conn_resp));
3596 
3597 	/* these fields are the same for all service endpoints */
3598 	conn_req.ep_ops.ep_tx_complete = ath10k_wmi_htc_tx_complete;
3599 	conn_req.ep_ops.ep_rx_complete = ath10k_wmi_process_rx;
3600 	conn_req.ep_ops.ep_tx_credits = ath10k_wmi_op_ep_tx_credits;
3601 
3602 	/* connect to control service */
3603 	conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
3604 
3605 	status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
3606 	if (status) {
3607 		ath10k_warn(ar, "failed to connect to WMI CONTROL service status: %d\n",
3608 			    status);
3609 		return status;
3610 	}
3611 
3612 	ar->wmi.eid = conn_resp.eid;
3613 	return 0;
3614 }
3615 
3616 static struct sk_buff *
3617 ath10k_wmi_op_gen_pdev_set_rd(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
3618 			      u16 ctl2g, u16 ctl5g,
3619 			      enum wmi_dfs_region dfs_reg)
3620 {
3621 	struct wmi_pdev_set_regdomain_cmd *cmd;
3622 	struct sk_buff *skb;
3623 
3624 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3625 	if (!skb)
3626 		return ERR_PTR(-ENOMEM);
3627 
3628 	cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
3629 	cmd->reg_domain = __cpu_to_le32(rd);
3630 	cmd->reg_domain_2G = __cpu_to_le32(rd2g);
3631 	cmd->reg_domain_5G = __cpu_to_le32(rd5g);
3632 	cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
3633 	cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
3634 
3635 	ath10k_dbg(ar, ATH10K_DBG_WMI,
3636 		   "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n",
3637 		   rd, rd2g, rd5g, ctl2g, ctl5g);
3638 	return skb;
3639 }
3640 
3641 static struct sk_buff *
3642 ath10k_wmi_10x_op_gen_pdev_set_rd(struct ath10k *ar, u16 rd, u16 rd2g, u16
3643 				  rd5g, u16 ctl2g, u16 ctl5g,
3644 				  enum wmi_dfs_region dfs_reg)
3645 {
3646 	struct wmi_pdev_set_regdomain_cmd_10x *cmd;
3647 	struct sk_buff *skb;
3648 
3649 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3650 	if (!skb)
3651 		return ERR_PTR(-ENOMEM);
3652 
3653 	cmd = (struct wmi_pdev_set_regdomain_cmd_10x *)skb->data;
3654 	cmd->reg_domain = __cpu_to_le32(rd);
3655 	cmd->reg_domain_2G = __cpu_to_le32(rd2g);
3656 	cmd->reg_domain_5G = __cpu_to_le32(rd5g);
3657 	cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
3658 	cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
3659 	cmd->dfs_domain = __cpu_to_le32(dfs_reg);
3660 
3661 	ath10k_dbg(ar, ATH10K_DBG_WMI,
3662 		   "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x dfs_region %x\n",
3663 		   rd, rd2g, rd5g, ctl2g, ctl5g, dfs_reg);
3664 	return skb;
3665 }
3666 
3667 static struct sk_buff *
3668 ath10k_wmi_op_gen_pdev_suspend(struct ath10k *ar, u32 suspend_opt)
3669 {
3670 	struct wmi_pdev_suspend_cmd *cmd;
3671 	struct sk_buff *skb;
3672 
3673 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3674 	if (!skb)
3675 		return ERR_PTR(-ENOMEM);
3676 
3677 	cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
3678 	cmd->suspend_opt = __cpu_to_le32(suspend_opt);
3679 
3680 	return skb;
3681 }
3682 
3683 static struct sk_buff *
3684 ath10k_wmi_op_gen_pdev_resume(struct ath10k *ar)
3685 {
3686 	struct sk_buff *skb;
3687 
3688 	skb = ath10k_wmi_alloc_skb(ar, 0);
3689 	if (!skb)
3690 		return ERR_PTR(-ENOMEM);
3691 
3692 	return skb;
3693 }
3694 
3695 static struct sk_buff *
3696 ath10k_wmi_op_gen_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
3697 {
3698 	struct wmi_pdev_set_param_cmd *cmd;
3699 	struct sk_buff *skb;
3700 
3701 	if (id == WMI_PDEV_PARAM_UNSUPPORTED) {
3702 		ath10k_warn(ar, "pdev param %d not supported by firmware\n",
3703 			    id);
3704 		return ERR_PTR(-EOPNOTSUPP);
3705 	}
3706 
3707 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3708 	if (!skb)
3709 		return ERR_PTR(-ENOMEM);
3710 
3711 	cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
3712 	cmd->param_id    = __cpu_to_le32(id);
3713 	cmd->param_value = __cpu_to_le32(value);
3714 
3715 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n",
3716 		   id, value);
3717 	return skb;
3718 }
3719 
/* Fill @chunks with the host memory chunk descriptors (DMA address,
 * length, firmware request id) that are appended to the WMI INIT
 * command payload.
 */
void ath10k_wmi_put_host_mem_chunks(struct ath10k *ar,
				    struct wmi_host_mem_chunks *chunks)
{
	struct host_memory_chunk *chunk;
	int i;

	chunks->count = __cpu_to_le32(ar->wmi.num_mem_chunks);

	for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
		chunk = &chunks->items[i];
		/* NOTE(review): ptr is a __le32 on the wire, so only the low
		 * 32 bits of paddr are sent — presumably the chunks are
		 * allocated below 4 GiB; confirm against the allocator. */
		chunk->ptr = __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
		chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len);
		chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);

		ath10k_dbg(ar, ATH10K_DBG_WMI,
			   "wmi chunk %d len %d requested, addr 0x%llx\n",
			   i,
			   ar->wmi.mem_chunks[i].len,
			   (unsigned long long)ar->wmi.mem_chunks[i].paddr);
	}
}
3741 
/* Build the WMI INIT command for main firmware.
 *
 * The resource config is filled from the TARGET_* compile-time limits
 * and the host memory chunks previously requested by the firmware are
 * appended after the fixed-size command.
 */
static struct sk_buff *ath10k_wmi_op_gen_init(struct ath10k *ar)
{
	struct wmi_init_cmd *cmd;
	struct sk_buff *buf;
	struct wmi_resource_config config = {};
	u32 len, val;

	config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS);
	config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS);
	config.num_offload_peers = __cpu_to_le32(TARGET_NUM_OFFLOAD_PEERS);

	config.num_offload_reorder_bufs =
		__cpu_to_le32(TARGET_NUM_OFFLOAD_REORDER_BUFS);

	config.num_peer_keys = __cpu_to_le32(TARGET_NUM_PEER_KEYS);
	config.num_tids = __cpu_to_le32(TARGET_NUM_TIDS);
	config.ast_skid_limit = __cpu_to_le32(TARGET_AST_SKID_LIMIT);
	config.tx_chain_mask = __cpu_to_le32(TARGET_TX_CHAIN_MASK);
	config.rx_chain_mask = __cpu_to_le32(TARGET_RX_CHAIN_MASK);
	config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_be = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_RX_TIMEOUT_HI_PRI);
	config.rx_decap_mode = __cpu_to_le32(TARGET_RX_DECAP_MODE);

	config.scan_max_pending_reqs =
		__cpu_to_le32(TARGET_SCAN_MAX_PENDING_REQS);

	config.bmiss_offload_max_vdev =
		__cpu_to_le32(TARGET_BMISS_OFFLOAD_MAX_VDEV);

	config.roam_offload_max_vdev =
		__cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_VDEV);

	config.roam_offload_max_ap_profiles =
		__cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES);

	config.num_mcast_groups = __cpu_to_le32(TARGET_NUM_MCAST_GROUPS);
	config.num_mcast_table_elems =
		__cpu_to_le32(TARGET_NUM_MCAST_TABLE_ELEMS);

	config.mcast2ucast_mode = __cpu_to_le32(TARGET_MCAST2UCAST_MODE);
	config.tx_dbg_log_size = __cpu_to_le32(TARGET_TX_DBG_LOG_SIZE);
	config.num_wds_entries = __cpu_to_le32(TARGET_NUM_WDS_ENTRIES);
	config.dma_burst_size = __cpu_to_le32(TARGET_DMA_BURST_SIZE);
	config.mac_aggr_delim = __cpu_to_le32(TARGET_MAC_AGGR_DELIM);

	val = TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
	config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);

	config.vow_config = __cpu_to_le32(TARGET_VOW_CONFIG);

	config.gtk_offload_max_vdev =
		__cpu_to_le32(TARGET_GTK_OFFLOAD_MAX_VDEV);

	config.num_msdu_desc = __cpu_to_le32(TARGET_NUM_MSDU_DESC);
	config.max_frag_entries = __cpu_to_le32(TARGET_MAX_FRAG_ENTRIES);

	/* command followed by one host_memory_chunk per registered chunk */
	len = sizeof(*cmd) +
	      (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);

	buf = ath10k_wmi_alloc_skb(ar, len);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	cmd = (struct wmi_init_cmd *)buf->data;

	memcpy(&cmd->resource_config, &config, sizeof(config));
	ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init\n");
	return buf;
}
3815 
/* Build the WMI INIT command for 10.1 firmware.
 *
 * Same shape as ath10k_wmi_op_gen_init() but uses the 10.x resource
 * config layout and the TARGET_10X_* limits.
 */
static struct sk_buff *ath10k_wmi_10_1_op_gen_init(struct ath10k *ar)
{
	struct wmi_init_cmd_10x *cmd;
	struct sk_buff *buf;
	struct wmi_resource_config_10x config = {};
	u32 len, val;

	config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
	config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
	config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
	config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
	config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
	config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
	config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
	config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
	config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE);

	config.scan_max_pending_reqs =
		__cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);

	config.bmiss_offload_max_vdev =
		__cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV);

	config.roam_offload_max_vdev =
		__cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV);

	config.roam_offload_max_ap_profiles =
		__cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES);

	config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS);
	config.num_mcast_table_elems =
		__cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS);

	config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
	config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
	config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
	config.dma_burst_size = __cpu_to_le32(TARGET_10X_DMA_BURST_SIZE);
	config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);

	val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
	config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);

	config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG);

	config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
	config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);

	/* command followed by one host_memory_chunk per registered chunk */
	len = sizeof(*cmd) +
	      (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);

	buf = ath10k_wmi_alloc_skb(ar, len);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	cmd = (struct wmi_init_cmd_10x *)buf->data;

	memcpy(&cmd->resource_config, &config, sizeof(config));
	ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10x\n");
	return buf;
}
3881 
/* Build the WMI INIT command for 10.2 firmware.
 *
 * Reuses the 10.x common resource config (with a 10.2-specific DMA
 * burst size) and additionally advertises host feature flags in the
 * 10.2 feature_mask.
 */
static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
{
	struct wmi_init_cmd_10_2 *cmd;
	struct sk_buff *buf;
	struct wmi_resource_config_10x config = {};
	u32 len, val, features;

	config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
	config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
	config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
	config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
	config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
	config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
	config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
	config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
	config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE);

	config.scan_max_pending_reqs =
		__cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);

	config.bmiss_offload_max_vdev =
		__cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV);

	config.roam_offload_max_vdev =
		__cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV);

	config.roam_offload_max_ap_profiles =
		__cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES);

	config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS);
	config.num_mcast_table_elems =
		__cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS);

	config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
	config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
	config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
	config.dma_burst_size = __cpu_to_le32(TARGET_10_2_DMA_BURST_SIZE);
	config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);

	val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
	config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);

	config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG);

	config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
	config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);

	/* command followed by one host_memory_chunk per registered chunk */
	len = sizeof(*cmd) +
	      (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);

	buf = ath10k_wmi_alloc_skb(ar, len);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	cmd = (struct wmi_init_cmd_10_2 *)buf->data;

	/* advertise host features; coex GPIO only if the firmware
	 * reported the service */
	features = WMI_10_2_RX_BATCH_MODE;
	if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map))
		features |= WMI_10_2_COEX_GPIO;
	cmd->resource_config.feature_mask = __cpu_to_le32(features);

	memcpy(&cmd->resource_config.common, &config, sizeof(config));
	ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.2\n");
	return buf;
}
3952 
3953 int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg)
3954 {
3955 	if (arg->ie_len && !arg->ie)
3956 		return -EINVAL;
3957 	if (arg->n_channels && !arg->channels)
3958 		return -EINVAL;
3959 	if (arg->n_ssids && !arg->ssids)
3960 		return -EINVAL;
3961 	if (arg->n_bssids && !arg->bssids)
3962 		return -EINVAL;
3963 
3964 	if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN)
3965 		return -EINVAL;
3966 	if (arg->n_channels > ARRAY_SIZE(arg->channels))
3967 		return -EINVAL;
3968 	if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID)
3969 		return -EINVAL;
3970 	if (arg->n_bssids > WLAN_SCAN_PARAMS_MAX_BSSID)
3971 		return -EINVAL;
3972 
3973 	return 0;
3974 }
3975 
3976 static size_t
3977 ath10k_wmi_start_scan_tlvs_len(const struct wmi_start_scan_arg *arg)
3978 {
3979 	int len = 0;
3980 
3981 	if (arg->ie_len) {
3982 		len += sizeof(struct wmi_ie_data);
3983 		len += roundup(arg->ie_len, 4);
3984 	}
3985 
3986 	if (arg->n_channels) {
3987 		len += sizeof(struct wmi_chan_list);
3988 		len += sizeof(__le32) * arg->n_channels;
3989 	}
3990 
3991 	if (arg->n_ssids) {
3992 		len += sizeof(struct wmi_ssid_list);
3993 		len += sizeof(struct wmi_ssid) * arg->n_ssids;
3994 	}
3995 
3996 	if (arg->n_bssids) {
3997 		len += sizeof(struct wmi_bssid_list);
3998 		len += sizeof(struct wmi_mac_addr) * arg->n_bssids;
3999 	}
4000 
4001 	return len;
4002 }
4003 
4004 void ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn,
4005 				      const struct wmi_start_scan_arg *arg)
4006 {
4007 	u32 scan_id;
4008 	u32 scan_req_id;
4009 
4010 	scan_id  = WMI_HOST_SCAN_REQ_ID_PREFIX;
4011 	scan_id |= arg->scan_id;
4012 
4013 	scan_req_id  = WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
4014 	scan_req_id |= arg->scan_req_id;
4015 
4016 	cmn->scan_id            = __cpu_to_le32(scan_id);
4017 	cmn->scan_req_id        = __cpu_to_le32(scan_req_id);
4018 	cmn->vdev_id            = __cpu_to_le32(arg->vdev_id);
4019 	cmn->scan_priority      = __cpu_to_le32(arg->scan_priority);
4020 	cmn->notify_scan_events = __cpu_to_le32(arg->notify_scan_events);
4021 	cmn->dwell_time_active  = __cpu_to_le32(arg->dwell_time_active);
4022 	cmn->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive);
4023 	cmn->min_rest_time      = __cpu_to_le32(arg->min_rest_time);
4024 	cmn->max_rest_time      = __cpu_to_le32(arg->max_rest_time);
4025 	cmn->repeat_probe_time  = __cpu_to_le32(arg->repeat_probe_time);
4026 	cmn->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time);
4027 	cmn->idle_time          = __cpu_to_le32(arg->idle_time);
4028 	cmn->max_scan_time      = __cpu_to_le32(arg->max_scan_time);
4029 	cmn->probe_delay        = __cpu_to_le32(arg->probe_delay);
4030 	cmn->scan_ctrl_flags    = __cpu_to_le32(arg->scan_ctrl_flags);
4031 }
4032 
/* Serialize the optional start-scan TLVs (channel list, SSID list,
 * BSSID list, IE blob) into @tlvs.
 *
 * The emit order and per-section sizes must stay in lockstep with
 * ath10k_wmi_start_scan_tlvs_len(), which sized the buffer; @arg is
 * assumed to have passed ath10k_wmi_start_scan_verify() so the counts
 * fit the destination arrays.
 *
 * Note: arithmetic on the void *ptr cursor relies on the GNU
 * sizeof(void) == 1 extension, as is usual in kernel code.
 */
static void
ath10k_wmi_put_start_scan_tlvs(struct wmi_start_scan_tlvs *tlvs,
			       const struct wmi_start_scan_arg *arg)
{
	struct wmi_ie_data *ie;
	struct wmi_chan_list *channels;
	struct wmi_ssid_list *ssids;
	struct wmi_bssid_list *bssids;
	void *ptr = tlvs->tlvs;
	int i;

	if (arg->n_channels) {
		channels = ptr;
		channels->tag = __cpu_to_le32(WMI_CHAN_LIST_TAG);
		channels->num_chan = __cpu_to_le32(arg->n_channels);

		for (i = 0; i < arg->n_channels; i++)
			channels->channel_list[i].freq =
				__cpu_to_le16(arg->channels[i]);

		ptr += sizeof(*channels);
		ptr += sizeof(__le32) * arg->n_channels;
	}

	if (arg->n_ssids) {
		ssids = ptr;
		ssids->tag = __cpu_to_le32(WMI_SSID_LIST_TAG);
		ssids->num_ssids = __cpu_to_le32(arg->n_ssids);

		for (i = 0; i < arg->n_ssids; i++) {
			ssids->ssids[i].ssid_len =
				__cpu_to_le32(arg->ssids[i].len);
			memcpy(&ssids->ssids[i].ssid,
			       arg->ssids[i].ssid,
			       arg->ssids[i].len);
		}

		ptr += sizeof(*ssids);
		ptr += sizeof(struct wmi_ssid) * arg->n_ssids;
	}

	if (arg->n_bssids) {
		bssids = ptr;
		bssids->tag = __cpu_to_le32(WMI_BSSID_LIST_TAG);
		bssids->num_bssid = __cpu_to_le32(arg->n_bssids);

		for (i = 0; i < arg->n_bssids; i++)
			memcpy(&bssids->bssid_list[i],
			       arg->bssids[i].bssid,
			       ETH_ALEN);

		ptr += sizeof(*bssids);
		ptr += sizeof(struct wmi_mac_addr) * arg->n_bssids;
	}

	if (arg->ie_len) {
		ie = ptr;
		ie->tag = __cpu_to_le32(WMI_IE_TAG);
		ie->ie_len = __cpu_to_le32(arg->ie_len);
		memcpy(ie->ie_data, arg->ie, arg->ie_len);

		/* IE data padded to 4 bytes, matching the len calculation */
		ptr += sizeof(*ie);
		ptr += roundup(arg->ie_len, 4);
	}
}
4098 
4099 static struct sk_buff *
4100 ath10k_wmi_op_gen_start_scan(struct ath10k *ar,
4101 			     const struct wmi_start_scan_arg *arg)
4102 {
4103 	struct wmi_start_scan_cmd *cmd;
4104 	struct sk_buff *skb;
4105 	size_t len;
4106 	int ret;
4107 
4108 	ret = ath10k_wmi_start_scan_verify(arg);
4109 	if (ret)
4110 		return ERR_PTR(ret);
4111 
4112 	len = sizeof(*cmd) + ath10k_wmi_start_scan_tlvs_len(arg);
4113 	skb = ath10k_wmi_alloc_skb(ar, len);
4114 	if (!skb)
4115 		return ERR_PTR(-ENOMEM);
4116 
4117 	cmd = (struct wmi_start_scan_cmd *)skb->data;
4118 
4119 	ath10k_wmi_put_start_scan_common(&cmd->common, arg);
4120 	ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
4121 
4122 	cmd->burst_duration_ms = __cpu_to_le32(0);
4123 
4124 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi start scan\n");
4125 	return skb;
4126 }
4127 
4128 static struct sk_buff *
4129 ath10k_wmi_10x_op_gen_start_scan(struct ath10k *ar,
4130 				 const struct wmi_start_scan_arg *arg)
4131 {
4132 	struct wmi_10x_start_scan_cmd *cmd;
4133 	struct sk_buff *skb;
4134 	size_t len;
4135 	int ret;
4136 
4137 	ret = ath10k_wmi_start_scan_verify(arg);
4138 	if (ret)
4139 		return ERR_PTR(ret);
4140 
4141 	len = sizeof(*cmd) + ath10k_wmi_start_scan_tlvs_len(arg);
4142 	skb = ath10k_wmi_alloc_skb(ar, len);
4143 	if (!skb)
4144 		return ERR_PTR(-ENOMEM);
4145 
4146 	cmd = (struct wmi_10x_start_scan_cmd *)skb->data;
4147 
4148 	ath10k_wmi_put_start_scan_common(&cmd->common, arg);
4149 	ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
4150 
4151 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi 10x start scan\n");
4152 	return skb;
4153 }
4154 
4155 void ath10k_wmi_start_scan_init(struct ath10k *ar,
4156 				struct wmi_start_scan_arg *arg)
4157 {
4158 	/* setup commonly used values */
4159 	arg->scan_req_id = 1;
4160 	arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
4161 	arg->dwell_time_active = 50;
4162 	arg->dwell_time_passive = 150;
4163 	arg->min_rest_time = 50;
4164 	arg->max_rest_time = 500;
4165 	arg->repeat_probe_time = 0;
4166 	arg->probe_spacing_time = 0;
4167 	arg->idle_time = 0;
4168 	arg->max_scan_time = 20000;
4169 	arg->probe_delay = 5;
4170 	arg->notify_scan_events = WMI_SCAN_EVENT_STARTED
4171 		| WMI_SCAN_EVENT_COMPLETED
4172 		| WMI_SCAN_EVENT_BSS_CHANNEL
4173 		| WMI_SCAN_EVENT_FOREIGN_CHANNEL
4174 		| WMI_SCAN_EVENT_DEQUEUED;
4175 	arg->scan_ctrl_flags |= WMI_SCAN_ADD_OFDM_RATES;
4176 	arg->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT;
4177 	arg->n_bssids = 1;
4178 	arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF";
4179 }
4180 
4181 static struct sk_buff *
4182 ath10k_wmi_op_gen_stop_scan(struct ath10k *ar,
4183 			    const struct wmi_stop_scan_arg *arg)
4184 {
4185 	struct wmi_stop_scan_cmd *cmd;
4186 	struct sk_buff *skb;
4187 	u32 scan_id;
4188 	u32 req_id;
4189 
4190 	if (arg->req_id > 0xFFF)
4191 		return ERR_PTR(-EINVAL);
4192 	if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
4193 		return ERR_PTR(-EINVAL);
4194 
4195 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
4196 	if (!skb)
4197 		return ERR_PTR(-ENOMEM);
4198 
4199 	scan_id = arg->u.scan_id;
4200 	scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;
4201 
4202 	req_id = arg->req_id;
4203 	req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
4204 
4205 	cmd = (struct wmi_stop_scan_cmd *)skb->data;
4206 	cmd->req_type    = __cpu_to_le32(arg->req_type);
4207 	cmd->vdev_id     = __cpu_to_le32(arg->u.vdev_id);
4208 	cmd->scan_id     = __cpu_to_le32(scan_id);
4209 	cmd->scan_req_id = __cpu_to_le32(req_id);
4210 
4211 	ath10k_dbg(ar, ATH10K_DBG_WMI,
4212 		   "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n",
4213 		   arg->req_id, arg->req_type, arg->u.scan_id);
4214 	return skb;
4215 }
4216 
4217 static struct sk_buff *
4218 ath10k_wmi_op_gen_vdev_create(struct ath10k *ar, u32 vdev_id,
4219 			      enum wmi_vdev_type type,
4220 			      enum wmi_vdev_subtype subtype,
4221 			      const u8 macaddr[ETH_ALEN])
4222 {
4223 	struct wmi_vdev_create_cmd *cmd;
4224 	struct sk_buff *skb;
4225 
4226 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
4227 	if (!skb)
4228 		return ERR_PTR(-ENOMEM);
4229 
4230 	cmd = (struct wmi_vdev_create_cmd *)skb->data;
4231 	cmd->vdev_id      = __cpu_to_le32(vdev_id);
4232 	cmd->vdev_type    = __cpu_to_le32(type);
4233 	cmd->vdev_subtype = __cpu_to_le32(subtype);
4234 	ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);
4235 
4236 	ath10k_dbg(ar, ATH10K_DBG_WMI,
4237 		   "WMI vdev create: id %d type %d subtype %d macaddr %pM\n",
4238 		   vdev_id, type, subtype, macaddr);
4239 	return skb;
4240 }
4241 
4242 static struct sk_buff *
4243 ath10k_wmi_op_gen_vdev_delete(struct ath10k *ar, u32 vdev_id)
4244 {
4245 	struct wmi_vdev_delete_cmd *cmd;
4246 	struct sk_buff *skb;
4247 
4248 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
4249 	if (!skb)
4250 		return ERR_PTR(-ENOMEM);
4251 
4252 	cmd = (struct wmi_vdev_delete_cmd *)skb->data;
4253 	cmd->vdev_id = __cpu_to_le32(vdev_id);
4254 
4255 	ath10k_dbg(ar, ATH10K_DBG_WMI,
4256 		   "WMI vdev delete id %d\n", vdev_id);
4257 	return skb;
4258 }
4259 
/* Build a vdev (re)start request from @arg.
 *
 * @restart only selects the string used in the debug log here; the
 * caller picks the actual start vs. restart command id when sending.
 * Returns the skb, or ERR_PTR(-EINVAL) on inconsistent arguments /
 * ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct sk_buff *
ath10k_wmi_op_gen_vdev_start(struct ath10k *ar,
			     const struct wmi_vdev_start_request_arg *arg,
			     bool restart)
{
	struct wmi_vdev_start_request_cmd *cmd;
	struct sk_buff *skb;
	const char *cmdname;
	u32 flags = 0;

	/* a hidden SSID without an SSID to hide is a caller bug */
	if (WARN_ON(arg->hidden_ssid && !arg->ssid))
		return ERR_PTR(-EINVAL);
	/* SSID must fit the fixed-size field in the command */
	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
		return ERR_PTR(-EINVAL);

	if (restart)
		cmdname = "restart";
	else
		cmdname = "start";

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	if (arg->hidden_ssid)
		flags |= WMI_VDEV_START_HIDDEN_SSID;
	if (arg->pmf_enabled)
		flags |= WMI_VDEV_START_PMF_ENABLED;

	cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
	cmd->vdev_id         = __cpu_to_le32(arg->vdev_id);
	cmd->disable_hw_ack  = __cpu_to_le32(arg->disable_hw_ack);
	cmd->beacon_interval = __cpu_to_le32(arg->bcn_intval);
	cmd->dtim_period     = __cpu_to_le32(arg->dtim_period);
	cmd->flags           = __cpu_to_le32(flags);
	cmd->bcn_tx_rate     = __cpu_to_le32(arg->bcn_tx_rate);
	cmd->bcn_tx_power    = __cpu_to_le32(arg->bcn_tx_power);

	if (arg->ssid) {
		cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len);
		memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
	}

	ath10k_wmi_put_wmi_channel(&cmd->chan, &arg->channel);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, ch_flags: 0x%0X, max_power: %d\n",
		   cmdname, arg->vdev_id,
		   flags, arg->channel.freq, arg->channel.mode,
		   cmd->chan.flags, arg->channel.max_power);

	return skb;
}
4313 
4314 static struct sk_buff *
4315 ath10k_wmi_op_gen_vdev_stop(struct ath10k *ar, u32 vdev_id)
4316 {
4317 	struct wmi_vdev_stop_cmd *cmd;
4318 	struct sk_buff *skb;
4319 
4320 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
4321 	if (!skb)
4322 		return ERR_PTR(-ENOMEM);
4323 
4324 	cmd = (struct wmi_vdev_stop_cmd *)skb->data;
4325 	cmd->vdev_id = __cpu_to_le32(vdev_id);
4326 
4327 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id);
4328 	return skb;
4329 }
4330 
4331 static struct sk_buff *
4332 ath10k_wmi_op_gen_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
4333 			  const u8 *bssid)
4334 {
4335 	struct wmi_vdev_up_cmd *cmd;
4336 	struct sk_buff *skb;
4337 
4338 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
4339 	if (!skb)
4340 		return ERR_PTR(-ENOMEM);
4341 
4342 	cmd = (struct wmi_vdev_up_cmd *)skb->data;
4343 	cmd->vdev_id       = __cpu_to_le32(vdev_id);
4344 	cmd->vdev_assoc_id = __cpu_to_le32(aid);
4345 	ether_addr_copy(cmd->vdev_bssid.addr, bssid);
4346 
4347 	ath10k_dbg(ar, ATH10K_DBG_WMI,
4348 		   "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
4349 		   vdev_id, aid, bssid);
4350 	return skb;
4351 }
4352 
4353 static struct sk_buff *
4354 ath10k_wmi_op_gen_vdev_down(struct ath10k *ar, u32 vdev_id)
4355 {
4356 	struct wmi_vdev_down_cmd *cmd;
4357 	struct sk_buff *skb;
4358 
4359 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
4360 	if (!skb)
4361 		return ERR_PTR(-ENOMEM);
4362 
4363 	cmd = (struct wmi_vdev_down_cmd *)skb->data;
4364 	cmd->vdev_id = __cpu_to_le32(vdev_id);
4365 
4366 	ath10k_dbg(ar, ATH10K_DBG_WMI,
4367 		   "wmi mgmt vdev down id 0x%x\n", vdev_id);
4368 	return skb;
4369 }
4370 
4371 static struct sk_buff *
4372 ath10k_wmi_op_gen_vdev_set_param(struct ath10k *ar, u32 vdev_id,
4373 				 u32 param_id, u32 param_value)
4374 {
4375 	struct wmi_vdev_set_param_cmd *cmd;
4376 	struct sk_buff *skb;
4377 
4378 	if (param_id == WMI_VDEV_PARAM_UNSUPPORTED) {
4379 		ath10k_dbg(ar, ATH10K_DBG_WMI,
4380 			   "vdev param %d not supported by firmware\n",
4381 			    param_id);
4382 		return ERR_PTR(-EOPNOTSUPP);
4383 	}
4384 
4385 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
4386 	if (!skb)
4387 		return ERR_PTR(-ENOMEM);
4388 
4389 	cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
4390 	cmd->vdev_id     = __cpu_to_le32(vdev_id);
4391 	cmd->param_id    = __cpu_to_le32(param_id);
4392 	cmd->param_value = __cpu_to_le32(param_value);
4393 
4394 	ath10k_dbg(ar, ATH10K_DBG_WMI,
4395 		   "wmi vdev id 0x%x set param %d value %d\n",
4396 		   vdev_id, param_id, param_value);
4397 	return skb;
4398 }
4399 
/* Build a WMI vdev install-key command.
 *
 * Sanity rules enforced up front: WMI_CIPHER_NONE must not carry key
 * material, and any real cipher must carry some. The raw key bytes are
 * appended after the fixed-size command, hence the variable-length
 * allocation of sizeof(*cmd) + key_len.
 */
static struct sk_buff *
ath10k_wmi_op_gen_vdev_install_key(struct ath10k *ar,
				   const struct wmi_vdev_install_key_arg *arg)
{
	struct wmi_vdev_install_key_cmd *cmd;
	struct sk_buff *skb;

	if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL)
		return ERR_PTR(-EINVAL);
	if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL)
		return ERR_PTR(-EINVAL);

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd) + arg->key_len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
	cmd->vdev_id       = __cpu_to_le32(arg->vdev_id);
	cmd->key_idx       = __cpu_to_le32(arg->key_idx);
	cmd->key_flags     = __cpu_to_le32(arg->key_flags);
	cmd->key_cipher    = __cpu_to_le32(arg->key_cipher);
	cmd->key_len       = __cpu_to_le32(arg->key_len);
	cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len);
	cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len);

	/* macaddr may be NULL - presumably for non-pairwise keys; the
	 * peer_macaddr field is then left zeroed by the allocator.
	 * TODO(review): confirm against callers in mac.c.
	 */
	if (arg->macaddr)
		ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
	if (arg->key_data)
		memcpy(cmd->key_data, arg->key_data, arg->key_len);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi vdev install key idx %d cipher %d len %d\n",
		   arg->key_idx, arg->key_cipher, arg->key_len);
	return skb;
}
4435 
/* Build a WMI vdev spectral-scan configuration command.
 *
 * Pure one-to-one serialization of @arg into the firmware's
 * little-endian wire format; no field is validated or derived here.
 */
static struct sk_buff *
ath10k_wmi_op_gen_vdev_spectral_conf(struct ath10k *ar,
				     const struct wmi_vdev_spectral_conf_arg *arg)
{
	struct wmi_vdev_spectral_conf_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	cmd = (struct wmi_vdev_spectral_conf_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->scan_count = __cpu_to_le32(arg->scan_count);
	cmd->scan_period = __cpu_to_le32(arg->scan_period);
	cmd->scan_priority = __cpu_to_le32(arg->scan_priority);
	cmd->scan_fft_size = __cpu_to_le32(arg->scan_fft_size);
	cmd->scan_gc_ena = __cpu_to_le32(arg->scan_gc_ena);
	cmd->scan_restart_ena = __cpu_to_le32(arg->scan_restart_ena);
	cmd->scan_noise_floor_ref = __cpu_to_le32(arg->scan_noise_floor_ref);
	cmd->scan_init_delay = __cpu_to_le32(arg->scan_init_delay);
	cmd->scan_nb_tone_thr = __cpu_to_le32(arg->scan_nb_tone_thr);
	cmd->scan_str_bin_thr = __cpu_to_le32(arg->scan_str_bin_thr);
	cmd->scan_wb_rpt_mode = __cpu_to_le32(arg->scan_wb_rpt_mode);
	cmd->scan_rssi_rpt_mode = __cpu_to_le32(arg->scan_rssi_rpt_mode);
	cmd->scan_rssi_thr = __cpu_to_le32(arg->scan_rssi_thr);
	cmd->scan_pwr_format = __cpu_to_le32(arg->scan_pwr_format);
	cmd->scan_rpt_mode = __cpu_to_le32(arg->scan_rpt_mode);
	cmd->scan_bin_scale = __cpu_to_le32(arg->scan_bin_scale);
	cmd->scan_dbm_adj = __cpu_to_le32(arg->scan_dbm_adj);
	cmd->scan_chn_mask = __cpu_to_le32(arg->scan_chn_mask);

	return skb;
}
4470 
4471 static struct sk_buff *
4472 ath10k_wmi_op_gen_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id,
4473 				       u32 trigger, u32 enable)
4474 {
4475 	struct wmi_vdev_spectral_enable_cmd *cmd;
4476 	struct sk_buff *skb;
4477 
4478 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
4479 	if (!skb)
4480 		return ERR_PTR(-ENOMEM);
4481 
4482 	cmd = (struct wmi_vdev_spectral_enable_cmd *)skb->data;
4483 	cmd->vdev_id = __cpu_to_le32(vdev_id);
4484 	cmd->trigger_cmd = __cpu_to_le32(trigger);
4485 	cmd->enable_cmd = __cpu_to_le32(enable);
4486 
4487 	return skb;
4488 }
4489 
4490 static struct sk_buff *
4491 ath10k_wmi_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
4492 			      const u8 peer_addr[ETH_ALEN],
4493 			      enum wmi_peer_type peer_type)
4494 {
4495 	struct wmi_peer_create_cmd *cmd;
4496 	struct sk_buff *skb;
4497 
4498 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
4499 	if (!skb)
4500 		return ERR_PTR(-ENOMEM);
4501 
4502 	cmd = (struct wmi_peer_create_cmd *)skb->data;
4503 	cmd->vdev_id = __cpu_to_le32(vdev_id);
4504 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
4505 
4506 	ath10k_dbg(ar, ATH10K_DBG_WMI,
4507 		   "wmi peer create vdev_id %d peer_addr %pM\n",
4508 		   vdev_id, peer_addr);
4509 	return skb;
4510 }
4511 
4512 static struct sk_buff *
4513 ath10k_wmi_op_gen_peer_delete(struct ath10k *ar, u32 vdev_id,
4514 			      const u8 peer_addr[ETH_ALEN])
4515 {
4516 	struct wmi_peer_delete_cmd *cmd;
4517 	struct sk_buff *skb;
4518 
4519 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
4520 	if (!skb)
4521 		return ERR_PTR(-ENOMEM);
4522 
4523 	cmd = (struct wmi_peer_delete_cmd *)skb->data;
4524 	cmd->vdev_id = __cpu_to_le32(vdev_id);
4525 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
4526 
4527 	ath10k_dbg(ar, ATH10K_DBG_WMI,
4528 		   "wmi peer delete vdev_id %d peer_addr %pM\n",
4529 		   vdev_id, peer_addr);
4530 	return skb;
4531 }
4532 
4533 static struct sk_buff *
4534 ath10k_wmi_op_gen_peer_flush(struct ath10k *ar, u32 vdev_id,
4535 			     const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
4536 {
4537 	struct wmi_peer_flush_tids_cmd *cmd;
4538 	struct sk_buff *skb;
4539 
4540 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
4541 	if (!skb)
4542 		return ERR_PTR(-ENOMEM);
4543 
4544 	cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
4545 	cmd->vdev_id         = __cpu_to_le32(vdev_id);
4546 	cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
4547 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
4548 
4549 	ath10k_dbg(ar, ATH10K_DBG_WMI,
4550 		   "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n",
4551 		   vdev_id, peer_addr, tid_bitmap);
4552 	return skb;
4553 }
4554 
4555 static struct sk_buff *
4556 ath10k_wmi_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id,
4557 				 const u8 *peer_addr,
4558 				 enum wmi_peer_param param_id,
4559 				 u32 param_value)
4560 {
4561 	struct wmi_peer_set_param_cmd *cmd;
4562 	struct sk_buff *skb;
4563 
4564 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
4565 	if (!skb)
4566 		return ERR_PTR(-ENOMEM);
4567 
4568 	cmd = (struct wmi_peer_set_param_cmd *)skb->data;
4569 	cmd->vdev_id     = __cpu_to_le32(vdev_id);
4570 	cmd->param_id    = __cpu_to_le32(param_id);
4571 	cmd->param_value = __cpu_to_le32(param_value);
4572 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
4573 
4574 	ath10k_dbg(ar, ATH10K_DBG_WMI,
4575 		   "wmi vdev %d peer 0x%pM set param %d value %d\n",
4576 		   vdev_id, peer_addr, param_id, param_value);
4577 	return skb;
4578 }
4579 
4580 static struct sk_buff *
4581 ath10k_wmi_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id,
4582 			     enum wmi_sta_ps_mode psmode)
4583 {
4584 	struct wmi_sta_powersave_mode_cmd *cmd;
4585 	struct sk_buff *skb;
4586 
4587 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
4588 	if (!skb)
4589 		return ERR_PTR(-ENOMEM);
4590 
4591 	cmd = (struct wmi_sta_powersave_mode_cmd *)skb->data;
4592 	cmd->vdev_id     = __cpu_to_le32(vdev_id);
4593 	cmd->sta_ps_mode = __cpu_to_le32(psmode);
4594 
4595 	ath10k_dbg(ar, ATH10K_DBG_WMI,
4596 		   "wmi set powersave id 0x%x mode %d\n",
4597 		   vdev_id, psmode);
4598 	return skb;
4599 }
4600 
4601 static struct sk_buff *
4602 ath10k_wmi_op_gen_set_sta_ps(struct ath10k *ar, u32 vdev_id,
4603 			     enum wmi_sta_powersave_param param_id,
4604 			     u32 value)
4605 {
4606 	struct wmi_sta_powersave_param_cmd *cmd;
4607 	struct sk_buff *skb;
4608 
4609 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
4610 	if (!skb)
4611 		return ERR_PTR(-ENOMEM);
4612 
4613 	cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
4614 	cmd->vdev_id     = __cpu_to_le32(vdev_id);
4615 	cmd->param_id    = __cpu_to_le32(param_id);
4616 	cmd->param_value = __cpu_to_le32(value);
4617 
4618 	ath10k_dbg(ar, ATH10K_DBG_WMI,
4619 		   "wmi sta ps param vdev_id 0x%x param %d value %d\n",
4620 		   vdev_id, param_id, value);
4621 	return skb;
4622 }
4623 
4624 static struct sk_buff *
4625 ath10k_wmi_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac,
4626 			    enum wmi_ap_ps_peer_param param_id, u32 value)
4627 {
4628 	struct wmi_ap_ps_peer_cmd *cmd;
4629 	struct sk_buff *skb;
4630 
4631 	if (!mac)
4632 		return ERR_PTR(-EINVAL);
4633 
4634 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
4635 	if (!skb)
4636 		return ERR_PTR(-ENOMEM);
4637 
4638 	cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
4639 	cmd->vdev_id = __cpu_to_le32(vdev_id);
4640 	cmd->param_id = __cpu_to_le32(param_id);
4641 	cmd->param_value = __cpu_to_le32(value);
4642 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
4643 
4644 	ath10k_dbg(ar, ATH10K_DBG_WMI,
4645 		   "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n",
4646 		   vdev_id, param_id, value, mac);
4647 	return skb;
4648 }
4649 
4650 static struct sk_buff *
4651 ath10k_wmi_op_gen_scan_chan_list(struct ath10k *ar,
4652 				 const struct wmi_scan_chan_list_arg *arg)
4653 {
4654 	struct wmi_scan_chan_list_cmd *cmd;
4655 	struct sk_buff *skb;
4656 	struct wmi_channel_arg *ch;
4657 	struct wmi_channel *ci;
4658 	int len;
4659 	int i;
4660 
4661 	len = sizeof(*cmd) + arg->n_channels * sizeof(struct wmi_channel);
4662 
4663 	skb = ath10k_wmi_alloc_skb(ar, len);
4664 	if (!skb)
4665 		return ERR_PTR(-EINVAL);
4666 
4667 	cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
4668 	cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);
4669 
4670 	for (i = 0; i < arg->n_channels; i++) {
4671 		ch = &arg->channels[i];
4672 		ci = &cmd->chan_info[i];
4673 
4674 		ath10k_wmi_put_wmi_channel(ci, ch);
4675 	}
4676 
4677 	return skb;
4678 }
4679 
/* Serialize the firmware-branch-independent part of a peer assoc
 * completion command into @buf.
 *
 * Rate arrays are copied with the element counts taken straight from
 * @arg; callers are expected to have run
 * ath10k_wmi_peer_assoc_check_arg() first so num_rates is bounded by
 * MAX_SUPPORTED_RATES.
 */
static void
ath10k_wmi_peer_assoc_fill(struct ath10k *ar, void *buf,
			   const struct wmi_peer_assoc_complete_arg *arg)
{
	struct wmi_common_peer_assoc_complete_cmd *cmd = buf;

	cmd->vdev_id            = __cpu_to_le32(arg->vdev_id);
	/* firmware wants a "new association" flag; arg carries the inverse */
	cmd->peer_new_assoc     = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
	cmd->peer_associd       = __cpu_to_le32(arg->peer_aid);
	cmd->peer_flags         = __cpu_to_le32(arg->peer_flags);
	cmd->peer_caps          = __cpu_to_le32(arg->peer_caps);
	cmd->peer_listen_intval = __cpu_to_le32(arg->peer_listen_intval);
	cmd->peer_ht_caps       = __cpu_to_le32(arg->peer_ht_caps);
	cmd->peer_max_mpdu      = __cpu_to_le32(arg->peer_max_mpdu);
	cmd->peer_mpdu_density  = __cpu_to_le32(arg->peer_mpdu_density);
	cmd->peer_rate_caps     = __cpu_to_le32(arg->peer_rate_caps);
	cmd->peer_nss           = __cpu_to_le32(arg->peer_num_spatial_streams);
	cmd->peer_vht_caps      = __cpu_to_le32(arg->peer_vht_caps);
	cmd->peer_phymode       = __cpu_to_le32(arg->peer_phymode);

	ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);

	/* rate entries are single bytes, so only the counts need
	 * endian conversion
	 */
	cmd->peer_legacy_rates.num_rates =
		__cpu_to_le32(arg->peer_legacy_rates.num_rates);
	memcpy(cmd->peer_legacy_rates.rates, arg->peer_legacy_rates.rates,
	       arg->peer_legacy_rates.num_rates);

	cmd->peer_ht_rates.num_rates =
		__cpu_to_le32(arg->peer_ht_rates.num_rates);
	memcpy(cmd->peer_ht_rates.rates, arg->peer_ht_rates.rates,
	       arg->peer_ht_rates.num_rates);

	cmd->peer_vht_rates.rx_max_rate =
		__cpu_to_le32(arg->peer_vht_rates.rx_max_rate);
	cmd->peer_vht_rates.rx_mcs_set =
		__cpu_to_le32(arg->peer_vht_rates.rx_mcs_set);
	cmd->peer_vht_rates.tx_max_rate =
		__cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
	cmd->peer_vht_rates.tx_mcs_set =
		__cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
}
4721 
/* Peer assoc fill for the "main" firmware branch: common layout plus a
 * trailing HT info blob that this driver does not populate (zeroed).
 */
static void
ath10k_wmi_peer_assoc_fill_main(struct ath10k *ar, void *buf,
				const struct wmi_peer_assoc_complete_arg *arg)
{
	struct wmi_main_peer_assoc_complete_cmd *cmd = buf;

	ath10k_wmi_peer_assoc_fill(ar, buf, arg);
	memset(cmd->peer_ht_info, 0, sizeof(cmd->peer_ht_info));
}
4731 
/* Peer assoc fill for the 10.1 firmware branch: identical to the common
 * layout, so this is a plain pass-through.
 */
static void
ath10k_wmi_peer_assoc_fill_10_1(struct ath10k *ar, void *buf,
				const struct wmi_peer_assoc_complete_arg *arg)
{
	ath10k_wmi_peer_assoc_fill(ar, buf, arg);
}
4738 
/* Peer assoc fill for the 10.2 firmware branch: common layout plus an
 * info0 word carrying max MCS index and max NSS.
 */
static void
ath10k_wmi_peer_assoc_fill_10_2(struct ath10k *ar, void *buf,
				const struct wmi_peer_assoc_complete_arg *arg)
{
	struct wmi_10_2_peer_assoc_complete_cmd *cmd = buf;
	int max_mcs, max_nss;
	u32 info0;

	/* TODO: Is using max values okay with firmware? */
	max_mcs = 0xf;
	max_nss = 0xf;

	info0 = SM(max_mcs, WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX) |
		SM(max_nss, WMI_PEER_ASSOC_INFO0_MAX_NSS);

	ath10k_wmi_peer_assoc_fill(ar, buf, arg);
	cmd->info0 = __cpu_to_le32(info0);
}
4757 
4758 static int
4759 ath10k_wmi_peer_assoc_check_arg(const struct wmi_peer_assoc_complete_arg *arg)
4760 {
4761 	if (arg->peer_mpdu_density > 16)
4762 		return -EINVAL;
4763 	if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
4764 		return -EINVAL;
4765 	if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
4766 		return -EINVAL;
4767 
4768 	return 0;
4769 }
4770 
4771 static struct sk_buff *
4772 ath10k_wmi_op_gen_peer_assoc(struct ath10k *ar,
4773 			     const struct wmi_peer_assoc_complete_arg *arg)
4774 {
4775 	size_t len = sizeof(struct wmi_main_peer_assoc_complete_cmd);
4776 	struct sk_buff *skb;
4777 	int ret;
4778 
4779 	ret = ath10k_wmi_peer_assoc_check_arg(arg);
4780 	if (ret)
4781 		return ERR_PTR(ret);
4782 
4783 	skb = ath10k_wmi_alloc_skb(ar, len);
4784 	if (!skb)
4785 		return ERR_PTR(-ENOMEM);
4786 
4787 	ath10k_wmi_peer_assoc_fill_main(ar, skb->data, arg);
4788 
4789 	ath10k_dbg(ar, ATH10K_DBG_WMI,
4790 		   "wmi peer assoc vdev %d addr %pM (%s)\n",
4791 		   arg->vdev_id, arg->addr,
4792 		   arg->peer_reassoc ? "reassociate" : "new");
4793 	return skb;
4794 }
4795 
4796 static struct sk_buff *
4797 ath10k_wmi_10_1_op_gen_peer_assoc(struct ath10k *ar,
4798 				  const struct wmi_peer_assoc_complete_arg *arg)
4799 {
4800 	size_t len = sizeof(struct wmi_10_1_peer_assoc_complete_cmd);
4801 	struct sk_buff *skb;
4802 	int ret;
4803 
4804 	ret = ath10k_wmi_peer_assoc_check_arg(arg);
4805 	if (ret)
4806 		return ERR_PTR(ret);
4807 
4808 	skb = ath10k_wmi_alloc_skb(ar, len);
4809 	if (!skb)
4810 		return ERR_PTR(-ENOMEM);
4811 
4812 	ath10k_wmi_peer_assoc_fill_10_1(ar, skb->data, arg);
4813 
4814 	ath10k_dbg(ar, ATH10K_DBG_WMI,
4815 		   "wmi peer assoc vdev %d addr %pM (%s)\n",
4816 		   arg->vdev_id, arg->addr,
4817 		   arg->peer_reassoc ? "reassociate" : "new");
4818 	return skb;
4819 }
4820 
4821 static struct sk_buff *
4822 ath10k_wmi_10_2_op_gen_peer_assoc(struct ath10k *ar,
4823 				  const struct wmi_peer_assoc_complete_arg *arg)
4824 {
4825 	size_t len = sizeof(struct wmi_10_2_peer_assoc_complete_cmd);
4826 	struct sk_buff *skb;
4827 	int ret;
4828 
4829 	ret = ath10k_wmi_peer_assoc_check_arg(arg);
4830 	if (ret)
4831 		return ERR_PTR(ret);
4832 
4833 	skb = ath10k_wmi_alloc_skb(ar, len);
4834 	if (!skb)
4835 		return ERR_PTR(-ENOMEM);
4836 
4837 	ath10k_wmi_peer_assoc_fill_10_2(ar, skb->data, arg);
4838 
4839 	ath10k_dbg(ar, ATH10K_DBG_WMI,
4840 		   "wmi peer assoc vdev %d addr %pM (%s)\n",
4841 		   arg->vdev_id, arg->addr,
4842 		   arg->peer_reassoc ? "reassociate" : "new");
4843 	return skb;
4844 }
4845 
4846 static struct sk_buff *
4847 ath10k_wmi_10_2_op_gen_pdev_get_temperature(struct ath10k *ar)
4848 {
4849 	struct sk_buff *skb;
4850 
4851 	skb = ath10k_wmi_alloc_skb(ar, 0);
4852 	if (!skb)
4853 		return ERR_PTR(-ENOMEM);
4854 
4855 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev get temperature\n");
4856 	return skb;
4857 }
4858 
/* This function assumes the beacon is already DMA mapped.
 *
 * Builds a beacon-by-reference TX command: the beacon frame is not
 * copied into the skb, only its DMA address (@bcn_paddr) and length are
 * sent; the frame_control word is read out of the mapped beacon and
 * echoed to the firmware.
 */
static struct sk_buff *
ath10k_wmi_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id, const void *bcn,
			     size_t bcn_len, u32 bcn_paddr, bool dtim_zero,
			     bool deliver_cab)
{
	struct wmi_bcn_tx_ref_cmd *cmd;
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	u16 fc;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	hdr = (struct ieee80211_hdr *)bcn;
	fc = le16_to_cpu(hdr->frame_control);

	cmd = (struct wmi_bcn_tx_ref_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->data_len = __cpu_to_le32(bcn_len);
	cmd->data_ptr = __cpu_to_le32(bcn_paddr);
	cmd->msdu_id = 0;
	cmd->frame_control = __cpu_to_le32(fc);
	cmd->flags = 0;
	cmd->antenna_mask = __cpu_to_le32(WMI_BCN_TX_REF_DEF_ANTENNA);

	/* flags start at 0 above and are OR-ed in per request */
	if (dtim_zero)
		cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);

	if (deliver_cab)
		cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);

	return skb;
}
4894 
4895 void ath10k_wmi_set_wmm_param(struct wmi_wmm_params *params,
4896 			      const struct wmi_wmm_params_arg *arg)
4897 {
4898 	params->cwmin  = __cpu_to_le32(arg->cwmin);
4899 	params->cwmax  = __cpu_to_le32(arg->cwmax);
4900 	params->aifs   = __cpu_to_le32(arg->aifs);
4901 	params->txop   = __cpu_to_le32(arg->txop);
4902 	params->acm    = __cpu_to_le32(arg->acm);
4903 	params->no_ack = __cpu_to_le32(arg->no_ack);
4904 }
4905 
4906 static struct sk_buff *
4907 ath10k_wmi_op_gen_pdev_set_wmm(struct ath10k *ar,
4908 			       const struct wmi_wmm_params_all_arg *arg)
4909 {
4910 	struct wmi_pdev_set_wmm_params *cmd;
4911 	struct sk_buff *skb;
4912 
4913 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
4914 	if (!skb)
4915 		return ERR_PTR(-ENOMEM);
4916 
4917 	cmd = (struct wmi_pdev_set_wmm_params *)skb->data;
4918 	ath10k_wmi_set_wmm_param(&cmd->ac_be, &arg->ac_be);
4919 	ath10k_wmi_set_wmm_param(&cmd->ac_bk, &arg->ac_bk);
4920 	ath10k_wmi_set_wmm_param(&cmd->ac_vi, &arg->ac_vi);
4921 	ath10k_wmi_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);
4922 
4923 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set wmm params\n");
4924 	return skb;
4925 }
4926 
4927 static struct sk_buff *
4928 ath10k_wmi_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
4929 {
4930 	struct wmi_request_stats_cmd *cmd;
4931 	struct sk_buff *skb;
4932 
4933 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
4934 	if (!skb)
4935 		return ERR_PTR(-ENOMEM);
4936 
4937 	cmd = (struct wmi_request_stats_cmd *)skb->data;
4938 	cmd->stats_id = __cpu_to_le32(stats_mask);
4939 
4940 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi request stats 0x%08x\n",
4941 		   stats_mask);
4942 	return skb;
4943 }
4944 
4945 static struct sk_buff *
4946 ath10k_wmi_op_gen_force_fw_hang(struct ath10k *ar,
4947 				enum wmi_force_fw_hang_type type, u32 delay_ms)
4948 {
4949 	struct wmi_force_fw_hang_cmd *cmd;
4950 	struct sk_buff *skb;
4951 
4952 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
4953 	if (!skb)
4954 		return ERR_PTR(-ENOMEM);
4955 
4956 	cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
4957 	cmd->type = __cpu_to_le32(type);
4958 	cmd->delay_ms = __cpu_to_le32(delay_ms);
4959 
4960 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n",
4961 		   type, delay_ms);
4962 	return skb;
4963 }
4964 
/* Build a WMI dbglog configuration command.
 *
 * @module_enable: bitmap of firmware log modules to enable. When zero,
 * all modules (~0) are re-enabled at the default WARN level and
 * @log_level is ignored.
 * @log_level: verbosity applied when @module_enable is non-zero.
 */
static struct sk_buff *
ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable,
			     u32 log_level)
{
	struct wmi_dbglog_cfg_cmd *cmd;
	struct sk_buff *skb;
	u32 cfg;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	cmd = (struct wmi_dbglog_cfg_cmd *)skb->data;

	if (module_enable) {
		cfg = SM(log_level,
			 ATH10K_DBGLOG_CFG_LOG_LVL);
	} else {
		/* set back defaults, all modules with WARN level */
		cfg = SM(ATH10K_DBGLOG_LEVEL_WARN,
			 ATH10K_DBGLOG_CFG_LOG_LVL);
		module_enable = ~0;
	}

	/* module_valid/config_valid tell firmware which bits of the
	 * enable/config words to honor
	 */
	cmd->module_enable = __cpu_to_le32(module_enable);
	cmd->module_valid = __cpu_to_le32(~0);
	cmd->config_enable = __cpu_to_le32(cfg);
	cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi dbglog cfg modules %08x %08x config %08x %08x\n",
		   __le32_to_cpu(cmd->module_enable),
		   __le32_to_cpu(cmd->module_valid),
		   __le32_to_cpu(cmd->config_enable),
		   __le32_to_cpu(cmd->config_valid));
	return skb;
}
5002 
5003 static struct sk_buff *
5004 ath10k_wmi_op_gen_pktlog_enable(struct ath10k *ar, u32 ev_bitmap)
5005 {
5006 	struct wmi_pdev_pktlog_enable_cmd *cmd;
5007 	struct sk_buff *skb;
5008 
5009 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
5010 	if (!skb)
5011 		return ERR_PTR(-ENOMEM);
5012 
5013 	ev_bitmap &= ATH10K_PKTLOG_ANY;
5014 
5015 	cmd = (struct wmi_pdev_pktlog_enable_cmd *)skb->data;
5016 	cmd->ev_bitmap = __cpu_to_le32(ev_bitmap);
5017 
5018 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi enable pktlog filter 0x%08x\n",
5019 		   ev_bitmap);
5020 	return skb;
5021 }
5022 
5023 static struct sk_buff *
5024 ath10k_wmi_op_gen_pktlog_disable(struct ath10k *ar)
5025 {
5026 	struct sk_buff *skb;
5027 
5028 	skb = ath10k_wmi_alloc_skb(ar, 0);
5029 	if (!skb)
5030 		return ERR_PTR(-ENOMEM);
5031 
5032 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi disable pktlog\n");
5033 	return skb;
5034 }
5035 
5036 static struct sk_buff *
5037 ath10k_wmi_op_gen_pdev_set_quiet_mode(struct ath10k *ar, u32 period,
5038 				      u32 duration, u32 next_offset,
5039 				      u32 enabled)
5040 {
5041 	struct wmi_pdev_set_quiet_cmd *cmd;
5042 	struct sk_buff *skb;
5043 
5044 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
5045 	if (!skb)
5046 		return ERR_PTR(-ENOMEM);
5047 
5048 	cmd = (struct wmi_pdev_set_quiet_cmd *)skb->data;
5049 	cmd->period = __cpu_to_le32(period);
5050 	cmd->duration = __cpu_to_le32(duration);
5051 	cmd->next_start = __cpu_to_le32(next_offset);
5052 	cmd->enabled = __cpu_to_le32(enabled);
5053 
5054 	ath10k_dbg(ar, ATH10K_DBG_WMI,
5055 		   "wmi quiet param: period %u duration %u enabled %d\n",
5056 		   period, duration, enabled);
5057 	return skb;
5058 }
5059 
5060 static struct sk_buff *
5061 ath10k_wmi_op_gen_addba_clear_resp(struct ath10k *ar, u32 vdev_id,
5062 				   const u8 *mac)
5063 {
5064 	struct wmi_addba_clear_resp_cmd *cmd;
5065 	struct sk_buff *skb;
5066 
5067 	if (!mac)
5068 		return ERR_PTR(-EINVAL);
5069 
5070 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
5071 	if (!skb)
5072 		return ERR_PTR(-ENOMEM);
5073 
5074 	cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
5075 	cmd->vdev_id = __cpu_to_le32(vdev_id);
5076 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
5077 
5078 	ath10k_dbg(ar, ATH10K_DBG_WMI,
5079 		   "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n",
5080 		   vdev_id, mac);
5081 	return skb;
5082 }
5083 
5084 static struct sk_buff *
5085 ath10k_wmi_op_gen_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
5086 			     u32 tid, u32 buf_size)
5087 {
5088 	struct wmi_addba_send_cmd *cmd;
5089 	struct sk_buff *skb;
5090 
5091 	if (!mac)
5092 		return ERR_PTR(-EINVAL);
5093 
5094 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
5095 	if (!skb)
5096 		return ERR_PTR(-ENOMEM);
5097 
5098 	cmd = (struct wmi_addba_send_cmd *)skb->data;
5099 	cmd->vdev_id = __cpu_to_le32(vdev_id);
5100 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
5101 	cmd->tid = __cpu_to_le32(tid);
5102 	cmd->buffersize = __cpu_to_le32(buf_size);
5103 
5104 	ath10k_dbg(ar, ATH10K_DBG_WMI,
5105 		   "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
5106 		   vdev_id, mac, tid, buf_size);
5107 	return skb;
5108 }
5109 
5110 static struct sk_buff *
5111 ath10k_wmi_op_gen_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
5112 				 u32 tid, u32 status)
5113 {
5114 	struct wmi_addba_setresponse_cmd *cmd;
5115 	struct sk_buff *skb;
5116 
5117 	if (!mac)
5118 		return ERR_PTR(-EINVAL);
5119 
5120 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
5121 	if (!skb)
5122 		return ERR_PTR(-ENOMEM);
5123 
5124 	cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
5125 	cmd->vdev_id = __cpu_to_le32(vdev_id);
5126 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
5127 	cmd->tid = __cpu_to_le32(tid);
5128 	cmd->statuscode = __cpu_to_le32(status);
5129 
5130 	ath10k_dbg(ar, ATH10K_DBG_WMI,
5131 		   "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
5132 		   vdev_id, mac, tid, status);
5133 	return skb;
5134 }
5135 
5136 static struct sk_buff *
5137 ath10k_wmi_op_gen_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
5138 			     u32 tid, u32 initiator, u32 reason)
5139 {
5140 	struct wmi_delba_send_cmd *cmd;
5141 	struct sk_buff *skb;
5142 
5143 	if (!mac)
5144 		return ERR_PTR(-EINVAL);
5145 
5146 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
5147 	if (!skb)
5148 		return ERR_PTR(-ENOMEM);
5149 
5150 	cmd = (struct wmi_delba_send_cmd *)skb->data;
5151 	cmd->vdev_id = __cpu_to_le32(vdev_id);
5152 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
5153 	cmd->tid = __cpu_to_le32(tid);
5154 	cmd->initiator = __cpu_to_le32(initiator);
5155 	cmd->reasoncode = __cpu_to_le32(reason);
5156 
5157 	ath10k_dbg(ar, ATH10K_DBG_WMI,
5158 		   "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
5159 		   vdev_id, mac, tid, initiator, reason);
5160 	return skb;
5161 }
5162 
/* WMI op table for the "main" firmware branch. Entries left unset (see
 * the trailing comments) are probed via wmi-ops.h wrappers and fail
 * gracefully when absent.
 */
static const struct wmi_ops wmi_ops = {
	.rx = ath10k_wmi_op_rx,
	.map_svc = wmi_main_svc_map,

	.pull_scan = ath10k_wmi_op_pull_scan_ev,
	.pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
	.pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
	.pull_swba = ath10k_wmi_op_pull_swba_ev,
	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
	.pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
	.pull_fw_stats = ath10k_wmi_main_op_pull_fw_stats,
	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,

	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
	.gen_pdev_set_rd = ath10k_wmi_op_gen_pdev_set_rd,
	.gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
	.gen_init = ath10k_wmi_op_gen_init,
	.gen_start_scan = ath10k_wmi_op_gen_start_scan,
	.gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
	.gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
	.gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
	.gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
	.gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
	.gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
	.gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
	.gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
	.gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
	.gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
	.gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
	/* .gen_vdev_wmm_conf not implemented */
	.gen_peer_create = ath10k_wmi_op_gen_peer_create,
	.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
	.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
	.gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
	.gen_peer_assoc = ath10k_wmi_op_gen_peer_assoc,
	.gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
	.gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
	.gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
	.gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
	.gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
	.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
	.gen_request_stats = ath10k_wmi_op_gen_request_stats,
	.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
	.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
	.gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
	.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
	.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
	.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
	/* .gen_pdev_get_temperature not implemented */
	.gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
	/* .gen_bcn_tmpl not implemented */
	/* .gen_prb_tmpl not implemented */
	/* .gen_p2p_go_bcn_ie not implemented */
	/* .gen_adaptive_qcs not implemented */
};
5225 
/* WMI op table for the 10.1 firmware branch: branch-specific handlers
 * first, then ops shared verbatim with the main branch.
 */
static const struct wmi_ops wmi_10_1_ops = {
	.rx = ath10k_wmi_10_1_op_rx,
	.map_svc = wmi_10x_svc_map,
	.pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
	.pull_fw_stats = ath10k_wmi_10x_op_pull_fw_stats,
	.gen_init = ath10k_wmi_10_1_op_gen_init,
	.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
	.gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
	.gen_peer_assoc = ath10k_wmi_10_1_op_gen_peer_assoc,
	/* .gen_pdev_get_temperature not implemented */

	/* shared with main branch */
	.pull_scan = ath10k_wmi_op_pull_scan_ev,
	.pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
	.pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
	.pull_swba = ath10k_wmi_op_pull_swba_ev,
	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,

	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
	.gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
	.gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
	.gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
	.gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
	.gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
	.gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
	.gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
	.gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
	.gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
	.gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
	.gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
	.gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
	/* .gen_vdev_wmm_conf not implemented */
	.gen_peer_create = ath10k_wmi_op_gen_peer_create,
	.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
	.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
	.gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
	.gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
	.gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
	.gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
	.gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
	.gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
	.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
	.gen_request_stats = ath10k_wmi_op_gen_request_stats,
	.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
	.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
	.gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
	.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
	.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
	.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
	.gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
	/* .gen_bcn_tmpl not implemented */
	/* .gen_prb_tmpl not implemented */
	/* .gen_p2p_go_bcn_ie not implemented */
	/* .gen_adaptive_qcs not implemented */
};
5289 
/* WMI ops for the 10.2 firmware branch. Entries unique to 10.2 come first,
 * followed by handlers shared with the 10.1 branch and the main branch.
 */
static const struct wmi_ops wmi_10_2_ops = {
	.rx = ath10k_wmi_10_2_op_rx,
	.pull_fw_stats = ath10k_wmi_10_2_op_pull_fw_stats,
	.gen_init = ath10k_wmi_10_2_op_gen_init,
	.gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
	/* .gen_pdev_get_temperature not implemented */

	/* shared with 10.1 */
	.map_svc = wmi_10x_svc_map,
	.pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
	.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
	.gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,

	.pull_scan = ath10k_wmi_op_pull_scan_ev,
	.pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
	.pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
	.pull_swba = ath10k_wmi_op_pull_swba_ev,
	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,

	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
	.gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
	.gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
	.gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
	.gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
	.gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
	.gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
	.gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
	.gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
	.gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
	.gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
	.gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
	.gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
	/* .gen_vdev_wmm_conf not implemented */
	.gen_peer_create = ath10k_wmi_op_gen_peer_create,
	.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
	.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
	.gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
	.gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
	.gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
	.gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
	.gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
	.gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
	.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
	.gen_request_stats = ath10k_wmi_op_gen_request_stats,
	.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
	.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
	.gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
	.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
	.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
	.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
	.gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
	/* .gen_bcn_tmpl not implemented */
	/* .gen_prb_tmpl not implemented */
	/* .gen_p2p_go_bcn_ie not implemented */
	/* .gen_adaptive_qcs not implemented */
};
5350 
/* WMI ops for the 10.2.4 firmware branch. Largely identical to 10.2 but
 * uses its own fw-stats parser and implements pdev_get_temperature.
 */
static const struct wmi_ops wmi_10_2_4_ops = {
	.rx = ath10k_wmi_10_2_op_rx,
	.pull_fw_stats = ath10k_wmi_10_2_4_op_pull_fw_stats,
	.gen_init = ath10k_wmi_10_2_op_gen_init,
	.gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
	.gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature,

	/* shared with 10.1 */
	.map_svc = wmi_10x_svc_map,
	.pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
	.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
	.gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,

	.pull_scan = ath10k_wmi_op_pull_scan_ev,
	.pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
	.pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
	.pull_swba = ath10k_wmi_op_pull_swba_ev,
	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,

	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
	.gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
	.gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
	.gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
	.gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
	.gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
	.gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
	.gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
	.gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
	.gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
	.gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
	.gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
	.gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
	/* .gen_vdev_wmm_conf not implemented */
	.gen_peer_create = ath10k_wmi_op_gen_peer_create,
	.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
	.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
	.gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
	.gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
	.gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
	.gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
	.gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
	.gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
	.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
	.gen_request_stats = ath10k_wmi_op_gen_request_stats,
	.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
	.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
	.gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
	.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
	.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
	.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
	.gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
	/* .gen_bcn_tmpl not implemented */
	/* .gen_prb_tmpl not implemented */
	/* .gen_p2p_go_bcn_ie not implemented */
	/* .gen_adaptive_qcs not implemented */
};
5414 
5415 int ath10k_wmi_attach(struct ath10k *ar)
5416 {
5417 	switch (ar->wmi.op_version) {
5418 	case ATH10K_FW_WMI_OP_VERSION_10_2_4:
5419 		ar->wmi.cmd = &wmi_10_2_4_cmd_map;
5420 		ar->wmi.ops = &wmi_10_2_4_ops;
5421 		ar->wmi.vdev_param = &wmi_10_2_4_vdev_param_map;
5422 		ar->wmi.pdev_param = &wmi_10_2_4_pdev_param_map;
5423 		break;
5424 	case ATH10K_FW_WMI_OP_VERSION_10_2:
5425 		ar->wmi.cmd = &wmi_10_2_cmd_map;
5426 		ar->wmi.ops = &wmi_10_2_ops;
5427 		ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
5428 		ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
5429 		break;
5430 	case ATH10K_FW_WMI_OP_VERSION_10_1:
5431 		ar->wmi.cmd = &wmi_10x_cmd_map;
5432 		ar->wmi.ops = &wmi_10_1_ops;
5433 		ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
5434 		ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
5435 		break;
5436 	case ATH10K_FW_WMI_OP_VERSION_MAIN:
5437 		ar->wmi.cmd = &wmi_cmd_map;
5438 		ar->wmi.ops = &wmi_ops;
5439 		ar->wmi.vdev_param = &wmi_vdev_param_map;
5440 		ar->wmi.pdev_param = &wmi_pdev_param_map;
5441 		break;
5442 	case ATH10K_FW_WMI_OP_VERSION_TLV:
5443 		ath10k_wmi_tlv_attach(ar);
5444 		break;
5445 	case ATH10K_FW_WMI_OP_VERSION_UNSET:
5446 	case ATH10K_FW_WMI_OP_VERSION_MAX:
5447 		ath10k_err(ar, "unsupported WMI op version: %d\n",
5448 			   ar->wmi.op_version);
5449 		return -EINVAL;
5450 	}
5451 
5452 	init_completion(&ar->wmi.service_ready);
5453 	init_completion(&ar->wmi.unified_ready);
5454 
5455 	return 0;
5456 }
5457 
5458 void ath10k_wmi_detach(struct ath10k *ar)
5459 {
5460 	int i;
5461 
5462 	/* free the host memory chunks requested by firmware */
5463 	for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
5464 		dma_free_coherent(ar->dev,
5465 				  ar->wmi.mem_chunks[i].len,
5466 				  ar->wmi.mem_chunks[i].vaddr,
5467 				  ar->wmi.mem_chunks[i].paddr);
5468 	}
5469 
5470 	ar->wmi.num_mem_chunks = 0;
5471 }
5472