xref: /openbmc/linux/drivers/net/wireless/ath/ath10k/wmi.c (revision 5f5a9397)
1 // SPDX-License-Identifier: ISC
2 /*
3  * Copyright (c) 2005-2011 Atheros Communications Inc.
4  * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
5  * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
6  */
7 
8 #include <linux/skbuff.h>
9 #include <linux/ctype.h>
10 
11 #include "core.h"
12 #include "htc.h"
13 #include "debug.h"
14 #include "wmi.h"
15 #include "wmi-tlv.h"
16 #include "mac.h"
17 #include "testmode.h"
18 #include "wmi-ops.h"
19 #include "p2p.h"
20 #include "hw.h"
21 #include "hif.h"
22 #include "txrx.h"
23 
24 #define ATH10K_WMI_BARRIER_ECHO_ID 0xBA991E9
25 #define ATH10K_WMI_BARRIER_TIMEOUT_HZ (3 * HZ)
26 #define ATH10K_WMI_DFS_CONF_TIMEOUT_HZ (HZ / 6)
27 
28 /* MAIN WMI cmd track */
29 static struct wmi_cmd_map wmi_cmd_map = {
30 	.init_cmdid = WMI_INIT_CMDID,
31 	.start_scan_cmdid = WMI_START_SCAN_CMDID,
32 	.stop_scan_cmdid = WMI_STOP_SCAN_CMDID,
33 	.scan_chan_list_cmdid = WMI_SCAN_CHAN_LIST_CMDID,
34 	.scan_sch_prio_tbl_cmdid = WMI_SCAN_SCH_PRIO_TBL_CMDID,
35 	.scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
36 	.pdev_set_regdomain_cmdid = WMI_PDEV_SET_REGDOMAIN_CMDID,
37 	.pdev_set_channel_cmdid = WMI_PDEV_SET_CHANNEL_CMDID,
38 	.pdev_set_param_cmdid = WMI_PDEV_SET_PARAM_CMDID,
39 	.pdev_pktlog_enable_cmdid = WMI_PDEV_PKTLOG_ENABLE_CMDID,
40 	.pdev_pktlog_disable_cmdid = WMI_PDEV_PKTLOG_DISABLE_CMDID,
41 	.pdev_set_wmm_params_cmdid = WMI_PDEV_SET_WMM_PARAMS_CMDID,
42 	.pdev_set_ht_cap_ie_cmdid = WMI_PDEV_SET_HT_CAP_IE_CMDID,
43 	.pdev_set_vht_cap_ie_cmdid = WMI_PDEV_SET_VHT_CAP_IE_CMDID,
44 	.pdev_set_dscp_tid_map_cmdid = WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
45 	.pdev_set_quiet_mode_cmdid = WMI_PDEV_SET_QUIET_MODE_CMDID,
46 	.pdev_green_ap_ps_enable_cmdid = WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
47 	.pdev_get_tpc_config_cmdid = WMI_PDEV_GET_TPC_CONFIG_CMDID,
48 	.pdev_set_base_macaddr_cmdid = WMI_PDEV_SET_BASE_MACADDR_CMDID,
49 	.vdev_create_cmdid = WMI_VDEV_CREATE_CMDID,
50 	.vdev_delete_cmdid = WMI_VDEV_DELETE_CMDID,
51 	.vdev_start_request_cmdid = WMI_VDEV_START_REQUEST_CMDID,
52 	.vdev_restart_request_cmdid = WMI_VDEV_RESTART_REQUEST_CMDID,
53 	.vdev_up_cmdid = WMI_VDEV_UP_CMDID,
54 	.vdev_stop_cmdid = WMI_VDEV_STOP_CMDID,
55 	.vdev_down_cmdid = WMI_VDEV_DOWN_CMDID,
56 	.vdev_set_param_cmdid = WMI_VDEV_SET_PARAM_CMDID,
57 	.vdev_install_key_cmdid = WMI_VDEV_INSTALL_KEY_CMDID,
58 	.peer_create_cmdid = WMI_PEER_CREATE_CMDID,
59 	.peer_delete_cmdid = WMI_PEER_DELETE_CMDID,
60 	.peer_flush_tids_cmdid = WMI_PEER_FLUSH_TIDS_CMDID,
61 	.peer_set_param_cmdid = WMI_PEER_SET_PARAM_CMDID,
62 	.peer_assoc_cmdid = WMI_PEER_ASSOC_CMDID,
63 	.peer_add_wds_entry_cmdid = WMI_PEER_ADD_WDS_ENTRY_CMDID,
64 	.peer_remove_wds_entry_cmdid = WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
65 	.peer_mcast_group_cmdid = WMI_PEER_MCAST_GROUP_CMDID,
66 	.bcn_tx_cmdid = WMI_BCN_TX_CMDID,
67 	.pdev_send_bcn_cmdid = WMI_PDEV_SEND_BCN_CMDID,
68 	.bcn_tmpl_cmdid = WMI_BCN_TMPL_CMDID,
69 	.bcn_filter_rx_cmdid = WMI_BCN_FILTER_RX_CMDID,
70 	.prb_req_filter_rx_cmdid = WMI_PRB_REQ_FILTER_RX_CMDID,
71 	.mgmt_tx_cmdid = WMI_MGMT_TX_CMDID,
72 	.prb_tmpl_cmdid = WMI_PRB_TMPL_CMDID,
73 	.addba_clear_resp_cmdid = WMI_ADDBA_CLEAR_RESP_CMDID,
74 	.addba_send_cmdid = WMI_ADDBA_SEND_CMDID,
75 	.addba_status_cmdid = WMI_ADDBA_STATUS_CMDID,
76 	.delba_send_cmdid = WMI_DELBA_SEND_CMDID,
77 	.addba_set_resp_cmdid = WMI_ADDBA_SET_RESP_CMDID,
78 	.send_singleamsdu_cmdid = WMI_SEND_SINGLEAMSDU_CMDID,
79 	.sta_powersave_mode_cmdid = WMI_STA_POWERSAVE_MODE_CMDID,
80 	.sta_powersave_param_cmdid = WMI_STA_POWERSAVE_PARAM_CMDID,
81 	.sta_mimo_ps_mode_cmdid = WMI_STA_MIMO_PS_MODE_CMDID,
82 	.pdev_dfs_enable_cmdid = WMI_PDEV_DFS_ENABLE_CMDID,
83 	.pdev_dfs_disable_cmdid = WMI_PDEV_DFS_DISABLE_CMDID,
84 	.roam_scan_mode = WMI_ROAM_SCAN_MODE,
85 	.roam_scan_rssi_threshold = WMI_ROAM_SCAN_RSSI_THRESHOLD,
86 	.roam_scan_period = WMI_ROAM_SCAN_PERIOD,
87 	.roam_scan_rssi_change_threshold = WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
88 	.roam_ap_profile = WMI_ROAM_AP_PROFILE,
89 	.ofl_scan_add_ap_profile = WMI_ROAM_AP_PROFILE,
90 	.ofl_scan_remove_ap_profile = WMI_OFL_SCAN_REMOVE_AP_PROFILE,
91 	.ofl_scan_period = WMI_OFL_SCAN_PERIOD,
92 	.p2p_dev_set_device_info = WMI_P2P_DEV_SET_DEVICE_INFO,
93 	.p2p_dev_set_discoverability = WMI_P2P_DEV_SET_DISCOVERABILITY,
94 	.p2p_go_set_beacon_ie = WMI_P2P_GO_SET_BEACON_IE,
95 	.p2p_go_set_probe_resp_ie = WMI_P2P_GO_SET_PROBE_RESP_IE,
96 	.p2p_set_vendor_ie_data_cmdid = WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
97 	.ap_ps_peer_param_cmdid = WMI_AP_PS_PEER_PARAM_CMDID,
98 	.ap_ps_peer_uapsd_coex_cmdid = WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
99 	.peer_rate_retry_sched_cmdid = WMI_PEER_RATE_RETRY_SCHED_CMDID,
100 	.wlan_profile_trigger_cmdid = WMI_WLAN_PROFILE_TRIGGER_CMDID,
101 	.wlan_profile_set_hist_intvl_cmdid =
102 				WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
103 	.wlan_profile_get_profile_data_cmdid =
104 				WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
105 	.wlan_profile_enable_profile_id_cmdid =
106 				WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
107 	.wlan_profile_list_profile_id_cmdid =
108 				WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
109 	.pdev_suspend_cmdid = WMI_PDEV_SUSPEND_CMDID,
110 	.pdev_resume_cmdid = WMI_PDEV_RESUME_CMDID,
111 	.add_bcn_filter_cmdid = WMI_ADD_BCN_FILTER_CMDID,
112 	.rmv_bcn_filter_cmdid = WMI_RMV_BCN_FILTER_CMDID,
113 	.wow_add_wake_pattern_cmdid = WMI_WOW_ADD_WAKE_PATTERN_CMDID,
114 	.wow_del_wake_pattern_cmdid = WMI_WOW_DEL_WAKE_PATTERN_CMDID,
115 	.wow_enable_disable_wake_event_cmdid =
116 				WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
117 	.wow_enable_cmdid = WMI_WOW_ENABLE_CMDID,
118 	.wow_hostwakeup_from_sleep_cmdid = WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
119 	.rtt_measreq_cmdid = WMI_RTT_MEASREQ_CMDID,
120 	.rtt_tsf_cmdid = WMI_RTT_TSF_CMDID,
121 	.vdev_spectral_scan_configure_cmdid =
122 				WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
123 	.vdev_spectral_scan_enable_cmdid = WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
124 	.request_stats_cmdid = WMI_REQUEST_STATS_CMDID,
125 	.set_arp_ns_offload_cmdid = WMI_SET_ARP_NS_OFFLOAD_CMDID,
126 	.network_list_offload_config_cmdid =
127 				WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
128 	.gtk_offload_cmdid = WMI_GTK_OFFLOAD_CMDID,
129 	.csa_offload_enable_cmdid = WMI_CSA_OFFLOAD_ENABLE_CMDID,
130 	.csa_offload_chanswitch_cmdid = WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
131 	.chatter_set_mode_cmdid = WMI_CHATTER_SET_MODE_CMDID,
132 	.peer_tid_addba_cmdid = WMI_PEER_TID_ADDBA_CMDID,
133 	.peer_tid_delba_cmdid = WMI_PEER_TID_DELBA_CMDID,
134 	.sta_dtim_ps_method_cmdid = WMI_STA_DTIM_PS_METHOD_CMDID,
135 	.sta_uapsd_auto_trig_cmdid = WMI_STA_UAPSD_AUTO_TRIG_CMDID,
136 	.sta_keepalive_cmd = WMI_STA_KEEPALIVE_CMD,
137 	.echo_cmdid = WMI_ECHO_CMDID,
138 	.pdev_utf_cmdid = WMI_PDEV_UTF_CMDID,
139 	.dbglog_cfg_cmdid = WMI_DBGLOG_CFG_CMDID,
140 	.pdev_qvit_cmdid = WMI_PDEV_QVIT_CMDID,
141 	.pdev_ftm_intg_cmdid = WMI_PDEV_FTM_INTG_CMDID,
142 	.vdev_set_keepalive_cmdid = WMI_VDEV_SET_KEEPALIVE_CMDID,
143 	.vdev_get_keepalive_cmdid = WMI_VDEV_GET_KEEPALIVE_CMDID,
144 	.force_fw_hang_cmdid = WMI_FORCE_FW_HANG_CMDID,
145 	.gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID,
146 	.gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID,
147 	.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
148 	.pdev_enable_adaptive_cca_cmdid = WMI_CMD_UNSUPPORTED,
149 	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
150 	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
151 	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
152 	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
153 	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
154 	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
155 	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
156 	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
157 	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
158 	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
159 	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
160 	.nan_cmdid = WMI_CMD_UNSUPPORTED,
161 	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
162 	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
163 	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
164 	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
165 	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
166 	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
167 	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
168 	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
169 	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
170 	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
171 	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
172 	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
173 	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
174 	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
175 	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
176 	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
177 	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
178 	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
179 	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
180 	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
181 	.pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
182 	.pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
183 	.pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
184 	.vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
185 	.pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
186 	.vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
187 	.vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
188 	.mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
189 	.set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
190 	.pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
191 	.pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED,
192 	.radar_found_cmdid = WMI_CMD_UNSUPPORTED,
193 };
194 
195 /* 10.X WMI cmd track */
196 static struct wmi_cmd_map wmi_10x_cmd_map = {
197 	.init_cmdid = WMI_10X_INIT_CMDID,
198 	.start_scan_cmdid = WMI_10X_START_SCAN_CMDID,
199 	.stop_scan_cmdid = WMI_10X_STOP_SCAN_CMDID,
200 	.scan_chan_list_cmdid = WMI_10X_SCAN_CHAN_LIST_CMDID,
201 	.scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
202 	.scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
203 	.pdev_set_regdomain_cmdid = WMI_10X_PDEV_SET_REGDOMAIN_CMDID,
204 	.pdev_set_channel_cmdid = WMI_10X_PDEV_SET_CHANNEL_CMDID,
205 	.pdev_set_param_cmdid = WMI_10X_PDEV_SET_PARAM_CMDID,
206 	.pdev_pktlog_enable_cmdid = WMI_10X_PDEV_PKTLOG_ENABLE_CMDID,
207 	.pdev_pktlog_disable_cmdid = WMI_10X_PDEV_PKTLOG_DISABLE_CMDID,
208 	.pdev_set_wmm_params_cmdid = WMI_10X_PDEV_SET_WMM_PARAMS_CMDID,
209 	.pdev_set_ht_cap_ie_cmdid = WMI_10X_PDEV_SET_HT_CAP_IE_CMDID,
210 	.pdev_set_vht_cap_ie_cmdid = WMI_10X_PDEV_SET_VHT_CAP_IE_CMDID,
211 	.pdev_set_dscp_tid_map_cmdid = WMI_10X_PDEV_SET_DSCP_TID_MAP_CMDID,
212 	.pdev_set_quiet_mode_cmdid = WMI_10X_PDEV_SET_QUIET_MODE_CMDID,
213 	.pdev_green_ap_ps_enable_cmdid = WMI_10X_PDEV_GREEN_AP_PS_ENABLE_CMDID,
214 	.pdev_get_tpc_config_cmdid = WMI_10X_PDEV_GET_TPC_CONFIG_CMDID,
215 	.pdev_set_base_macaddr_cmdid = WMI_10X_PDEV_SET_BASE_MACADDR_CMDID,
216 	.vdev_create_cmdid = WMI_10X_VDEV_CREATE_CMDID,
217 	.vdev_delete_cmdid = WMI_10X_VDEV_DELETE_CMDID,
218 	.vdev_start_request_cmdid = WMI_10X_VDEV_START_REQUEST_CMDID,
219 	.vdev_restart_request_cmdid = WMI_10X_VDEV_RESTART_REQUEST_CMDID,
220 	.vdev_up_cmdid = WMI_10X_VDEV_UP_CMDID,
221 	.vdev_stop_cmdid = WMI_10X_VDEV_STOP_CMDID,
222 	.vdev_down_cmdid = WMI_10X_VDEV_DOWN_CMDID,
223 	.vdev_set_param_cmdid = WMI_10X_VDEV_SET_PARAM_CMDID,
224 	.vdev_install_key_cmdid = WMI_10X_VDEV_INSTALL_KEY_CMDID,
225 	.peer_create_cmdid = WMI_10X_PEER_CREATE_CMDID,
226 	.peer_delete_cmdid = WMI_10X_PEER_DELETE_CMDID,
227 	.peer_flush_tids_cmdid = WMI_10X_PEER_FLUSH_TIDS_CMDID,
228 	.peer_set_param_cmdid = WMI_10X_PEER_SET_PARAM_CMDID,
229 	.peer_assoc_cmdid = WMI_10X_PEER_ASSOC_CMDID,
230 	.peer_add_wds_entry_cmdid = WMI_10X_PEER_ADD_WDS_ENTRY_CMDID,
231 	.peer_remove_wds_entry_cmdid = WMI_10X_PEER_REMOVE_WDS_ENTRY_CMDID,
232 	.peer_mcast_group_cmdid = WMI_10X_PEER_MCAST_GROUP_CMDID,
233 	.bcn_tx_cmdid = WMI_10X_BCN_TX_CMDID,
234 	.pdev_send_bcn_cmdid = WMI_10X_PDEV_SEND_BCN_CMDID,
235 	.bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
236 	.bcn_filter_rx_cmdid = WMI_10X_BCN_FILTER_RX_CMDID,
237 	.prb_req_filter_rx_cmdid = WMI_10X_PRB_REQ_FILTER_RX_CMDID,
238 	.mgmt_tx_cmdid = WMI_10X_MGMT_TX_CMDID,
239 	.prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
240 	.addba_clear_resp_cmdid = WMI_10X_ADDBA_CLEAR_RESP_CMDID,
241 	.addba_send_cmdid = WMI_10X_ADDBA_SEND_CMDID,
242 	.addba_status_cmdid = WMI_10X_ADDBA_STATUS_CMDID,
243 	.delba_send_cmdid = WMI_10X_DELBA_SEND_CMDID,
244 	.addba_set_resp_cmdid = WMI_10X_ADDBA_SET_RESP_CMDID,
245 	.send_singleamsdu_cmdid = WMI_10X_SEND_SINGLEAMSDU_CMDID,
246 	.sta_powersave_mode_cmdid = WMI_10X_STA_POWERSAVE_MODE_CMDID,
247 	.sta_powersave_param_cmdid = WMI_10X_STA_POWERSAVE_PARAM_CMDID,
248 	.sta_mimo_ps_mode_cmdid = WMI_10X_STA_MIMO_PS_MODE_CMDID,
249 	.pdev_dfs_enable_cmdid = WMI_10X_PDEV_DFS_ENABLE_CMDID,
250 	.pdev_dfs_disable_cmdid = WMI_10X_PDEV_DFS_DISABLE_CMDID,
251 	.roam_scan_mode = WMI_10X_ROAM_SCAN_MODE,
252 	.roam_scan_rssi_threshold = WMI_10X_ROAM_SCAN_RSSI_THRESHOLD,
253 	.roam_scan_period = WMI_10X_ROAM_SCAN_PERIOD,
254 	.roam_scan_rssi_change_threshold =
255 				WMI_10X_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
256 	.roam_ap_profile = WMI_10X_ROAM_AP_PROFILE,
257 	.ofl_scan_add_ap_profile = WMI_10X_OFL_SCAN_ADD_AP_PROFILE,
258 	.ofl_scan_remove_ap_profile = WMI_10X_OFL_SCAN_REMOVE_AP_PROFILE,
259 	.ofl_scan_period = WMI_10X_OFL_SCAN_PERIOD,
260 	.p2p_dev_set_device_info = WMI_10X_P2P_DEV_SET_DEVICE_INFO,
261 	.p2p_dev_set_discoverability = WMI_10X_P2P_DEV_SET_DISCOVERABILITY,
262 	.p2p_go_set_beacon_ie = WMI_10X_P2P_GO_SET_BEACON_IE,
263 	.p2p_go_set_probe_resp_ie = WMI_10X_P2P_GO_SET_PROBE_RESP_IE,
264 	.p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
265 	.ap_ps_peer_param_cmdid = WMI_10X_AP_PS_PEER_PARAM_CMDID,
266 	.ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
267 	.peer_rate_retry_sched_cmdid = WMI_10X_PEER_RATE_RETRY_SCHED_CMDID,
268 	.wlan_profile_trigger_cmdid = WMI_10X_WLAN_PROFILE_TRIGGER_CMDID,
269 	.wlan_profile_set_hist_intvl_cmdid =
270 				WMI_10X_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
271 	.wlan_profile_get_profile_data_cmdid =
272 				WMI_10X_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
273 	.wlan_profile_enable_profile_id_cmdid =
274 				WMI_10X_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
275 	.wlan_profile_list_profile_id_cmdid =
276 				WMI_10X_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
277 	.pdev_suspend_cmdid = WMI_10X_PDEV_SUSPEND_CMDID,
278 	.pdev_resume_cmdid = WMI_10X_PDEV_RESUME_CMDID,
279 	.add_bcn_filter_cmdid = WMI_10X_ADD_BCN_FILTER_CMDID,
280 	.rmv_bcn_filter_cmdid = WMI_10X_RMV_BCN_FILTER_CMDID,
281 	.wow_add_wake_pattern_cmdid = WMI_10X_WOW_ADD_WAKE_PATTERN_CMDID,
282 	.wow_del_wake_pattern_cmdid = WMI_10X_WOW_DEL_WAKE_PATTERN_CMDID,
283 	.wow_enable_disable_wake_event_cmdid =
284 				WMI_10X_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
285 	.wow_enable_cmdid = WMI_10X_WOW_ENABLE_CMDID,
286 	.wow_hostwakeup_from_sleep_cmdid =
287 				WMI_10X_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
288 	.rtt_measreq_cmdid = WMI_10X_RTT_MEASREQ_CMDID,
289 	.rtt_tsf_cmdid = WMI_10X_RTT_TSF_CMDID,
290 	.vdev_spectral_scan_configure_cmdid =
291 				WMI_10X_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
292 	.vdev_spectral_scan_enable_cmdid =
293 				WMI_10X_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
294 	.request_stats_cmdid = WMI_10X_REQUEST_STATS_CMDID,
295 	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
296 	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
297 	.gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
298 	.csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
299 	.csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
300 	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
301 	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
302 	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
303 	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
304 	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
305 	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
306 	.echo_cmdid = WMI_10X_ECHO_CMDID,
307 	.pdev_utf_cmdid = WMI_10X_PDEV_UTF_CMDID,
308 	.dbglog_cfg_cmdid = WMI_10X_DBGLOG_CFG_CMDID,
309 	.pdev_qvit_cmdid = WMI_10X_PDEV_QVIT_CMDID,
310 	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
311 	.vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
312 	.vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
313 	.force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
314 	.gpio_config_cmdid = WMI_10X_GPIO_CONFIG_CMDID,
315 	.gpio_output_cmdid = WMI_10X_GPIO_OUTPUT_CMDID,
316 	.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
317 	.pdev_enable_adaptive_cca_cmdid = WMI_CMD_UNSUPPORTED,
318 	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
319 	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
320 	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
321 	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
322 	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
323 	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
324 	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
325 	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
326 	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
327 	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
328 	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
329 	.nan_cmdid = WMI_CMD_UNSUPPORTED,
330 	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
331 	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
332 	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
333 	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
334 	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
335 	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
336 	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
337 	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
338 	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
339 	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
340 	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
341 	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
342 	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
343 	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
344 	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
345 	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
346 	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
347 	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
348 	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
349 	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
350 	.pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
351 	.pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
352 	.pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
353 	.vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
354 	.pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
355 	.vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
356 	.vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
357 	.mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
358 	.set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
359 	.pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
360 	.pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED,
361 	.radar_found_cmdid = WMI_CMD_UNSUPPORTED,
362 };
363 
364 /* 10.2.4 WMI cmd track */
365 static struct wmi_cmd_map wmi_10_2_4_cmd_map = {
366 	.init_cmdid = WMI_10_2_INIT_CMDID,
367 	.start_scan_cmdid = WMI_10_2_START_SCAN_CMDID,
368 	.stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID,
369 	.scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID,
370 	.scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
371 	.scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
372 	.pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
373 	.pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID,
374 	.pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID,
375 	.pdev_pktlog_enable_cmdid = WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
376 	.pdev_pktlog_disable_cmdid = WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
377 	.pdev_set_wmm_params_cmdid = WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
378 	.pdev_set_ht_cap_ie_cmdid = WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
379 	.pdev_set_vht_cap_ie_cmdid = WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
380 	.pdev_set_quiet_mode_cmdid = WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
381 	.pdev_green_ap_ps_enable_cmdid = WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
382 	.pdev_get_tpc_config_cmdid = WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
383 	.pdev_set_base_macaddr_cmdid = WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
384 	.vdev_create_cmdid = WMI_10_2_VDEV_CREATE_CMDID,
385 	.vdev_delete_cmdid = WMI_10_2_VDEV_DELETE_CMDID,
386 	.vdev_start_request_cmdid = WMI_10_2_VDEV_START_REQUEST_CMDID,
387 	.vdev_restart_request_cmdid = WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
388 	.vdev_up_cmdid = WMI_10_2_VDEV_UP_CMDID,
389 	.vdev_stop_cmdid = WMI_10_2_VDEV_STOP_CMDID,
390 	.vdev_down_cmdid = WMI_10_2_VDEV_DOWN_CMDID,
391 	.vdev_set_param_cmdid = WMI_10_2_VDEV_SET_PARAM_CMDID,
392 	.vdev_install_key_cmdid = WMI_10_2_VDEV_INSTALL_KEY_CMDID,
393 	.peer_create_cmdid = WMI_10_2_PEER_CREATE_CMDID,
394 	.peer_delete_cmdid = WMI_10_2_PEER_DELETE_CMDID,
395 	.peer_flush_tids_cmdid = WMI_10_2_PEER_FLUSH_TIDS_CMDID,
396 	.peer_set_param_cmdid = WMI_10_2_PEER_SET_PARAM_CMDID,
397 	.peer_assoc_cmdid = WMI_10_2_PEER_ASSOC_CMDID,
398 	.peer_add_wds_entry_cmdid = WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
399 	.peer_remove_wds_entry_cmdid = WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
400 	.peer_mcast_group_cmdid = WMI_10_2_PEER_MCAST_GROUP_CMDID,
401 	.bcn_tx_cmdid = WMI_10_2_BCN_TX_CMDID,
402 	.pdev_send_bcn_cmdid = WMI_10_2_PDEV_SEND_BCN_CMDID,
403 	.bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
404 	.bcn_filter_rx_cmdid = WMI_10_2_BCN_FILTER_RX_CMDID,
405 	.prb_req_filter_rx_cmdid = WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
406 	.mgmt_tx_cmdid = WMI_10_2_MGMT_TX_CMDID,
407 	.prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
408 	.addba_clear_resp_cmdid = WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
409 	.addba_send_cmdid = WMI_10_2_ADDBA_SEND_CMDID,
410 	.addba_status_cmdid = WMI_10_2_ADDBA_STATUS_CMDID,
411 	.delba_send_cmdid = WMI_10_2_DELBA_SEND_CMDID,
412 	.addba_set_resp_cmdid = WMI_10_2_ADDBA_SET_RESP_CMDID,
413 	.send_singleamsdu_cmdid = WMI_10_2_SEND_SINGLEAMSDU_CMDID,
414 	.sta_powersave_mode_cmdid = WMI_10_2_STA_POWERSAVE_MODE_CMDID,
415 	.sta_powersave_param_cmdid = WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
416 	.sta_mimo_ps_mode_cmdid = WMI_10_2_STA_MIMO_PS_MODE_CMDID,
417 	.pdev_dfs_enable_cmdid = WMI_10_2_PDEV_DFS_ENABLE_CMDID,
418 	.pdev_dfs_disable_cmdid = WMI_10_2_PDEV_DFS_DISABLE_CMDID,
419 	.roam_scan_mode = WMI_10_2_ROAM_SCAN_MODE,
420 	.roam_scan_rssi_threshold = WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
421 	.roam_scan_period = WMI_10_2_ROAM_SCAN_PERIOD,
422 	.roam_scan_rssi_change_threshold =
423 				WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
424 	.roam_ap_profile = WMI_10_2_ROAM_AP_PROFILE,
425 	.ofl_scan_add_ap_profile = WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
426 	.ofl_scan_remove_ap_profile = WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
427 	.ofl_scan_period = WMI_10_2_OFL_SCAN_PERIOD,
428 	.p2p_dev_set_device_info = WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
429 	.p2p_dev_set_discoverability = WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
430 	.p2p_go_set_beacon_ie = WMI_10_2_P2P_GO_SET_BEACON_IE,
431 	.p2p_go_set_probe_resp_ie = WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
432 	.p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
433 	.ap_ps_peer_param_cmdid = WMI_10_2_AP_PS_PEER_PARAM_CMDID,
434 	.ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
435 	.peer_rate_retry_sched_cmdid = WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
436 	.wlan_profile_trigger_cmdid = WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
437 	.wlan_profile_set_hist_intvl_cmdid =
438 				WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
439 	.wlan_profile_get_profile_data_cmdid =
440 				WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
441 	.wlan_profile_enable_profile_id_cmdid =
442 				WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
443 	.wlan_profile_list_profile_id_cmdid =
444 				WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
445 	.pdev_suspend_cmdid = WMI_10_2_PDEV_SUSPEND_CMDID,
446 	.pdev_resume_cmdid = WMI_10_2_PDEV_RESUME_CMDID,
447 	.add_bcn_filter_cmdid = WMI_10_2_ADD_BCN_FILTER_CMDID,
448 	.rmv_bcn_filter_cmdid = WMI_10_2_RMV_BCN_FILTER_CMDID,
449 	.wow_add_wake_pattern_cmdid = WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
450 	.wow_del_wake_pattern_cmdid = WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
451 	.wow_enable_disable_wake_event_cmdid =
452 				WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
453 	.wow_enable_cmdid = WMI_10_2_WOW_ENABLE_CMDID,
454 	.wow_hostwakeup_from_sleep_cmdid =
455 				WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
456 	.rtt_measreq_cmdid = WMI_10_2_RTT_MEASREQ_CMDID,
457 	.rtt_tsf_cmdid = WMI_10_2_RTT_TSF_CMDID,
458 	.vdev_spectral_scan_configure_cmdid =
459 				WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
460 	.vdev_spectral_scan_enable_cmdid =
461 				WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
462 	.request_stats_cmdid = WMI_10_2_REQUEST_STATS_CMDID,
463 	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
464 	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
465 	.gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
466 	.csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
467 	.csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
468 	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
469 	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
470 	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
471 	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
472 	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
473 	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
474 	.echo_cmdid = WMI_10_2_ECHO_CMDID,
475 	.pdev_utf_cmdid = WMI_10_2_PDEV_UTF_CMDID,
476 	.dbglog_cfg_cmdid = WMI_10_2_DBGLOG_CFG_CMDID,
477 	.pdev_qvit_cmdid = WMI_10_2_PDEV_QVIT_CMDID,
478 	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
479 	.vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
480 	.vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
481 	.force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
482 	.gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
483 	.gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
484 	.pdev_get_temperature_cmdid = WMI_10_2_PDEV_GET_TEMPERATURE_CMDID,
485 	.pdev_enable_adaptive_cca_cmdid = WMI_10_2_SET_CCA_PARAMS,
486 	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
487 	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
488 	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
489 	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
490 	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
491 	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
492 	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
493 	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
494 	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
495 	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
496 	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
497 	.nan_cmdid = WMI_CMD_UNSUPPORTED,
498 	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
499 	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
500 	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
501 	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
502 	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
503 	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
504 	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
505 	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
506 	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
507 	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
508 	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
509 	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
510 	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
511 	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
512 	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
513 	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
514 	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
515 	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
516 	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
517 	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
518 	.pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
519 	.pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
520 	.pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
521 	.vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
522 	.pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
523 	.vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
524 	.vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
525 	.mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
526 	.set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
527 	.pdev_bss_chan_info_request_cmdid =
528 		WMI_10_2_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
529 	.pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED,
530 	.radar_found_cmdid = WMI_CMD_UNSUPPORTED,
531 	.set_bb_timing_cmdid = WMI_10_2_PDEV_SET_BB_TIMING_CONFIG_CMDID,
532 };
533 
534 /* 10.4 WMI cmd track */
535 static struct wmi_cmd_map wmi_10_4_cmd_map = {
536 	.init_cmdid = WMI_10_4_INIT_CMDID,
537 	.start_scan_cmdid = WMI_10_4_START_SCAN_CMDID,
538 	.stop_scan_cmdid = WMI_10_4_STOP_SCAN_CMDID,
539 	.scan_chan_list_cmdid = WMI_10_4_SCAN_CHAN_LIST_CMDID,
540 	.scan_sch_prio_tbl_cmdid = WMI_10_4_SCAN_SCH_PRIO_TBL_CMDID,
541 	.scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
542 	.pdev_set_regdomain_cmdid = WMI_10_4_PDEV_SET_REGDOMAIN_CMDID,
543 	.pdev_set_channel_cmdid = WMI_10_4_PDEV_SET_CHANNEL_CMDID,
544 	.pdev_set_param_cmdid = WMI_10_4_PDEV_SET_PARAM_CMDID,
545 	.pdev_pktlog_enable_cmdid = WMI_10_4_PDEV_PKTLOG_ENABLE_CMDID,
546 	.pdev_pktlog_disable_cmdid = WMI_10_4_PDEV_PKTLOG_DISABLE_CMDID,
547 	.pdev_set_wmm_params_cmdid = WMI_10_4_PDEV_SET_WMM_PARAMS_CMDID,
548 	.pdev_set_ht_cap_ie_cmdid = WMI_10_4_PDEV_SET_HT_CAP_IE_CMDID,
549 	.pdev_set_vht_cap_ie_cmdid = WMI_10_4_PDEV_SET_VHT_CAP_IE_CMDID,
550 	.pdev_set_dscp_tid_map_cmdid = WMI_10_4_PDEV_SET_DSCP_TID_MAP_CMDID,
551 	.pdev_set_quiet_mode_cmdid = WMI_10_4_PDEV_SET_QUIET_MODE_CMDID,
552 	.pdev_green_ap_ps_enable_cmdid = WMI_10_4_PDEV_GREEN_AP_PS_ENABLE_CMDID,
553 	.pdev_get_tpc_config_cmdid = WMI_10_4_PDEV_GET_TPC_CONFIG_CMDID,
554 	.pdev_set_base_macaddr_cmdid = WMI_10_4_PDEV_SET_BASE_MACADDR_CMDID,
555 	.vdev_create_cmdid = WMI_10_4_VDEV_CREATE_CMDID,
556 	.vdev_delete_cmdid = WMI_10_4_VDEV_DELETE_CMDID,
557 	.vdev_start_request_cmdid = WMI_10_4_VDEV_START_REQUEST_CMDID,
558 	.vdev_restart_request_cmdid = WMI_10_4_VDEV_RESTART_REQUEST_CMDID,
559 	.vdev_up_cmdid = WMI_10_4_VDEV_UP_CMDID,
560 	.vdev_stop_cmdid = WMI_10_4_VDEV_STOP_CMDID,
561 	.vdev_down_cmdid = WMI_10_4_VDEV_DOWN_CMDID,
562 	.vdev_set_param_cmdid = WMI_10_4_VDEV_SET_PARAM_CMDID,
563 	.vdev_install_key_cmdid = WMI_10_4_VDEV_INSTALL_KEY_CMDID,
564 	.peer_create_cmdid = WMI_10_4_PEER_CREATE_CMDID,
565 	.peer_delete_cmdid = WMI_10_4_PEER_DELETE_CMDID,
566 	.peer_flush_tids_cmdid = WMI_10_4_PEER_FLUSH_TIDS_CMDID,
567 	.peer_set_param_cmdid = WMI_10_4_PEER_SET_PARAM_CMDID,
568 	.peer_assoc_cmdid = WMI_10_4_PEER_ASSOC_CMDID,
569 	.peer_add_wds_entry_cmdid = WMI_10_4_PEER_ADD_WDS_ENTRY_CMDID,
570 	.peer_remove_wds_entry_cmdid = WMI_10_4_PEER_REMOVE_WDS_ENTRY_CMDID,
571 	.peer_mcast_group_cmdid = WMI_10_4_PEER_MCAST_GROUP_CMDID,
572 	.bcn_tx_cmdid = WMI_10_4_BCN_TX_CMDID,
573 	.pdev_send_bcn_cmdid = WMI_10_4_PDEV_SEND_BCN_CMDID,
574 	.bcn_tmpl_cmdid = WMI_10_4_BCN_PRB_TMPL_CMDID,
575 	.bcn_filter_rx_cmdid = WMI_10_4_BCN_FILTER_RX_CMDID,
576 	.prb_req_filter_rx_cmdid = WMI_10_4_PRB_REQ_FILTER_RX_CMDID,
577 	.mgmt_tx_cmdid = WMI_10_4_MGMT_TX_CMDID,
578 	.prb_tmpl_cmdid = WMI_10_4_PRB_TMPL_CMDID,
579 	.addba_clear_resp_cmdid = WMI_10_4_ADDBA_CLEAR_RESP_CMDID,
580 	.addba_send_cmdid = WMI_10_4_ADDBA_SEND_CMDID,
581 	.addba_status_cmdid = WMI_10_4_ADDBA_STATUS_CMDID,
582 	.delba_send_cmdid = WMI_10_4_DELBA_SEND_CMDID,
583 	.addba_set_resp_cmdid = WMI_10_4_ADDBA_SET_RESP_CMDID,
584 	.send_singleamsdu_cmdid = WMI_10_4_SEND_SINGLEAMSDU_CMDID,
585 	.sta_powersave_mode_cmdid = WMI_10_4_STA_POWERSAVE_MODE_CMDID,
586 	.sta_powersave_param_cmdid = WMI_10_4_STA_POWERSAVE_PARAM_CMDID,
587 	.sta_mimo_ps_mode_cmdid = WMI_10_4_STA_MIMO_PS_MODE_CMDID,
588 	.pdev_dfs_enable_cmdid = WMI_10_4_PDEV_DFS_ENABLE_CMDID,
589 	.pdev_dfs_disable_cmdid = WMI_10_4_PDEV_DFS_DISABLE_CMDID,
590 	.roam_scan_mode = WMI_10_4_ROAM_SCAN_MODE,
591 	.roam_scan_rssi_threshold = WMI_10_4_ROAM_SCAN_RSSI_THRESHOLD,
592 	.roam_scan_period = WMI_10_4_ROAM_SCAN_PERIOD,
593 	.roam_scan_rssi_change_threshold =
594 				WMI_10_4_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
595 	.roam_ap_profile = WMI_10_4_ROAM_AP_PROFILE,
596 	.ofl_scan_add_ap_profile = WMI_10_4_OFL_SCAN_ADD_AP_PROFILE,
597 	.ofl_scan_remove_ap_profile = WMI_10_4_OFL_SCAN_REMOVE_AP_PROFILE,
598 	.ofl_scan_period = WMI_10_4_OFL_SCAN_PERIOD,
599 	.p2p_dev_set_device_info = WMI_10_4_P2P_DEV_SET_DEVICE_INFO,
600 	.p2p_dev_set_discoverability = WMI_10_4_P2P_DEV_SET_DISCOVERABILITY,
601 	.p2p_go_set_beacon_ie = WMI_10_4_P2P_GO_SET_BEACON_IE,
602 	.p2p_go_set_probe_resp_ie = WMI_10_4_P2P_GO_SET_PROBE_RESP_IE,
603 	.p2p_set_vendor_ie_data_cmdid = WMI_10_4_P2P_SET_VENDOR_IE_DATA_CMDID,
604 	.ap_ps_peer_param_cmdid = WMI_10_4_AP_PS_PEER_PARAM_CMDID,
605 	.ap_ps_peer_uapsd_coex_cmdid = WMI_10_4_AP_PS_PEER_UAPSD_COEX_CMDID,
606 	.peer_rate_retry_sched_cmdid = WMI_10_4_PEER_RATE_RETRY_SCHED_CMDID,
607 	.wlan_profile_trigger_cmdid = WMI_10_4_WLAN_PROFILE_TRIGGER_CMDID,
608 	.wlan_profile_set_hist_intvl_cmdid =
609 				WMI_10_4_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
610 	.wlan_profile_get_profile_data_cmdid =
611 				WMI_10_4_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
612 	.wlan_profile_enable_profile_id_cmdid =
613 				WMI_10_4_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
614 	.wlan_profile_list_profile_id_cmdid =
615 				WMI_10_4_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
616 	.pdev_suspend_cmdid = WMI_10_4_PDEV_SUSPEND_CMDID,
617 	.pdev_resume_cmdid = WMI_10_4_PDEV_RESUME_CMDID,
618 	.add_bcn_filter_cmdid = WMI_10_4_ADD_BCN_FILTER_CMDID,
619 	.rmv_bcn_filter_cmdid = WMI_10_4_RMV_BCN_FILTER_CMDID,
620 	.wow_add_wake_pattern_cmdid = WMI_10_4_WOW_ADD_WAKE_PATTERN_CMDID,
621 	.wow_del_wake_pattern_cmdid = WMI_10_4_WOW_DEL_WAKE_PATTERN_CMDID,
622 	.wow_enable_disable_wake_event_cmdid =
623 				WMI_10_4_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
624 	.wow_enable_cmdid = WMI_10_4_WOW_ENABLE_CMDID,
625 	.wow_hostwakeup_from_sleep_cmdid =
626 				WMI_10_4_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
627 	.rtt_measreq_cmdid = WMI_10_4_RTT_MEASREQ_CMDID,
628 	.rtt_tsf_cmdid = WMI_10_4_RTT_TSF_CMDID,
629 	.vdev_spectral_scan_configure_cmdid =
630 				WMI_10_4_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
631 	.vdev_spectral_scan_enable_cmdid =
632 				WMI_10_4_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
633 	.request_stats_cmdid = WMI_10_4_REQUEST_STATS_CMDID,
634 	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
635 	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
636 	.gtk_offload_cmdid = WMI_10_4_GTK_OFFLOAD_CMDID,
637 	.csa_offload_enable_cmdid = WMI_10_4_CSA_OFFLOAD_ENABLE_CMDID,
638 	.csa_offload_chanswitch_cmdid = WMI_10_4_CSA_OFFLOAD_CHANSWITCH_CMDID,
639 	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
640 	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
641 	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
642 	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
643 	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
644 	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
645 	.echo_cmdid = WMI_10_4_ECHO_CMDID,
646 	.pdev_utf_cmdid = WMI_10_4_PDEV_UTF_CMDID,
647 	.dbglog_cfg_cmdid = WMI_10_4_DBGLOG_CFG_CMDID,
648 	.pdev_qvit_cmdid = WMI_10_4_PDEV_QVIT_CMDID,
649 	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
650 	.vdev_set_keepalive_cmdid = WMI_10_4_VDEV_SET_KEEPALIVE_CMDID,
651 	.vdev_get_keepalive_cmdid = WMI_10_4_VDEV_GET_KEEPALIVE_CMDID,
652 	.force_fw_hang_cmdid = WMI_10_4_FORCE_FW_HANG_CMDID,
653 	.gpio_config_cmdid = WMI_10_4_GPIO_CONFIG_CMDID,
654 	.gpio_output_cmdid = WMI_10_4_GPIO_OUTPUT_CMDID,
655 	.pdev_get_temperature_cmdid = WMI_10_4_PDEV_GET_TEMPERATURE_CMDID,
656 	.vdev_set_wmm_params_cmdid = WMI_CMD_UNSUPPORTED,
657 	.adaptive_qcs_cmdid = WMI_CMD_UNSUPPORTED,
658 	.scan_update_request_cmdid = WMI_10_4_SCAN_UPDATE_REQUEST_CMDID,
659 	.vdev_standby_response_cmdid = WMI_10_4_VDEV_STANDBY_RESPONSE_CMDID,
660 	.vdev_resume_response_cmdid = WMI_10_4_VDEV_RESUME_RESPONSE_CMDID,
661 	.wlan_peer_caching_add_peer_cmdid =
662 			WMI_10_4_WLAN_PEER_CACHING_ADD_PEER_CMDID,
663 	.wlan_peer_caching_evict_peer_cmdid =
664 			WMI_10_4_WLAN_PEER_CACHING_EVICT_PEER_CMDID,
665 	.wlan_peer_caching_restore_peer_cmdid =
666 			WMI_10_4_WLAN_PEER_CACHING_RESTORE_PEER_CMDID,
667 	.wlan_peer_caching_print_all_peers_info_cmdid =
668 			WMI_10_4_WLAN_PEER_CACHING_PRINT_ALL_PEERS_INFO_CMDID,
669 	.peer_update_wds_entry_cmdid = WMI_10_4_PEER_UPDATE_WDS_ENTRY_CMDID,
670 	.peer_add_proxy_sta_entry_cmdid =
671 			WMI_10_4_PEER_ADD_PROXY_STA_ENTRY_CMDID,
672 	.rtt_keepalive_cmdid = WMI_10_4_RTT_KEEPALIVE_CMDID,
673 	.oem_req_cmdid = WMI_10_4_OEM_REQ_CMDID,
674 	.nan_cmdid = WMI_10_4_NAN_CMDID,
675 	.vdev_ratemask_cmdid = WMI_10_4_VDEV_RATEMASK_CMDID,
676 	.qboost_cfg_cmdid = WMI_10_4_QBOOST_CFG_CMDID,
677 	.pdev_smart_ant_enable_cmdid = WMI_10_4_PDEV_SMART_ANT_ENABLE_CMDID,
678 	.pdev_smart_ant_set_rx_antenna_cmdid =
679 			WMI_10_4_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID,
680 	.peer_smart_ant_set_tx_antenna_cmdid =
681 			WMI_10_4_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID,
682 	.peer_smart_ant_set_train_info_cmdid =
683 			WMI_10_4_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID,
684 	.peer_smart_ant_set_node_config_ops_cmdid =
685 			WMI_10_4_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID,
686 	.pdev_set_antenna_switch_table_cmdid =
687 			WMI_10_4_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID,
688 	.pdev_set_ctl_table_cmdid = WMI_10_4_PDEV_SET_CTL_TABLE_CMDID,
689 	.pdev_set_mimogain_table_cmdid = WMI_10_4_PDEV_SET_MIMOGAIN_TABLE_CMDID,
690 	.pdev_ratepwr_table_cmdid = WMI_10_4_PDEV_RATEPWR_TABLE_CMDID,
691 	.pdev_ratepwr_chainmsk_table_cmdid =
692 			WMI_10_4_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID,
693 	.pdev_fips_cmdid = WMI_10_4_PDEV_FIPS_CMDID,
694 	.tt_set_conf_cmdid = WMI_10_4_TT_SET_CONF_CMDID,
695 	.fwtest_cmdid = WMI_10_4_FWTEST_CMDID,
696 	.vdev_atf_request_cmdid = WMI_10_4_VDEV_ATF_REQUEST_CMDID,
697 	.peer_atf_request_cmdid = WMI_10_4_PEER_ATF_REQUEST_CMDID,
698 	.pdev_get_ani_cck_config_cmdid = WMI_10_4_PDEV_GET_ANI_CCK_CONFIG_CMDID,
699 	.pdev_get_ani_ofdm_config_cmdid =
700 			WMI_10_4_PDEV_GET_ANI_OFDM_CONFIG_CMDID,
701 	.pdev_reserve_ast_entry_cmdid = WMI_10_4_PDEV_RESERVE_AST_ENTRY_CMDID,
702 	.pdev_get_nfcal_power_cmdid = WMI_10_4_PDEV_GET_NFCAL_POWER_CMDID,
703 	.pdev_get_tpc_cmdid = WMI_10_4_PDEV_GET_TPC_CMDID,
704 	.pdev_get_ast_info_cmdid = WMI_10_4_PDEV_GET_AST_INFO_CMDID,
705 	.vdev_set_dscp_tid_map_cmdid = WMI_10_4_VDEV_SET_DSCP_TID_MAP_CMDID,
706 	.pdev_get_info_cmdid = WMI_10_4_PDEV_GET_INFO_CMDID,
707 	.vdev_get_info_cmdid = WMI_10_4_VDEV_GET_INFO_CMDID,
708 	.vdev_filter_neighbor_rx_packets_cmdid =
709 			WMI_10_4_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID,
710 	.mu_cal_start_cmdid = WMI_10_4_MU_CAL_START_CMDID,
711 	.set_cca_params_cmdid = WMI_10_4_SET_CCA_PARAMS_CMDID,
712 	.pdev_bss_chan_info_request_cmdid =
713 			WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
714 	.ext_resource_cfg_cmdid = WMI_10_4_EXT_RESOURCE_CFG_CMDID,
715 	.vdev_set_ie_cmdid = WMI_10_4_VDEV_SET_IE_CMDID,
716 	.set_lteu_config_cmdid = WMI_10_4_SET_LTEU_CONFIG_CMDID,
717 	.atf_ssid_grouping_request_cmdid =
718 			WMI_10_4_ATF_SSID_GROUPING_REQUEST_CMDID,
719 	.peer_atf_ext_request_cmdid = WMI_10_4_PEER_ATF_EXT_REQUEST_CMDID,
720 	.set_periodic_channel_stats_cfg_cmdid =
721 			WMI_10_4_SET_PERIODIC_CHANNEL_STATS_CONFIG,
722 	.peer_bwf_request_cmdid = WMI_10_4_PEER_BWF_REQUEST_CMDID,
723 	.btcoex_cfg_cmdid = WMI_10_4_BTCOEX_CFG_CMDID,
724 	.peer_tx_mu_txmit_count_cmdid = WMI_10_4_PEER_TX_MU_TXMIT_COUNT_CMDID,
725 	.peer_tx_mu_txmit_rstcnt_cmdid = WMI_10_4_PEER_TX_MU_TXMIT_RSTCNT_CMDID,
726 	.peer_gid_userpos_list_cmdid = WMI_10_4_PEER_GID_USERPOS_LIST_CMDID,
727 	.pdev_check_cal_version_cmdid = WMI_10_4_PDEV_CHECK_CAL_VERSION_CMDID,
728 	.coex_version_cfg_cmid = WMI_10_4_COEX_VERSION_CFG_CMID,
729 	.pdev_get_rx_filter_cmdid = WMI_10_4_PDEV_GET_RX_FILTER_CMDID,
730 	.pdev_extended_nss_cfg_cmdid = WMI_10_4_PDEV_EXTENDED_NSS_CFG_CMDID,
731 	.vdev_set_scan_nac_rssi_cmdid = WMI_10_4_VDEV_SET_SCAN_NAC_RSSI_CMDID,
732 	.prog_gpio_band_select_cmdid = WMI_10_4_PROG_GPIO_BAND_SELECT_CMDID,
733 	.config_smart_logging_cmdid = WMI_10_4_CONFIG_SMART_LOGGING_CMDID,
734 	.debug_fatal_condition_cmdid = WMI_10_4_DEBUG_FATAL_CONDITION_CMDID,
735 	.get_tsf_timer_cmdid = WMI_10_4_GET_TSF_TIMER_CMDID,
736 	.pdev_get_tpc_table_cmdid = WMI_10_4_PDEV_GET_TPC_TABLE_CMDID,
737 	.vdev_sifs_trigger_time_cmdid = WMI_10_4_VDEV_SIFS_TRIGGER_TIME_CMDID,
738 	.pdev_wds_entry_list_cmdid = WMI_10_4_PDEV_WDS_ENTRY_LIST_CMDID,
739 	.tdls_set_state_cmdid = WMI_10_4_TDLS_SET_STATE_CMDID,
740 	.tdls_peer_update_cmdid = WMI_10_4_TDLS_PEER_UPDATE_CMDID,
741 	.tdls_set_offchan_mode_cmdid = WMI_10_4_TDLS_SET_OFFCHAN_MODE_CMDID,
742 	.radar_found_cmdid = WMI_10_4_RADAR_FOUND_CMDID,
743 	.per_peer_per_tid_config_cmdid = WMI_10_4_PER_PEER_PER_TID_CONFIG_CMDID,
744 };
745 
746 static struct wmi_peer_param_map wmi_peer_param_map = {
747 	.smps_state = WMI_PEER_SMPS_STATE,
748 	.ampdu = WMI_PEER_AMPDU,
749 	.authorize = WMI_PEER_AUTHORIZE,
750 	.chan_width = WMI_PEER_CHAN_WIDTH,
751 	.nss = WMI_PEER_NSS,
752 	.use_4addr = WMI_PEER_USE_4ADDR,
753 	.use_fixed_power = WMI_PEER_USE_FIXED_PWR,
754 	.debug = WMI_PEER_DEBUG,
755 	.phymode = WMI_PEER_PHYMODE,
756 	.dummy_var = WMI_PEER_DUMMY_VAR,
757 };
758 
759 /* MAIN WMI VDEV param map */
760 static struct wmi_vdev_param_map wmi_vdev_param_map = {
761 	.rts_threshold = WMI_VDEV_PARAM_RTS_THRESHOLD,
762 	.fragmentation_threshold = WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
763 	.beacon_interval = WMI_VDEV_PARAM_BEACON_INTERVAL,
764 	.listen_interval = WMI_VDEV_PARAM_LISTEN_INTERVAL,
765 	.multicast_rate = WMI_VDEV_PARAM_MULTICAST_RATE,
766 	.mgmt_tx_rate = WMI_VDEV_PARAM_MGMT_TX_RATE,
767 	.slot_time = WMI_VDEV_PARAM_SLOT_TIME,
768 	.preamble = WMI_VDEV_PARAM_PREAMBLE,
769 	.swba_time = WMI_VDEV_PARAM_SWBA_TIME,
770 	.wmi_vdev_stats_update_period = WMI_VDEV_STATS_UPDATE_PERIOD,
771 	.wmi_vdev_pwrsave_ageout_time = WMI_VDEV_PWRSAVE_AGEOUT_TIME,
772 	.wmi_vdev_host_swba_interval = WMI_VDEV_HOST_SWBA_INTERVAL,
773 	.dtim_period = WMI_VDEV_PARAM_DTIM_PERIOD,
774 	.wmi_vdev_oc_scheduler_air_time_limit =
775 					WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
776 	.wds = WMI_VDEV_PARAM_WDS,
777 	.atim_window = WMI_VDEV_PARAM_ATIM_WINDOW,
778 	.bmiss_count_max = WMI_VDEV_PARAM_BMISS_COUNT_MAX,
779 	.bmiss_first_bcnt = WMI_VDEV_PARAM_BMISS_FIRST_BCNT,
780 	.bmiss_final_bcnt = WMI_VDEV_PARAM_BMISS_FINAL_BCNT,
781 	.feature_wmm = WMI_VDEV_PARAM_FEATURE_WMM,
782 	.chwidth = WMI_VDEV_PARAM_CHWIDTH,
783 	.chextoffset = WMI_VDEV_PARAM_CHEXTOFFSET,
784 	.disable_htprotection =	WMI_VDEV_PARAM_DISABLE_HTPROTECTION,
785 	.sta_quickkickout = WMI_VDEV_PARAM_STA_QUICKKICKOUT,
786 	.mgmt_rate = WMI_VDEV_PARAM_MGMT_RATE,
787 	.protection_mode = WMI_VDEV_PARAM_PROTECTION_MODE,
788 	.fixed_rate = WMI_VDEV_PARAM_FIXED_RATE,
789 	.sgi = WMI_VDEV_PARAM_SGI,
790 	.ldpc = WMI_VDEV_PARAM_LDPC,
791 	.tx_stbc = WMI_VDEV_PARAM_TX_STBC,
792 	.rx_stbc = WMI_VDEV_PARAM_RX_STBC,
793 	.intra_bss_fwd = WMI_VDEV_PARAM_INTRA_BSS_FWD,
794 	.def_keyid = WMI_VDEV_PARAM_DEF_KEYID,
795 	.nss = WMI_VDEV_PARAM_NSS,
796 	.bcast_data_rate = WMI_VDEV_PARAM_BCAST_DATA_RATE,
797 	.mcast_data_rate = WMI_VDEV_PARAM_MCAST_DATA_RATE,
798 	.mcast_indicate = WMI_VDEV_PARAM_MCAST_INDICATE,
799 	.dhcp_indicate = WMI_VDEV_PARAM_DHCP_INDICATE,
800 	.unknown_dest_indicate = WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
801 	.ap_keepalive_min_idle_inactive_time_secs =
802 			WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
803 	.ap_keepalive_max_idle_inactive_time_secs =
804 			WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
805 	.ap_keepalive_max_unresponsive_time_secs =
806 			WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
807 	.ap_enable_nawds = WMI_VDEV_PARAM_AP_ENABLE_NAWDS,
808 	.mcast2ucast_set = WMI_VDEV_PARAM_UNSUPPORTED,
809 	.enable_rtscts = WMI_VDEV_PARAM_ENABLE_RTSCTS,
810 	.txbf = WMI_VDEV_PARAM_TXBF,
811 	.packet_powersave = WMI_VDEV_PARAM_PACKET_POWERSAVE,
812 	.drop_unencry = WMI_VDEV_PARAM_DROP_UNENCRY,
813 	.tx_encap_type = WMI_VDEV_PARAM_TX_ENCAP_TYPE,
814 	.ap_detect_out_of_sync_sleeping_sta_time_secs =
815 					WMI_VDEV_PARAM_UNSUPPORTED,
816 	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
817 	.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
818 	.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
819 	.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
820 	.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
821 	.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
822 	.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
823 	.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
824 	.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
825 	.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
826 	.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
827 	.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
828 	.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
829 	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
830 	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
831 	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
832 	.disable_4addr_src_lrn = WMI_VDEV_PARAM_UNSUPPORTED,
833 	.rtt_responder_role = WMI_VDEV_PARAM_UNSUPPORTED,
834 };
835 
836 /* 10.X WMI VDEV param map */
837 static struct wmi_vdev_param_map wmi_10x_vdev_param_map = {
838 	.rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD,
839 	.fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
840 	.beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
841 	.listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
842 	.multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE,
843 	.mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
844 	.slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME,
845 	.preamble = WMI_10X_VDEV_PARAM_PREAMBLE,
846 	.swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME,
847 	.wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD,
848 	.wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
849 	.wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL,
850 	.dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD,
851 	.wmi_vdev_oc_scheduler_air_time_limit =
852 				WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
853 	.wds = WMI_10X_VDEV_PARAM_WDS,
854 	.atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW,
855 	.bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
856 	.bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
857 	.bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
858 	.feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM,
859 	.chwidth = WMI_10X_VDEV_PARAM_CHWIDTH,
860 	.chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET,
861 	.disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
862 	.sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
863 	.mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE,
864 	.protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE,
865 	.fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE,
866 	.sgi = WMI_10X_VDEV_PARAM_SGI,
867 	.ldpc = WMI_10X_VDEV_PARAM_LDPC,
868 	.tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC,
869 	.rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC,
870 	.intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
871 	.def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID,
872 	.nss = WMI_10X_VDEV_PARAM_NSS,
873 	.bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
874 	.mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
875 	.mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE,
876 	.dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE,
877 	.unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
878 	.ap_keepalive_min_idle_inactive_time_secs =
879 		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
880 	.ap_keepalive_max_idle_inactive_time_secs =
881 		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
882 	.ap_keepalive_max_unresponsive_time_secs =
883 		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
884 	.ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
885 	.mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
886 	.enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
887 	.txbf = WMI_VDEV_PARAM_UNSUPPORTED,
888 	.packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED,
889 	.drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED,
890 	.tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
891 	.ap_detect_out_of_sync_sleeping_sta_time_secs =
892 		WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
893 	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
894 	.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
895 	.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
896 	.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
897 	.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
898 	.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
899 	.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
900 	.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
901 	.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
902 	.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
903 	.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
904 	.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
905 	.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
906 	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
907 	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
908 	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
909 	.disable_4addr_src_lrn = WMI_VDEV_PARAM_UNSUPPORTED,
910 	.rtt_responder_role = WMI_VDEV_PARAM_UNSUPPORTED,
911 };
912 
913 static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
914 	.rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD,
915 	.fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
916 	.beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
917 	.listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
918 	.multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE,
919 	.mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
920 	.slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME,
921 	.preamble = WMI_10X_VDEV_PARAM_PREAMBLE,
922 	.swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME,
923 	.wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD,
924 	.wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
925 	.wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL,
926 	.dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD,
927 	.wmi_vdev_oc_scheduler_air_time_limit =
928 				WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
929 	.wds = WMI_10X_VDEV_PARAM_WDS,
930 	.atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW,
931 	.bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
932 	.bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
933 	.bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
934 	.feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM,
935 	.chwidth = WMI_10X_VDEV_PARAM_CHWIDTH,
936 	.chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET,
937 	.disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
938 	.sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
939 	.mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE,
940 	.protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE,
941 	.fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE,
942 	.sgi = WMI_10X_VDEV_PARAM_SGI,
943 	.ldpc = WMI_10X_VDEV_PARAM_LDPC,
944 	.tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC,
945 	.rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC,
946 	.intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
947 	.def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID,
948 	.nss = WMI_10X_VDEV_PARAM_NSS,
949 	.bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
950 	.mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
951 	.mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE,
952 	.dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE,
953 	.unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
954 	.ap_keepalive_min_idle_inactive_time_secs =
955 		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
956 	.ap_keepalive_max_idle_inactive_time_secs =
957 		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
958 	.ap_keepalive_max_unresponsive_time_secs =
959 		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
960 	.ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
961 	.mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
962 	.enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
963 	.txbf = WMI_VDEV_PARAM_UNSUPPORTED,
964 	.packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED,
965 	.drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED,
966 	.tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
967 	.ap_detect_out_of_sync_sleeping_sta_time_secs =
968 		WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
969 	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
970 	.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
971 	.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
972 	.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
973 	.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
974 	.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
975 	.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
976 	.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
977 	.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
978 	.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
979 	.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
980 	.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
981 	.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
982 	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
983 	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
984 	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
985 	.disable_4addr_src_lrn = WMI_VDEV_PARAM_UNSUPPORTED,
986 	.rtt_responder_role = WMI_VDEV_PARAM_UNSUPPORTED,
987 };
988 
989 static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = {
990 	.rts_threshold = WMI_10_4_VDEV_PARAM_RTS_THRESHOLD,
991 	.fragmentation_threshold = WMI_10_4_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
992 	.beacon_interval = WMI_10_4_VDEV_PARAM_BEACON_INTERVAL,
993 	.listen_interval = WMI_10_4_VDEV_PARAM_LISTEN_INTERVAL,
994 	.multicast_rate = WMI_10_4_VDEV_PARAM_MULTICAST_RATE,
995 	.mgmt_tx_rate = WMI_10_4_VDEV_PARAM_MGMT_TX_RATE,
996 	.slot_time = WMI_10_4_VDEV_PARAM_SLOT_TIME,
997 	.preamble = WMI_10_4_VDEV_PARAM_PREAMBLE,
998 	.swba_time = WMI_10_4_VDEV_PARAM_SWBA_TIME,
999 	.wmi_vdev_stats_update_period = WMI_10_4_VDEV_STATS_UPDATE_PERIOD,
1000 	.wmi_vdev_pwrsave_ageout_time = WMI_10_4_VDEV_PWRSAVE_AGEOUT_TIME,
1001 	.wmi_vdev_host_swba_interval = WMI_10_4_VDEV_HOST_SWBA_INTERVAL,
1002 	.dtim_period = WMI_10_4_VDEV_PARAM_DTIM_PERIOD,
1003 	.wmi_vdev_oc_scheduler_air_time_limit =
1004 	       WMI_10_4_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
1005 	.wds = WMI_10_4_VDEV_PARAM_WDS,
1006 	.atim_window = WMI_10_4_VDEV_PARAM_ATIM_WINDOW,
1007 	.bmiss_count_max = WMI_10_4_VDEV_PARAM_BMISS_COUNT_MAX,
1008 	.bmiss_first_bcnt = WMI_10_4_VDEV_PARAM_BMISS_FIRST_BCNT,
1009 	.bmiss_final_bcnt = WMI_10_4_VDEV_PARAM_BMISS_FINAL_BCNT,
1010 	.feature_wmm = WMI_10_4_VDEV_PARAM_FEATURE_WMM,
1011 	.chwidth = WMI_10_4_VDEV_PARAM_CHWIDTH,
1012 	.chextoffset = WMI_10_4_VDEV_PARAM_CHEXTOFFSET,
1013 	.disable_htprotection = WMI_10_4_VDEV_PARAM_DISABLE_HTPROTECTION,
1014 	.sta_quickkickout = WMI_10_4_VDEV_PARAM_STA_QUICKKICKOUT,
1015 	.mgmt_rate = WMI_10_4_VDEV_PARAM_MGMT_RATE,
1016 	.protection_mode = WMI_10_4_VDEV_PARAM_PROTECTION_MODE,
1017 	.fixed_rate = WMI_10_4_VDEV_PARAM_FIXED_RATE,
1018 	.sgi = WMI_10_4_VDEV_PARAM_SGI,
1019 	.ldpc = WMI_10_4_VDEV_PARAM_LDPC,
1020 	.tx_stbc = WMI_10_4_VDEV_PARAM_TX_STBC,
1021 	.rx_stbc = WMI_10_4_VDEV_PARAM_RX_STBC,
1022 	.intra_bss_fwd = WMI_10_4_VDEV_PARAM_INTRA_BSS_FWD,
1023 	.def_keyid = WMI_10_4_VDEV_PARAM_DEF_KEYID,
1024 	.nss = WMI_10_4_VDEV_PARAM_NSS,
1025 	.bcast_data_rate = WMI_10_4_VDEV_PARAM_BCAST_DATA_RATE,
1026 	.mcast_data_rate = WMI_10_4_VDEV_PARAM_MCAST_DATA_RATE,
1027 	.mcast_indicate = WMI_10_4_VDEV_PARAM_MCAST_INDICATE,
1028 	.dhcp_indicate = WMI_10_4_VDEV_PARAM_DHCP_INDICATE,
1029 	.unknown_dest_indicate = WMI_10_4_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
1030 	.ap_keepalive_min_idle_inactive_time_secs =
1031 	       WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
1032 	.ap_keepalive_max_idle_inactive_time_secs =
1033 	       WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
1034 	.ap_keepalive_max_unresponsive_time_secs =
1035 	       WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
1036 	.ap_enable_nawds = WMI_10_4_VDEV_PARAM_AP_ENABLE_NAWDS,
1037 	.mcast2ucast_set = WMI_10_4_VDEV_PARAM_MCAST2UCAST_SET,
1038 	.enable_rtscts = WMI_10_4_VDEV_PARAM_ENABLE_RTSCTS,
1039 	.txbf = WMI_10_4_VDEV_PARAM_TXBF,
1040 	.packet_powersave = WMI_10_4_VDEV_PARAM_PACKET_POWERSAVE,
1041 	.drop_unencry = WMI_10_4_VDEV_PARAM_DROP_UNENCRY,
1042 	.tx_encap_type = WMI_10_4_VDEV_PARAM_TX_ENCAP_TYPE,
1043 	.ap_detect_out_of_sync_sleeping_sta_time_secs =
1044 	       WMI_10_4_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
1045 	.rc_num_retries = WMI_10_4_VDEV_PARAM_RC_NUM_RETRIES,
1046 	.cabq_maxdur = WMI_10_4_VDEV_PARAM_CABQ_MAXDUR,
1047 	.mfptest_set = WMI_10_4_VDEV_PARAM_MFPTEST_SET,
1048 	.rts_fixed_rate = WMI_10_4_VDEV_PARAM_RTS_FIXED_RATE,
1049 	.vht_sgimask = WMI_10_4_VDEV_PARAM_VHT_SGIMASK,
1050 	.vht80_ratemask = WMI_10_4_VDEV_PARAM_VHT80_RATEMASK,
1051 	.early_rx_adjust_enable = WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE,
1052 	.early_rx_tgt_bmiss_num = WMI_10_4_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM,
1053 	.early_rx_bmiss_sample_cycle =
1054 	       WMI_10_4_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE,
1055 	.early_rx_slop_step = WMI_10_4_VDEV_PARAM_EARLY_RX_SLOP_STEP,
1056 	.early_rx_init_slop = WMI_10_4_VDEV_PARAM_EARLY_RX_INIT_SLOP,
1057 	.early_rx_adjust_pause = WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE,
1058 	.proxy_sta = WMI_10_4_VDEV_PARAM_PROXY_STA,
1059 	.meru_vc = WMI_10_4_VDEV_PARAM_MERU_VC,
1060 	.rx_decap_type = WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE,
1061 	.bw_nss_ratemask = WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK,
1062 	.inc_tsf = WMI_10_4_VDEV_PARAM_TSF_INCREMENT,
1063 	.dec_tsf = WMI_10_4_VDEV_PARAM_TSF_DECREMENT,
1064 	.disable_4addr_src_lrn = WMI_10_4_VDEV_PARAM_DISABLE_4_ADDR_SRC_LRN,
1065 	.rtt_responder_role = WMI_10_4_VDEV_PARAM_ENABLE_DISABLE_RTT_RESPONDER_ROLE,
1066 };
1067 
1068 static struct wmi_pdev_param_map wmi_pdev_param_map = {
1069 	.tx_chain_mask = WMI_PDEV_PARAM_TX_CHAIN_MASK,
1070 	.rx_chain_mask = WMI_PDEV_PARAM_RX_CHAIN_MASK,
1071 	.txpower_limit2g = WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
1072 	.txpower_limit5g = WMI_PDEV_PARAM_TXPOWER_LIMIT5G,
1073 	.txpower_scale = WMI_PDEV_PARAM_TXPOWER_SCALE,
1074 	.beacon_gen_mode = WMI_PDEV_PARAM_BEACON_GEN_MODE,
1075 	.beacon_tx_mode = WMI_PDEV_PARAM_BEACON_TX_MODE,
1076 	.resmgr_offchan_mode = WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
1077 	.protection_mode = WMI_PDEV_PARAM_PROTECTION_MODE,
1078 	.dynamic_bw = WMI_PDEV_PARAM_DYNAMIC_BW,
1079 	.non_agg_sw_retry_th = WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
1080 	.agg_sw_retry_th = WMI_PDEV_PARAM_AGG_SW_RETRY_TH,
1081 	.sta_kickout_th = WMI_PDEV_PARAM_STA_KICKOUT_TH,
1082 	.ac_aggrsize_scaling = WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING,
1083 	.ltr_enable = WMI_PDEV_PARAM_LTR_ENABLE,
1084 	.ltr_ac_latency_be = WMI_PDEV_PARAM_LTR_AC_LATENCY_BE,
1085 	.ltr_ac_latency_bk = WMI_PDEV_PARAM_LTR_AC_LATENCY_BK,
1086 	.ltr_ac_latency_vi = WMI_PDEV_PARAM_LTR_AC_LATENCY_VI,
1087 	.ltr_ac_latency_vo = WMI_PDEV_PARAM_LTR_AC_LATENCY_VO,
1088 	.ltr_ac_latency_timeout = WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
1089 	.ltr_sleep_override = WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
1090 	.ltr_rx_override = WMI_PDEV_PARAM_LTR_RX_OVERRIDE,
1091 	.ltr_tx_activity_timeout = WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
1092 	.l1ss_enable = WMI_PDEV_PARAM_L1SS_ENABLE,
1093 	.dsleep_enable = WMI_PDEV_PARAM_DSLEEP_ENABLE,
1094 	.pcielp_txbuf_flush = WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
1095 	.pcielp_txbuf_watermark = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
1096 	.pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
1097 	.pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
1098 	.pdev_stats_update_period = WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
1099 	.vdev_stats_update_period = WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
1100 	.peer_stats_update_period = WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
1101 	.bcnflt_stats_update_period = WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
1102 	.pmf_qos = WMI_PDEV_PARAM_PMF_QOS,
1103 	.arp_ac_override = WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
1104 	.dcs = WMI_PDEV_PARAM_DCS,
1105 	.ani_enable = WMI_PDEV_PARAM_ANI_ENABLE,
1106 	.ani_poll_period = WMI_PDEV_PARAM_ANI_POLL_PERIOD,
1107 	.ani_listen_period = WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
1108 	.ani_ofdm_level = WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
1109 	.ani_cck_level = WMI_PDEV_PARAM_ANI_CCK_LEVEL,
1110 	.dyntxchain = WMI_PDEV_PARAM_DYNTXCHAIN,
1111 	.proxy_sta = WMI_PDEV_PARAM_PROXY_STA,
1112 	.idle_ps_config = WMI_PDEV_PARAM_IDLE_PS_CONFIG,
1113 	.power_gating_sleep = WMI_PDEV_PARAM_POWER_GATING_SLEEP,
1114 	.fast_channel_reset = WMI_PDEV_PARAM_UNSUPPORTED,
1115 	.burst_dur = WMI_PDEV_PARAM_UNSUPPORTED,
1116 	.burst_enable = WMI_PDEV_PARAM_UNSUPPORTED,
1117 	.cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
1118 	.aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
1119 	.rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1120 	.smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
1121 	.igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
1122 	.igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
1123 	.antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
1124 	.rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
1125 	.set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
1126 	.proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1127 	.set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1128 	.set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
1129 	.remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
1130 	.peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
1131 	.igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
1132 	.block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
1133 	.set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1134 	.set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1135 	.set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1136 	.txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1137 	.set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1138 	.set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1139 	.en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
1140 	.mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
1141 	.noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
1142 	.noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
1143 	.dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
1144 	.set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
1145 	.atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
1146 	.atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
1147 	.ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
1148 	.mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
1149 	.sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
1150 	.signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
1151 	.signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
1152 	.enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
1153 	.enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
1154 	.cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
1155 	.rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
1156 	.pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
1157 	.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
1158 	.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1159 	.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1160 	.enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED,
1161 };
1162 
1163 static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
1164 	.tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
1165 	.rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
1166 	.txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
1167 	.txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
1168 	.txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
1169 	.beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
1170 	.beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
1171 	.resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
1172 	.protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE,
1173 	.dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW,
1174 	.non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
1175 	.agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
1176 	.sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
1177 	.ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
1178 	.ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE,
1179 	.ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
1180 	.ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
1181 	.ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
1182 	.ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
1183 	.ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
1184 	.ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
1185 	.ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
1186 	.ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
1187 	.l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE,
1188 	.dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
1189 	.pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED,
1190 	.pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED,
1191 	.pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED,
1192 	.pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED,
1193 	.pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
1194 	.vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
1195 	.peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
1196 	.bcnflt_stats_update_period =
1197 				WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
1198 	.pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
1199 	.arp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
1200 	.dcs = WMI_10X_PDEV_PARAM_DCS,
1201 	.ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
1202 	.ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
1203 	.ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
1204 	.ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
1205 	.ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
1206 	.dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN,
1207 	.proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED,
1208 	.idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED,
1209 	.power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED,
1210 	.fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
1211 	.burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
1212 	.burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
1213 	.cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
1214 	.aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
1215 	.rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1216 	.smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
1217 	.igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
1218 	.igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
1219 	.antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
1220 	.rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
1221 	.set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
1222 	.proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1223 	.set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1224 	.set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
1225 	.remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
1226 	.peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
1227 	.igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
1228 	.block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
1229 	.set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1230 	.set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1231 	.set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1232 	.txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1233 	.set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1234 	.set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1235 	.en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
1236 	.mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
1237 	.noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
1238 	.noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
1239 	.dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
1240 	.set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
1241 	.atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
1242 	.atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
1243 	.ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
1244 	.mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
1245 	.sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
1246 	.signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
1247 	.signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
1248 	.enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
1249 	.enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
1250 	.cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
1251 	.rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
1252 	.pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
1253 	.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
1254 	.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1255 	.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1256 	.enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED,
1257 };
1258 
1259 static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = {
1260 	.tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
1261 	.rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
1262 	.txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
1263 	.txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
1264 	.txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
1265 	.beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
1266 	.beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
1267 	.resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
1268 	.protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE,
1269 	.dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW,
1270 	.non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
1271 	.agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
1272 	.sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
1273 	.ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
1274 	.ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE,
1275 	.ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
1276 	.ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
1277 	.ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
1278 	.ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
1279 	.ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
1280 	.ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
1281 	.ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
1282 	.ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
1283 	.l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE,
1284 	.dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
1285 	.pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED,
1286 	.pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED,
1287 	.pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED,
1288 	.pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED,
1289 	.pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
1290 	.vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
1291 	.peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
1292 	.bcnflt_stats_update_period =
1293 				WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
1294 	.pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
1295 	.arp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
1296 	.dcs = WMI_10X_PDEV_PARAM_DCS,
1297 	.ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
1298 	.ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
1299 	.ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
1300 	.ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
1301 	.ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
1302 	.dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN,
1303 	.proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED,
1304 	.idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED,
1305 	.power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED,
1306 	.fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
1307 	.burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
1308 	.burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
1309 	.cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
1310 	.aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
1311 	.rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1312 	.smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
1313 	.igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
1314 	.igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
1315 	.antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
1316 	.rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
1317 	.set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
1318 	.proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1319 	.set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1320 	.set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
1321 	.remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
1322 	.peer_sta_ps_statechg_enable =
1323 				WMI_10X_PDEV_PARAM_PEER_STA_PS_STATECHG_ENABLE,
1324 	.igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
1325 	.block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
1326 	.set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1327 	.set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1328 	.set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1329 	.txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1330 	.set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1331 	.set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1332 	.en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
1333 	.mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
1334 	.noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
1335 	.noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
1336 	.dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
1337 	.set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
1338 	.atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
1339 	.atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
1340 	.ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
1341 	.mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
1342 	.sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
1343 	.signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
1344 	.signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
1345 	.enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
1346 	.enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
1347 	.cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
1348 	.rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
1349 	.pdev_reset = WMI_10X_PDEV_PARAM_PDEV_RESET,
1350 	.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
1351 	.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1352 	.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1353 	.enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED,
1354 };
1355 
1356 /* firmware 10.2 specific mappings */
1357 static struct wmi_cmd_map wmi_10_2_cmd_map = {
1358 	.init_cmdid = WMI_10_2_INIT_CMDID,
1359 	.start_scan_cmdid = WMI_10_2_START_SCAN_CMDID,
1360 	.stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID,
1361 	.scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID,
1362 	.scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
1363 	.scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
1364 	.pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
1365 	.pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID,
1366 	.pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID,
1367 	.pdev_pktlog_enable_cmdid = WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
1368 	.pdev_pktlog_disable_cmdid = WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
1369 	.pdev_set_wmm_params_cmdid = WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
1370 	.pdev_set_ht_cap_ie_cmdid = WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
1371 	.pdev_set_vht_cap_ie_cmdid = WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
1372 	.pdev_set_quiet_mode_cmdid = WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
1373 	.pdev_green_ap_ps_enable_cmdid = WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
1374 	.pdev_get_tpc_config_cmdid = WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
1375 	.pdev_set_base_macaddr_cmdid = WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
1376 	.vdev_create_cmdid = WMI_10_2_VDEV_CREATE_CMDID,
1377 	.vdev_delete_cmdid = WMI_10_2_VDEV_DELETE_CMDID,
1378 	.vdev_start_request_cmdid = WMI_10_2_VDEV_START_REQUEST_CMDID,
1379 	.vdev_restart_request_cmdid = WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
1380 	.vdev_up_cmdid = WMI_10_2_VDEV_UP_CMDID,
1381 	.vdev_stop_cmdid = WMI_10_2_VDEV_STOP_CMDID,
1382 	.vdev_down_cmdid = WMI_10_2_VDEV_DOWN_CMDID,
1383 	.vdev_set_param_cmdid = WMI_10_2_VDEV_SET_PARAM_CMDID,
1384 	.vdev_install_key_cmdid = WMI_10_2_VDEV_INSTALL_KEY_CMDID,
1385 	.peer_create_cmdid = WMI_10_2_PEER_CREATE_CMDID,
1386 	.peer_delete_cmdid = WMI_10_2_PEER_DELETE_CMDID,
1387 	.peer_flush_tids_cmdid = WMI_10_2_PEER_FLUSH_TIDS_CMDID,
1388 	.peer_set_param_cmdid = WMI_10_2_PEER_SET_PARAM_CMDID,
1389 	.peer_assoc_cmdid = WMI_10_2_PEER_ASSOC_CMDID,
1390 	.peer_add_wds_entry_cmdid = WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
1391 	.peer_remove_wds_entry_cmdid = WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
1392 	.peer_mcast_group_cmdid = WMI_10_2_PEER_MCAST_GROUP_CMDID,
1393 	.bcn_tx_cmdid = WMI_10_2_BCN_TX_CMDID,
1394 	.pdev_send_bcn_cmdid = WMI_10_2_PDEV_SEND_BCN_CMDID,
1395 	.bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
1396 	.bcn_filter_rx_cmdid = WMI_10_2_BCN_FILTER_RX_CMDID,
1397 	.prb_req_filter_rx_cmdid = WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
1398 	.mgmt_tx_cmdid = WMI_10_2_MGMT_TX_CMDID,
1399 	.prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
1400 	.addba_clear_resp_cmdid = WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
1401 	.addba_send_cmdid = WMI_10_2_ADDBA_SEND_CMDID,
1402 	.addba_status_cmdid = WMI_10_2_ADDBA_STATUS_CMDID,
1403 	.delba_send_cmdid = WMI_10_2_DELBA_SEND_CMDID,
1404 	.addba_set_resp_cmdid = WMI_10_2_ADDBA_SET_RESP_CMDID,
1405 	.send_singleamsdu_cmdid = WMI_10_2_SEND_SINGLEAMSDU_CMDID,
1406 	.sta_powersave_mode_cmdid = WMI_10_2_STA_POWERSAVE_MODE_CMDID,
1407 	.sta_powersave_param_cmdid = WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
1408 	.sta_mimo_ps_mode_cmdid = WMI_10_2_STA_MIMO_PS_MODE_CMDID,
1409 	.pdev_dfs_enable_cmdid = WMI_10_2_PDEV_DFS_ENABLE_CMDID,
1410 	.pdev_dfs_disable_cmdid = WMI_10_2_PDEV_DFS_DISABLE_CMDID,
1411 	.roam_scan_mode = WMI_10_2_ROAM_SCAN_MODE,
1412 	.roam_scan_rssi_threshold = WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
1413 	.roam_scan_period = WMI_10_2_ROAM_SCAN_PERIOD,
1414 	.roam_scan_rssi_change_threshold =
1415 				WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
1416 	.roam_ap_profile = WMI_10_2_ROAM_AP_PROFILE,
1417 	.ofl_scan_add_ap_profile = WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
1418 	.ofl_scan_remove_ap_profile = WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
1419 	.ofl_scan_period = WMI_10_2_OFL_SCAN_PERIOD,
1420 	.p2p_dev_set_device_info = WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
1421 	.p2p_dev_set_discoverability = WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
1422 	.p2p_go_set_beacon_ie = WMI_10_2_P2P_GO_SET_BEACON_IE,
1423 	.p2p_go_set_probe_resp_ie = WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
1424 	.p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
1425 	.ap_ps_peer_param_cmdid = WMI_10_2_AP_PS_PEER_PARAM_CMDID,
1426 	.ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
1427 	.peer_rate_retry_sched_cmdid = WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
1428 	.wlan_profile_trigger_cmdid = WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
1429 	.wlan_profile_set_hist_intvl_cmdid =
1430 				WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
1431 	.wlan_profile_get_profile_data_cmdid =
1432 				WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
1433 	.wlan_profile_enable_profile_id_cmdid =
1434 				WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
1435 	.wlan_profile_list_profile_id_cmdid =
1436 				WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
1437 	.pdev_suspend_cmdid = WMI_10_2_PDEV_SUSPEND_CMDID,
1438 	.pdev_resume_cmdid = WMI_10_2_PDEV_RESUME_CMDID,
1439 	.add_bcn_filter_cmdid = WMI_10_2_ADD_BCN_FILTER_CMDID,
1440 	.rmv_bcn_filter_cmdid = WMI_10_2_RMV_BCN_FILTER_CMDID,
1441 	.wow_add_wake_pattern_cmdid = WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
1442 	.wow_del_wake_pattern_cmdid = WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
1443 	.wow_enable_disable_wake_event_cmdid =
1444 				WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
1445 	.wow_enable_cmdid = WMI_10_2_WOW_ENABLE_CMDID,
1446 	.wow_hostwakeup_from_sleep_cmdid =
1447 				WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
1448 	.rtt_measreq_cmdid = WMI_10_2_RTT_MEASREQ_CMDID,
1449 	.rtt_tsf_cmdid = WMI_10_2_RTT_TSF_CMDID,
1450 	.vdev_spectral_scan_configure_cmdid =
1451 				WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
1452 	.vdev_spectral_scan_enable_cmdid =
1453 				WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
1454 	.request_stats_cmdid = WMI_10_2_REQUEST_STATS_CMDID,
1455 	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
1456 	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
1457 	.gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
1458 	.csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
1459 	.csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
1460 	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
1461 	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
1462 	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
1463 	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
1464 	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
1465 	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
1466 	.echo_cmdid = WMI_10_2_ECHO_CMDID,
1467 	.pdev_utf_cmdid = WMI_10_2_PDEV_UTF_CMDID,
1468 	.dbglog_cfg_cmdid = WMI_10_2_DBGLOG_CFG_CMDID,
1469 	.pdev_qvit_cmdid = WMI_10_2_PDEV_QVIT_CMDID,
1470 	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
1471 	.vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
1472 	.vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
1473 	.force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
1474 	.gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
1475 	.gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
1476 	.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
1477 	.pdev_enable_adaptive_cca_cmdid = WMI_CMD_UNSUPPORTED,
1478 	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
1479 	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
1480 	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
1481 	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
1482 	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
1483 	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
1484 	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
1485 	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
1486 	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
1487 	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
1488 	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
1489 	.nan_cmdid = WMI_CMD_UNSUPPORTED,
1490 	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
1491 	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
1492 	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
1493 	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
1494 	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
1495 	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
1496 	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
1497 	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
1498 	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
1499 	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
1500 	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
1501 	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
1502 	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
1503 	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
1504 	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
1505 	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
1506 	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
1507 	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
1508 	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
1509 	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
1510 	.pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED,
1511 	.radar_found_cmdid = WMI_CMD_UNSUPPORTED,
1512 };
1513 
1514 static struct wmi_pdev_param_map wmi_10_4_pdev_param_map = {
1515 	.tx_chain_mask = WMI_10_4_PDEV_PARAM_TX_CHAIN_MASK,
1516 	.rx_chain_mask = WMI_10_4_PDEV_PARAM_RX_CHAIN_MASK,
1517 	.txpower_limit2g = WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT2G,
1518 	.txpower_limit5g = WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT5G,
1519 	.txpower_scale = WMI_10_4_PDEV_PARAM_TXPOWER_SCALE,
1520 	.beacon_gen_mode = WMI_10_4_PDEV_PARAM_BEACON_GEN_MODE,
1521 	.beacon_tx_mode = WMI_10_4_PDEV_PARAM_BEACON_TX_MODE,
1522 	.resmgr_offchan_mode = WMI_10_4_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
1523 	.protection_mode = WMI_10_4_PDEV_PARAM_PROTECTION_MODE,
1524 	.dynamic_bw = WMI_10_4_PDEV_PARAM_DYNAMIC_BW,
1525 	.non_agg_sw_retry_th = WMI_10_4_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
1526 	.agg_sw_retry_th = WMI_10_4_PDEV_PARAM_AGG_SW_RETRY_TH,
1527 	.sta_kickout_th = WMI_10_4_PDEV_PARAM_STA_KICKOUT_TH,
1528 	.ac_aggrsize_scaling = WMI_10_4_PDEV_PARAM_AC_AGGRSIZE_SCALING,
1529 	.ltr_enable = WMI_10_4_PDEV_PARAM_LTR_ENABLE,
1530 	.ltr_ac_latency_be = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BE,
1531 	.ltr_ac_latency_bk = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BK,
1532 	.ltr_ac_latency_vi = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VI,
1533 	.ltr_ac_latency_vo = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VO,
1534 	.ltr_ac_latency_timeout = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
1535 	.ltr_sleep_override = WMI_10_4_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
1536 	.ltr_rx_override = WMI_10_4_PDEV_PARAM_LTR_RX_OVERRIDE,
1537 	.ltr_tx_activity_timeout = WMI_10_4_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
1538 	.l1ss_enable = WMI_10_4_PDEV_PARAM_L1SS_ENABLE,
1539 	.dsleep_enable = WMI_10_4_PDEV_PARAM_DSLEEP_ENABLE,
1540 	.pcielp_txbuf_flush = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
1541 	.pcielp_txbuf_watermark = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
1542 	.pcielp_txbuf_tmo_en = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
1543 	.pcielp_txbuf_tmo_value = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
1544 	.pdev_stats_update_period =
1545 			WMI_10_4_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
1546 	.vdev_stats_update_period =
1547 			WMI_10_4_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
1548 	.peer_stats_update_period =
1549 			WMI_10_4_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
1550 	.bcnflt_stats_update_period =
1551 			WMI_10_4_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
1552 	.pmf_qos = WMI_10_4_PDEV_PARAM_PMF_QOS,
1553 	.arp_ac_override = WMI_10_4_PDEV_PARAM_ARP_AC_OVERRIDE,
1554 	.dcs = WMI_10_4_PDEV_PARAM_DCS,
1555 	.ani_enable = WMI_10_4_PDEV_PARAM_ANI_ENABLE,
1556 	.ani_poll_period = WMI_10_4_PDEV_PARAM_ANI_POLL_PERIOD,
1557 	.ani_listen_period = WMI_10_4_PDEV_PARAM_ANI_LISTEN_PERIOD,
1558 	.ani_ofdm_level = WMI_10_4_PDEV_PARAM_ANI_OFDM_LEVEL,
1559 	.ani_cck_level = WMI_10_4_PDEV_PARAM_ANI_CCK_LEVEL,
1560 	.dyntxchain = WMI_10_4_PDEV_PARAM_DYNTXCHAIN,
1561 	.proxy_sta = WMI_10_4_PDEV_PARAM_PROXY_STA,
1562 	.idle_ps_config = WMI_10_4_PDEV_PARAM_IDLE_PS_CONFIG,
1563 	.power_gating_sleep = WMI_10_4_PDEV_PARAM_POWER_GATING_SLEEP,
1564 	.fast_channel_reset = WMI_10_4_PDEV_PARAM_FAST_CHANNEL_RESET,
1565 	.burst_dur = WMI_10_4_PDEV_PARAM_BURST_DUR,
1566 	.burst_enable = WMI_10_4_PDEV_PARAM_BURST_ENABLE,
1567 	.cal_period = WMI_10_4_PDEV_PARAM_CAL_PERIOD,
1568 	.aggr_burst = WMI_10_4_PDEV_PARAM_AGGR_BURST,
1569 	.rx_decap_mode = WMI_10_4_PDEV_PARAM_RX_DECAP_MODE,
1570 	.smart_antenna_default_antenna =
1571 			WMI_10_4_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA,
1572 	.igmpmld_override = WMI_10_4_PDEV_PARAM_IGMPMLD_OVERRIDE,
1573 	.igmpmld_tid = WMI_10_4_PDEV_PARAM_IGMPMLD_TID,
1574 	.antenna_gain = WMI_10_4_PDEV_PARAM_ANTENNA_GAIN,
1575 	.rx_filter = WMI_10_4_PDEV_PARAM_RX_FILTER,
1576 	.set_mcast_to_ucast_tid = WMI_10_4_PDEV_SET_MCAST_TO_UCAST_TID,
1577 	.proxy_sta_mode = WMI_10_4_PDEV_PARAM_PROXY_STA_MODE,
1578 	.set_mcast2ucast_mode = WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_MODE,
1579 	.set_mcast2ucast_buffer = WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_BUFFER,
1580 	.remove_mcast2ucast_buffer =
1581 			WMI_10_4_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
1582 	.peer_sta_ps_statechg_enable =
1583 			WMI_10_4_PDEV_PEER_STA_PS_STATECHG_ENABLE,
1584 	.igmpmld_ac_override = WMI_10_4_PDEV_PARAM_IGMPMLD_AC_OVERRIDE,
1585 	.block_interbss = WMI_10_4_PDEV_PARAM_BLOCK_INTERBSS,
1586 	.set_disable_reset_cmdid = WMI_10_4_PDEV_PARAM_SET_DISABLE_RESET_CMDID,
1587 	.set_msdu_ttl_cmdid = WMI_10_4_PDEV_PARAM_SET_MSDU_TTL_CMDID,
1588 	.set_ppdu_duration_cmdid = WMI_10_4_PDEV_PARAM_SET_PPDU_DURATION_CMDID,
1589 	.txbf_sound_period_cmdid = WMI_10_4_PDEV_PARAM_TXBF_SOUND_PERIOD_CMDID,
1590 	.set_promisc_mode_cmdid = WMI_10_4_PDEV_PARAM_SET_PROMISC_MODE_CMDID,
1591 	.set_burst_mode_cmdid = WMI_10_4_PDEV_PARAM_SET_BURST_MODE_CMDID,
1592 	.en_stats = WMI_10_4_PDEV_PARAM_EN_STATS,
1593 	.mu_group_policy = WMI_10_4_PDEV_PARAM_MU_GROUP_POLICY,
1594 	.noise_detection = WMI_10_4_PDEV_PARAM_NOISE_DETECTION,
1595 	.noise_threshold = WMI_10_4_PDEV_PARAM_NOISE_THRESHOLD,
1596 	.dpd_enable = WMI_10_4_PDEV_PARAM_DPD_ENABLE,
1597 	.set_mcast_bcast_echo = WMI_10_4_PDEV_PARAM_SET_MCAST_BCAST_ECHO,
1598 	.atf_strict_sch = WMI_10_4_PDEV_PARAM_ATF_STRICT_SCH,
1599 	.atf_sched_duration = WMI_10_4_PDEV_PARAM_ATF_SCHED_DURATION,
1600 	.ant_plzn = WMI_10_4_PDEV_PARAM_ANT_PLZN,
1601 	.mgmt_retry_limit = WMI_10_4_PDEV_PARAM_MGMT_RETRY_LIMIT,
1602 	.sensitivity_level = WMI_10_4_PDEV_PARAM_SENSITIVITY_LEVEL,
1603 	.signed_txpower_2g = WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_2G,
1604 	.signed_txpower_5g = WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_5G,
1605 	.enable_per_tid_amsdu = WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMSDU,
1606 	.enable_per_tid_ampdu = WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMPDU,
1607 	.cca_threshold = WMI_10_4_PDEV_PARAM_CCA_THRESHOLD,
1608 	.rts_fixed_rate = WMI_10_4_PDEV_PARAM_RTS_FIXED_RATE,
1609 	.pdev_reset = WMI_10_4_PDEV_PARAM_PDEV_RESET,
1610 	.wapi_mbssid_offset = WMI_10_4_PDEV_PARAM_WAPI_MBSSID_OFFSET,
1611 	.arp_srcaddr = WMI_10_4_PDEV_PARAM_ARP_SRCADDR,
1612 	.arp_dstaddr = WMI_10_4_PDEV_PARAM_ARP_DSTADDR,
1613 	.enable_btcoex = WMI_10_4_PDEV_PARAM_ENABLE_BTCOEX,
1614 };
1615 
1616 static const u8 wmi_key_cipher_suites[] = {
1617 	[WMI_CIPHER_NONE] = WMI_CIPHER_NONE,
1618 	[WMI_CIPHER_WEP] = WMI_CIPHER_WEP,
1619 	[WMI_CIPHER_TKIP] = WMI_CIPHER_TKIP,
1620 	[WMI_CIPHER_AES_OCB] = WMI_CIPHER_AES_OCB,
1621 	[WMI_CIPHER_AES_CCM] = WMI_CIPHER_AES_CCM,
1622 	[WMI_CIPHER_WAPI] = WMI_CIPHER_WAPI,
1623 	[WMI_CIPHER_CKIP] = WMI_CIPHER_CKIP,
1624 	[WMI_CIPHER_AES_CMAC] = WMI_CIPHER_AES_CMAC,
1625 	[WMI_CIPHER_AES_GCM] = WMI_CIPHER_AES_GCM,
1626 };
1627 
1628 static const u8 wmi_tlv_key_cipher_suites[] = {
1629 	[WMI_CIPHER_NONE] = WMI_TLV_CIPHER_NONE,
1630 	[WMI_CIPHER_WEP] = WMI_TLV_CIPHER_WEP,
1631 	[WMI_CIPHER_TKIP] = WMI_TLV_CIPHER_TKIP,
1632 	[WMI_CIPHER_AES_OCB] = WMI_TLV_CIPHER_AES_OCB,
1633 	[WMI_CIPHER_AES_CCM] = WMI_TLV_CIPHER_AES_CCM,
1634 	[WMI_CIPHER_WAPI] = WMI_TLV_CIPHER_WAPI,
1635 	[WMI_CIPHER_CKIP] = WMI_TLV_CIPHER_CKIP,
1636 	[WMI_CIPHER_AES_CMAC] = WMI_TLV_CIPHER_AES_CMAC,
1637 	[WMI_CIPHER_AES_GCM] = WMI_TLV_CIPHER_AES_GCM,
1638 };
1639 
1640 static const struct wmi_peer_flags_map wmi_peer_flags_map = {
1641 	.auth = WMI_PEER_AUTH,
1642 	.qos = WMI_PEER_QOS,
1643 	.need_ptk_4_way = WMI_PEER_NEED_PTK_4_WAY,
1644 	.need_gtk_2_way = WMI_PEER_NEED_GTK_2_WAY,
1645 	.apsd = WMI_PEER_APSD,
1646 	.ht = WMI_PEER_HT,
1647 	.bw40 = WMI_PEER_40MHZ,
1648 	.stbc = WMI_PEER_STBC,
1649 	.ldbc = WMI_PEER_LDPC,
1650 	.dyn_mimops = WMI_PEER_DYN_MIMOPS,
1651 	.static_mimops = WMI_PEER_STATIC_MIMOPS,
1652 	.spatial_mux = WMI_PEER_SPATIAL_MUX,
1653 	.vht = WMI_PEER_VHT,
1654 	.bw80 = WMI_PEER_80MHZ,
1655 	.vht_2g = WMI_PEER_VHT_2G,
1656 	.pmf = WMI_PEER_PMF,
1657 	.bw160 = WMI_PEER_160MHZ,
1658 };
1659 
1660 static const struct wmi_peer_flags_map wmi_10x_peer_flags_map = {
1661 	.auth = WMI_10X_PEER_AUTH,
1662 	.qos = WMI_10X_PEER_QOS,
1663 	.need_ptk_4_way = WMI_10X_PEER_NEED_PTK_4_WAY,
1664 	.need_gtk_2_way = WMI_10X_PEER_NEED_GTK_2_WAY,
1665 	.apsd = WMI_10X_PEER_APSD,
1666 	.ht = WMI_10X_PEER_HT,
1667 	.bw40 = WMI_10X_PEER_40MHZ,
1668 	.stbc = WMI_10X_PEER_STBC,
1669 	.ldbc = WMI_10X_PEER_LDPC,
1670 	.dyn_mimops = WMI_10X_PEER_DYN_MIMOPS,
1671 	.static_mimops = WMI_10X_PEER_STATIC_MIMOPS,
1672 	.spatial_mux = WMI_10X_PEER_SPATIAL_MUX,
1673 	.vht = WMI_10X_PEER_VHT,
1674 	.bw80 = WMI_10X_PEER_80MHZ,
1675 	.bw160 = WMI_10X_PEER_160MHZ,
1676 };
1677 
1678 static const struct wmi_peer_flags_map wmi_10_2_peer_flags_map = {
1679 	.auth = WMI_10_2_PEER_AUTH,
1680 	.qos = WMI_10_2_PEER_QOS,
1681 	.need_ptk_4_way = WMI_10_2_PEER_NEED_PTK_4_WAY,
1682 	.need_gtk_2_way = WMI_10_2_PEER_NEED_GTK_2_WAY,
1683 	.apsd = WMI_10_2_PEER_APSD,
1684 	.ht = WMI_10_2_PEER_HT,
1685 	.bw40 = WMI_10_2_PEER_40MHZ,
1686 	.stbc = WMI_10_2_PEER_STBC,
1687 	.ldbc = WMI_10_2_PEER_LDPC,
1688 	.dyn_mimops = WMI_10_2_PEER_DYN_MIMOPS,
1689 	.static_mimops = WMI_10_2_PEER_STATIC_MIMOPS,
1690 	.spatial_mux = WMI_10_2_PEER_SPATIAL_MUX,
1691 	.vht = WMI_10_2_PEER_VHT,
1692 	.bw80 = WMI_10_2_PEER_80MHZ,
1693 	.vht_2g = WMI_10_2_PEER_VHT_2G,
1694 	.pmf = WMI_10_2_PEER_PMF,
1695 	.bw160 = WMI_10_2_PEER_160MHZ,
1696 };
1697 
ath10k_wmi_put_wmi_channel(struct ath10k * ar,struct wmi_channel * ch,const struct wmi_channel_arg * arg)1698 void ath10k_wmi_put_wmi_channel(struct ath10k *ar, struct wmi_channel *ch,
1699 				const struct wmi_channel_arg *arg)
1700 {
1701 	u32 flags = 0;
1702 	struct ieee80211_channel *chan = NULL;
1703 
1704 	memset(ch, 0, sizeof(*ch));
1705 
1706 	if (arg->passive)
1707 		flags |= WMI_CHAN_FLAG_PASSIVE;
1708 	if (arg->allow_ibss)
1709 		flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED;
1710 	if (arg->allow_ht)
1711 		flags |= WMI_CHAN_FLAG_ALLOW_HT;
1712 	if (arg->allow_vht)
1713 		flags |= WMI_CHAN_FLAG_ALLOW_VHT;
1714 	if (arg->ht40plus)
1715 		flags |= WMI_CHAN_FLAG_HT40_PLUS;
1716 	if (arg->chan_radar)
1717 		flags |= WMI_CHAN_FLAG_DFS;
1718 
1719 	ch->band_center_freq2 = 0;
1720 	ch->mhz = __cpu_to_le32(arg->freq);
1721 	ch->band_center_freq1 = __cpu_to_le32(arg->band_center_freq1);
1722 	if (arg->mode == MODE_11AC_VHT80_80) {
1723 		ch->band_center_freq2 = __cpu_to_le32(arg->band_center_freq2);
1724 		chan = ieee80211_get_channel(ar->hw->wiphy,
1725 					     arg->band_center_freq2 - 10);
1726 	}
1727 
1728 	if (arg->mode == MODE_11AC_VHT160) {
1729 		u32 band_center_freq1;
1730 		u32 band_center_freq2;
1731 
1732 		if (arg->freq > arg->band_center_freq1) {
1733 			band_center_freq1 = arg->band_center_freq1 + 40;
1734 			band_center_freq2 = arg->band_center_freq1 - 40;
1735 		} else {
1736 			band_center_freq1 = arg->band_center_freq1 - 40;
1737 			band_center_freq2 = arg->band_center_freq1 + 40;
1738 		}
1739 
1740 		ch->band_center_freq1 =
1741 					__cpu_to_le32(band_center_freq1);
1742 		/* Minus 10 to get a defined 5G channel frequency*/
1743 		chan = ieee80211_get_channel(ar->hw->wiphy,
1744 					     band_center_freq2 - 10);
1745 		/* The center frequency of the entire VHT160 */
1746 		ch->band_center_freq2 = __cpu_to_le32(arg->band_center_freq1);
1747 	}
1748 
1749 	if (chan && chan->flags & IEEE80211_CHAN_RADAR)
1750 		flags |= WMI_CHAN_FLAG_DFS_CFREQ2;
1751 
1752 	ch->min_power = arg->min_power;
1753 	ch->max_power = arg->max_power;
1754 	ch->reg_power = arg->max_reg_power;
1755 	ch->antenna_max = arg->max_antenna_gain;
1756 	ch->max_tx_power = arg->max_power;
1757 
1758 	/* mode & flags share storage */
1759 	ch->mode = arg->mode;
1760 	ch->flags |= __cpu_to_le32(flags);
1761 }
1762 
ath10k_wmi_wait_for_service_ready(struct ath10k * ar)1763 int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
1764 {
1765 	unsigned long time_left, i;
1766 
1767 	time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
1768 						WMI_SERVICE_READY_TIMEOUT_HZ);
1769 	if (!time_left) {
1770 		/* Sometimes the PCI HIF doesn't receive interrupt
1771 		 * for the service ready message even if the buffer
1772 		 * was completed. PCIe sniffer shows that it's
1773 		 * because the corresponding CE ring doesn't fires
1774 		 * it. Workaround here by polling CE rings once.
1775 		 */
1776 		ath10k_warn(ar, "failed to receive service ready completion, polling..\n");
1777 
1778 		for (i = 0; i < CE_COUNT; i++)
1779 			ath10k_hif_send_complete_check(ar, i, 1);
1780 
1781 		time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
1782 							WMI_SERVICE_READY_TIMEOUT_HZ);
1783 		if (!time_left) {
1784 			ath10k_warn(ar, "polling timed out\n");
1785 			return -ETIMEDOUT;
1786 		}
1787 
1788 		ath10k_warn(ar, "service ready completion received, continuing normally\n");
1789 	}
1790 
1791 	return 0;
1792 }
1793 
ath10k_wmi_wait_for_unified_ready(struct ath10k * ar)1794 int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
1795 {
1796 	unsigned long time_left;
1797 
1798 	time_left = wait_for_completion_timeout(&ar->wmi.unified_ready,
1799 						WMI_UNIFIED_READY_TIMEOUT_HZ);
1800 	if (!time_left)
1801 		return -ETIMEDOUT;
1802 	return 0;
1803 }
1804 
ath10k_wmi_alloc_skb(struct ath10k * ar,u32 len)1805 struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len)
1806 {
1807 	struct sk_buff *skb;
1808 	u32 round_len = roundup(len, 4);
1809 
1810 	skb = ath10k_htc_alloc_skb(ar, WMI_SKB_HEADROOM + round_len);
1811 	if (!skb)
1812 		return NULL;
1813 
1814 	skb_reserve(skb, WMI_SKB_HEADROOM);
1815 	if (!IS_ALIGNED((unsigned long)skb->data, 4))
1816 		ath10k_warn(ar, "Unaligned WMI skb\n");
1817 
1818 	skb_put(skb, round_len);
1819 	memset(skb->data, 0, round_len);
1820 
1821 	return skb;
1822 }
1823 
ath10k_wmi_htc_tx_complete(struct ath10k * ar,struct sk_buff * skb)1824 static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
1825 {
1826 	dev_kfree_skb(skb);
1827 }
1828 
ath10k_wmi_cmd_send_nowait(struct ath10k * ar,struct sk_buff * skb,u32 cmd_id)1829 int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
1830 			       u32 cmd_id)
1831 {
1832 	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
1833 	struct wmi_cmd_hdr *cmd_hdr;
1834 	int ret;
1835 	u32 cmd = 0;
1836 
1837 	if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
1838 		return -ENOMEM;
1839 
1840 	cmd |= SM(cmd_id, WMI_CMD_HDR_CMD_ID);
1841 
1842 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
1843 	cmd_hdr->cmd_id = __cpu_to_le32(cmd);
1844 
1845 	memset(skb_cb, 0, sizeof(*skb_cb));
1846 	trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len);
1847 	ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
1848 
1849 	if (ret)
1850 		goto err_pull;
1851 
1852 	return 0;
1853 
1854 err_pull:
1855 	skb_pull(skb, sizeof(struct wmi_cmd_hdr));
1856 	return ret;
1857 }
1858 
ath10k_wmi_tx_beacon_nowait(struct ath10k_vif * arvif)1859 static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif)
1860 {
1861 	struct ath10k *ar = arvif->ar;
1862 	struct ath10k_skb_cb *cb;
1863 	struct sk_buff *bcn;
1864 	bool dtim_zero;
1865 	bool deliver_cab;
1866 	int ret;
1867 
1868 	spin_lock_bh(&ar->data_lock);
1869 
1870 	bcn = arvif->beacon;
1871 
1872 	if (!bcn)
1873 		goto unlock;
1874 
1875 	cb = ATH10K_SKB_CB(bcn);
1876 
1877 	switch (arvif->beacon_state) {
1878 	case ATH10K_BEACON_SENDING:
1879 	case ATH10K_BEACON_SENT:
1880 		break;
1881 	case ATH10K_BEACON_SCHEDULED:
1882 		arvif->beacon_state = ATH10K_BEACON_SENDING;
1883 		spin_unlock_bh(&ar->data_lock);
1884 
1885 		dtim_zero = !!(cb->flags & ATH10K_SKB_F_DTIM_ZERO);
1886 		deliver_cab = !!(cb->flags & ATH10K_SKB_F_DELIVER_CAB);
1887 		ret = ath10k_wmi_beacon_send_ref_nowait(arvif->ar,
1888 							arvif->vdev_id,
1889 							bcn->data, bcn->len,
1890 							cb->paddr,
1891 							dtim_zero,
1892 							deliver_cab);
1893 
1894 		spin_lock_bh(&ar->data_lock);
1895 
1896 		if (ret == 0)
1897 			arvif->beacon_state = ATH10K_BEACON_SENT;
1898 		else
1899 			arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
1900 	}
1901 
1902 unlock:
1903 	spin_unlock_bh(&ar->data_lock);
1904 }
1905 
ath10k_wmi_tx_beacons_iter(void * data,u8 * mac,struct ieee80211_vif * vif)1906 static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
1907 				       struct ieee80211_vif *vif)
1908 {
1909 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
1910 
1911 	ath10k_wmi_tx_beacon_nowait(arvif);
1912 }
1913 
ath10k_wmi_tx_beacons_nowait(struct ath10k * ar)1914 static void ath10k_wmi_tx_beacons_nowait(struct ath10k *ar)
1915 {
1916 	ieee80211_iterate_active_interfaces_atomic(ar->hw,
1917 						   ATH10K_ITER_NORMAL_FLAGS,
1918 						   ath10k_wmi_tx_beacons_iter,
1919 						   NULL);
1920 }
1921 
ath10k_wmi_op_ep_tx_credits(struct ath10k * ar)1922 static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar)
1923 {
1924 	/* try to send pending beacons first. they take priority */
1925 	ath10k_wmi_tx_beacons_nowait(ar);
1926 
1927 	wake_up(&ar->wmi.tx_credits_wq);
1928 }
1929 
ath10k_wmi_cmd_send(struct ath10k * ar,struct sk_buff * skb,u32 cmd_id)1930 int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
1931 {
1932 	int ret = -EOPNOTSUPP;
1933 
1934 	might_sleep();
1935 
1936 	if (cmd_id == WMI_CMD_UNSUPPORTED) {
1937 		ath10k_warn(ar, "wmi command %d is not supported by firmware\n",
1938 			    cmd_id);
1939 		return ret;
1940 	}
1941 
1942 	wait_event_timeout(ar->wmi.tx_credits_wq, ({
1943 		/* try to send pending beacons first. they take priority */
1944 		ath10k_wmi_tx_beacons_nowait(ar);
1945 
1946 		ret = ath10k_wmi_cmd_send_nowait(ar, skb, cmd_id);
1947 
1948 		if (ret && test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
1949 			ret = -ESHUTDOWN;
1950 
1951 		(ret != -EAGAIN);
1952 	}), 3 * HZ);
1953 
1954 	if (ret)
1955 		dev_kfree_skb_any(skb);
1956 
1957 	if (ret == -EAGAIN) {
1958 		ath10k_warn(ar, "wmi command %d timeout, restarting hardware\n",
1959 			    cmd_id);
1960 		ath10k_core_start_recovery(ar);
1961 	}
1962 
1963 	return ret;
1964 }
1965 
1966 static struct sk_buff *
ath10k_wmi_op_gen_mgmt_tx(struct ath10k * ar,struct sk_buff * msdu)1967 ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
1968 {
1969 	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
1970 	struct ath10k_vif *arvif;
1971 	struct wmi_mgmt_tx_cmd *cmd;
1972 	struct ieee80211_hdr *hdr;
1973 	struct sk_buff *skb;
1974 	int len;
1975 	u32 vdev_id;
1976 	u32 buf_len = msdu->len;
1977 	u16 fc;
1978 	const u8 *peer_addr;
1979 
1980 	hdr = (struct ieee80211_hdr *)msdu->data;
1981 	fc = le16_to_cpu(hdr->frame_control);
1982 
1983 	if (cb->vif) {
1984 		arvif = (void *)cb->vif->drv_priv;
1985 		vdev_id = arvif->vdev_id;
1986 	} else {
1987 		vdev_id = 0;
1988 	}
1989 
1990 	if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control)))
1991 		return ERR_PTR(-EINVAL);
1992 
1993 	len = sizeof(cmd->hdr) + msdu->len;
1994 
1995 	if ((ieee80211_is_action(hdr->frame_control) ||
1996 	     ieee80211_is_deauth(hdr->frame_control) ||
1997 	     ieee80211_is_disassoc(hdr->frame_control)) &&
1998 	     ieee80211_has_protected(hdr->frame_control)) {
1999 		peer_addr = hdr->addr1;
2000 		if (is_multicast_ether_addr(peer_addr)) {
2001 			len += sizeof(struct ieee80211_mmie_16);
2002 			buf_len += sizeof(struct ieee80211_mmie_16);
2003 		} else {
2004 			if (cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP ||
2005 			    cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP_256) {
2006 				len += IEEE80211_GCMP_MIC_LEN;
2007 				buf_len += IEEE80211_GCMP_MIC_LEN;
2008 			} else {
2009 				len += IEEE80211_CCMP_MIC_LEN;
2010 				buf_len += IEEE80211_CCMP_MIC_LEN;
2011 			}
2012 		}
2013 	}
2014 
2015 	len = round_up(len, 4);
2016 
2017 	skb = ath10k_wmi_alloc_skb(ar, len);
2018 	if (!skb)
2019 		return ERR_PTR(-ENOMEM);
2020 
2021 	cmd = (struct wmi_mgmt_tx_cmd *)skb->data;
2022 
2023 	cmd->hdr.vdev_id = __cpu_to_le32(vdev_id);
2024 	cmd->hdr.tx_rate = 0;
2025 	cmd->hdr.tx_power = 0;
2026 	cmd->hdr.buf_len = __cpu_to_le32(buf_len);
2027 
2028 	ether_addr_copy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr));
2029 	memcpy(cmd->buf, msdu->data, msdu->len);
2030 
2031 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %pK len %d ftype %02x stype %02x\n",
2032 		   msdu, skb->len, fc & IEEE80211_FCTL_FTYPE,
2033 		   fc & IEEE80211_FCTL_STYPE);
2034 	trace_ath10k_tx_hdr(ar, skb->data, skb->len);
2035 	trace_ath10k_tx_payload(ar, skb->data, skb->len);
2036 
2037 	return skb;
2038 }
2039 
ath10k_wmi_event_scan_started(struct ath10k * ar)2040 static void ath10k_wmi_event_scan_started(struct ath10k *ar)
2041 {
2042 	lockdep_assert_held(&ar->data_lock);
2043 
2044 	switch (ar->scan.state) {
2045 	case ATH10K_SCAN_IDLE:
2046 	case ATH10K_SCAN_RUNNING:
2047 	case ATH10K_SCAN_ABORTING:
2048 		ath10k_warn(ar, "received scan started event in an invalid scan state: %s (%d)\n",
2049 			    ath10k_scan_state_str(ar->scan.state),
2050 			    ar->scan.state);
2051 		break;
2052 	case ATH10K_SCAN_STARTING:
2053 		ar->scan.state = ATH10K_SCAN_RUNNING;
2054 
2055 		if (ar->scan.is_roc)
2056 			ieee80211_ready_on_channel(ar->hw);
2057 
2058 		complete(&ar->scan.started);
2059 		break;
2060 	}
2061 }
2062 
ath10k_wmi_event_scan_start_failed(struct ath10k * ar)2063 static void ath10k_wmi_event_scan_start_failed(struct ath10k *ar)
2064 {
2065 	lockdep_assert_held(&ar->data_lock);
2066 
2067 	switch (ar->scan.state) {
2068 	case ATH10K_SCAN_IDLE:
2069 	case ATH10K_SCAN_RUNNING:
2070 	case ATH10K_SCAN_ABORTING:
2071 		ath10k_warn(ar, "received scan start failed event in an invalid scan state: %s (%d)\n",
2072 			    ath10k_scan_state_str(ar->scan.state),
2073 			    ar->scan.state);
2074 		break;
2075 	case ATH10K_SCAN_STARTING:
2076 		complete(&ar->scan.started);
2077 		__ath10k_scan_finish(ar);
2078 		break;
2079 	}
2080 }
2081 
ath10k_wmi_event_scan_completed(struct ath10k * ar)2082 static void ath10k_wmi_event_scan_completed(struct ath10k *ar)
2083 {
2084 	lockdep_assert_held(&ar->data_lock);
2085 
2086 	switch (ar->scan.state) {
2087 	case ATH10K_SCAN_IDLE:
2088 	case ATH10K_SCAN_STARTING:
2089 		/* One suspected reason scan can be completed while starting is
2090 		 * if firmware fails to deliver all scan events to the host,
2091 		 * e.g. when transport pipe is full. This has been observed
2092 		 * with spectral scan phyerr events starving wmi transport
2093 		 * pipe. In such case the "scan completed" event should be (and
2094 		 * is) ignored by the host as it may be just firmware's scan
2095 		 * state machine recovering.
2096 		 */
2097 		ath10k_warn(ar, "received scan completed event in an invalid scan state: %s (%d)\n",
2098 			    ath10k_scan_state_str(ar->scan.state),
2099 			    ar->scan.state);
2100 		break;
2101 	case ATH10K_SCAN_RUNNING:
2102 	case ATH10K_SCAN_ABORTING:
2103 		__ath10k_scan_finish(ar);
2104 		break;
2105 	}
2106 }
2107 
ath10k_wmi_event_scan_bss_chan(struct ath10k * ar)2108 static void ath10k_wmi_event_scan_bss_chan(struct ath10k *ar)
2109 {
2110 	lockdep_assert_held(&ar->data_lock);
2111 
2112 	switch (ar->scan.state) {
2113 	case ATH10K_SCAN_IDLE:
2114 	case ATH10K_SCAN_STARTING:
2115 		ath10k_warn(ar, "received scan bss chan event in an invalid scan state: %s (%d)\n",
2116 			    ath10k_scan_state_str(ar->scan.state),
2117 			    ar->scan.state);
2118 		break;
2119 	case ATH10K_SCAN_RUNNING:
2120 	case ATH10K_SCAN_ABORTING:
2121 		ar->scan_channel = NULL;
2122 		break;
2123 	}
2124 }
2125 
ath10k_wmi_event_scan_foreign_chan(struct ath10k * ar,u32 freq)2126 static void ath10k_wmi_event_scan_foreign_chan(struct ath10k *ar, u32 freq)
2127 {
2128 	lockdep_assert_held(&ar->data_lock);
2129 
2130 	switch (ar->scan.state) {
2131 	case ATH10K_SCAN_IDLE:
2132 	case ATH10K_SCAN_STARTING:
2133 		ath10k_warn(ar, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
2134 			    ath10k_scan_state_str(ar->scan.state),
2135 			    ar->scan.state);
2136 		break;
2137 	case ATH10K_SCAN_RUNNING:
2138 	case ATH10K_SCAN_ABORTING:
2139 		ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
2140 
2141 		if (ar->scan.is_roc && ar->scan.roc_freq == freq)
2142 			complete(&ar->scan.on_channel);
2143 		break;
2144 	}
2145 }
2146 
2147 static const char *
ath10k_wmi_event_scan_type_str(enum wmi_scan_event_type type,enum wmi_scan_completion_reason reason)2148 ath10k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
2149 			       enum wmi_scan_completion_reason reason)
2150 {
2151 	switch (type) {
2152 	case WMI_SCAN_EVENT_STARTED:
2153 		return "started";
2154 	case WMI_SCAN_EVENT_COMPLETED:
2155 		switch (reason) {
2156 		case WMI_SCAN_REASON_COMPLETED:
2157 			return "completed";
2158 		case WMI_SCAN_REASON_CANCELLED:
2159 			return "completed [cancelled]";
2160 		case WMI_SCAN_REASON_PREEMPTED:
2161 			return "completed [preempted]";
2162 		case WMI_SCAN_REASON_TIMEDOUT:
2163 			return "completed [timedout]";
2164 		case WMI_SCAN_REASON_INTERNAL_FAILURE:
2165 			return "completed [internal err]";
2166 		case WMI_SCAN_REASON_MAX:
2167 			break;
2168 		}
2169 		return "completed [unknown]";
2170 	case WMI_SCAN_EVENT_BSS_CHANNEL:
2171 		return "bss channel";
2172 	case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
2173 		return "foreign channel";
2174 	case WMI_SCAN_EVENT_DEQUEUED:
2175 		return "dequeued";
2176 	case WMI_SCAN_EVENT_PREEMPTED:
2177 		return "preempted";
2178 	case WMI_SCAN_EVENT_START_FAILED:
2179 		return "start failed";
2180 	case WMI_SCAN_EVENT_RESTARTED:
2181 		return "restarted";
2182 	case WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT:
2183 		return "foreign channel exit";
2184 	default:
2185 		return "unknown";
2186 	}
2187 }
2188 
ath10k_wmi_op_pull_scan_ev(struct ath10k * ar,struct sk_buff * skb,struct wmi_scan_ev_arg * arg)2189 static int ath10k_wmi_op_pull_scan_ev(struct ath10k *ar, struct sk_buff *skb,
2190 				      struct wmi_scan_ev_arg *arg)
2191 {
2192 	struct wmi_scan_event *ev = (void *)skb->data;
2193 
2194 	if (skb->len < sizeof(*ev))
2195 		return -EPROTO;
2196 
2197 	skb_pull(skb, sizeof(*ev));
2198 	arg->event_type = ev->event_type;
2199 	arg->reason = ev->reason;
2200 	arg->channel_freq = ev->channel_freq;
2201 	arg->scan_req_id = ev->scan_req_id;
2202 	arg->scan_id = ev->scan_id;
2203 	arg->vdev_id = ev->vdev_id;
2204 
2205 	return 0;
2206 }
2207 
ath10k_wmi_event_scan(struct ath10k * ar,struct sk_buff * skb)2208 int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
2209 {
2210 	struct wmi_scan_ev_arg arg = {};
2211 	enum wmi_scan_event_type event_type;
2212 	enum wmi_scan_completion_reason reason;
2213 	u32 freq;
2214 	u32 req_id;
2215 	u32 scan_id;
2216 	u32 vdev_id;
2217 	int ret;
2218 
2219 	ret = ath10k_wmi_pull_scan(ar, skb, &arg);
2220 	if (ret) {
2221 		ath10k_warn(ar, "failed to parse scan event: %d\n", ret);
2222 		return ret;
2223 	}
2224 
2225 	event_type = __le32_to_cpu(arg.event_type);
2226 	reason = __le32_to_cpu(arg.reason);
2227 	freq = __le32_to_cpu(arg.channel_freq);
2228 	req_id = __le32_to_cpu(arg.scan_req_id);
2229 	scan_id = __le32_to_cpu(arg.scan_id);
2230 	vdev_id = __le32_to_cpu(arg.vdev_id);
2231 
2232 	spin_lock_bh(&ar->data_lock);
2233 
2234 	ath10k_dbg(ar, ATH10K_DBG_WMI,
2235 		   "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
2236 		   ath10k_wmi_event_scan_type_str(event_type, reason),
2237 		   event_type, reason, freq, req_id, scan_id, vdev_id,
2238 		   ath10k_scan_state_str(ar->scan.state), ar->scan.state);
2239 
2240 	switch (event_type) {
2241 	case WMI_SCAN_EVENT_STARTED:
2242 		ath10k_wmi_event_scan_started(ar);
2243 		break;
2244 	case WMI_SCAN_EVENT_COMPLETED:
2245 		ath10k_wmi_event_scan_completed(ar);
2246 		break;
2247 	case WMI_SCAN_EVENT_BSS_CHANNEL:
2248 		ath10k_wmi_event_scan_bss_chan(ar);
2249 		break;
2250 	case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
2251 		ath10k_wmi_event_scan_foreign_chan(ar, freq);
2252 		break;
2253 	case WMI_SCAN_EVENT_START_FAILED:
2254 		ath10k_warn(ar, "received scan start failure event\n");
2255 		ath10k_wmi_event_scan_start_failed(ar);
2256 		break;
2257 	case WMI_SCAN_EVENT_DEQUEUED:
2258 	case WMI_SCAN_EVENT_PREEMPTED:
2259 	case WMI_SCAN_EVENT_RESTARTED:
2260 	case WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT:
2261 	default:
2262 		break;
2263 	}
2264 
2265 	spin_unlock_bh(&ar->data_lock);
2266 	return 0;
2267 }
2268 
2269 /* If keys are configured, HW decrypts all frames
2270  * with protected bit set. Mark such frames as decrypted.
2271  */
ath10k_wmi_handle_wep_reauth(struct ath10k * ar,struct sk_buff * skb,struct ieee80211_rx_status * status)2272 static void ath10k_wmi_handle_wep_reauth(struct ath10k *ar,
2273 					 struct sk_buff *skb,
2274 					 struct ieee80211_rx_status *status)
2275 {
2276 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2277 	unsigned int hdrlen;
2278 	bool peer_key;
2279 	u8 *addr, keyidx;
2280 
2281 	if (!ieee80211_is_auth(hdr->frame_control) ||
2282 	    !ieee80211_has_protected(hdr->frame_control))
2283 		return;
2284 
2285 	hdrlen = ieee80211_hdrlen(hdr->frame_control);
2286 	if (skb->len < (hdrlen + IEEE80211_WEP_IV_LEN))
2287 		return;
2288 
2289 	keyidx = skb->data[hdrlen + (IEEE80211_WEP_IV_LEN - 1)] >> WEP_KEYID_SHIFT;
2290 	addr = ieee80211_get_SA(hdr);
2291 
2292 	spin_lock_bh(&ar->data_lock);
2293 	peer_key = ath10k_mac_is_peer_wep_key_set(ar, addr, keyidx);
2294 	spin_unlock_bh(&ar->data_lock);
2295 
2296 	if (peer_key) {
2297 		ath10k_dbg(ar, ATH10K_DBG_MAC,
2298 			   "mac wep key present for peer %pM\n", addr);
2299 		status->flag |= RX_FLAG_DECRYPTED;
2300 	}
2301 }
2302 
ath10k_wmi_op_pull_mgmt_rx_ev(struct ath10k * ar,struct sk_buff * skb,struct wmi_mgmt_rx_ev_arg * arg)2303 static int ath10k_wmi_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb,
2304 					 struct wmi_mgmt_rx_ev_arg *arg)
2305 {
2306 	struct wmi_mgmt_rx_event_v1 *ev_v1;
2307 	struct wmi_mgmt_rx_event_v2 *ev_v2;
2308 	struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
2309 	struct wmi_mgmt_rx_ext_info *ext_info;
2310 	size_t pull_len;
2311 	u32 msdu_len;
2312 	u32 len;
2313 
2314 	if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX,
2315 		     ar->running_fw->fw_file.fw_features)) {
2316 		ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
2317 		ev_hdr = &ev_v2->hdr.v1;
2318 		pull_len = sizeof(*ev_v2);
2319 	} else {
2320 		ev_v1 = (struct wmi_mgmt_rx_event_v1 *)skb->data;
2321 		ev_hdr = &ev_v1->hdr;
2322 		pull_len = sizeof(*ev_v1);
2323 	}
2324 
2325 	if (skb->len < pull_len)
2326 		return -EPROTO;
2327 
2328 	skb_pull(skb, pull_len);
2329 	arg->channel = ev_hdr->channel;
2330 	arg->buf_len = ev_hdr->buf_len;
2331 	arg->status = ev_hdr->status;
2332 	arg->snr = ev_hdr->snr;
2333 	arg->phy_mode = ev_hdr->phy_mode;
2334 	arg->rate = ev_hdr->rate;
2335 
2336 	msdu_len = __le32_to_cpu(arg->buf_len);
2337 	if (skb->len < msdu_len)
2338 		return -EPROTO;
2339 
2340 	if (le32_to_cpu(arg->status) & WMI_RX_STATUS_EXT_INFO) {
2341 		len = ALIGN(le32_to_cpu(arg->buf_len), 4);
2342 		ext_info = (struct wmi_mgmt_rx_ext_info *)(skb->data + len);
2343 		memcpy(&arg->ext_info, ext_info,
2344 		       sizeof(struct wmi_mgmt_rx_ext_info));
2345 	}
2346 	/* the WMI buffer might've ended up being padded to 4 bytes due to HTC
2347 	 * trailer with credit update. Trim the excess garbage.
2348 	 */
2349 	skb_trim(skb, msdu_len);
2350 
2351 	return 0;
2352 }
2353 
ath10k_wmi_10_4_op_pull_mgmt_rx_ev(struct ath10k * ar,struct sk_buff * skb,struct wmi_mgmt_rx_ev_arg * arg)2354 static int ath10k_wmi_10_4_op_pull_mgmt_rx_ev(struct ath10k *ar,
2355 					      struct sk_buff *skb,
2356 					      struct wmi_mgmt_rx_ev_arg *arg)
2357 {
2358 	struct wmi_10_4_mgmt_rx_event *ev;
2359 	struct wmi_10_4_mgmt_rx_hdr *ev_hdr;
2360 	size_t pull_len;
2361 	u32 msdu_len;
2362 	struct wmi_mgmt_rx_ext_info *ext_info;
2363 	u32 len;
2364 
2365 	ev = (struct wmi_10_4_mgmt_rx_event *)skb->data;
2366 	ev_hdr = &ev->hdr;
2367 	pull_len = sizeof(*ev);
2368 
2369 	if (skb->len < pull_len)
2370 		return -EPROTO;
2371 
2372 	skb_pull(skb, pull_len);
2373 	arg->channel = ev_hdr->channel;
2374 	arg->buf_len = ev_hdr->buf_len;
2375 	arg->status = ev_hdr->status;
2376 	arg->snr = ev_hdr->snr;
2377 	arg->phy_mode = ev_hdr->phy_mode;
2378 	arg->rate = ev_hdr->rate;
2379 
2380 	msdu_len = __le32_to_cpu(arg->buf_len);
2381 	if (skb->len < msdu_len)
2382 		return -EPROTO;
2383 
2384 	if (le32_to_cpu(arg->status) & WMI_RX_STATUS_EXT_INFO) {
2385 		len = ALIGN(le32_to_cpu(arg->buf_len), 4);
2386 		ext_info = (struct wmi_mgmt_rx_ext_info *)(skb->data + len);
2387 		memcpy(&arg->ext_info, ext_info,
2388 		       sizeof(struct wmi_mgmt_rx_ext_info));
2389 	}
2390 
2391 	/* Make sure bytes added for padding are removed. */
2392 	skb_trim(skb, msdu_len);
2393 
2394 	return 0;
2395 }
2396 
ath10k_wmi_rx_is_decrypted(struct ath10k * ar,struct ieee80211_hdr * hdr)2397 static bool ath10k_wmi_rx_is_decrypted(struct ath10k *ar,
2398 				       struct ieee80211_hdr *hdr)
2399 {
2400 	if (!ieee80211_has_protected(hdr->frame_control))
2401 		return false;
2402 
2403 	/* FW delivers WEP Shared Auth frame with Protected Bit set and
2404 	 * encrypted payload. However in case of PMF it delivers decrypted
2405 	 * frames with Protected Bit set.
2406 	 */
2407 	if (ieee80211_is_auth(hdr->frame_control))
2408 		return false;
2409 
2410 	/* qca99x0 based FW delivers broadcast or multicast management frames
2411 	 * (ex: group privacy action frames in mesh) as encrypted payload.
2412 	 */
2413 	if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) &&
2414 	    ar->hw_params.sw_decrypt_mcast_mgmt)
2415 		return false;
2416 
2417 	return true;
2418 }
2419 
2420 static int
wmi_process_mgmt_tx_comp(struct ath10k * ar,struct mgmt_tx_compl_params * param)2421 wmi_process_mgmt_tx_comp(struct ath10k *ar, struct mgmt_tx_compl_params *param)
2422 {
2423 	struct ath10k_mgmt_tx_pkt_addr *pkt_addr;
2424 	struct ath10k_wmi *wmi = &ar->wmi;
2425 	struct ieee80211_tx_info *info;
2426 	struct sk_buff *msdu;
2427 	int ret;
2428 
2429 	spin_lock_bh(&ar->data_lock);
2430 
2431 	pkt_addr = idr_find(&wmi->mgmt_pending_tx, param->desc_id);
2432 	if (!pkt_addr) {
2433 		ath10k_warn(ar, "received mgmt tx completion for invalid msdu_id: %d\n",
2434 			    param->desc_id);
2435 		ret = -ENOENT;
2436 		goto out;
2437 	}
2438 
2439 	msdu = pkt_addr->vaddr;
2440 	dma_unmap_single(ar->dev, pkt_addr->paddr,
2441 			 msdu->len, DMA_TO_DEVICE);
2442 	info = IEEE80211_SKB_CB(msdu);
2443 	kfree(pkt_addr);
2444 
2445 	if (param->status) {
2446 		info->flags &= ~IEEE80211_TX_STAT_ACK;
2447 	} else {
2448 		info->flags |= IEEE80211_TX_STAT_ACK;
2449 		info->status.ack_signal = ATH10K_DEFAULT_NOISE_FLOOR +
2450 					  param->ack_rssi;
2451 		info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
2452 	}
2453 
2454 	ieee80211_tx_status_irqsafe(ar->hw, msdu);
2455 
2456 	ret = 0;
2457 
2458 out:
2459 	idr_remove(&wmi->mgmt_pending_tx, param->desc_id);
2460 	spin_unlock_bh(&ar->data_lock);
2461 	return ret;
2462 }
2463 
ath10k_wmi_event_mgmt_tx_compl(struct ath10k * ar,struct sk_buff * skb)2464 int ath10k_wmi_event_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb)
2465 {
2466 	struct wmi_tlv_mgmt_tx_compl_ev_arg arg;
2467 	struct mgmt_tx_compl_params param;
2468 	int ret;
2469 
2470 	ret = ath10k_wmi_pull_mgmt_tx_compl(ar, skb, &arg);
2471 	if (ret) {
2472 		ath10k_warn(ar, "failed to parse mgmt comp event: %d\n", ret);
2473 		return ret;
2474 	}
2475 
2476 	memset(&param, 0, sizeof(struct mgmt_tx_compl_params));
2477 	param.desc_id = __le32_to_cpu(arg.desc_id);
2478 	param.status = __le32_to_cpu(arg.status);
2479 
2480 	if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
2481 		param.ack_rssi = __le32_to_cpu(arg.ack_rssi);
2482 
2483 	wmi_process_mgmt_tx_comp(ar, &param);
2484 
2485 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv evnt mgmt tx completion\n");
2486 
2487 	return 0;
2488 }
2489 
ath10k_wmi_event_mgmt_tx_bundle_compl(struct ath10k * ar,struct sk_buff * skb)2490 int ath10k_wmi_event_mgmt_tx_bundle_compl(struct ath10k *ar, struct sk_buff *skb)
2491 {
2492 	struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg arg;
2493 	struct mgmt_tx_compl_params param;
2494 	u32 num_reports;
2495 	int i, ret;
2496 
2497 	ret = ath10k_wmi_pull_mgmt_tx_bundle_compl(ar, skb, &arg);
2498 	if (ret) {
2499 		ath10k_warn(ar, "failed to parse bundle mgmt compl event: %d\n", ret);
2500 		return ret;
2501 	}
2502 
2503 	num_reports = __le32_to_cpu(arg.num_reports);
2504 
2505 	for (i = 0; i < num_reports; i++) {
2506 		memset(&param, 0, sizeof(struct mgmt_tx_compl_params));
2507 		param.desc_id = __le32_to_cpu(arg.desc_ids[i]);
2508 		param.status = __le32_to_cpu(arg.desc_ids[i]);
2509 
2510 		if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
2511 			param.ack_rssi = __le32_to_cpu(arg.ack_rssi[i]);
2512 		wmi_process_mgmt_tx_comp(ar, &param);
2513 	}
2514 
2515 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv event bundle mgmt tx completion\n");
2516 
2517 	return 0;
2518 }
2519 
ath10k_wmi_event_mgmt_rx(struct ath10k * ar,struct sk_buff * skb)2520 int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
2521 {
2522 	struct wmi_mgmt_rx_ev_arg arg = {};
2523 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2524 	struct ieee80211_hdr *hdr;
2525 	struct ieee80211_supported_band *sband;
2526 	u32 rx_status;
2527 	u32 channel;
2528 	u32 phy_mode;
2529 	u32 snr, rssi;
2530 	u32 rate;
2531 	u16 fc;
2532 	int ret, i;
2533 
2534 	ret = ath10k_wmi_pull_mgmt_rx(ar, skb, &arg);
2535 	if (ret) {
2536 		ath10k_warn(ar, "failed to parse mgmt rx event: %d\n", ret);
2537 		dev_kfree_skb(skb);
2538 		return ret;
2539 	}
2540 
2541 	channel = __le32_to_cpu(arg.channel);
2542 	rx_status = __le32_to_cpu(arg.status);
2543 	snr = __le32_to_cpu(arg.snr);
2544 	phy_mode = __le32_to_cpu(arg.phy_mode);
2545 	rate = __le32_to_cpu(arg.rate);
2546 
2547 	memset(status, 0, sizeof(*status));
2548 
2549 	ath10k_dbg(ar, ATH10K_DBG_MGMT,
2550 		   "event mgmt rx status %08x\n", rx_status);
2551 
2552 	if ((test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) ||
2553 	    (rx_status & (WMI_RX_STATUS_ERR_DECRYPT |
2554 	    WMI_RX_STATUS_ERR_KEY_CACHE_MISS | WMI_RX_STATUS_ERR_CRC))) {
2555 		dev_kfree_skb(skb);
2556 		return 0;
2557 	}
2558 
2559 	if (rx_status & WMI_RX_STATUS_ERR_MIC)
2560 		status->flag |= RX_FLAG_MMIC_ERROR;
2561 
2562 	if (rx_status & WMI_RX_STATUS_EXT_INFO) {
2563 		status->mactime =
2564 			__le64_to_cpu(arg.ext_info.rx_mac_timestamp);
2565 		status->flag |= RX_FLAG_MACTIME_END;
2566 	}
2567 	/* Hardware can Rx CCK rates on 5GHz. In that case phy_mode is set to
2568 	 * MODE_11B. This means phy_mode is not a reliable source for the band
2569 	 * of mgmt rx.
2570 	 */
2571 	if (channel >= 1 && channel <= 14) {
2572 		status->band = NL80211_BAND_2GHZ;
2573 	} else if (channel >= 36 && channel <= ATH10K_MAX_5G_CHAN) {
2574 		status->band = NL80211_BAND_5GHZ;
2575 	} else {
2576 		/* Shouldn't happen unless list of advertised channels to
2577 		 * mac80211 has been changed.
2578 		 */
2579 		WARN_ON_ONCE(1);
2580 		dev_kfree_skb(skb);
2581 		return 0;
2582 	}
2583 
2584 	if (phy_mode == MODE_11B && status->band == NL80211_BAND_5GHZ)
2585 		ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
2586 
2587 	sband = &ar->mac.sbands[status->band];
2588 
2589 	status->freq = ieee80211_channel_to_frequency(channel, status->band);
2590 	status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
2591 
2592 	BUILD_BUG_ON(ARRAY_SIZE(status->chain_signal) != ARRAY_SIZE(arg.rssi));
2593 
2594 	for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
2595 		status->chains &= ~BIT(i);
2596 		rssi = __le32_to_cpu(arg.rssi[i]);
2597 		ath10k_dbg(ar, ATH10K_DBG_MGMT, "mgmt rssi[%d]:%d\n", i, arg.rssi[i]);
2598 
2599 		if (rssi != ATH10K_INVALID_RSSI && rssi != 0) {
2600 			status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR + rssi;
2601 			status->chains |= BIT(i);
2602 		}
2603 	}
2604 
2605 	status->rate_idx = ath10k_mac_bitrate_to_idx(sband, rate / 100);
2606 
2607 	hdr = (struct ieee80211_hdr *)skb->data;
2608 	fc = le16_to_cpu(hdr->frame_control);
2609 
2610 	/* Firmware is guaranteed to report all essential management frames via
2611 	 * WMI while it can deliver some extra via HTT. Since there can be
2612 	 * duplicates split the reporting wrt monitor/sniffing.
2613 	 */
2614 	status->flag |= RX_FLAG_SKIP_MONITOR;
2615 
2616 	ath10k_wmi_handle_wep_reauth(ar, skb, status);
2617 
2618 	if (ath10k_wmi_rx_is_decrypted(ar, hdr)) {
2619 		status->flag |= RX_FLAG_DECRYPTED;
2620 
2621 		if (!ieee80211_is_action(hdr->frame_control) &&
2622 		    !ieee80211_is_deauth(hdr->frame_control) &&
2623 		    !ieee80211_is_disassoc(hdr->frame_control)) {
2624 			status->flag |= RX_FLAG_IV_STRIPPED |
2625 					RX_FLAG_MMIC_STRIPPED;
2626 			hdr->frame_control = __cpu_to_le16(fc &
2627 					~IEEE80211_FCTL_PROTECTED);
2628 		}
2629 	}
2630 
2631 	if (ieee80211_is_beacon(hdr->frame_control))
2632 		ath10k_mac_handle_beacon(ar, skb);
2633 
2634 	if (ieee80211_is_beacon(hdr->frame_control) ||
2635 	    ieee80211_is_probe_resp(hdr->frame_control))
2636 		status->boottime_ns = ktime_get_boottime_ns();
2637 
2638 	ath10k_dbg(ar, ATH10K_DBG_MGMT,
2639 		   "event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
2640 		   skb, skb->len,
2641 		   fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
2642 
2643 	ath10k_dbg(ar, ATH10K_DBG_MGMT,
2644 		   "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
2645 		   status->freq, status->band, status->signal,
2646 		   status->rate_idx);
2647 
2648 	ieee80211_rx_ni(ar->hw, skb);
2649 
2650 	return 0;
2651 }
2652 
freq_to_idx(struct ath10k * ar,int freq)2653 static int freq_to_idx(struct ath10k *ar, int freq)
2654 {
2655 	struct ieee80211_supported_band *sband;
2656 	int band, ch, idx = 0;
2657 
2658 	for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
2659 		sband = ar->hw->wiphy->bands[band];
2660 		if (!sband)
2661 			continue;
2662 
2663 		for (ch = 0; ch < sband->n_channels; ch++, idx++)
2664 			if (sband->channels[ch].center_freq == freq)
2665 				goto exit;
2666 	}
2667 
2668 exit:
2669 	return idx;
2670 }
2671 
ath10k_wmi_op_pull_ch_info_ev(struct ath10k * ar,struct sk_buff * skb,struct wmi_ch_info_ev_arg * arg)2672 static int ath10k_wmi_op_pull_ch_info_ev(struct ath10k *ar, struct sk_buff *skb,
2673 					 struct wmi_ch_info_ev_arg *arg)
2674 {
2675 	struct wmi_chan_info_event *ev = (void *)skb->data;
2676 
2677 	if (skb->len < sizeof(*ev))
2678 		return -EPROTO;
2679 
2680 	skb_pull(skb, sizeof(*ev));
2681 	arg->err_code = ev->err_code;
2682 	arg->freq = ev->freq;
2683 	arg->cmd_flags = ev->cmd_flags;
2684 	arg->noise_floor = ev->noise_floor;
2685 	arg->rx_clear_count = ev->rx_clear_count;
2686 	arg->cycle_count = ev->cycle_count;
2687 
2688 	return 0;
2689 }
2690 
ath10k_wmi_10_4_op_pull_ch_info_ev(struct ath10k * ar,struct sk_buff * skb,struct wmi_ch_info_ev_arg * arg)2691 static int ath10k_wmi_10_4_op_pull_ch_info_ev(struct ath10k *ar,
2692 					      struct sk_buff *skb,
2693 					      struct wmi_ch_info_ev_arg *arg)
2694 {
2695 	struct wmi_10_4_chan_info_event *ev = (void *)skb->data;
2696 
2697 	if (skb->len < sizeof(*ev))
2698 		return -EPROTO;
2699 
2700 	skb_pull(skb, sizeof(*ev));
2701 	arg->err_code = ev->err_code;
2702 	arg->freq = ev->freq;
2703 	arg->cmd_flags = ev->cmd_flags;
2704 	arg->noise_floor = ev->noise_floor;
2705 	arg->rx_clear_count = ev->rx_clear_count;
2706 	arg->cycle_count = ev->cycle_count;
2707 	arg->chan_tx_pwr_range = ev->chan_tx_pwr_range;
2708 	arg->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
2709 	arg->rx_frame_count = ev->rx_frame_count;
2710 
2711 	return 0;
2712 }
2713 
2714 /*
2715  * Handle the channel info event for firmware which only sends one
2716  * chan_info event per scanned channel.
2717  */
ath10k_wmi_event_chan_info_unpaired(struct ath10k * ar,struct chan_info_params * params)2718 static void ath10k_wmi_event_chan_info_unpaired(struct ath10k *ar,
2719 						struct chan_info_params *params)
2720 {
2721 	struct survey_info *survey;
2722 	int idx;
2723 
2724 	if (params->cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) {
2725 		ath10k_dbg(ar, ATH10K_DBG_WMI, "chan info report completed\n");
2726 		return;
2727 	}
2728 
2729 	idx = freq_to_idx(ar, params->freq);
2730 	if (idx >= ARRAY_SIZE(ar->survey)) {
2731 		ath10k_warn(ar, "chan info: invalid frequency %d (idx %d out of bounds)\n",
2732 			    params->freq, idx);
2733 		return;
2734 	}
2735 
2736 	survey = &ar->survey[idx];
2737 
2738 	if (!params->mac_clk_mhz)
2739 		return;
2740 
2741 	memset(survey, 0, sizeof(*survey));
2742 
2743 	survey->noise = params->noise_floor;
2744 	survey->time = (params->cycle_count / params->mac_clk_mhz) / 1000;
2745 	survey->time_busy = (params->rx_clear_count / params->mac_clk_mhz) / 1000;
2746 	survey->filled |= SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME |
2747 			  SURVEY_INFO_TIME_BUSY;
2748 }
2749 
2750 /*
2751  * Handle the channel info event for firmware which sends chan_info
2752  * event in pairs(start and stop events) for every scanned channel.
2753  */
ath10k_wmi_event_chan_info_paired(struct ath10k * ar,struct chan_info_params * params)2754 static void ath10k_wmi_event_chan_info_paired(struct ath10k *ar,
2755 					      struct chan_info_params *params)
2756 {
2757 	struct survey_info *survey;
2758 	int idx;
2759 
2760 	idx = freq_to_idx(ar, params->freq);
2761 	if (idx >= ARRAY_SIZE(ar->survey)) {
2762 		ath10k_warn(ar, "chan info: invalid frequency %d (idx %d out of bounds)\n",
2763 			    params->freq, idx);
2764 		return;
2765 	}
2766 
2767 	if (params->cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) {
2768 		if (ar->ch_info_can_report_survey) {
2769 			survey = &ar->survey[idx];
2770 			survey->noise = params->noise_floor;
2771 			survey->filled = SURVEY_INFO_NOISE_DBM;
2772 
2773 			ath10k_hw_fill_survey_time(ar,
2774 						   survey,
2775 						   params->cycle_count,
2776 						   params->rx_clear_count,
2777 						   ar->survey_last_cycle_count,
2778 						   ar->survey_last_rx_clear_count);
2779 		}
2780 
2781 		ar->ch_info_can_report_survey = false;
2782 	} else {
2783 		ar->ch_info_can_report_survey = true;
2784 	}
2785 
2786 	if (!(params->cmd_flags & WMI_CHAN_INFO_FLAG_PRE_COMPLETE)) {
2787 		ar->survey_last_rx_clear_count = params->rx_clear_count;
2788 		ar->survey_last_cycle_count = params->cycle_count;
2789 	}
2790 }
2791 
ath10k_wmi_event_chan_info(struct ath10k * ar,struct sk_buff * skb)2792 void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
2793 {
2794 	struct chan_info_params ch_info_param;
2795 	struct wmi_ch_info_ev_arg arg = {};
2796 	int ret;
2797 
2798 	ret = ath10k_wmi_pull_ch_info(ar, skb, &arg);
2799 	if (ret) {
2800 		ath10k_warn(ar, "failed to parse chan info event: %d\n", ret);
2801 		return;
2802 	}
2803 
2804 	ch_info_param.err_code = __le32_to_cpu(arg.err_code);
2805 	ch_info_param.freq = __le32_to_cpu(arg.freq);
2806 	ch_info_param.cmd_flags = __le32_to_cpu(arg.cmd_flags);
2807 	ch_info_param.noise_floor = __le32_to_cpu(arg.noise_floor);
2808 	ch_info_param.rx_clear_count = __le32_to_cpu(arg.rx_clear_count);
2809 	ch_info_param.cycle_count = __le32_to_cpu(arg.cycle_count);
2810 	ch_info_param.mac_clk_mhz = __le32_to_cpu(arg.mac_clk_mhz);
2811 
2812 	ath10k_dbg(ar, ATH10K_DBG_WMI,
2813 		   "chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n",
2814 		   ch_info_param.err_code, ch_info_param.freq, ch_info_param.cmd_flags,
2815 		   ch_info_param.noise_floor, ch_info_param.rx_clear_count,
2816 		   ch_info_param.cycle_count);
2817 
2818 	spin_lock_bh(&ar->data_lock);
2819 
2820 	switch (ar->scan.state) {
2821 	case ATH10K_SCAN_IDLE:
2822 	case ATH10K_SCAN_STARTING:
2823 		ath10k_dbg(ar, ATH10K_DBG_WMI, "received chan info event without a scan request, ignoring\n");
2824 		goto exit;
2825 	case ATH10K_SCAN_RUNNING:
2826 	case ATH10K_SCAN_ABORTING:
2827 		break;
2828 	}
2829 
2830 	if (test_bit(ATH10K_FW_FEATURE_SINGLE_CHAN_INFO_PER_CHANNEL,
2831 		     ar->running_fw->fw_file.fw_features))
2832 		ath10k_wmi_event_chan_info_unpaired(ar, &ch_info_param);
2833 	else
2834 		ath10k_wmi_event_chan_info_paired(ar, &ch_info_param);
2835 
2836 exit:
2837 	spin_unlock_bh(&ar->data_lock);
2838 }
2839 
ath10k_wmi_event_echo(struct ath10k * ar,struct sk_buff * skb)2840 void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
2841 {
2842 	struct wmi_echo_ev_arg arg = {};
2843 	int ret;
2844 
2845 	ret = ath10k_wmi_pull_echo_ev(ar, skb, &arg);
2846 	if (ret) {
2847 		ath10k_warn(ar, "failed to parse echo: %d\n", ret);
2848 		return;
2849 	}
2850 
2851 	ath10k_dbg(ar, ATH10K_DBG_WMI,
2852 		   "wmi event echo value 0x%08x\n",
2853 		   le32_to_cpu(arg.value));
2854 
2855 	if (le32_to_cpu(arg.value) == ATH10K_WMI_BARRIER_ECHO_ID)
2856 		complete(&ar->wmi.barrier);
2857 }
2858 
ath10k_wmi_event_debug_mesg(struct ath10k * ar,struct sk_buff * skb)2859 int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
2860 {
2861 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event debug mesg len %d\n",
2862 		   skb->len);
2863 
2864 	trace_ath10k_wmi_dbglog(ar, skb->data, skb->len);
2865 
2866 	return 0;
2867 }
2868 
ath10k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base * src,struct ath10k_fw_stats_pdev * dst)2869 void ath10k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src,
2870 				     struct ath10k_fw_stats_pdev *dst)
2871 {
2872 	dst->ch_noise_floor = __le32_to_cpu(src->chan_nf);
2873 	dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count);
2874 	dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count);
2875 	dst->rx_clear_count = __le32_to_cpu(src->rx_clear_count);
2876 	dst->cycle_count = __le32_to_cpu(src->cycle_count);
2877 	dst->phy_err_count = __le32_to_cpu(src->phy_err_count);
2878 	dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr);
2879 }
2880 
ath10k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx * src,struct ath10k_fw_stats_pdev * dst)2881 void ath10k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
2882 				   struct ath10k_fw_stats_pdev *dst)
2883 {
2884 	dst->comp_queued = __le32_to_cpu(src->comp_queued);
2885 	dst->comp_delivered = __le32_to_cpu(src->comp_delivered);
2886 	dst->msdu_enqued = __le32_to_cpu(src->msdu_enqued);
2887 	dst->mpdu_enqued = __le32_to_cpu(src->mpdu_enqued);
2888 	dst->wmm_drop = __le32_to_cpu(src->wmm_drop);
2889 	dst->local_enqued = __le32_to_cpu(src->local_enqued);
2890 	dst->local_freed = __le32_to_cpu(src->local_freed);
2891 	dst->hw_queued = __le32_to_cpu(src->hw_queued);
2892 	dst->hw_reaped = __le32_to_cpu(src->hw_reaped);
2893 	dst->underrun = __le32_to_cpu(src->underrun);
2894 	dst->tx_abort = __le32_to_cpu(src->tx_abort);
2895 	dst->mpdus_requeued = __le32_to_cpu(src->mpdus_requeued);
2896 	dst->tx_ko = __le32_to_cpu(src->tx_ko);
2897 	dst->data_rc = __le32_to_cpu(src->data_rc);
2898 	dst->self_triggers = __le32_to_cpu(src->self_triggers);
2899 	dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure);
2900 	dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err);
2901 	dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry);
2902 	dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout);
2903 	dst->pdev_resets = __le32_to_cpu(src->pdev_resets);
2904 	dst->phy_underrun = __le32_to_cpu(src->phy_underrun);
2905 	dst->txop_ovf = __le32_to_cpu(src->txop_ovf);
2906 }
2907 
2908 static void
ath10k_wmi_10_4_pull_pdev_stats_tx(const struct wmi_10_4_pdev_stats_tx * src,struct ath10k_fw_stats_pdev * dst)2909 ath10k_wmi_10_4_pull_pdev_stats_tx(const struct wmi_10_4_pdev_stats_tx *src,
2910 				   struct ath10k_fw_stats_pdev *dst)
2911 {
2912 	dst->comp_queued = __le32_to_cpu(src->comp_queued);
2913 	dst->comp_delivered = __le32_to_cpu(src->comp_delivered);
2914 	dst->msdu_enqued = __le32_to_cpu(src->msdu_enqued);
2915 	dst->mpdu_enqued = __le32_to_cpu(src->mpdu_enqued);
2916 	dst->wmm_drop = __le32_to_cpu(src->wmm_drop);
2917 	dst->local_enqued = __le32_to_cpu(src->local_enqued);
2918 	dst->local_freed = __le32_to_cpu(src->local_freed);
2919 	dst->hw_queued = __le32_to_cpu(src->hw_queued);
2920 	dst->hw_reaped = __le32_to_cpu(src->hw_reaped);
2921 	dst->underrun = __le32_to_cpu(src->underrun);
2922 	dst->tx_abort = __le32_to_cpu(src->tx_abort);
2923 	dst->mpdus_requeued = __le32_to_cpu(src->mpdus_requeued);
2924 	dst->tx_ko = __le32_to_cpu(src->tx_ko);
2925 	dst->data_rc = __le32_to_cpu(src->data_rc);
2926 	dst->self_triggers = __le32_to_cpu(src->self_triggers);
2927 	dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure);
2928 	dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err);
2929 	dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry);
2930 	dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout);
2931 	dst->pdev_resets = __le32_to_cpu(src->pdev_resets);
2932 	dst->phy_underrun = __le32_to_cpu(src->phy_underrun);
2933 	dst->txop_ovf = __le32_to_cpu(src->txop_ovf);
2934 	dst->hw_paused = __le32_to_cpu(src->hw_paused);
2935 	dst->seq_posted = __le32_to_cpu(src->seq_posted);
2936 	dst->seq_failed_queueing =
2937 		__le32_to_cpu(src->seq_failed_queueing);
2938 	dst->seq_completed = __le32_to_cpu(src->seq_completed);
2939 	dst->seq_restarted = __le32_to_cpu(src->seq_restarted);
2940 	dst->mu_seq_posted = __le32_to_cpu(src->mu_seq_posted);
2941 	dst->mpdus_sw_flush = __le32_to_cpu(src->mpdus_sw_flush);
2942 	dst->mpdus_hw_filter = __le32_to_cpu(src->mpdus_hw_filter);
2943 	dst->mpdus_truncated = __le32_to_cpu(src->mpdus_truncated);
2944 	dst->mpdus_ack_failed = __le32_to_cpu(src->mpdus_ack_failed);
2945 	dst->mpdus_hw_filter = __le32_to_cpu(src->mpdus_hw_filter);
2946 	dst->mpdus_expired = __le32_to_cpu(src->mpdus_expired);
2947 }
2948 
ath10k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx * src,struct ath10k_fw_stats_pdev * dst)2949 void ath10k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
2950 				   struct ath10k_fw_stats_pdev *dst)
2951 {
2952 	dst->mid_ppdu_route_change = __le32_to_cpu(src->mid_ppdu_route_change);
2953 	dst->status_rcvd = __le32_to_cpu(src->status_rcvd);
2954 	dst->r0_frags = __le32_to_cpu(src->r0_frags);
2955 	dst->r1_frags = __le32_to_cpu(src->r1_frags);
2956 	dst->r2_frags = __le32_to_cpu(src->r2_frags);
2957 	dst->r3_frags = __le32_to_cpu(src->r3_frags);
2958 	dst->htt_msdus = __le32_to_cpu(src->htt_msdus);
2959 	dst->htt_mpdus = __le32_to_cpu(src->htt_mpdus);
2960 	dst->loc_msdus = __le32_to_cpu(src->loc_msdus);
2961 	dst->loc_mpdus = __le32_to_cpu(src->loc_mpdus);
2962 	dst->oversize_amsdu = __le32_to_cpu(src->oversize_amsdu);
2963 	dst->phy_errs = __le32_to_cpu(src->phy_errs);
2964 	dst->phy_err_drop = __le32_to_cpu(src->phy_err_drop);
2965 	dst->mpdu_errs = __le32_to_cpu(src->mpdu_errs);
2966 }
2967 
ath10k_wmi_pull_pdev_stats_extra(const struct wmi_pdev_stats_extra * src,struct ath10k_fw_stats_pdev * dst)2968 void ath10k_wmi_pull_pdev_stats_extra(const struct wmi_pdev_stats_extra *src,
2969 				      struct ath10k_fw_stats_pdev *dst)
2970 {
2971 	dst->ack_rx_bad = __le32_to_cpu(src->ack_rx_bad);
2972 	dst->rts_bad = __le32_to_cpu(src->rts_bad);
2973 	dst->rts_good = __le32_to_cpu(src->rts_good);
2974 	dst->fcs_bad = __le32_to_cpu(src->fcs_bad);
2975 	dst->no_beacons = __le32_to_cpu(src->no_beacons);
2976 	dst->mib_int_count = __le32_to_cpu(src->mib_int_count);
2977 }
2978 
ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats * src,struct ath10k_fw_stats_peer * dst)2979 void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src,
2980 				struct ath10k_fw_stats_peer *dst)
2981 {
2982 	ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
2983 	dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
2984 	dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
2985 }
2986 
2987 static void
ath10k_wmi_10_4_pull_peer_stats(const struct wmi_10_4_peer_stats * src,struct ath10k_fw_stats_peer * dst)2988 ath10k_wmi_10_4_pull_peer_stats(const struct wmi_10_4_peer_stats *src,
2989 				struct ath10k_fw_stats_peer *dst)
2990 {
2991 	ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
2992 	dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
2993 	dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
2994 	dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
2995 }
2996 
2997 static void
ath10k_wmi_10_4_pull_vdev_stats(const struct wmi_vdev_stats_extd * src,struct ath10k_fw_stats_vdev_extd * dst)2998 ath10k_wmi_10_4_pull_vdev_stats(const struct wmi_vdev_stats_extd *src,
2999 				struct ath10k_fw_stats_vdev_extd *dst)
3000 {
3001 	dst->vdev_id = __le32_to_cpu(src->vdev_id);
3002 	dst->ppdu_aggr_cnt = __le32_to_cpu(src->ppdu_aggr_cnt);
3003 	dst->ppdu_noack = __le32_to_cpu(src->ppdu_noack);
3004 	dst->mpdu_queued = __le32_to_cpu(src->mpdu_queued);
3005 	dst->ppdu_nonaggr_cnt = __le32_to_cpu(src->ppdu_nonaggr_cnt);
3006 	dst->mpdu_sw_requeued = __le32_to_cpu(src->mpdu_sw_requeued);
3007 	dst->mpdu_suc_retry = __le32_to_cpu(src->mpdu_suc_retry);
3008 	dst->mpdu_suc_multitry = __le32_to_cpu(src->mpdu_suc_multitry);
3009 	dst->mpdu_fail_retry = __le32_to_cpu(src->mpdu_fail_retry);
3010 	dst->tx_ftm_suc = __le32_to_cpu(src->tx_ftm_suc);
3011 	dst->tx_ftm_suc_retry = __le32_to_cpu(src->tx_ftm_suc_retry);
3012 	dst->tx_ftm_fail = __le32_to_cpu(src->tx_ftm_fail);
3013 	dst->rx_ftmr_cnt = __le32_to_cpu(src->rx_ftmr_cnt);
3014 	dst->rx_ftmr_dup_cnt = __le32_to_cpu(src->rx_ftmr_dup_cnt);
3015 	dst->rx_iftmr_cnt = __le32_to_cpu(src->rx_iftmr_cnt);
3016 	dst->rx_iftmr_dup_cnt = __le32_to_cpu(src->rx_iftmr_dup_cnt);
3017 }
3018 
ath10k_wmi_main_op_pull_fw_stats(struct ath10k * ar,struct sk_buff * skb,struct ath10k_fw_stats * stats)3019 static int ath10k_wmi_main_op_pull_fw_stats(struct ath10k *ar,
3020 					    struct sk_buff *skb,
3021 					    struct ath10k_fw_stats *stats)
3022 {
3023 	const struct wmi_stats_event *ev = (void *)skb->data;
3024 	u32 num_pdev_stats, num_peer_stats;
3025 	int i;
3026 
3027 	if (!skb_pull(skb, sizeof(*ev)))
3028 		return -EPROTO;
3029 
3030 	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
3031 	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
3032 
3033 	for (i = 0; i < num_pdev_stats; i++) {
3034 		const struct wmi_pdev_stats *src;
3035 		struct ath10k_fw_stats_pdev *dst;
3036 
3037 		src = (void *)skb->data;
3038 		if (!skb_pull(skb, sizeof(*src)))
3039 			return -EPROTO;
3040 
3041 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3042 		if (!dst)
3043 			continue;
3044 
3045 		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
3046 		ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
3047 		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
3048 
3049 		list_add_tail(&dst->list, &stats->pdevs);
3050 	}
3051 
3052 	/* fw doesn't implement vdev stats */
3053 
3054 	for (i = 0; i < num_peer_stats; i++) {
3055 		const struct wmi_peer_stats *src;
3056 		struct ath10k_fw_stats_peer *dst;
3057 
3058 		src = (void *)skb->data;
3059 		if (!skb_pull(skb, sizeof(*src)))
3060 			return -EPROTO;
3061 
3062 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3063 		if (!dst)
3064 			continue;
3065 
3066 		ath10k_wmi_pull_peer_stats(src, dst);
3067 		list_add_tail(&dst->list, &stats->peers);
3068 	}
3069 
3070 	return 0;
3071 }
3072 
ath10k_wmi_10x_op_pull_fw_stats(struct ath10k * ar,struct sk_buff * skb,struct ath10k_fw_stats * stats)3073 static int ath10k_wmi_10x_op_pull_fw_stats(struct ath10k *ar,
3074 					   struct sk_buff *skb,
3075 					   struct ath10k_fw_stats *stats)
3076 {
3077 	const struct wmi_stats_event *ev = (void *)skb->data;
3078 	u32 num_pdev_stats, num_peer_stats;
3079 	int i;
3080 
3081 	if (!skb_pull(skb, sizeof(*ev)))
3082 		return -EPROTO;
3083 
3084 	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
3085 	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
3086 
3087 	for (i = 0; i < num_pdev_stats; i++) {
3088 		const struct wmi_10x_pdev_stats *src;
3089 		struct ath10k_fw_stats_pdev *dst;
3090 
3091 		src = (void *)skb->data;
3092 		if (!skb_pull(skb, sizeof(*src)))
3093 			return -EPROTO;
3094 
3095 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3096 		if (!dst)
3097 			continue;
3098 
3099 		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
3100 		ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
3101 		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
3102 		ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
3103 
3104 		list_add_tail(&dst->list, &stats->pdevs);
3105 	}
3106 
3107 	/* fw doesn't implement vdev stats */
3108 
3109 	for (i = 0; i < num_peer_stats; i++) {
3110 		const struct wmi_10x_peer_stats *src;
3111 		struct ath10k_fw_stats_peer *dst;
3112 
3113 		src = (void *)skb->data;
3114 		if (!skb_pull(skb, sizeof(*src)))
3115 			return -EPROTO;
3116 
3117 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3118 		if (!dst)
3119 			continue;
3120 
3121 		ath10k_wmi_pull_peer_stats(&src->old, dst);
3122 
3123 		dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
3124 
3125 		list_add_tail(&dst->list, &stats->peers);
3126 	}
3127 
3128 	return 0;
3129 }
3130 
ath10k_wmi_10_2_op_pull_fw_stats(struct ath10k * ar,struct sk_buff * skb,struct ath10k_fw_stats * stats)3131 static int ath10k_wmi_10_2_op_pull_fw_stats(struct ath10k *ar,
3132 					    struct sk_buff *skb,
3133 					    struct ath10k_fw_stats *stats)
3134 {
3135 	const struct wmi_10_2_stats_event *ev = (void *)skb->data;
3136 	u32 num_pdev_stats;
3137 	u32 num_pdev_ext_stats;
3138 	u32 num_peer_stats;
3139 	int i;
3140 
3141 	if (!skb_pull(skb, sizeof(*ev)))
3142 		return -EPROTO;
3143 
3144 	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
3145 	num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
3146 	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
3147 
3148 	for (i = 0; i < num_pdev_stats; i++) {
3149 		const struct wmi_10_2_pdev_stats *src;
3150 		struct ath10k_fw_stats_pdev *dst;
3151 
3152 		src = (void *)skb->data;
3153 		if (!skb_pull(skb, sizeof(*src)))
3154 			return -EPROTO;
3155 
3156 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3157 		if (!dst)
3158 			continue;
3159 
3160 		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
3161 		ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
3162 		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
3163 		ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
3164 		/* FIXME: expose 10.2 specific values */
3165 
3166 		list_add_tail(&dst->list, &stats->pdevs);
3167 	}
3168 
3169 	for (i = 0; i < num_pdev_ext_stats; i++) {
3170 		const struct wmi_10_2_pdev_ext_stats *src;
3171 
3172 		src = (void *)skb->data;
3173 		if (!skb_pull(skb, sizeof(*src)))
3174 			return -EPROTO;
3175 
3176 		/* FIXME: expose values to userspace
3177 		 *
3178 		 * Note: Even though this loop seems to do nothing it is
3179 		 * required to parse following sub-structures properly.
3180 		 */
3181 	}
3182 
3183 	/* fw doesn't implement vdev stats */
3184 
3185 	for (i = 0; i < num_peer_stats; i++) {
3186 		const struct wmi_10_2_peer_stats *src;
3187 		struct ath10k_fw_stats_peer *dst;
3188 
3189 		src = (void *)skb->data;
3190 		if (!skb_pull(skb, sizeof(*src)))
3191 			return -EPROTO;
3192 
3193 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3194 		if (!dst)
3195 			continue;
3196 
3197 		ath10k_wmi_pull_peer_stats(&src->old, dst);
3198 
3199 		dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
3200 		/* FIXME: expose 10.2 specific values */
3201 
3202 		list_add_tail(&dst->list, &stats->peers);
3203 	}
3204 
3205 	return 0;
3206 }
3207 
ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k * ar,struct sk_buff * skb,struct ath10k_fw_stats * stats)3208 static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar,
3209 					      struct sk_buff *skb,
3210 					      struct ath10k_fw_stats *stats)
3211 {
3212 	const struct wmi_10_2_stats_event *ev = (void *)skb->data;
3213 	u32 num_pdev_stats;
3214 	u32 num_pdev_ext_stats;
3215 	u32 num_peer_stats;
3216 	int i;
3217 
3218 	if (!skb_pull(skb, sizeof(*ev)))
3219 		return -EPROTO;
3220 
3221 	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
3222 	num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
3223 	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
3224 
3225 	for (i = 0; i < num_pdev_stats; i++) {
3226 		const struct wmi_10_2_pdev_stats *src;
3227 		struct ath10k_fw_stats_pdev *dst;
3228 
3229 		src = (void *)skb->data;
3230 		if (!skb_pull(skb, sizeof(*src)))
3231 			return -EPROTO;
3232 
3233 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3234 		if (!dst)
3235 			continue;
3236 
3237 		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
3238 		ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
3239 		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
3240 		ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
3241 		/* FIXME: expose 10.2 specific values */
3242 
3243 		list_add_tail(&dst->list, &stats->pdevs);
3244 	}
3245 
3246 	for (i = 0; i < num_pdev_ext_stats; i++) {
3247 		const struct wmi_10_2_pdev_ext_stats *src;
3248 
3249 		src = (void *)skb->data;
3250 		if (!skb_pull(skb, sizeof(*src)))
3251 			return -EPROTO;
3252 
3253 		/* FIXME: expose values to userspace
3254 		 *
3255 		 * Note: Even though this loop seems to do nothing it is
3256 		 * required to parse following sub-structures properly.
3257 		 */
3258 	}
3259 
3260 	/* fw doesn't implement vdev stats */
3261 
3262 	for (i = 0; i < num_peer_stats; i++) {
3263 		const struct wmi_10_2_4_ext_peer_stats *src;
3264 		struct ath10k_fw_stats_peer *dst;
3265 		int stats_len;
3266 
3267 		if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
3268 			stats_len = sizeof(struct wmi_10_2_4_ext_peer_stats);
3269 		else
3270 			stats_len = sizeof(struct wmi_10_2_4_peer_stats);
3271 
3272 		src = (void *)skb->data;
3273 		if (!skb_pull(skb, stats_len))
3274 			return -EPROTO;
3275 
3276 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3277 		if (!dst)
3278 			continue;
3279 
3280 		ath10k_wmi_pull_peer_stats(&src->common.old, dst);
3281 
3282 		dst->peer_rx_rate = __le32_to_cpu(src->common.peer_rx_rate);
3283 
3284 		if (ath10k_peer_stats_enabled(ar))
3285 			dst->rx_duration = __le32_to_cpu(src->rx_duration);
3286 		/* FIXME: expose 10.2 specific values */
3287 
3288 		list_add_tail(&dst->list, &stats->peers);
3289 	}
3290 
3291 	return 0;
3292 }
3293 
ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k * ar,struct sk_buff * skb,struct ath10k_fw_stats * stats)3294 static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar,
3295 					    struct sk_buff *skb,
3296 					    struct ath10k_fw_stats *stats)
3297 {
3298 	const struct wmi_10_2_stats_event *ev = (void *)skb->data;
3299 	u32 num_pdev_stats;
3300 	u32 num_pdev_ext_stats;
3301 	u32 num_vdev_stats;
3302 	u32 num_peer_stats;
3303 	u32 num_bcnflt_stats;
3304 	u32 stats_id;
3305 	int i;
3306 
3307 	if (!skb_pull(skb, sizeof(*ev)))
3308 		return -EPROTO;
3309 
3310 	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
3311 	num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
3312 	num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
3313 	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
3314 	num_bcnflt_stats = __le32_to_cpu(ev->num_bcnflt_stats);
3315 	stats_id = __le32_to_cpu(ev->stats_id);
3316 
3317 	for (i = 0; i < num_pdev_stats; i++) {
3318 		const struct wmi_10_4_pdev_stats *src;
3319 		struct ath10k_fw_stats_pdev *dst;
3320 
3321 		src = (void *)skb->data;
3322 		if (!skb_pull(skb, sizeof(*src)))
3323 			return -EPROTO;
3324 
3325 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3326 		if (!dst)
3327 			continue;
3328 
3329 		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
3330 		ath10k_wmi_10_4_pull_pdev_stats_tx(&src->tx, dst);
3331 		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
3332 		dst->rx_ovfl_errs = __le32_to_cpu(src->rx_ovfl_errs);
3333 		ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
3334 
3335 		list_add_tail(&dst->list, &stats->pdevs);
3336 	}
3337 
3338 	for (i = 0; i < num_pdev_ext_stats; i++) {
3339 		const struct wmi_10_2_pdev_ext_stats *src;
3340 
3341 		src = (void *)skb->data;
3342 		if (!skb_pull(skb, sizeof(*src)))
3343 			return -EPROTO;
3344 
3345 		/* FIXME: expose values to userspace
3346 		 *
3347 		 * Note: Even though this loop seems to do nothing it is
3348 		 * required to parse following sub-structures properly.
3349 		 */
3350 	}
3351 
3352 	for (i = 0; i < num_vdev_stats; i++) {
3353 		const struct wmi_vdev_stats *src;
3354 
3355 		/* Ignore vdev stats here as it has only vdev id. Actual vdev
3356 		 * stats will be retrieved from vdev extended stats.
3357 		 */
3358 		src = (void *)skb->data;
3359 		if (!skb_pull(skb, sizeof(*src)))
3360 			return -EPROTO;
3361 	}
3362 
3363 	for (i = 0; i < num_peer_stats; i++) {
3364 		const struct wmi_10_4_peer_stats *src;
3365 		struct ath10k_fw_stats_peer *dst;
3366 
3367 		src = (void *)skb->data;
3368 		if (!skb_pull(skb, sizeof(*src)))
3369 			return -EPROTO;
3370 
3371 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3372 		if (!dst)
3373 			continue;
3374 
3375 		ath10k_wmi_10_4_pull_peer_stats(src, dst);
3376 		list_add_tail(&dst->list, &stats->peers);
3377 	}
3378 
3379 	for (i = 0; i < num_bcnflt_stats; i++) {
3380 		const struct wmi_10_4_bss_bcn_filter_stats *src;
3381 
3382 		src = (void *)skb->data;
3383 		if (!skb_pull(skb, sizeof(*src)))
3384 			return -EPROTO;
3385 
3386 		/* FIXME: expose values to userspace
3387 		 *
3388 		 * Note: Even though this loop seems to do nothing it is
3389 		 * required to parse following sub-structures properly.
3390 		 */
3391 	}
3392 
3393 	if (stats_id & WMI_10_4_STAT_PEER_EXTD) {
3394 		stats->extended = true;
3395 
3396 		for (i = 0; i < num_peer_stats; i++) {
3397 			const struct wmi_10_4_peer_extd_stats *src;
3398 			struct ath10k_fw_extd_stats_peer *dst;
3399 
3400 			src = (void *)skb->data;
3401 			if (!skb_pull(skb, sizeof(*src)))
3402 				return -EPROTO;
3403 
3404 			dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3405 			if (!dst)
3406 				continue;
3407 
3408 			ether_addr_copy(dst->peer_macaddr,
3409 					src->peer_macaddr.addr);
3410 			dst->rx_duration = __le32_to_cpu(src->rx_duration);
3411 			list_add_tail(&dst->list, &stats->peers_extd);
3412 		}
3413 	}
3414 
3415 	if (stats_id & WMI_10_4_STAT_VDEV_EXTD) {
3416 		for (i = 0; i < num_vdev_stats; i++) {
3417 			const struct wmi_vdev_stats_extd *src;
3418 			struct ath10k_fw_stats_vdev_extd *dst;
3419 
3420 			src = (void *)skb->data;
3421 			if (!skb_pull(skb, sizeof(*src)))
3422 				return -EPROTO;
3423 
3424 			dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3425 			if (!dst)
3426 				continue;
3427 			ath10k_wmi_10_4_pull_vdev_stats(src, dst);
3428 			list_add_tail(&dst->list, &stats->vdevs);
3429 		}
3430 	}
3431 
3432 	return 0;
3433 }
3434 
ath10k_wmi_event_update_stats(struct ath10k * ar,struct sk_buff * skb)3435 void ath10k_wmi_event_update_stats(struct ath10k *ar, struct sk_buff *skb)
3436 {
3437 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n");
3438 	ath10k_debug_fw_stats_process(ar, skb);
3439 }
3440 
3441 static int
ath10k_wmi_op_pull_vdev_start_ev(struct ath10k * ar,struct sk_buff * skb,struct wmi_vdev_start_ev_arg * arg)3442 ath10k_wmi_op_pull_vdev_start_ev(struct ath10k *ar, struct sk_buff *skb,
3443 				 struct wmi_vdev_start_ev_arg *arg)
3444 {
3445 	struct wmi_vdev_start_response_event *ev = (void *)skb->data;
3446 
3447 	if (skb->len < sizeof(*ev))
3448 		return -EPROTO;
3449 
3450 	skb_pull(skb, sizeof(*ev));
3451 	arg->vdev_id = ev->vdev_id;
3452 	arg->req_id = ev->req_id;
3453 	arg->resp_type = ev->resp_type;
3454 	arg->status = ev->status;
3455 
3456 	return 0;
3457 }
3458 
ath10k_wmi_event_vdev_start_resp(struct ath10k * ar,struct sk_buff * skb)3459 void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, struct sk_buff *skb)
3460 {
3461 	struct wmi_vdev_start_ev_arg arg = {};
3462 	int ret;
3463 	u32 status;
3464 
3465 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n");
3466 
3467 	ar->last_wmi_vdev_start_status = 0;
3468 
3469 	ret = ath10k_wmi_pull_vdev_start(ar, skb, &arg);
3470 	if (ret) {
3471 		ath10k_warn(ar, "failed to parse vdev start event: %d\n", ret);
3472 		ar->last_wmi_vdev_start_status = ret;
3473 		goto out;
3474 	}
3475 
3476 	status = __le32_to_cpu(arg.status);
3477 	if (WARN_ON_ONCE(status)) {
3478 		ath10k_warn(ar, "vdev-start-response reports status error: %d (%s)\n",
3479 			    status, (status == WMI_VDEV_START_CHAN_INVALID) ?
3480 			    "chan-invalid" : "unknown");
3481 		/* Setup is done one way or another though, so we should still
3482 		 * do the completion, so don't return here.
3483 		 */
3484 		ar->last_wmi_vdev_start_status = -EINVAL;
3485 	}
3486 
3487 out:
3488 	complete(&ar->vdev_setup_done);
3489 }
3490 
ath10k_wmi_event_vdev_stopped(struct ath10k * ar,struct sk_buff * skb)3491 void ath10k_wmi_event_vdev_stopped(struct ath10k *ar, struct sk_buff *skb)
3492 {
3493 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n");
3494 	complete(&ar->vdev_setup_done);
3495 }
3496 
3497 static int
ath10k_wmi_op_pull_peer_kick_ev(struct ath10k * ar,struct sk_buff * skb,struct wmi_peer_kick_ev_arg * arg)3498 ath10k_wmi_op_pull_peer_kick_ev(struct ath10k *ar, struct sk_buff *skb,
3499 				struct wmi_peer_kick_ev_arg *arg)
3500 {
3501 	struct wmi_peer_sta_kickout_event *ev = (void *)skb->data;
3502 
3503 	if (skb->len < sizeof(*ev))
3504 		return -EPROTO;
3505 
3506 	skb_pull(skb, sizeof(*ev));
3507 	arg->mac_addr = ev->peer_macaddr.addr;
3508 
3509 	return 0;
3510 }
3511 
ath10k_wmi_event_peer_sta_kickout(struct ath10k * ar,struct sk_buff * skb)3512 void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, struct sk_buff *skb)
3513 {
3514 	struct wmi_peer_kick_ev_arg arg = {};
3515 	struct ieee80211_sta *sta;
3516 	int ret;
3517 
3518 	ret = ath10k_wmi_pull_peer_kick(ar, skb, &arg);
3519 	if (ret) {
3520 		ath10k_warn(ar, "failed to parse peer kickout event: %d\n",
3521 			    ret);
3522 		return;
3523 	}
3524 
3525 	ath10k_dbg(ar, ATH10K_DBG_STA, "wmi event peer sta kickout %pM\n",
3526 		   arg.mac_addr);
3527 
3528 	rcu_read_lock();
3529 
3530 	sta = ieee80211_find_sta_by_ifaddr(ar->hw, arg.mac_addr, NULL);
3531 	if (!sta) {
3532 		ath10k_warn(ar, "Spurious quick kickout for STA %pM\n",
3533 			    arg.mac_addr);
3534 		goto exit;
3535 	}
3536 
3537 	ieee80211_report_low_ack(sta, 10);
3538 
3539 exit:
3540 	rcu_read_unlock();
3541 }
3542 
3543 /*
3544  * FIXME
3545  *
3546  * We don't report to mac80211 sleep state of connected
3547  * stations. Due to this mac80211 can't fill in TIM IE
3548  * correctly.
3549  *
3550  * I know of no way of getting nullfunc frames that contain
3551  * sleep transition from connected stations - these do not
3552  * seem to be sent from the target to the host. There also
3553  * doesn't seem to be a dedicated event for that. So the
3554  * only way left to do this would be to read tim_bitmap
3555  * during SWBA.
3556  *
3557  * We could probably try using tim_bitmap from SWBA to tell
3558  * mac80211 which stations are asleep and which are not. The
3559  * problem here is calling mac80211 functions so many times
3560  * could take too long and make us miss the time to submit
3561  * the beacon to the target.
3562  *
3563  * So as a workaround we try to extend the TIM IE if there
3564  * is unicast buffered for stations with aid > 7 and fill it
3565  * in ourselves.
3566  */
ath10k_wmi_update_tim(struct ath10k * ar,struct ath10k_vif * arvif,struct sk_buff * bcn,const struct wmi_tim_info_arg * tim_info)3567 static void ath10k_wmi_update_tim(struct ath10k *ar,
3568 				  struct ath10k_vif *arvif,
3569 				  struct sk_buff *bcn,
3570 				  const struct wmi_tim_info_arg *tim_info)
3571 {
3572 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data;
3573 	struct ieee80211_tim_ie *tim;
3574 	u8 *ies, *ie;
3575 	u8 ie_len, pvm_len;
3576 	__le32 t;
3577 	u32 v, tim_len;
3578 
3579 	/* When FW reports 0 in tim_len, ensure at least first byte
3580 	 * in tim_bitmap is considered for pvm calculation.
3581 	 */
3582 	tim_len = tim_info->tim_len ? __le32_to_cpu(tim_info->tim_len) : 1;
3583 
3584 	/* if next SWBA has no tim_changed the tim_bitmap is garbage.
3585 	 * we must copy the bitmap upon change and reuse it later
3586 	 */
3587 	if (__le32_to_cpu(tim_info->tim_changed)) {
3588 		int i;
3589 
3590 		if (sizeof(arvif->u.ap.tim_bitmap) < tim_len) {
3591 			ath10k_warn(ar, "SWBA TIM field is too big (%u), truncated it to %zu",
3592 				    tim_len, sizeof(arvif->u.ap.tim_bitmap));
3593 			tim_len = sizeof(arvif->u.ap.tim_bitmap);
3594 		}
3595 
3596 		for (i = 0; i < tim_len; i++) {
3597 			t = tim_info->tim_bitmap[i / 4];
3598 			v = __le32_to_cpu(t);
3599 			arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF;
3600 		}
3601 
3602 		/* FW reports either length 0 or length based on max supported
3603 		 * station. so we calculate this on our own
3604 		 */
3605 		arvif->u.ap.tim_len = 0;
3606 		for (i = 0; i < tim_len; i++)
3607 			if (arvif->u.ap.tim_bitmap[i])
3608 				arvif->u.ap.tim_len = i;
3609 
3610 		arvif->u.ap.tim_len++;
3611 	}
3612 
3613 	ies = bcn->data;
3614 	ies += ieee80211_hdrlen(hdr->frame_control);
3615 	ies += 12; /* fixed parameters */
3616 
3617 	ie = (u8 *)cfg80211_find_ie(WLAN_EID_TIM, ies,
3618 				    (u8 *)skb_tail_pointer(bcn) - ies);
3619 	if (!ie) {
3620 		if (arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
3621 			ath10k_warn(ar, "no tim ie found;\n");
3622 		return;
3623 	}
3624 
3625 	tim = (void *)ie + 2;
3626 	ie_len = ie[1];
3627 	pvm_len = ie_len - 3; /* exclude dtim count, dtim period, bmap ctl */
3628 
3629 	if (pvm_len < arvif->u.ap.tim_len) {
3630 		int expand_size = tim_len - pvm_len;
3631 		int move_size = skb_tail_pointer(bcn) - (ie + 2 + ie_len);
3632 		void *next_ie = ie + 2 + ie_len;
3633 
3634 		if (skb_put(bcn, expand_size)) {
3635 			memmove(next_ie + expand_size, next_ie, move_size);
3636 
3637 			ie[1] += expand_size;
3638 			ie_len += expand_size;
3639 			pvm_len += expand_size;
3640 		} else {
3641 			ath10k_warn(ar, "tim expansion failed\n");
3642 		}
3643 	}
3644 
3645 	if (pvm_len > tim_len) {
3646 		ath10k_warn(ar, "tim pvm length is too great (%d)\n", pvm_len);
3647 		return;
3648 	}
3649 
3650 	tim->bitmap_ctrl = !!__le32_to_cpu(tim_info->tim_mcast);
3651 	memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len);
3652 
3653 	if (tim->dtim_count == 0) {
3654 		ATH10K_SKB_CB(bcn)->flags |= ATH10K_SKB_F_DTIM_ZERO;
3655 
3656 		if (__le32_to_cpu(tim_info->tim_mcast) == 1)
3657 			ATH10K_SKB_CB(bcn)->flags |= ATH10K_SKB_F_DELIVER_CAB;
3658 	}
3659 
3660 	ath10k_dbg(ar, ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n",
3661 		   tim->dtim_count, tim->dtim_period,
3662 		   tim->bitmap_ctrl, pvm_len);
3663 }
3664 
ath10k_wmi_update_noa(struct ath10k * ar,struct ath10k_vif * arvif,struct sk_buff * bcn,const struct wmi_p2p_noa_info * noa)3665 static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif,
3666 				  struct sk_buff *bcn,
3667 				  const struct wmi_p2p_noa_info *noa)
3668 {
3669 	if (!arvif->vif->p2p)
3670 		return;
3671 
3672 	ath10k_dbg(ar, ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed);
3673 
3674 	if (noa->changed & WMI_P2P_NOA_CHANGED_BIT)
3675 		ath10k_p2p_noa_update(arvif, noa);
3676 
3677 	if (arvif->u.ap.noa_data)
3678 		if (!pskb_expand_head(bcn, 0, arvif->u.ap.noa_len, GFP_ATOMIC))
3679 			skb_put_data(bcn, arvif->u.ap.noa_data,
3680 				     arvif->u.ap.noa_len);
3681 }
3682 
ath10k_wmi_op_pull_swba_ev(struct ath10k * ar,struct sk_buff * skb,struct wmi_swba_ev_arg * arg)3683 static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb,
3684 				      struct wmi_swba_ev_arg *arg)
3685 {
3686 	struct wmi_host_swba_event *ev = (void *)skb->data;
3687 	u32 map;
3688 	size_t i;
3689 
3690 	if (skb->len < sizeof(*ev))
3691 		return -EPROTO;
3692 
3693 	skb_pull(skb, sizeof(*ev));
3694 	arg->vdev_map = ev->vdev_map;
3695 
3696 	for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
3697 		if (!(map & BIT(0)))
3698 			continue;
3699 
3700 		/* If this happens there were some changes in firmware and
3701 		 * ath10k should update the max size of tim_info array.
3702 		 */
3703 		if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
3704 			break;
3705 
3706 		if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) >
3707 		     sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) {
3708 			ath10k_warn(ar, "refusing to parse invalid swba structure\n");
3709 			return -EPROTO;
3710 		}
3711 
3712 		arg->tim_info[i].tim_len = ev->bcn_info[i].tim_info.tim_len;
3713 		arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast;
3714 		arg->tim_info[i].tim_bitmap =
3715 				ev->bcn_info[i].tim_info.tim_bitmap;
3716 		arg->tim_info[i].tim_changed =
3717 				ev->bcn_info[i].tim_info.tim_changed;
3718 		arg->tim_info[i].tim_num_ps_pending =
3719 				ev->bcn_info[i].tim_info.tim_num_ps_pending;
3720 
3721 		arg->noa_info[i] = &ev->bcn_info[i].p2p_noa_info;
3722 		i++;
3723 	}
3724 
3725 	return 0;
3726 }
3727 
ath10k_wmi_10_2_4_op_pull_swba_ev(struct ath10k * ar,struct sk_buff * skb,struct wmi_swba_ev_arg * arg)3728 static int ath10k_wmi_10_2_4_op_pull_swba_ev(struct ath10k *ar,
3729 					     struct sk_buff *skb,
3730 					     struct wmi_swba_ev_arg *arg)
3731 {
3732 	struct wmi_10_2_4_host_swba_event *ev = (void *)skb->data;
3733 	u32 map;
3734 	size_t i;
3735 
3736 	if (skb->len < sizeof(*ev))
3737 		return -EPROTO;
3738 
3739 	skb_pull(skb, sizeof(*ev));
3740 	arg->vdev_map = ev->vdev_map;
3741 
3742 	for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
3743 		if (!(map & BIT(0)))
3744 			continue;
3745 
3746 		/* If this happens there were some changes in firmware and
3747 		 * ath10k should update the max size of tim_info array.
3748 		 */
3749 		if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
3750 			break;
3751 
3752 		if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) >
3753 		     sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) {
3754 			ath10k_warn(ar, "refusing to parse invalid swba structure\n");
3755 			return -EPROTO;
3756 		}
3757 
3758 		arg->tim_info[i].tim_len = ev->bcn_info[i].tim_info.tim_len;
3759 		arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast;
3760 		arg->tim_info[i].tim_bitmap =
3761 				ev->bcn_info[i].tim_info.tim_bitmap;
3762 		arg->tim_info[i].tim_changed =
3763 				ev->bcn_info[i].tim_info.tim_changed;
3764 		arg->tim_info[i].tim_num_ps_pending =
3765 				ev->bcn_info[i].tim_info.tim_num_ps_pending;
3766 		i++;
3767 	}
3768 
3769 	return 0;
3770 }
3771 
ath10k_wmi_10_4_op_pull_swba_ev(struct ath10k * ar,struct sk_buff * skb,struct wmi_swba_ev_arg * arg)3772 static int ath10k_wmi_10_4_op_pull_swba_ev(struct ath10k *ar,
3773 					   struct sk_buff *skb,
3774 					   struct wmi_swba_ev_arg *arg)
3775 {
3776 	struct wmi_10_4_host_swba_event *ev = (void *)skb->data;
3777 	u32 map, tim_len;
3778 	size_t i;
3779 
3780 	if (skb->len < sizeof(*ev))
3781 		return -EPROTO;
3782 
3783 	skb_pull(skb, sizeof(*ev));
3784 	arg->vdev_map = ev->vdev_map;
3785 
3786 	for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
3787 		if (!(map & BIT(0)))
3788 			continue;
3789 
3790 		/* If this happens there were some changes in firmware and
3791 		 * ath10k should update the max size of tim_info array.
3792 		 */
3793 		if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
3794 			break;
3795 
3796 		if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) >
3797 		      sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) {
3798 			ath10k_warn(ar, "refusing to parse invalid swba structure\n");
3799 			return -EPROTO;
3800 		}
3801 
3802 		tim_len = __le32_to_cpu(ev->bcn_info[i].tim_info.tim_len);
3803 		if (tim_len) {
3804 			/* Exclude 4 byte guard length */
3805 			tim_len -= 4;
3806 			arg->tim_info[i].tim_len = __cpu_to_le32(tim_len);
3807 		} else {
3808 			arg->tim_info[i].tim_len = 0;
3809 		}
3810 
3811 		arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast;
3812 		arg->tim_info[i].tim_bitmap =
3813 				ev->bcn_info[i].tim_info.tim_bitmap;
3814 		arg->tim_info[i].tim_changed =
3815 				ev->bcn_info[i].tim_info.tim_changed;
3816 		arg->tim_info[i].tim_num_ps_pending =
3817 				ev->bcn_info[i].tim_info.tim_num_ps_pending;
3818 
3819 		/* 10.4 firmware doesn't have p2p support. notice of absence
3820 		 * info can be ignored for now.
3821 		 */
3822 
3823 		i++;
3824 	}
3825 
3826 	return 0;
3827 }
3828 
ath10k_wmi_10_4_txbf_conf_scheme(struct ath10k * ar)3829 static enum wmi_txbf_conf ath10k_wmi_10_4_txbf_conf_scheme(struct ath10k *ar)
3830 {
3831 	return WMI_TXBF_CONF_BEFORE_ASSOC;
3832 }
3833 
ath10k_wmi_event_host_swba(struct ath10k * ar,struct sk_buff * skb)3834 void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
3835 {
3836 	struct wmi_swba_ev_arg arg = {};
3837 	u32 map;
3838 	int i = -1;
3839 	const struct wmi_tim_info_arg *tim_info;
3840 	const struct wmi_p2p_noa_info *noa_info;
3841 	struct ath10k_vif *arvif;
3842 	struct sk_buff *bcn;
3843 	dma_addr_t paddr;
3844 	int ret, vdev_id = 0;
3845 
3846 	ret = ath10k_wmi_pull_swba(ar, skb, &arg);
3847 	if (ret) {
3848 		ath10k_warn(ar, "failed to parse swba event: %d\n", ret);
3849 		return;
3850 	}
3851 
3852 	map = __le32_to_cpu(arg.vdev_map);
3853 
3854 	ath10k_dbg(ar, ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n",
3855 		   map);
3856 
3857 	for (; map; map >>= 1, vdev_id++) {
3858 		if (!(map & 0x1))
3859 			continue;
3860 
3861 		i++;
3862 
3863 		if (i >= WMI_MAX_AP_VDEV) {
3864 			ath10k_warn(ar, "swba has corrupted vdev map\n");
3865 			break;
3866 		}
3867 
3868 		tim_info = &arg.tim_info[i];
3869 		noa_info = arg.noa_info[i];
3870 
3871 		ath10k_dbg(ar, ATH10K_DBG_MGMT,
3872 			   "mgmt event bcn_info %d tim_len %d mcast %d changed %d num_ps_pending %d bitmap 0x%08x%08x%08x%08x\n",
3873 			   i,
3874 			   __le32_to_cpu(tim_info->tim_len),
3875 			   __le32_to_cpu(tim_info->tim_mcast),
3876 			   __le32_to_cpu(tim_info->tim_changed),
3877 			   __le32_to_cpu(tim_info->tim_num_ps_pending),
3878 			   __le32_to_cpu(tim_info->tim_bitmap[3]),
3879 			   __le32_to_cpu(tim_info->tim_bitmap[2]),
3880 			   __le32_to_cpu(tim_info->tim_bitmap[1]),
3881 			   __le32_to_cpu(tim_info->tim_bitmap[0]));
3882 
3883 		/* TODO: Only first 4 word from tim_bitmap is dumped.
3884 		 * Extend debug code to dump full tim_bitmap.
3885 		 */
3886 
3887 		arvif = ath10k_get_arvif(ar, vdev_id);
3888 		if (arvif == NULL) {
3889 			ath10k_warn(ar, "no vif for vdev_id %d found\n",
3890 				    vdev_id);
3891 			continue;
3892 		}
3893 
3894 		/* mac80211 would have already asked us to stop beaconing and
3895 		 * bring the vdev down, so continue in that case
3896 		 */
3897 		if (!arvif->is_up)
3898 			continue;
3899 
3900 		/* There are no completions for beacons so wait for next SWBA
3901 		 * before telling mac80211 to decrement CSA counter
3902 		 *
3903 		 * Once CSA counter is completed stop sending beacons until
3904 		 * actual channel switch is done
3905 		 */
3906 		if (arvif->vif->bss_conf.csa_active &&
3907 		    ieee80211_beacon_cntdwn_is_complete(arvif->vif)) {
3908 			ieee80211_csa_finish(arvif->vif);
3909 			continue;
3910 		}
3911 
3912 		bcn = ieee80211_beacon_get(ar->hw, arvif->vif, 0);
3913 		if (!bcn) {
3914 			ath10k_warn(ar, "could not get mac80211 beacon\n");
3915 			continue;
3916 		}
3917 
3918 		ath10k_tx_h_seq_no(arvif->vif, bcn);
3919 		ath10k_wmi_update_tim(ar, arvif, bcn, tim_info);
3920 		ath10k_wmi_update_noa(ar, arvif, bcn, noa_info);
3921 
3922 		spin_lock_bh(&ar->data_lock);
3923 
3924 		if (arvif->beacon) {
3925 			switch (arvif->beacon_state) {
3926 			case ATH10K_BEACON_SENT:
3927 				break;
3928 			case ATH10K_BEACON_SCHEDULED:
3929 				ath10k_warn(ar, "SWBA overrun on vdev %d, skipped old beacon\n",
3930 					    arvif->vdev_id);
3931 				break;
3932 			case ATH10K_BEACON_SENDING:
3933 				ath10k_warn(ar, "SWBA overrun on vdev %d, skipped new beacon\n",
3934 					    arvif->vdev_id);
3935 				dev_kfree_skb(bcn);
3936 				goto skip;
3937 			}
3938 
3939 			ath10k_mac_vif_beacon_free(arvif);
3940 		}
3941 
3942 		if (!arvif->beacon_buf) {
3943 			paddr = dma_map_single(arvif->ar->dev, bcn->data,
3944 					       bcn->len, DMA_TO_DEVICE);
3945 			ret = dma_mapping_error(arvif->ar->dev, paddr);
3946 			if (ret) {
3947 				ath10k_warn(ar, "failed to map beacon: %d\n",
3948 					    ret);
3949 				dev_kfree_skb_any(bcn);
3950 				goto skip;
3951 			}
3952 
3953 			ATH10K_SKB_CB(bcn)->paddr = paddr;
3954 		} else {
3955 			if (bcn->len > IEEE80211_MAX_FRAME_LEN) {
3956 				ath10k_warn(ar, "trimming beacon %d -> %d bytes!\n",
3957 					    bcn->len, IEEE80211_MAX_FRAME_LEN);
3958 				skb_trim(bcn, IEEE80211_MAX_FRAME_LEN);
3959 			}
3960 			memcpy(arvif->beacon_buf, bcn->data, bcn->len);
3961 			ATH10K_SKB_CB(bcn)->paddr = arvif->beacon_paddr;
3962 		}
3963 
3964 		arvif->beacon = bcn;
3965 		arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
3966 
3967 		trace_ath10k_tx_hdr(ar, bcn->data, bcn->len);
3968 		trace_ath10k_tx_payload(ar, bcn->data, bcn->len);
3969 
3970 skip:
3971 		spin_unlock_bh(&ar->data_lock);
3972 	}
3973 
3974 	ath10k_wmi_tx_beacons_nowait(ar);
3975 }
3976 
ath10k_wmi_event_tbttoffset_update(struct ath10k * ar,struct sk_buff * skb)3977 void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb)
3978 {
3979 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n");
3980 }
3981 
ath10k_radar_detected(struct ath10k * ar)3982 static void ath10k_radar_detected(struct ath10k *ar)
3983 {
3984 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs radar detected\n");
3985 	ATH10K_DFS_STAT_INC(ar, radar_detected);
3986 
3987 	/* Control radar events reporting in debugfs file
3988 	 * dfs_block_radar_events
3989 	 */
3990 	if (ar->dfs_block_radar_events)
3991 		ath10k_info(ar, "DFS Radar detected, but ignored as requested\n");
3992 	else
3993 		ieee80211_radar_detected(ar->hw);
3994 }
3995 
ath10k_radar_confirmation_work(struct work_struct * work)3996 static void ath10k_radar_confirmation_work(struct work_struct *work)
3997 {
3998 	struct ath10k *ar = container_of(work, struct ath10k,
3999 					 radar_confirmation_work);
4000 	struct ath10k_radar_found_info radar_info;
4001 	int ret, time_left;
4002 
4003 	reinit_completion(&ar->wmi.radar_confirm);
4004 
4005 	spin_lock_bh(&ar->data_lock);
4006 	memcpy(&radar_info, &ar->last_radar_info, sizeof(radar_info));
4007 	spin_unlock_bh(&ar->data_lock);
4008 
4009 	ret = ath10k_wmi_report_radar_found(ar, &radar_info);
4010 	if (ret) {
4011 		ath10k_warn(ar, "failed to send radar found %d\n", ret);
4012 		goto wait_complete;
4013 	}
4014 
4015 	time_left = wait_for_completion_timeout(&ar->wmi.radar_confirm,
4016 						ATH10K_WMI_DFS_CONF_TIMEOUT_HZ);
4017 	if (time_left) {
4018 		/* DFS Confirmation status event received and
4019 		 * necessary action completed.
4020 		 */
4021 		goto wait_complete;
4022 	} else {
4023 		/* DFS Confirmation event not received from FW.Considering this
4024 		 * as real radar.
4025 		 */
4026 		ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4027 			   "dfs confirmation not received from fw, considering as radar\n");
4028 		goto radar_detected;
4029 	}
4030 
4031 radar_detected:
4032 	ath10k_radar_detected(ar);
4033 
4034 	/* Reset state to allow sending confirmation on consecutive radar
4035 	 * detections, unless radar confirmation is disabled/stopped.
4036 	 */
4037 wait_complete:
4038 	spin_lock_bh(&ar->data_lock);
4039 	if (ar->radar_conf_state != ATH10K_RADAR_CONFIRMATION_STOPPED)
4040 		ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_IDLE;
4041 	spin_unlock_bh(&ar->data_lock);
4042 }
4043 
ath10k_dfs_radar_report(struct ath10k * ar,struct wmi_phyerr_ev_arg * phyerr,const struct phyerr_radar_report * rr,u64 tsf)4044 static void ath10k_dfs_radar_report(struct ath10k *ar,
4045 				    struct wmi_phyerr_ev_arg *phyerr,
4046 				    const struct phyerr_radar_report *rr,
4047 				    u64 tsf)
4048 {
4049 	u32 reg0, reg1, tsf32l;
4050 	struct ieee80211_channel *ch;
4051 	struct pulse_event pe;
4052 	struct radar_detector_specs rs;
4053 	u64 tsf64;
4054 	u8 rssi, width;
4055 	struct ath10k_radar_found_info *radar_info;
4056 
4057 	reg0 = __le32_to_cpu(rr->reg0);
4058 	reg1 = __le32_to_cpu(rr->reg1);
4059 
4060 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4061 		   "wmi phyerr radar report chirp %d max_width %d agc_total_gain %d pulse_delta_diff %d\n",
4062 		   MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP),
4063 		   MS(reg0, RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH),
4064 		   MS(reg0, RADAR_REPORT_REG0_AGC_TOTAL_GAIN),
4065 		   MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_DIFF));
4066 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4067 		   "wmi phyerr radar report pulse_delta_pean %d pulse_sidx %d fft_valid %d agc_mb_gain %d subchan_mask %d\n",
4068 		   MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_PEAK),
4069 		   MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX),
4070 		   MS(reg1, RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID),
4071 		   MS(reg1, RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN),
4072 		   MS(reg1, RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK));
4073 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4074 		   "wmi phyerr radar report pulse_tsf_offset 0x%X pulse_dur: %d\n",
4075 		   MS(reg1, RADAR_REPORT_REG1_PULSE_TSF_OFFSET),
4076 		   MS(reg1, RADAR_REPORT_REG1_PULSE_DUR));
4077 
4078 	if (!ar->dfs_detector)
4079 		return;
4080 
4081 	spin_lock_bh(&ar->data_lock);
4082 	ch = ar->rx_channel;
4083 
4084 	/* fetch target operating channel during channel change */
4085 	if (!ch)
4086 		ch = ar->tgt_oper_chan;
4087 
4088 	spin_unlock_bh(&ar->data_lock);
4089 
4090 	if (!ch) {
4091 		ath10k_warn(ar, "failed to derive channel for radar pulse, treating as radar\n");
4092 		goto radar_detected;
4093 	}
4094 
4095 	/* report event to DFS pattern detector */
4096 	tsf32l = phyerr->tsf_timestamp;
4097 	tsf64 = tsf & (~0xFFFFFFFFULL);
4098 	tsf64 |= tsf32l;
4099 
4100 	width = MS(reg1, RADAR_REPORT_REG1_PULSE_DUR);
4101 	rssi = phyerr->rssi_combined;
4102 
4103 	/* hardware store this as 8 bit signed value,
4104 	 * set to zero if negative number
4105 	 */
4106 	if (rssi & 0x80)
4107 		rssi = 0;
4108 
4109 	pe.ts = tsf64;
4110 	pe.freq = ch->center_freq;
4111 	pe.width = width;
4112 	pe.rssi = rssi;
4113 	pe.chirp = (MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP) != 0);
4114 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4115 		   "dfs add pulse freq: %d, width: %d, rssi %d, tsf: %llX\n",
4116 		   pe.freq, pe.width, pe.rssi, pe.ts);
4117 
4118 	ATH10K_DFS_STAT_INC(ar, pulses_detected);
4119 
4120 	if (!ar->dfs_detector->add_pulse(ar->dfs_detector, &pe, &rs)) {
4121 		ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4122 			   "dfs no pulse pattern detected, yet\n");
4123 		return;
4124 	}
4125 
4126 	if ((test_bit(WMI_SERVICE_HOST_DFS_CHECK_SUPPORT, ar->wmi.svc_map)) &&
4127 	    ar->dfs_detector->region == NL80211_DFS_FCC) {
4128 		/* Consecutive radar indications need not be
4129 		 * sent to the firmware until we get confirmation
4130 		 * for the previous detected radar.
4131 		 */
4132 		spin_lock_bh(&ar->data_lock);
4133 		if (ar->radar_conf_state != ATH10K_RADAR_CONFIRMATION_IDLE) {
4134 			spin_unlock_bh(&ar->data_lock);
4135 			return;
4136 		}
4137 		ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_INPROGRESS;
4138 		radar_info = &ar->last_radar_info;
4139 
4140 		radar_info->pri_min = rs.pri_min;
4141 		radar_info->pri_max = rs.pri_max;
4142 		radar_info->width_min = rs.width_min;
4143 		radar_info->width_max = rs.width_max;
4144 		/*TODO Find sidx_min and sidx_max */
4145 		radar_info->sidx_min = MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX);
4146 		radar_info->sidx_max = MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX);
4147 
4148 		ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4149 			   "sending wmi radar found cmd pri_min %d pri_max %d width_min %d width_max %d sidx_min %d sidx_max %d\n",
4150 			   radar_info->pri_min, radar_info->pri_max,
4151 			   radar_info->width_min, radar_info->width_max,
4152 			   radar_info->sidx_min, radar_info->sidx_max);
4153 		ieee80211_queue_work(ar->hw, &ar->radar_confirmation_work);
4154 		spin_unlock_bh(&ar->data_lock);
4155 		return;
4156 	}
4157 
4158 radar_detected:
4159 	ath10k_radar_detected(ar);
4160 }
4161 
ath10k_dfs_fft_report(struct ath10k * ar,struct wmi_phyerr_ev_arg * phyerr,const struct phyerr_fft_report * fftr,u64 tsf)4162 static int ath10k_dfs_fft_report(struct ath10k *ar,
4163 				 struct wmi_phyerr_ev_arg *phyerr,
4164 				 const struct phyerr_fft_report *fftr,
4165 				 u64 tsf)
4166 {
4167 	u32 reg0, reg1;
4168 	u8 rssi, peak_mag;
4169 
4170 	reg0 = __le32_to_cpu(fftr->reg0);
4171 	reg1 = __le32_to_cpu(fftr->reg1);
4172 	rssi = phyerr->rssi_combined;
4173 
4174 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4175 		   "wmi phyerr fft report total_gain_db %d base_pwr_db %d fft_chn_idx %d peak_sidx %d\n",
4176 		   MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB),
4177 		   MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB),
4178 		   MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX),
4179 		   MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX));
4180 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4181 		   "wmi phyerr fft report rel_pwr_db %d avgpwr_db %d peak_mag %d num_store_bin %d\n",
4182 		   MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB),
4183 		   MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB),
4184 		   MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG),
4185 		   MS(reg1, SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB));
4186 
4187 	peak_mag = MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG);
4188 
4189 	/* false event detection */
4190 	if (rssi == DFS_RSSI_POSSIBLY_FALSE &&
4191 	    peak_mag < 2 * DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE) {
4192 		ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs false pulse detected\n");
4193 		ATH10K_DFS_STAT_INC(ar, pulses_discarded);
4194 		return -EINVAL;
4195 	}
4196 
4197 	return 0;
4198 }
4199 
ath10k_wmi_event_dfs(struct ath10k * ar,struct wmi_phyerr_ev_arg * phyerr,u64 tsf)4200 void ath10k_wmi_event_dfs(struct ath10k *ar,
4201 			  struct wmi_phyerr_ev_arg *phyerr,
4202 			  u64 tsf)
4203 {
4204 	int buf_len, tlv_len, res, i = 0;
4205 	const struct phyerr_tlv *tlv;
4206 	const struct phyerr_radar_report *rr;
4207 	const struct phyerr_fft_report *fftr;
4208 	const u8 *tlv_buf;
4209 
4210 	buf_len = phyerr->buf_len;
4211 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4212 		   "wmi event dfs err_code %d rssi %d tsfl 0x%X tsf64 0x%llX len %d\n",
4213 		   phyerr->phy_err_code, phyerr->rssi_combined,
4214 		   phyerr->tsf_timestamp, tsf, buf_len);
4215 
4216 	/* Skip event if DFS disabled */
4217 	if (!IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED))
4218 		return;
4219 
4220 	ATH10K_DFS_STAT_INC(ar, pulses_total);
4221 
4222 	while (i < buf_len) {
4223 		if (i + sizeof(*tlv) > buf_len) {
4224 			ath10k_warn(ar, "too short buf for tlv header (%d)\n",
4225 				    i);
4226 			return;
4227 		}
4228 
4229 		tlv = (struct phyerr_tlv *)&phyerr->buf[i];
4230 		tlv_len = __le16_to_cpu(tlv->len);
4231 		tlv_buf = &phyerr->buf[i + sizeof(*tlv)];
4232 		ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4233 			   "wmi event dfs tlv_len %d tlv_tag 0x%02X tlv_sig 0x%02X\n",
4234 			   tlv_len, tlv->tag, tlv->sig);
4235 
4236 		switch (tlv->tag) {
4237 		case PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY:
4238 			if (i + sizeof(*tlv) + sizeof(*rr) > buf_len) {
4239 				ath10k_warn(ar, "too short radar pulse summary (%d)\n",
4240 					    i);
4241 				return;
4242 			}
4243 
4244 			rr = (struct phyerr_radar_report *)tlv_buf;
4245 			ath10k_dfs_radar_report(ar, phyerr, rr, tsf);
4246 			break;
4247 		case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
4248 			if (i + sizeof(*tlv) + sizeof(*fftr) > buf_len) {
4249 				ath10k_warn(ar, "too short fft report (%d)\n",
4250 					    i);
4251 				return;
4252 			}
4253 
4254 			fftr = (struct phyerr_fft_report *)tlv_buf;
4255 			res = ath10k_dfs_fft_report(ar, phyerr, fftr, tsf);
4256 			if (res)
4257 				return;
4258 			break;
4259 		}
4260 
4261 		i += sizeof(*tlv) + tlv_len;
4262 	}
4263 }
4264 
ath10k_wmi_event_spectral_scan(struct ath10k * ar,struct wmi_phyerr_ev_arg * phyerr,u64 tsf)4265 void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
4266 				    struct wmi_phyerr_ev_arg *phyerr,
4267 				    u64 tsf)
4268 {
4269 	int buf_len, tlv_len, res, i = 0;
4270 	struct phyerr_tlv *tlv;
4271 	const void *tlv_buf;
4272 	const struct phyerr_fft_report *fftr;
4273 	size_t fftr_len;
4274 
4275 	buf_len = phyerr->buf_len;
4276 
4277 	while (i < buf_len) {
4278 		if (i + sizeof(*tlv) > buf_len) {
4279 			ath10k_warn(ar, "failed to parse phyerr tlv header at byte %d\n",
4280 				    i);
4281 			return;
4282 		}
4283 
4284 		tlv = (struct phyerr_tlv *)&phyerr->buf[i];
4285 		tlv_len = __le16_to_cpu(tlv->len);
4286 		tlv_buf = &phyerr->buf[i + sizeof(*tlv)];
4287 
4288 		if (i + sizeof(*tlv) + tlv_len > buf_len) {
4289 			ath10k_warn(ar, "failed to parse phyerr tlv payload at byte %d\n",
4290 				    i);
4291 			return;
4292 		}
4293 
4294 		switch (tlv->tag) {
4295 		case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
4296 			if (sizeof(*fftr) > tlv_len) {
4297 				ath10k_warn(ar, "failed to parse fft report at byte %d\n",
4298 					    i);
4299 				return;
4300 			}
4301 
4302 			fftr_len = tlv_len - sizeof(*fftr);
4303 			fftr = tlv_buf;
4304 			res = ath10k_spectral_process_fft(ar, phyerr,
4305 							  fftr, fftr_len,
4306 							  tsf);
4307 			if (res < 0) {
4308 				ath10k_dbg(ar, ATH10K_DBG_WMI, "failed to process fft report: %d\n",
4309 					   res);
4310 				return;
4311 			}
4312 			break;
4313 		}
4314 
4315 		i += sizeof(*tlv) + tlv_len;
4316 	}
4317 }
4318 
ath10k_wmi_op_pull_phyerr_ev_hdr(struct ath10k * ar,struct sk_buff * skb,struct wmi_phyerr_hdr_arg * arg)4319 static int ath10k_wmi_op_pull_phyerr_ev_hdr(struct ath10k *ar,
4320 					    struct sk_buff *skb,
4321 					    struct wmi_phyerr_hdr_arg *arg)
4322 {
4323 	struct wmi_phyerr_event *ev = (void *)skb->data;
4324 
4325 	if (skb->len < sizeof(*ev))
4326 		return -EPROTO;
4327 
4328 	arg->num_phyerrs = __le32_to_cpu(ev->num_phyerrs);
4329 	arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32);
4330 	arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32);
4331 	arg->buf_len = skb->len - sizeof(*ev);
4332 	arg->phyerrs = ev->phyerrs;
4333 
4334 	return 0;
4335 }
4336 
ath10k_wmi_10_4_op_pull_phyerr_ev_hdr(struct ath10k * ar,struct sk_buff * skb,struct wmi_phyerr_hdr_arg * arg)4337 static int ath10k_wmi_10_4_op_pull_phyerr_ev_hdr(struct ath10k *ar,
4338 						 struct sk_buff *skb,
4339 						 struct wmi_phyerr_hdr_arg *arg)
4340 {
4341 	struct wmi_10_4_phyerr_event *ev = (void *)skb->data;
4342 
4343 	if (skb->len < sizeof(*ev))
4344 		return -EPROTO;
4345 
4346 	/* 10.4 firmware always reports only one phyerr */
4347 	arg->num_phyerrs = 1;
4348 
4349 	arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32);
4350 	arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32);
4351 	arg->buf_len = skb->len;
4352 	arg->phyerrs = skb->data;
4353 
4354 	return 0;
4355 }
4356 
ath10k_wmi_op_pull_phyerr_ev(struct ath10k * ar,const void * phyerr_buf,int left_len,struct wmi_phyerr_ev_arg * arg)4357 int ath10k_wmi_op_pull_phyerr_ev(struct ath10k *ar,
4358 				 const void *phyerr_buf,
4359 				 int left_len,
4360 				 struct wmi_phyerr_ev_arg *arg)
4361 {
4362 	const struct wmi_phyerr *phyerr = phyerr_buf;
4363 	int i;
4364 
4365 	if (left_len < sizeof(*phyerr)) {
4366 		ath10k_warn(ar, "wrong phyerr event head len %d (need: >=%zd)\n",
4367 			    left_len, sizeof(*phyerr));
4368 		return -EINVAL;
4369 	}
4370 
4371 	arg->tsf_timestamp = __le32_to_cpu(phyerr->tsf_timestamp);
4372 	arg->freq1 = __le16_to_cpu(phyerr->freq1);
4373 	arg->freq2 = __le16_to_cpu(phyerr->freq2);
4374 	arg->rssi_combined = phyerr->rssi_combined;
4375 	arg->chan_width_mhz = phyerr->chan_width_mhz;
4376 	arg->buf_len = __le32_to_cpu(phyerr->buf_len);
4377 	arg->buf = phyerr->buf;
4378 	arg->hdr_len = sizeof(*phyerr);
4379 
4380 	for (i = 0; i < 4; i++)
4381 		arg->nf_chains[i] = __le16_to_cpu(phyerr->nf_chains[i]);
4382 
4383 	switch (phyerr->phy_err_code) {
4384 	case PHY_ERROR_GEN_SPECTRAL_SCAN:
4385 		arg->phy_err_code = PHY_ERROR_SPECTRAL_SCAN;
4386 		break;
4387 	case PHY_ERROR_GEN_FALSE_RADAR_EXT:
4388 		arg->phy_err_code = PHY_ERROR_FALSE_RADAR_EXT;
4389 		break;
4390 	case PHY_ERROR_GEN_RADAR:
4391 		arg->phy_err_code = PHY_ERROR_RADAR;
4392 		break;
4393 	default:
4394 		arg->phy_err_code = PHY_ERROR_UNKNOWN;
4395 		break;
4396 	}
4397 
4398 	return 0;
4399 }
4400 
ath10k_wmi_10_4_op_pull_phyerr_ev(struct ath10k * ar,const void * phyerr_buf,int left_len,struct wmi_phyerr_ev_arg * arg)4401 static int ath10k_wmi_10_4_op_pull_phyerr_ev(struct ath10k *ar,
4402 					     const void *phyerr_buf,
4403 					     int left_len,
4404 					     struct wmi_phyerr_ev_arg *arg)
4405 {
4406 	const struct wmi_10_4_phyerr_event *phyerr = phyerr_buf;
4407 	u32 phy_err_mask;
4408 	int i;
4409 
4410 	if (left_len < sizeof(*phyerr)) {
4411 		ath10k_warn(ar, "wrong phyerr event head len %d (need: >=%zd)\n",
4412 			    left_len, sizeof(*phyerr));
4413 		return -EINVAL;
4414 	}
4415 
4416 	arg->tsf_timestamp = __le32_to_cpu(phyerr->tsf_timestamp);
4417 	arg->freq1 = __le16_to_cpu(phyerr->freq1);
4418 	arg->freq2 = __le16_to_cpu(phyerr->freq2);
4419 	arg->rssi_combined = phyerr->rssi_combined;
4420 	arg->chan_width_mhz = phyerr->chan_width_mhz;
4421 	arg->buf_len = __le32_to_cpu(phyerr->buf_len);
4422 	arg->buf = phyerr->buf;
4423 	arg->hdr_len = sizeof(*phyerr);
4424 
4425 	for (i = 0; i < 4; i++)
4426 		arg->nf_chains[i] = __le16_to_cpu(phyerr->nf_chains[i]);
4427 
4428 	phy_err_mask = __le32_to_cpu(phyerr->phy_err_mask[0]);
4429 
4430 	if (phy_err_mask & PHY_ERROR_10_4_SPECTRAL_SCAN_MASK)
4431 		arg->phy_err_code = PHY_ERROR_SPECTRAL_SCAN;
4432 	else if (phy_err_mask & PHY_ERROR_10_4_RADAR_MASK)
4433 		arg->phy_err_code = PHY_ERROR_RADAR;
4434 	else
4435 		arg->phy_err_code = PHY_ERROR_UNKNOWN;
4436 
4437 	return 0;
4438 }
4439 
ath10k_wmi_event_phyerr(struct ath10k * ar,struct sk_buff * skb)4440 void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
4441 {
4442 	struct wmi_phyerr_hdr_arg hdr_arg = {};
4443 	struct wmi_phyerr_ev_arg phyerr_arg = {};
4444 	const void *phyerr;
4445 	u32 count, i, buf_len, phy_err_code;
4446 	u64 tsf;
4447 	int left_len, ret;
4448 
4449 	ATH10K_DFS_STAT_INC(ar, phy_errors);
4450 
4451 	ret = ath10k_wmi_pull_phyerr_hdr(ar, skb, &hdr_arg);
4452 	if (ret) {
4453 		ath10k_warn(ar, "failed to parse phyerr event hdr: %d\n", ret);
4454 		return;
4455 	}
4456 
4457 	/* Check number of included events */
4458 	count = hdr_arg.num_phyerrs;
4459 
4460 	left_len = hdr_arg.buf_len;
4461 
4462 	tsf = hdr_arg.tsf_u32;
4463 	tsf <<= 32;
4464 	tsf |= hdr_arg.tsf_l32;
4465 
4466 	ath10k_dbg(ar, ATH10K_DBG_WMI,
4467 		   "wmi event phyerr count %d tsf64 0x%llX\n",
4468 		   count, tsf);
4469 
4470 	phyerr = hdr_arg.phyerrs;
4471 	for (i = 0; i < count; i++) {
4472 		ret = ath10k_wmi_pull_phyerr(ar, phyerr, left_len, &phyerr_arg);
4473 		if (ret) {
4474 			ath10k_warn(ar, "failed to parse phyerr event (%d)\n",
4475 				    i);
4476 			return;
4477 		}
4478 
4479 		left_len -= phyerr_arg.hdr_len;
4480 		buf_len = phyerr_arg.buf_len;
4481 		phy_err_code = phyerr_arg.phy_err_code;
4482 
4483 		if (left_len < buf_len) {
4484 			ath10k_warn(ar, "single event (%d) wrong buf len\n", i);
4485 			return;
4486 		}
4487 
4488 		left_len -= buf_len;
4489 
4490 		switch (phy_err_code) {
4491 		case PHY_ERROR_RADAR:
4492 			ath10k_wmi_event_dfs(ar, &phyerr_arg, tsf);
4493 			break;
4494 		case PHY_ERROR_SPECTRAL_SCAN:
4495 			ath10k_wmi_event_spectral_scan(ar, &phyerr_arg, tsf);
4496 			break;
4497 		case PHY_ERROR_FALSE_RADAR_EXT:
4498 			ath10k_wmi_event_dfs(ar, &phyerr_arg, tsf);
4499 			ath10k_wmi_event_spectral_scan(ar, &phyerr_arg, tsf);
4500 			break;
4501 		default:
4502 			break;
4503 		}
4504 
4505 		phyerr = phyerr + phyerr_arg.hdr_len + buf_len;
4506 	}
4507 }
4508 
4509 static int
ath10k_wmi_10_4_op_pull_dfs_status_ev(struct ath10k * ar,struct sk_buff * skb,struct wmi_dfs_status_ev_arg * arg)4510 ath10k_wmi_10_4_op_pull_dfs_status_ev(struct ath10k *ar, struct sk_buff *skb,
4511 				      struct wmi_dfs_status_ev_arg *arg)
4512 {
4513 	struct wmi_dfs_status_ev_arg *ev = (void *)skb->data;
4514 
4515 	if (skb->len < sizeof(*ev))
4516 		return -EPROTO;
4517 
4518 	arg->status = ev->status;
4519 
4520 	return 0;
4521 }
4522 
4523 static void
ath10k_wmi_event_dfs_status_check(struct ath10k * ar,struct sk_buff * skb)4524 ath10k_wmi_event_dfs_status_check(struct ath10k *ar, struct sk_buff *skb)
4525 {
4526 	struct wmi_dfs_status_ev_arg status_arg = {};
4527 	int ret;
4528 
4529 	ret = ath10k_wmi_pull_dfs_status(ar, skb, &status_arg);
4530 
4531 	if (ret) {
4532 		ath10k_warn(ar, "failed to parse dfs status event: %d\n", ret);
4533 		return;
4534 	}
4535 
4536 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4537 		   "dfs status event received from fw: %d\n",
4538 		   status_arg.status);
4539 
4540 	/* Even in case of radar detection failure we follow the same
4541 	 * behaviour as if radar is detected i.e to switch to a different
4542 	 * channel.
4543 	 */
4544 	if (status_arg.status == WMI_HW_RADAR_DETECTED ||
4545 	    status_arg.status == WMI_RADAR_DETECTION_FAIL)
4546 		ath10k_radar_detected(ar);
4547 	complete(&ar->wmi.radar_confirm);
4548 }
4549 
ath10k_wmi_event_roam(struct ath10k * ar,struct sk_buff * skb)4550 void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
4551 {
4552 	struct wmi_roam_ev_arg arg = {};
4553 	int ret;
4554 	u32 vdev_id;
4555 	u32 reason;
4556 	s32 rssi;
4557 
4558 	ret = ath10k_wmi_pull_roam_ev(ar, skb, &arg);
4559 	if (ret) {
4560 		ath10k_warn(ar, "failed to parse roam event: %d\n", ret);
4561 		return;
4562 	}
4563 
4564 	vdev_id = __le32_to_cpu(arg.vdev_id);
4565 	reason = __le32_to_cpu(arg.reason);
4566 	rssi = __le32_to_cpu(arg.rssi);
4567 	rssi += WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT;
4568 
4569 	ath10k_dbg(ar, ATH10K_DBG_WMI,
4570 		   "wmi roam event vdev %u reason 0x%08x rssi %d\n",
4571 		   vdev_id, reason, rssi);
4572 
4573 	if (reason >= WMI_ROAM_REASON_MAX)
4574 		ath10k_warn(ar, "ignoring unknown roam event reason %d on vdev %i\n",
4575 			    reason, vdev_id);
4576 
4577 	switch (reason) {
4578 	case WMI_ROAM_REASON_BEACON_MISS:
4579 		ath10k_mac_handle_beacon_miss(ar, vdev_id);
4580 		break;
4581 	case WMI_ROAM_REASON_BETTER_AP:
4582 	case WMI_ROAM_REASON_LOW_RSSI:
4583 	case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
4584 	case WMI_ROAM_REASON_HO_FAILED:
4585 		ath10k_warn(ar, "ignoring not implemented roam event reason %d on vdev %i\n",
4586 			    reason, vdev_id);
4587 		break;
4588 	}
4589 }
4590 
ath10k_wmi_event_profile_match(struct ath10k * ar,struct sk_buff * skb)4591 void ath10k_wmi_event_profile_match(struct ath10k *ar, struct sk_buff *skb)
4592 {
4593 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n");
4594 }
4595 
ath10k_wmi_event_debug_print(struct ath10k * ar,struct sk_buff * skb)4596 void ath10k_wmi_event_debug_print(struct ath10k *ar, struct sk_buff *skb)
4597 {
4598 	char buf[101], c;
4599 	int i;
4600 
4601 	for (i = 0; i < sizeof(buf) - 1; i++) {
4602 		if (i >= skb->len)
4603 			break;
4604 
4605 		c = skb->data[i];
4606 
4607 		if (c == '\0')
4608 			break;
4609 
4610 		if (isascii(c) && isprint(c))
4611 			buf[i] = c;
4612 		else
4613 			buf[i] = '.';
4614 	}
4615 
4616 	if (i == sizeof(buf) - 1)
4617 		ath10k_warn(ar, "wmi debug print truncated: %d\n", skb->len);
4618 
4619 	/* for some reason the debug prints end with \n, remove that */
4620 	if (skb->data[i - 1] == '\n')
4621 		i--;
4622 
4623 	/* the last byte is always reserved for the null character */
4624 	buf[i] = '\0';
4625 
4626 	ath10k_dbg(ar, ATH10K_DBG_WMI_PRINT, "wmi print '%s'\n", buf);
4627 }
4628 
ath10k_wmi_event_pdev_qvit(struct ath10k * ar,struct sk_buff * skb)4629 void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb)
4630 {
4631 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n");
4632 }
4633 
ath10k_wmi_event_wlan_profile_data(struct ath10k * ar,struct sk_buff * skb)4634 void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar, struct sk_buff *skb)
4635 {
4636 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n");
4637 }
4638 
ath10k_wmi_event_rtt_measurement_report(struct ath10k * ar,struct sk_buff * skb)4639 void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar,
4640 					     struct sk_buff *skb)
4641 {
4642 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n");
4643 }
4644 
ath10k_wmi_event_tsf_measurement_report(struct ath10k * ar,struct sk_buff * skb)4645 void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar,
4646 					     struct sk_buff *skb)
4647 {
4648 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n");
4649 }
4650 
ath10k_wmi_event_rtt_error_report(struct ath10k * ar,struct sk_buff * skb)4651 void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, struct sk_buff *skb)
4652 {
4653 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n");
4654 }
4655 
ath10k_wmi_event_wow_wakeup_host(struct ath10k * ar,struct sk_buff * skb)4656 void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, struct sk_buff *skb)
4657 {
4658 	struct wmi_wow_ev_arg ev = {};
4659 	int ret;
4660 
4661 	complete(&ar->wow.wakeup_completed);
4662 
4663 	ret = ath10k_wmi_pull_wow_event(ar, skb, &ev);
4664 	if (ret) {
4665 		ath10k_warn(ar, "failed to parse wow wakeup event: %d\n", ret);
4666 		return;
4667 	}
4668 
4669 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wow wakeup host reason %s\n",
4670 		   wow_reason(ev.wake_reason));
4671 }
4672 
ath10k_wmi_event_dcs_interference(struct ath10k * ar,struct sk_buff * skb)4673 void ath10k_wmi_event_dcs_interference(struct ath10k *ar, struct sk_buff *skb)
4674 {
4675 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n");
4676 }
4677 
ath10k_tpc_config_get_rate(struct ath10k * ar,struct wmi_pdev_tpc_config_event * ev,u32 rate_idx,u32 num_chains,u32 rate_code,u8 type)4678 static u8 ath10k_tpc_config_get_rate(struct ath10k *ar,
4679 				     struct wmi_pdev_tpc_config_event *ev,
4680 				     u32 rate_idx, u32 num_chains,
4681 				     u32 rate_code, u8 type)
4682 {
4683 	u8 tpc, num_streams, preamble, ch, stm_idx;
4684 
4685 	num_streams = ATH10K_HW_NSS(rate_code);
4686 	preamble = ATH10K_HW_PREAMBLE(rate_code);
4687 	ch = num_chains - 1;
4688 
4689 	tpc = min_t(u8, ev->rates_array[rate_idx], ev->max_reg_allow_pow[ch]);
4690 
4691 	if (__le32_to_cpu(ev->num_tx_chain) <= 1)
4692 		goto out;
4693 
4694 	if (preamble == WMI_RATE_PREAMBLE_CCK)
4695 		goto out;
4696 
4697 	stm_idx = num_streams - 1;
4698 	if (num_chains <= num_streams)
4699 		goto out;
4700 
4701 	switch (type) {
4702 	case WMI_TPC_TABLE_TYPE_STBC:
4703 		tpc = min_t(u8, tpc,
4704 			    ev->max_reg_allow_pow_agstbc[ch - 1][stm_idx]);
4705 		break;
4706 	case WMI_TPC_TABLE_TYPE_TXBF:
4707 		tpc = min_t(u8, tpc,
4708 			    ev->max_reg_allow_pow_agtxbf[ch - 1][stm_idx]);
4709 		break;
4710 	case WMI_TPC_TABLE_TYPE_CDD:
4711 		tpc = min_t(u8, tpc,
4712 			    ev->max_reg_allow_pow_agcdd[ch - 1][stm_idx]);
4713 		break;
4714 	default:
4715 		ath10k_warn(ar, "unknown wmi tpc table type: %d\n", type);
4716 		tpc = 0;
4717 		break;
4718 	}
4719 
4720 out:
4721 	return tpc;
4722 }
4723 
ath10k_tpc_config_disp_tables(struct ath10k * ar,struct wmi_pdev_tpc_config_event * ev,struct ath10k_tpc_stats * tpc_stats,u8 * rate_code,u16 * pream_table,u8 type)4724 static void ath10k_tpc_config_disp_tables(struct ath10k *ar,
4725 					  struct wmi_pdev_tpc_config_event *ev,
4726 					  struct ath10k_tpc_stats *tpc_stats,
4727 					  u8 *rate_code, u16 *pream_table, u8 type)
4728 {
4729 	u32 i, j, pream_idx, flags;
4730 	u8 tpc[WMI_TPC_TX_N_CHAIN];
4731 	char tpc_value[WMI_TPC_TX_N_CHAIN * WMI_TPC_BUF_SIZE];
4732 	char buff[WMI_TPC_BUF_SIZE];
4733 
4734 	flags = __le32_to_cpu(ev->flags);
4735 
4736 	switch (type) {
4737 	case WMI_TPC_TABLE_TYPE_CDD:
4738 		if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_CDD)) {
4739 			ath10k_dbg(ar, ATH10K_DBG_WMI, "CDD not supported\n");
4740 			tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
4741 			return;
4742 		}
4743 		break;
4744 	case WMI_TPC_TABLE_TYPE_STBC:
4745 		if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_STBC)) {
4746 			ath10k_dbg(ar, ATH10K_DBG_WMI, "STBC not supported\n");
4747 			tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
4748 			return;
4749 		}
4750 		break;
4751 	case WMI_TPC_TABLE_TYPE_TXBF:
4752 		if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_TXBF)) {
4753 			ath10k_dbg(ar, ATH10K_DBG_WMI, "TXBF not supported\n");
4754 			tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
4755 			return;
4756 		}
4757 		break;
4758 	default:
4759 		ath10k_dbg(ar, ATH10K_DBG_WMI,
4760 			   "invalid table type in wmi tpc event: %d\n", type);
4761 		return;
4762 	}
4763 
4764 	pream_idx = 0;
4765 	for (i = 0; i < tpc_stats->rate_max; i++) {
4766 		memset(tpc_value, 0, sizeof(tpc_value));
4767 		memset(buff, 0, sizeof(buff));
4768 		if (i == pream_table[pream_idx])
4769 			pream_idx++;
4770 
4771 		for (j = 0; j < tpc_stats->num_tx_chain; j++) {
4772 			tpc[j] = ath10k_tpc_config_get_rate(ar, ev, i, j + 1,
4773 							    rate_code[i],
4774 							    type);
4775 			snprintf(buff, sizeof(buff), "%8d ", tpc[j]);
4776 			strlcat(tpc_value, buff, sizeof(tpc_value));
4777 		}
4778 		tpc_stats->tpc_table[type].pream_idx[i] = pream_idx;
4779 		tpc_stats->tpc_table[type].rate_code[i] = rate_code[i];
4780 		memcpy(tpc_stats->tpc_table[type].tpc_value[i],
4781 		       tpc_value, sizeof(tpc_value));
4782 	}
4783 }
4784 
ath10k_wmi_tpc_config_get_rate_code(u8 * rate_code,u16 * pream_table,u32 num_tx_chain)4785 void ath10k_wmi_tpc_config_get_rate_code(u8 *rate_code, u16 *pream_table,
4786 					 u32 num_tx_chain)
4787 {
4788 	u32 i, j, pream_idx;
4789 	u8 rate_idx;
4790 
4791 	/* Create the rate code table based on the chains supported */
4792 	rate_idx = 0;
4793 	pream_idx = 0;
4794 
4795 	/* Fill CCK rate code */
4796 	for (i = 0; i < 4; i++) {
4797 		rate_code[rate_idx] =
4798 			ATH10K_HW_RATECODE(i, 0, WMI_RATE_PREAMBLE_CCK);
4799 		rate_idx++;
4800 	}
4801 	pream_table[pream_idx] = rate_idx;
4802 	pream_idx++;
4803 
4804 	/* Fill OFDM rate code */
4805 	for (i = 0; i < 8; i++) {
4806 		rate_code[rate_idx] =
4807 			ATH10K_HW_RATECODE(i, 0, WMI_RATE_PREAMBLE_OFDM);
4808 		rate_idx++;
4809 	}
4810 	pream_table[pream_idx] = rate_idx;
4811 	pream_idx++;
4812 
4813 	/* Fill HT20 rate code */
4814 	for (i = 0; i < num_tx_chain; i++) {
4815 		for (j = 0; j < 8; j++) {
4816 			rate_code[rate_idx] =
4817 			ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_HT);
4818 			rate_idx++;
4819 		}
4820 	}
4821 	pream_table[pream_idx] = rate_idx;
4822 	pream_idx++;
4823 
4824 	/* Fill HT40 rate code */
4825 	for (i = 0; i < num_tx_chain; i++) {
4826 		for (j = 0; j < 8; j++) {
4827 			rate_code[rate_idx] =
4828 			ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_HT);
4829 			rate_idx++;
4830 		}
4831 	}
4832 	pream_table[pream_idx] = rate_idx;
4833 	pream_idx++;
4834 
4835 	/* Fill VHT20 rate code */
4836 	for (i = 0; i < num_tx_chain; i++) {
4837 		for (j = 0; j < 10; j++) {
4838 			rate_code[rate_idx] =
4839 			ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT);
4840 			rate_idx++;
4841 		}
4842 	}
4843 	pream_table[pream_idx] = rate_idx;
4844 	pream_idx++;
4845 
4846 	/* Fill VHT40 rate code */
4847 	for (i = 0; i < num_tx_chain; i++) {
4848 		for (j = 0; j < 10; j++) {
4849 			rate_code[rate_idx] =
4850 			ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT);
4851 			rate_idx++;
4852 		}
4853 	}
4854 	pream_table[pream_idx] = rate_idx;
4855 	pream_idx++;
4856 
4857 	/* Fill VHT80 rate code */
4858 	for (i = 0; i < num_tx_chain; i++) {
4859 		for (j = 0; j < 10; j++) {
4860 			rate_code[rate_idx] =
4861 			ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT);
4862 			rate_idx++;
4863 		}
4864 	}
4865 	pream_table[pream_idx] = rate_idx;
4866 	pream_idx++;
4867 
4868 	rate_code[rate_idx++] =
4869 		ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_CCK);
4870 	rate_code[rate_idx++] =
4871 		ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM);
4872 	rate_code[rate_idx++] =
4873 		ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_CCK);
4874 	rate_code[rate_idx++] =
4875 		ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM);
4876 	rate_code[rate_idx++] =
4877 		ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM);
4878 
4879 	pream_table[pream_idx] = ATH10K_TPC_PREAM_TABLE_END;
4880 }
4881 
ath10k_wmi_event_pdev_tpc_config(struct ath10k * ar,struct sk_buff * skb)4882 void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
4883 {
4884 	u32 num_tx_chain, rate_max;
4885 	u8 rate_code[WMI_TPC_RATE_MAX];
4886 	u16 pream_table[WMI_TPC_PREAM_TABLE_MAX];
4887 	struct wmi_pdev_tpc_config_event *ev;
4888 	struct ath10k_tpc_stats *tpc_stats;
4889 
4890 	ev = (struct wmi_pdev_tpc_config_event *)skb->data;
4891 
4892 	num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
4893 
4894 	if (num_tx_chain > WMI_TPC_TX_N_CHAIN) {
4895 		ath10k_warn(ar, "number of tx chain is %d greater than TPC configured tx chain %d\n",
4896 			    num_tx_chain, WMI_TPC_TX_N_CHAIN);
4897 		return;
4898 	}
4899 
4900 	rate_max = __le32_to_cpu(ev->rate_max);
4901 	if (rate_max > WMI_TPC_RATE_MAX) {
4902 		ath10k_warn(ar, "number of rate is %d greater than TPC configured rate %d\n",
4903 			    rate_max, WMI_TPC_RATE_MAX);
4904 		rate_max = WMI_TPC_RATE_MAX;
4905 	}
4906 
4907 	tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC);
4908 	if (!tpc_stats)
4909 		return;
4910 
4911 	ath10k_wmi_tpc_config_get_rate_code(rate_code, pream_table,
4912 					    num_tx_chain);
4913 
4914 	tpc_stats->chan_freq = __le32_to_cpu(ev->chan_freq);
4915 	tpc_stats->phy_mode = __le32_to_cpu(ev->phy_mode);
4916 	tpc_stats->ctl = __le32_to_cpu(ev->ctl);
4917 	tpc_stats->reg_domain = __le32_to_cpu(ev->reg_domain);
4918 	tpc_stats->twice_antenna_gain = a_sle32_to_cpu(ev->twice_antenna_gain);
4919 	tpc_stats->twice_antenna_reduction =
4920 		__le32_to_cpu(ev->twice_antenna_reduction);
4921 	tpc_stats->power_limit = __le32_to_cpu(ev->power_limit);
4922 	tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power);
4923 	tpc_stats->num_tx_chain = num_tx_chain;
4924 	tpc_stats->rate_max = rate_max;
4925 
4926 	ath10k_tpc_config_disp_tables(ar, ev, tpc_stats,
4927 				      rate_code, pream_table,
4928 				      WMI_TPC_TABLE_TYPE_CDD);
4929 	ath10k_tpc_config_disp_tables(ar, ev,  tpc_stats,
4930 				      rate_code, pream_table,
4931 				      WMI_TPC_TABLE_TYPE_STBC);
4932 	ath10k_tpc_config_disp_tables(ar, ev, tpc_stats,
4933 				      rate_code, pream_table,
4934 				      WMI_TPC_TABLE_TYPE_TXBF);
4935 
4936 	ath10k_debug_tpc_stats_process(ar, tpc_stats);
4937 
4938 	ath10k_dbg(ar, ATH10K_DBG_WMI,
4939 		   "wmi event tpc config channel %d mode %d ctl %d regd %d gain %d %d limit %d max_power %d tx_chanins %d rates %d\n",
4940 		   __le32_to_cpu(ev->chan_freq),
4941 		   __le32_to_cpu(ev->phy_mode),
4942 		   __le32_to_cpu(ev->ctl),
4943 		   __le32_to_cpu(ev->reg_domain),
4944 		   a_sle32_to_cpu(ev->twice_antenna_gain),
4945 		   __le32_to_cpu(ev->twice_antenna_reduction),
4946 		   __le32_to_cpu(ev->power_limit),
4947 		   __le32_to_cpu(ev->twice_max_rd_power) / 2,
4948 		   __le32_to_cpu(ev->num_tx_chain),
4949 		   __le32_to_cpu(ev->rate_max));
4950 }
4951 
4952 static u8
ath10k_wmi_tpc_final_get_rate(struct ath10k * ar,struct wmi_pdev_tpc_final_table_event * ev,u32 rate_idx,u32 num_chains,u32 rate_code,u8 type,u32 pream_idx)4953 ath10k_wmi_tpc_final_get_rate(struct ath10k *ar,
4954 			      struct wmi_pdev_tpc_final_table_event *ev,
4955 			      u32 rate_idx, u32 num_chains,
4956 			      u32 rate_code, u8 type, u32 pream_idx)
4957 {
4958 	u8 tpc, num_streams, preamble, ch, stm_idx;
4959 	s8 pow_agcdd, pow_agstbc, pow_agtxbf;
4960 	int pream;
4961 
4962 	num_streams = ATH10K_HW_NSS(rate_code);
4963 	preamble = ATH10K_HW_PREAMBLE(rate_code);
4964 	ch = num_chains - 1;
4965 	stm_idx = num_streams - 1;
4966 	pream = -1;
4967 
4968 	if (__le32_to_cpu(ev->chan_freq) <= 2483) {
4969 		switch (pream_idx) {
4970 		case WMI_TPC_PREAM_2GHZ_CCK:
4971 			pream = 0;
4972 			break;
4973 		case WMI_TPC_PREAM_2GHZ_OFDM:
4974 			pream = 1;
4975 			break;
4976 		case WMI_TPC_PREAM_2GHZ_HT20:
4977 		case WMI_TPC_PREAM_2GHZ_VHT20:
4978 			pream = 2;
4979 			break;
4980 		case WMI_TPC_PREAM_2GHZ_HT40:
4981 		case WMI_TPC_PREAM_2GHZ_VHT40:
4982 			pream = 3;
4983 			break;
4984 		case WMI_TPC_PREAM_2GHZ_VHT80:
4985 			pream = 4;
4986 			break;
4987 		default:
4988 			pream = -1;
4989 			break;
4990 		}
4991 	}
4992 
4993 	if (__le32_to_cpu(ev->chan_freq) >= 5180) {
4994 		switch (pream_idx) {
4995 		case WMI_TPC_PREAM_5GHZ_OFDM:
4996 			pream = 0;
4997 			break;
4998 		case WMI_TPC_PREAM_5GHZ_HT20:
4999 		case WMI_TPC_PREAM_5GHZ_VHT20:
5000 			pream = 1;
5001 			break;
5002 		case WMI_TPC_PREAM_5GHZ_HT40:
5003 		case WMI_TPC_PREAM_5GHZ_VHT40:
5004 			pream = 2;
5005 			break;
5006 		case WMI_TPC_PREAM_5GHZ_VHT80:
5007 			pream = 3;
5008 			break;
5009 		case WMI_TPC_PREAM_5GHZ_HTCUP:
5010 			pream = 4;
5011 			break;
5012 		default:
5013 			pream = -1;
5014 			break;
5015 		}
5016 	}
5017 
5018 	if (pream == -1) {
5019 		ath10k_warn(ar, "unknown wmi tpc final index and frequency: %u, %u\n",
5020 			    pream_idx, __le32_to_cpu(ev->chan_freq));
5021 		tpc = 0;
5022 		goto out;
5023 	}
5024 
5025 	if (pream == 4)
5026 		tpc = min_t(u8, ev->rates_array[rate_idx],
5027 			    ev->max_reg_allow_pow[ch]);
5028 	else
5029 		tpc = min_t(u8, min_t(u8, ev->rates_array[rate_idx],
5030 				      ev->max_reg_allow_pow[ch]),
5031 			    ev->ctl_power_table[0][pream][stm_idx]);
5032 
5033 	if (__le32_to_cpu(ev->num_tx_chain) <= 1)
5034 		goto out;
5035 
5036 	if (preamble == WMI_RATE_PREAMBLE_CCK)
5037 		goto out;
5038 
5039 	if (num_chains <= num_streams)
5040 		goto out;
5041 
5042 	switch (type) {
5043 	case WMI_TPC_TABLE_TYPE_STBC:
5044 		pow_agstbc = ev->max_reg_allow_pow_agstbc[ch - 1][stm_idx];
5045 		if (pream == 4)
5046 			tpc = min_t(u8, tpc, pow_agstbc);
5047 		else
5048 			tpc = min_t(u8, min_t(u8, tpc, pow_agstbc),
5049 				    ev->ctl_power_table[0][pream][stm_idx]);
5050 		break;
5051 	case WMI_TPC_TABLE_TYPE_TXBF:
5052 		pow_agtxbf = ev->max_reg_allow_pow_agtxbf[ch - 1][stm_idx];
5053 		if (pream == 4)
5054 			tpc = min_t(u8, tpc, pow_agtxbf);
5055 		else
5056 			tpc = min_t(u8, min_t(u8, tpc, pow_agtxbf),
5057 				    ev->ctl_power_table[1][pream][stm_idx]);
5058 		break;
5059 	case WMI_TPC_TABLE_TYPE_CDD:
5060 		pow_agcdd = ev->max_reg_allow_pow_agcdd[ch - 1][stm_idx];
5061 		if (pream == 4)
5062 			tpc = min_t(u8, tpc, pow_agcdd);
5063 		else
5064 			tpc = min_t(u8, min_t(u8, tpc, pow_agcdd),
5065 				    ev->ctl_power_table[0][pream][stm_idx]);
5066 		break;
5067 	default:
5068 		ath10k_warn(ar, "unknown wmi tpc final table type: %d\n", type);
5069 		tpc = 0;
5070 		break;
5071 	}
5072 
5073 out:
5074 	return tpc;
5075 }
5076 
5077 static void
ath10k_wmi_tpc_stats_final_disp_tables(struct ath10k * ar,struct wmi_pdev_tpc_final_table_event * ev,struct ath10k_tpc_stats_final * tpc_stats,u8 * rate_code,u16 * pream_table,u8 type)5078 ath10k_wmi_tpc_stats_final_disp_tables(struct ath10k *ar,
5079 				       struct wmi_pdev_tpc_final_table_event *ev,
5080 				       struct ath10k_tpc_stats_final *tpc_stats,
5081 				       u8 *rate_code, u16 *pream_table, u8 type)
5082 {
5083 	u32 i, j, pream_idx, flags;
5084 	u8 tpc[WMI_TPC_TX_N_CHAIN];
5085 	char tpc_value[WMI_TPC_TX_N_CHAIN * WMI_TPC_BUF_SIZE];
5086 	char buff[WMI_TPC_BUF_SIZE];
5087 
5088 	flags = __le32_to_cpu(ev->flags);
5089 
5090 	switch (type) {
5091 	case WMI_TPC_TABLE_TYPE_CDD:
5092 		if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_CDD)) {
5093 			ath10k_dbg(ar, ATH10K_DBG_WMI, "CDD not supported\n");
5094 			tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
5095 			return;
5096 		}
5097 		break;
5098 	case WMI_TPC_TABLE_TYPE_STBC:
5099 		if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_STBC)) {
5100 			ath10k_dbg(ar, ATH10K_DBG_WMI, "STBC not supported\n");
5101 			tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
5102 			return;
5103 		}
5104 		break;
5105 	case WMI_TPC_TABLE_TYPE_TXBF:
5106 		if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_TXBF)) {
5107 			ath10k_dbg(ar, ATH10K_DBG_WMI, "TXBF not supported\n");
5108 			tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
5109 			return;
5110 		}
5111 		break;
5112 	default:
5113 		ath10k_dbg(ar, ATH10K_DBG_WMI,
5114 			   "invalid table type in wmi tpc event: %d\n", type);
5115 		return;
5116 	}
5117 
5118 	pream_idx = 0;
5119 	for (i = 0; i < tpc_stats->rate_max; i++) {
5120 		memset(tpc_value, 0, sizeof(tpc_value));
5121 		memset(buff, 0, sizeof(buff));
5122 		if (i == pream_table[pream_idx])
5123 			pream_idx++;
5124 
5125 		for (j = 0; j < tpc_stats->num_tx_chain; j++) {
5126 			tpc[j] = ath10k_wmi_tpc_final_get_rate(ar, ev, i, j + 1,
5127 							       rate_code[i],
5128 							       type, pream_idx);
5129 			snprintf(buff, sizeof(buff), "%8d ", tpc[j]);
5130 			strlcat(tpc_value, buff, sizeof(tpc_value));
5131 		}
5132 		tpc_stats->tpc_table_final[type].pream_idx[i] = pream_idx;
5133 		tpc_stats->tpc_table_final[type].rate_code[i] = rate_code[i];
5134 		memcpy(tpc_stats->tpc_table_final[type].tpc_value[i],
5135 		       tpc_value, sizeof(tpc_value));
5136 	}
5137 }
5138 
ath10k_wmi_event_tpc_final_table(struct ath10k * ar,struct sk_buff * skb)5139 void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb)
5140 {
5141 	u32 num_tx_chain, rate_max;
5142 	u8 rate_code[WMI_TPC_FINAL_RATE_MAX];
5143 	u16 pream_table[WMI_TPC_PREAM_TABLE_MAX];
5144 	struct wmi_pdev_tpc_final_table_event *ev;
5145 	struct ath10k_tpc_stats_final *tpc_stats;
5146 
5147 	ev = (struct wmi_pdev_tpc_final_table_event *)skb->data;
5148 
5149 	num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
5150 	if (num_tx_chain > WMI_TPC_TX_N_CHAIN) {
5151 		ath10k_warn(ar, "number of tx chain is %d greater than TPC final configured tx chain %d\n",
5152 			    num_tx_chain, WMI_TPC_TX_N_CHAIN);
5153 		return;
5154 	}
5155 
5156 	rate_max = __le32_to_cpu(ev->rate_max);
5157 	if (rate_max > WMI_TPC_FINAL_RATE_MAX) {
5158 		ath10k_warn(ar, "number of rate is %d greater than TPC final configured rate %d\n",
5159 			    rate_max, WMI_TPC_FINAL_RATE_MAX);
5160 		rate_max = WMI_TPC_FINAL_RATE_MAX;
5161 	}
5162 
5163 	tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC);
5164 	if (!tpc_stats)
5165 		return;
5166 
5167 	ath10k_wmi_tpc_config_get_rate_code(rate_code, pream_table,
5168 					    num_tx_chain);
5169 
5170 	tpc_stats->chan_freq = __le32_to_cpu(ev->chan_freq);
5171 	tpc_stats->phy_mode = __le32_to_cpu(ev->phy_mode);
5172 	tpc_stats->ctl = __le32_to_cpu(ev->ctl);
5173 	tpc_stats->reg_domain = __le32_to_cpu(ev->reg_domain);
5174 	tpc_stats->twice_antenna_gain = a_sle32_to_cpu(ev->twice_antenna_gain);
5175 	tpc_stats->twice_antenna_reduction =
5176 		__le32_to_cpu(ev->twice_antenna_reduction);
5177 	tpc_stats->power_limit = __le32_to_cpu(ev->power_limit);
5178 	tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power);
5179 	tpc_stats->num_tx_chain = num_tx_chain;
5180 	tpc_stats->rate_max = rate_max;
5181 
5182 	ath10k_wmi_tpc_stats_final_disp_tables(ar, ev, tpc_stats,
5183 					       rate_code, pream_table,
5184 					       WMI_TPC_TABLE_TYPE_CDD);
5185 	ath10k_wmi_tpc_stats_final_disp_tables(ar, ev,  tpc_stats,
5186 					       rate_code, pream_table,
5187 					       WMI_TPC_TABLE_TYPE_STBC);
5188 	ath10k_wmi_tpc_stats_final_disp_tables(ar, ev, tpc_stats,
5189 					       rate_code, pream_table,
5190 					       WMI_TPC_TABLE_TYPE_TXBF);
5191 
5192 	ath10k_debug_tpc_stats_final_process(ar, tpc_stats);
5193 
5194 	ath10k_dbg(ar, ATH10K_DBG_WMI,
5195 		   "wmi event tpc final table channel %d mode %d ctl %d regd %d gain %d %d limit %d max_power %d tx_chanins %d rates %d\n",
5196 		   __le32_to_cpu(ev->chan_freq),
5197 		   __le32_to_cpu(ev->phy_mode),
5198 		   __le32_to_cpu(ev->ctl),
5199 		   __le32_to_cpu(ev->reg_domain),
5200 		   a_sle32_to_cpu(ev->twice_antenna_gain),
5201 		   __le32_to_cpu(ev->twice_antenna_reduction),
5202 		   __le32_to_cpu(ev->power_limit),
5203 		   __le32_to_cpu(ev->twice_max_rd_power) / 2,
5204 		   __le32_to_cpu(ev->num_tx_chain),
5205 		   __le32_to_cpu(ev->rate_max));
5206 }
5207 
5208 static void
ath10k_wmi_handle_tdls_peer_event(struct ath10k * ar,struct sk_buff * skb)5209 ath10k_wmi_handle_tdls_peer_event(struct ath10k *ar, struct sk_buff *skb)
5210 {
5211 	struct wmi_tdls_peer_event *ev;
5212 	struct ath10k_peer *peer;
5213 	struct ath10k_vif *arvif;
5214 	int vdev_id;
5215 	int peer_status;
5216 	int peer_reason;
5217 	u8 reason;
5218 
5219 	if (skb->len < sizeof(*ev)) {
5220 		ath10k_err(ar, "received tdls peer event with invalid size (%d bytes)\n",
5221 			   skb->len);
5222 		return;
5223 	}
5224 
5225 	ev = (struct wmi_tdls_peer_event *)skb->data;
5226 	vdev_id = __le32_to_cpu(ev->vdev_id);
5227 	peer_status = __le32_to_cpu(ev->peer_status);
5228 	peer_reason = __le32_to_cpu(ev->peer_reason);
5229 
5230 	spin_lock_bh(&ar->data_lock);
5231 	peer = ath10k_peer_find(ar, vdev_id, ev->peer_macaddr.addr);
5232 	spin_unlock_bh(&ar->data_lock);
5233 
5234 	if (!peer) {
5235 		ath10k_warn(ar, "failed to find peer entry for %pM\n",
5236 			    ev->peer_macaddr.addr);
5237 		return;
5238 	}
5239 
5240 	switch (peer_status) {
5241 	case WMI_TDLS_SHOULD_TEARDOWN:
5242 		switch (peer_reason) {
5243 		case WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT:
5244 		case WMI_TDLS_TEARDOWN_REASON_NO_RESPONSE:
5245 		case WMI_TDLS_TEARDOWN_REASON_RSSI:
5246 			reason = WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE;
5247 			break;
5248 		default:
5249 			reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED;
5250 			break;
5251 		}
5252 
5253 		arvif = ath10k_get_arvif(ar, vdev_id);
5254 		if (!arvif) {
5255 			ath10k_warn(ar, "received tdls peer event for invalid vdev id %u\n",
5256 				    vdev_id);
5257 			return;
5258 		}
5259 
5260 		ieee80211_tdls_oper_request(arvif->vif, ev->peer_macaddr.addr,
5261 					    NL80211_TDLS_TEARDOWN, reason,
5262 					    GFP_ATOMIC);
5263 
5264 		ath10k_dbg(ar, ATH10K_DBG_WMI,
5265 			   "received tdls teardown event for peer %pM reason %u\n",
5266 			   ev->peer_macaddr.addr, peer_reason);
5267 		break;
5268 	default:
5269 		ath10k_dbg(ar, ATH10K_DBG_WMI,
5270 			   "received unknown tdls peer event %u\n",
5271 			   peer_status);
5272 		break;
5273 	}
5274 }
5275 
5276 static void
ath10k_wmi_event_peer_sta_ps_state_chg(struct ath10k * ar,struct sk_buff * skb)5277 ath10k_wmi_event_peer_sta_ps_state_chg(struct ath10k *ar, struct sk_buff *skb)
5278 {
5279 	struct wmi_peer_sta_ps_state_chg_event *ev;
5280 	struct ieee80211_sta *sta;
5281 	struct ath10k_sta *arsta;
5282 	u8 peer_addr[ETH_ALEN];
5283 
5284 	lockdep_assert_held(&ar->data_lock);
5285 
5286 	ev = (struct wmi_peer_sta_ps_state_chg_event *)skb->data;
5287 	ether_addr_copy(peer_addr, ev->peer_macaddr.addr);
5288 
5289 	rcu_read_lock();
5290 
5291 	sta = ieee80211_find_sta_by_ifaddr(ar->hw, peer_addr, NULL);
5292 
5293 	if (!sta) {
5294 		ath10k_warn(ar, "failed to find station entry %pM\n",
5295 			    peer_addr);
5296 		goto exit;
5297 	}
5298 
5299 	arsta = (struct ath10k_sta *)sta->drv_priv;
5300 	arsta->peer_ps_state = __le32_to_cpu(ev->peer_ps_state);
5301 
5302 exit:
5303 	rcu_read_unlock();
5304 }
5305 
ath10k_wmi_event_pdev_ftm_intg(struct ath10k * ar,struct sk_buff * skb)5306 void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb)
5307 {
5308 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n");
5309 }
5310 
ath10k_wmi_event_gtk_offload_status(struct ath10k * ar,struct sk_buff * skb)5311 void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar, struct sk_buff *skb)
5312 {
5313 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n");
5314 }
5315 
ath10k_wmi_event_gtk_rekey_fail(struct ath10k * ar,struct sk_buff * skb)5316 void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar, struct sk_buff *skb)
5317 {
5318 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n");
5319 }
5320 
ath10k_wmi_event_delba_complete(struct ath10k * ar,struct sk_buff * skb)5321 void ath10k_wmi_event_delba_complete(struct ath10k *ar, struct sk_buff *skb)
5322 {
5323 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n");
5324 }
5325 
ath10k_wmi_event_addba_complete(struct ath10k * ar,struct sk_buff * skb)5326 void ath10k_wmi_event_addba_complete(struct ath10k *ar, struct sk_buff *skb)
5327 {
5328 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n");
5329 }
5330 
ath10k_wmi_event_vdev_install_key_complete(struct ath10k * ar,struct sk_buff * skb)5331 void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
5332 						struct sk_buff *skb)
5333 {
5334 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n");
5335 }
5336 
ath10k_wmi_event_inst_rssi_stats(struct ath10k * ar,struct sk_buff * skb)5337 void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar, struct sk_buff *skb)
5338 {
5339 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n");
5340 }
5341 
ath10k_wmi_event_vdev_standby_req(struct ath10k * ar,struct sk_buff * skb)5342 void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, struct sk_buff *skb)
5343 {
5344 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n");
5345 }
5346 
ath10k_wmi_event_vdev_resume_req(struct ath10k * ar,struct sk_buff * skb)5347 void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb)
5348 {
5349 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n");
5350 }
5351 
ath10k_wmi_alloc_chunk(struct ath10k * ar,u32 req_id,u32 num_units,u32 unit_len)5352 static int ath10k_wmi_alloc_chunk(struct ath10k *ar, u32 req_id,
5353 				  u32 num_units, u32 unit_len)
5354 {
5355 	dma_addr_t paddr;
5356 	u32 pool_size;
5357 	int idx = ar->wmi.num_mem_chunks;
5358 	void *vaddr;
5359 
5360 	pool_size = num_units * round_up(unit_len, 4);
5361 	vaddr = dma_alloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL);
5362 
5363 	if (!vaddr)
5364 		return -ENOMEM;
5365 
5366 	ar->wmi.mem_chunks[idx].vaddr = vaddr;
5367 	ar->wmi.mem_chunks[idx].paddr = paddr;
5368 	ar->wmi.mem_chunks[idx].len = pool_size;
5369 	ar->wmi.mem_chunks[idx].req_id = req_id;
5370 	ar->wmi.num_mem_chunks++;
5371 
5372 	return num_units;
5373 }
5374 
ath10k_wmi_alloc_host_mem(struct ath10k * ar,u32 req_id,u32 num_units,u32 unit_len)5375 static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
5376 				     u32 num_units, u32 unit_len)
5377 {
5378 	int ret;
5379 
5380 	while (num_units) {
5381 		ret = ath10k_wmi_alloc_chunk(ar, req_id, num_units, unit_len);
5382 		if (ret < 0)
5383 			return ret;
5384 
5385 		num_units -= ret;
5386 	}
5387 
5388 	return 0;
5389 }
5390 
5391 static bool
ath10k_wmi_is_host_mem_allocated(struct ath10k * ar,const struct wlan_host_mem_req ** mem_reqs,u32 num_mem_reqs)5392 ath10k_wmi_is_host_mem_allocated(struct ath10k *ar,
5393 				 const struct wlan_host_mem_req **mem_reqs,
5394 				 u32 num_mem_reqs)
5395 {
5396 	u32 req_id, num_units, unit_size, num_unit_info;
5397 	u32 pool_size;
5398 	int i, j;
5399 	bool found;
5400 
5401 	if (ar->wmi.num_mem_chunks != num_mem_reqs)
5402 		return false;
5403 
5404 	for (i = 0; i < num_mem_reqs; ++i) {
5405 		req_id = __le32_to_cpu(mem_reqs[i]->req_id);
5406 		num_units = __le32_to_cpu(mem_reqs[i]->num_units);
5407 		unit_size = __le32_to_cpu(mem_reqs[i]->unit_size);
5408 		num_unit_info = __le32_to_cpu(mem_reqs[i]->num_unit_info);
5409 
5410 		if (num_unit_info & NUM_UNITS_IS_NUM_ACTIVE_PEERS) {
5411 			if (ar->num_active_peers)
5412 				num_units = ar->num_active_peers + 1;
5413 			else
5414 				num_units = ar->max_num_peers + 1;
5415 		} else if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) {
5416 			num_units = ar->max_num_peers + 1;
5417 		} else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) {
5418 			num_units = ar->max_num_vdevs + 1;
5419 		}
5420 
5421 		found = false;
5422 		for (j = 0; j < ar->wmi.num_mem_chunks; j++) {
5423 			if (ar->wmi.mem_chunks[j].req_id == req_id) {
5424 				pool_size = num_units * round_up(unit_size, 4);
5425 				if (ar->wmi.mem_chunks[j].len == pool_size) {
5426 					found = true;
5427 					break;
5428 				}
5429 			}
5430 		}
5431 		if (!found)
5432 			return false;
5433 	}
5434 
5435 	return true;
5436 }
5437 
5438 static int
ath10k_wmi_main_op_pull_svc_rdy_ev(struct ath10k * ar,struct sk_buff * skb,struct wmi_svc_rdy_ev_arg * arg)5439 ath10k_wmi_main_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
5440 				   struct wmi_svc_rdy_ev_arg *arg)
5441 {
5442 	struct wmi_service_ready_event *ev;
5443 	size_t i, n;
5444 
5445 	if (skb->len < sizeof(*ev))
5446 		return -EPROTO;
5447 
5448 	ev = (void *)skb->data;
5449 	skb_pull(skb, sizeof(*ev));
5450 	arg->min_tx_power = ev->hw_min_tx_power;
5451 	arg->max_tx_power = ev->hw_max_tx_power;
5452 	arg->ht_cap = ev->ht_cap_info;
5453 	arg->vht_cap = ev->vht_cap_info;
5454 	arg->vht_supp_mcs = ev->vht_supp_mcs;
5455 	arg->sw_ver0 = ev->sw_version;
5456 	arg->sw_ver1 = ev->sw_version_1;
5457 	arg->phy_capab = ev->phy_capability;
5458 	arg->num_rf_chains = ev->num_rf_chains;
5459 	arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
5460 	arg->low_2ghz_chan = ev->hal_reg_capabilities.low_2ghz_chan;
5461 	arg->high_2ghz_chan = ev->hal_reg_capabilities.high_2ghz_chan;
5462 	arg->low_5ghz_chan = ev->hal_reg_capabilities.low_5ghz_chan;
5463 	arg->high_5ghz_chan = ev->hal_reg_capabilities.high_5ghz_chan;
5464 	arg->num_mem_reqs = ev->num_mem_reqs;
5465 	arg->service_map = ev->wmi_service_bitmap;
5466 	arg->service_map_len = sizeof(ev->wmi_service_bitmap);
5467 
5468 	n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
5469 		  ARRAY_SIZE(arg->mem_reqs));
5470 	for (i = 0; i < n; i++)
5471 		arg->mem_reqs[i] = &ev->mem_reqs[i];
5472 
5473 	if (skb->len <
5474 	    __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0]))
5475 		return -EPROTO;
5476 
5477 	return 0;
5478 }
5479 
5480 static int
ath10k_wmi_10x_op_pull_svc_rdy_ev(struct ath10k * ar,struct sk_buff * skb,struct wmi_svc_rdy_ev_arg * arg)5481 ath10k_wmi_10x_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
5482 				  struct wmi_svc_rdy_ev_arg *arg)
5483 {
5484 	struct wmi_10x_service_ready_event *ev;
5485 	int i, n;
5486 
5487 	if (skb->len < sizeof(*ev))
5488 		return -EPROTO;
5489 
5490 	ev = (void *)skb->data;
5491 	skb_pull(skb, sizeof(*ev));
5492 	arg->min_tx_power = ev->hw_min_tx_power;
5493 	arg->max_tx_power = ev->hw_max_tx_power;
5494 	arg->ht_cap = ev->ht_cap_info;
5495 	arg->vht_cap = ev->vht_cap_info;
5496 	arg->vht_supp_mcs = ev->vht_supp_mcs;
5497 	arg->sw_ver0 = ev->sw_version;
5498 	arg->phy_capab = ev->phy_capability;
5499 	arg->num_rf_chains = ev->num_rf_chains;
5500 	arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
5501 	arg->low_2ghz_chan = ev->hal_reg_capabilities.low_2ghz_chan;
5502 	arg->high_2ghz_chan = ev->hal_reg_capabilities.high_2ghz_chan;
5503 	arg->low_5ghz_chan = ev->hal_reg_capabilities.low_5ghz_chan;
5504 	arg->high_5ghz_chan = ev->hal_reg_capabilities.high_5ghz_chan;
5505 	arg->num_mem_reqs = ev->num_mem_reqs;
5506 	arg->service_map = ev->wmi_service_bitmap;
5507 	arg->service_map_len = sizeof(ev->wmi_service_bitmap);
5508 
5509 	/* Deliberately skipping ev->sys_cap_info as WMI and WMI-TLV have
5510 	 * different values. We would need a translation to handle that,
5511 	 * but as we don't currently need anything from sys_cap_info from
5512 	 * WMI interface (only from WMI-TLV) safest it to skip it.
5513 	 */
5514 
5515 	n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
5516 		  ARRAY_SIZE(arg->mem_reqs));
5517 	for (i = 0; i < n; i++)
5518 		arg->mem_reqs[i] = &ev->mem_reqs[i];
5519 
5520 	if (skb->len <
5521 	    __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0]))
5522 		return -EPROTO;
5523 
5524 	return 0;
5525 }
5526 
ath10k_wmi_event_service_ready_work(struct work_struct * work)5527 static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
5528 {
5529 	struct ath10k *ar = container_of(work, struct ath10k, svc_rdy_work);
5530 	struct sk_buff *skb = ar->svc_rdy_skb;
5531 	struct wmi_svc_rdy_ev_arg arg = {};
5532 	u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
5533 	int ret;
5534 	bool allocated;
5535 
5536 	if (!skb) {
5537 		ath10k_warn(ar, "invalid service ready event skb\n");
5538 		return;
5539 	}
5540 
5541 	ret = ath10k_wmi_pull_svc_rdy(ar, skb, &arg);
5542 	if (ret) {
5543 		ath10k_warn(ar, "failed to parse service ready: %d\n", ret);
5544 		return;
5545 	}
5546 
5547 	ath10k_wmi_map_svc(ar, arg.service_map, ar->wmi.svc_map,
5548 			   arg.service_map_len);
5549 
5550 	ar->hw_min_tx_power = __le32_to_cpu(arg.min_tx_power);
5551 	ar->hw_max_tx_power = __le32_to_cpu(arg.max_tx_power);
5552 	ar->ht_cap_info = __le32_to_cpu(arg.ht_cap);
5553 	ar->vht_cap_info = __le32_to_cpu(arg.vht_cap);
5554 	ar->vht_supp_mcs = __le32_to_cpu(arg.vht_supp_mcs);
5555 	ar->fw_version_major =
5556 		(__le32_to_cpu(arg.sw_ver0) & 0xff000000) >> 24;
5557 	ar->fw_version_minor = (__le32_to_cpu(arg.sw_ver0) & 0x00ffffff);
5558 	ar->fw_version_release =
5559 		(__le32_to_cpu(arg.sw_ver1) & 0xffff0000) >> 16;
5560 	ar->fw_version_build = (__le32_to_cpu(arg.sw_ver1) & 0x0000ffff);
5561 	ar->phy_capability = __le32_to_cpu(arg.phy_capab);
5562 	ar->num_rf_chains = __le32_to_cpu(arg.num_rf_chains);
5563 	ar->hw_eeprom_rd = __le32_to_cpu(arg.eeprom_rd);
5564 	ar->low_2ghz_chan = __le32_to_cpu(arg.low_2ghz_chan);
5565 	ar->high_2ghz_chan = __le32_to_cpu(arg.high_2ghz_chan);
5566 	ar->low_5ghz_chan = __le32_to_cpu(arg.low_5ghz_chan);
5567 	ar->high_5ghz_chan = __le32_to_cpu(arg.high_5ghz_chan);
5568 	ar->sys_cap_info = __le32_to_cpu(arg.sys_cap_info);
5569 
5570 	ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
5571 			arg.service_map, arg.service_map_len);
5572 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi sys_cap_info 0x%x\n",
5573 		   ar->sys_cap_info);
5574 
5575 	if (ar->num_rf_chains > ar->max_spatial_stream) {
5576 		ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n",
5577 			    ar->num_rf_chains, ar->max_spatial_stream);
5578 		ar->num_rf_chains = ar->max_spatial_stream;
5579 	}
5580 
5581 	if (!ar->cfg_tx_chainmask) {
5582 		ar->cfg_tx_chainmask = (1 << ar->num_rf_chains) - 1;
5583 		ar->cfg_rx_chainmask = (1 << ar->num_rf_chains) - 1;
5584 	}
5585 
5586 	if (strlen(ar->hw->wiphy->fw_version) == 0) {
5587 		snprintf(ar->hw->wiphy->fw_version,
5588 			 sizeof(ar->hw->wiphy->fw_version),
5589 			 "%u.%u.%u.%u",
5590 			 ar->fw_version_major,
5591 			 ar->fw_version_minor,
5592 			 ar->fw_version_release,
5593 			 ar->fw_version_build);
5594 	}
5595 
5596 	num_mem_reqs = __le32_to_cpu(arg.num_mem_reqs);
5597 	if (num_mem_reqs > WMI_MAX_MEM_REQS) {
5598 		ath10k_warn(ar, "requested memory chunks number (%d) exceeds the limit\n",
5599 			    num_mem_reqs);
5600 		return;
5601 	}
5602 
5603 	if (test_bit(WMI_SERVICE_PEER_CACHING, ar->wmi.svc_map)) {
5604 		if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
5605 			     ar->running_fw->fw_file.fw_features))
5606 			ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS_PFC +
5607 					       ar->max_num_vdevs;
5608 		else
5609 			ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS +
5610 					       ar->max_num_vdevs;
5611 
5612 		ar->max_num_peers = TARGET_10_4_NUM_QCACHE_PEERS_MAX +
5613 				    ar->max_num_vdevs;
5614 		ar->num_tids = ar->num_active_peers * 2;
5615 		ar->max_num_stations = TARGET_10_4_NUM_QCACHE_PEERS_MAX;
5616 	}
5617 
5618 	/* TODO: Adjust max peer count for cases like WMI_SERVICE_RATECTRL_CACHE
5619 	 * and WMI_SERVICE_IRAM_TIDS, etc.
5620 	 */
5621 
5622 	allocated = ath10k_wmi_is_host_mem_allocated(ar, arg.mem_reqs,
5623 						     num_mem_reqs);
5624 	if (allocated)
5625 		goto skip_mem_alloc;
5626 
5627 	/* Either this event is received during boot time or there is a change
5628 	 * in memory requirement from firmware when compared to last request.
5629 	 * Free any old memory and do a fresh allocation based on the current
5630 	 * memory requirement.
5631 	 */
5632 	ath10k_wmi_free_host_mem(ar);
5633 
5634 	for (i = 0; i < num_mem_reqs; ++i) {
5635 		req_id = __le32_to_cpu(arg.mem_reqs[i]->req_id);
5636 		num_units = __le32_to_cpu(arg.mem_reqs[i]->num_units);
5637 		unit_size = __le32_to_cpu(arg.mem_reqs[i]->unit_size);
5638 		num_unit_info = __le32_to_cpu(arg.mem_reqs[i]->num_unit_info);
5639 
5640 		if (num_unit_info & NUM_UNITS_IS_NUM_ACTIVE_PEERS) {
5641 			if (ar->num_active_peers)
5642 				num_units = ar->num_active_peers + 1;
5643 			else
5644 				num_units = ar->max_num_peers + 1;
5645 		} else if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) {
5646 			/* number of units to allocate is number of
5647 			 * peers, 1 extra for self peer on target
5648 			 * this needs to be tied, host and target
5649 			 * can get out of sync
5650 			 */
5651 			num_units = ar->max_num_peers + 1;
5652 		} else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) {
5653 			num_units = ar->max_num_vdevs + 1;
5654 		}
5655 
5656 		ath10k_dbg(ar, ATH10K_DBG_WMI,
5657 			   "wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n",
5658 			   req_id,
5659 			   __le32_to_cpu(arg.mem_reqs[i]->num_units),
5660 			   num_unit_info,
5661 			   unit_size,
5662 			   num_units);
5663 
5664 		ret = ath10k_wmi_alloc_host_mem(ar, req_id, num_units,
5665 						unit_size);
5666 		if (ret)
5667 			return;
5668 	}
5669 
5670 skip_mem_alloc:
5671 	ath10k_dbg(ar, ATH10K_DBG_WMI,
5672 		   "wmi event service ready min_tx_power 0x%08x max_tx_power 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_mcs 0x%08x sw_ver0 0x%08x sw_ver1 0x%08x fw_build 0x%08x phy_capab 0x%08x num_rf_chains 0x%08x eeprom_rd 0x%08x low_2ghz_chan %d high_2ghz_chan %d low_5ghz_chan %d high_5ghz_chan %d num_mem_reqs 0x%08x\n",
5673 		   __le32_to_cpu(arg.min_tx_power),
5674 		   __le32_to_cpu(arg.max_tx_power),
5675 		   __le32_to_cpu(arg.ht_cap),
5676 		   __le32_to_cpu(arg.vht_cap),
5677 		   __le32_to_cpu(arg.vht_supp_mcs),
5678 		   __le32_to_cpu(arg.sw_ver0),
5679 		   __le32_to_cpu(arg.sw_ver1),
5680 		   __le32_to_cpu(arg.fw_build),
5681 		   __le32_to_cpu(arg.phy_capab),
5682 		   __le32_to_cpu(arg.num_rf_chains),
5683 		   __le32_to_cpu(arg.eeprom_rd),
5684 		   __le32_to_cpu(arg.low_2ghz_chan),
5685 		   __le32_to_cpu(arg.high_2ghz_chan),
5686 		   __le32_to_cpu(arg.low_5ghz_chan),
5687 		   __le32_to_cpu(arg.high_5ghz_chan),
5688 		   __le32_to_cpu(arg.num_mem_reqs));
5689 
5690 	dev_kfree_skb(skb);
5691 	ar->svc_rdy_skb = NULL;
5692 	complete(&ar->wmi.service_ready);
5693 }
5694 
ath10k_wmi_event_service_ready(struct ath10k * ar,struct sk_buff * skb)5695 void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
5696 {
5697 	ar->svc_rdy_skb = skb;
5698 	queue_work(ar->workqueue_aux, &ar->svc_rdy_work);
5699 }
5700 
ath10k_wmi_op_pull_rdy_ev(struct ath10k * ar,struct sk_buff * skb,struct wmi_rdy_ev_arg * arg)5701 static int ath10k_wmi_op_pull_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
5702 				     struct wmi_rdy_ev_arg *arg)
5703 {
5704 	struct wmi_ready_event *ev = (void *)skb->data;
5705 
5706 	if (skb->len < sizeof(*ev))
5707 		return -EPROTO;
5708 
5709 	skb_pull(skb, sizeof(*ev));
5710 	arg->sw_version = ev->sw_version;
5711 	arg->abi_version = ev->abi_version;
5712 	arg->status = ev->status;
5713 	arg->mac_addr = ev->mac_addr.addr;
5714 
5715 	return 0;
5716 }
5717 
ath10k_wmi_op_pull_roam_ev(struct ath10k * ar,struct sk_buff * skb,struct wmi_roam_ev_arg * arg)5718 static int ath10k_wmi_op_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
5719 				      struct wmi_roam_ev_arg *arg)
5720 {
5721 	struct wmi_roam_ev *ev = (void *)skb->data;
5722 
5723 	if (skb->len < sizeof(*ev))
5724 		return -EPROTO;
5725 
5726 	skb_pull(skb, sizeof(*ev));
5727 	arg->vdev_id = ev->vdev_id;
5728 	arg->reason = ev->reason;
5729 
5730 	return 0;
5731 }
5732 
ath10k_wmi_op_pull_echo_ev(struct ath10k * ar,struct sk_buff * skb,struct wmi_echo_ev_arg * arg)5733 static int ath10k_wmi_op_pull_echo_ev(struct ath10k *ar,
5734 				      struct sk_buff *skb,
5735 				      struct wmi_echo_ev_arg *arg)
5736 {
5737 	struct wmi_echo_event *ev = (void *)skb->data;
5738 
5739 	arg->value = ev->value;
5740 
5741 	return 0;
5742 }
5743 
ath10k_wmi_event_ready(struct ath10k * ar,struct sk_buff * skb)5744 int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
5745 {
5746 	struct wmi_rdy_ev_arg arg = {};
5747 	int ret;
5748 
5749 	ret = ath10k_wmi_pull_rdy(ar, skb, &arg);
5750 	if (ret) {
5751 		ath10k_warn(ar, "failed to parse ready event: %d\n", ret);
5752 		return ret;
5753 	}
5754 
5755 	ath10k_dbg(ar, ATH10K_DBG_WMI,
5756 		   "wmi event ready sw_version 0x%08x abi_version %u mac_addr %pM status %d\n",
5757 		   __le32_to_cpu(arg.sw_version),
5758 		   __le32_to_cpu(arg.abi_version),
5759 		   arg.mac_addr,
5760 		   __le32_to_cpu(arg.status));
5761 
5762 	if (is_zero_ether_addr(ar->mac_addr))
5763 		ether_addr_copy(ar->mac_addr, arg.mac_addr);
5764 	complete(&ar->wmi.unified_ready);
5765 	return 0;
5766 }
5767 
ath10k_wmi_event_service_available(struct ath10k * ar,struct sk_buff * skb)5768 void ath10k_wmi_event_service_available(struct ath10k *ar, struct sk_buff *skb)
5769 {
5770 	int ret;
5771 	struct wmi_svc_avail_ev_arg arg = {};
5772 
5773 	ret = ath10k_wmi_pull_svc_avail(ar, skb, &arg);
5774 	if (ret) {
5775 		ath10k_warn(ar, "failed to parse service available event: %d\n",
5776 			    ret);
5777 	}
5778 
5779 	/*
5780 	 * Initialization of "arg.service_map_ext_valid" to ZERO is necessary
5781 	 * for the below logic to work.
5782 	 */
5783 	if (arg.service_map_ext_valid)
5784 		ath10k_wmi_map_svc_ext(ar, arg.service_map_ext, ar->wmi.svc_map,
5785 				       __le32_to_cpu(arg.service_map_ext_len));
5786 }
5787 
ath10k_wmi_event_temperature(struct ath10k * ar,struct sk_buff * skb)5788 static int ath10k_wmi_event_temperature(struct ath10k *ar, struct sk_buff *skb)
5789 {
5790 	const struct wmi_pdev_temperature_event *ev;
5791 
5792 	ev = (struct wmi_pdev_temperature_event *)skb->data;
5793 	if (WARN_ON(skb->len < sizeof(*ev)))
5794 		return -EPROTO;
5795 
5796 	ath10k_thermal_event_temperature(ar, __le32_to_cpu(ev->temperature));
5797 	return 0;
5798 }
5799 
ath10k_wmi_event_pdev_bss_chan_info(struct ath10k * ar,struct sk_buff * skb)5800 static int ath10k_wmi_event_pdev_bss_chan_info(struct ath10k *ar,
5801 					       struct sk_buff *skb)
5802 {
5803 	struct wmi_pdev_bss_chan_info_event *ev;
5804 	struct survey_info *survey;
5805 	u64 busy, total, tx, rx, rx_bss;
5806 	u32 freq, noise_floor;
5807 	u32 cc_freq_hz = ar->hw_params.channel_counters_freq_hz;
5808 	int idx;
5809 
5810 	ev = (struct wmi_pdev_bss_chan_info_event *)skb->data;
5811 	if (WARN_ON(skb->len < sizeof(*ev)))
5812 		return -EPROTO;
5813 
5814 	freq        = __le32_to_cpu(ev->freq);
5815 	noise_floor = __le32_to_cpu(ev->noise_floor);
5816 	busy        = __le64_to_cpu(ev->cycle_busy);
5817 	total       = __le64_to_cpu(ev->cycle_total);
5818 	tx          = __le64_to_cpu(ev->cycle_tx);
5819 	rx          = __le64_to_cpu(ev->cycle_rx);
5820 	rx_bss      = __le64_to_cpu(ev->cycle_rx_bss);
5821 
5822 	ath10k_dbg(ar, ATH10K_DBG_WMI,
5823 		   "wmi event pdev bss chan info:\n freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n",
5824 		   freq, noise_floor, busy, total, tx, rx, rx_bss);
5825 
5826 	spin_lock_bh(&ar->data_lock);
5827 	idx = freq_to_idx(ar, freq);
5828 	if (idx >= ARRAY_SIZE(ar->survey)) {
5829 		ath10k_warn(ar, "bss chan info: invalid frequency %d (idx %d out of bounds)\n",
5830 			    freq, idx);
5831 		goto exit;
5832 	}
5833 
5834 	survey = &ar->survey[idx];
5835 
5836 	survey->noise     = noise_floor;
5837 	survey->time      = div_u64(total, cc_freq_hz);
5838 	survey->time_busy = div_u64(busy, cc_freq_hz);
5839 	survey->time_rx   = div_u64(rx_bss, cc_freq_hz);
5840 	survey->time_tx   = div_u64(tx, cc_freq_hz);
5841 	survey->filled   |= (SURVEY_INFO_NOISE_DBM |
5842 			     SURVEY_INFO_TIME |
5843 			     SURVEY_INFO_TIME_BUSY |
5844 			     SURVEY_INFO_TIME_RX |
5845 			     SURVEY_INFO_TIME_TX);
5846 exit:
5847 	spin_unlock_bh(&ar->data_lock);
5848 	complete(&ar->bss_survey_done);
5849 	return 0;
5850 }
5851 
ath10k_wmi_queue_set_coverage_class_work(struct ath10k * ar)5852 static inline void ath10k_wmi_queue_set_coverage_class_work(struct ath10k *ar)
5853 {
5854 	if (ar->hw_params.hw_ops->set_coverage_class) {
5855 		spin_lock_bh(&ar->data_lock);
5856 
5857 		/* This call only ensures that the modified coverage class
5858 		 * persists in case the firmware sets the registers back to
5859 		 * their default value. So calling it is only necessary if the
5860 		 * coverage class has a non-zero value.
5861 		 */
5862 		if (ar->fw_coverage.coverage_class)
5863 			queue_work(ar->workqueue, &ar->set_coverage_class_work);
5864 
5865 		spin_unlock_bh(&ar->data_lock);
5866 	}
5867 }
5868 
ath10k_wmi_op_rx(struct ath10k * ar,struct sk_buff * skb)5869 static void ath10k_wmi_op_rx(struct ath10k *ar, struct sk_buff *skb)
5870 {
5871 	struct wmi_cmd_hdr *cmd_hdr;
5872 	enum wmi_event_id id;
5873 
5874 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
5875 	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
5876 
5877 	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
5878 		goto out;
5879 
5880 	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
5881 
5882 	switch (id) {
5883 	case WMI_MGMT_RX_EVENTID:
5884 		ath10k_wmi_event_mgmt_rx(ar, skb);
5885 		/* mgmt_rx() owns the skb now! */
5886 		return;
5887 	case WMI_SCAN_EVENTID:
5888 		ath10k_wmi_event_scan(ar, skb);
5889 		ath10k_wmi_queue_set_coverage_class_work(ar);
5890 		break;
5891 	case WMI_CHAN_INFO_EVENTID:
5892 		ath10k_wmi_event_chan_info(ar, skb);
5893 		break;
5894 	case WMI_ECHO_EVENTID:
5895 		ath10k_wmi_event_echo(ar, skb);
5896 		break;
5897 	case WMI_DEBUG_MESG_EVENTID:
5898 		ath10k_wmi_event_debug_mesg(ar, skb);
5899 		ath10k_wmi_queue_set_coverage_class_work(ar);
5900 		break;
5901 	case WMI_UPDATE_STATS_EVENTID:
5902 		ath10k_wmi_event_update_stats(ar, skb);
5903 		break;
5904 	case WMI_VDEV_START_RESP_EVENTID:
5905 		ath10k_wmi_event_vdev_start_resp(ar, skb);
5906 		ath10k_wmi_queue_set_coverage_class_work(ar);
5907 		break;
5908 	case WMI_VDEV_STOPPED_EVENTID:
5909 		ath10k_wmi_event_vdev_stopped(ar, skb);
5910 		ath10k_wmi_queue_set_coverage_class_work(ar);
5911 		break;
5912 	case WMI_PEER_STA_KICKOUT_EVENTID:
5913 		ath10k_wmi_event_peer_sta_kickout(ar, skb);
5914 		break;
5915 	case WMI_HOST_SWBA_EVENTID:
5916 		ath10k_wmi_event_host_swba(ar, skb);
5917 		break;
5918 	case WMI_TBTTOFFSET_UPDATE_EVENTID:
5919 		ath10k_wmi_event_tbttoffset_update(ar, skb);
5920 		break;
5921 	case WMI_PHYERR_EVENTID:
5922 		ath10k_wmi_event_phyerr(ar, skb);
5923 		break;
5924 	case WMI_ROAM_EVENTID:
5925 		ath10k_wmi_event_roam(ar, skb);
5926 		ath10k_wmi_queue_set_coverage_class_work(ar);
5927 		break;
5928 	case WMI_PROFILE_MATCH:
5929 		ath10k_wmi_event_profile_match(ar, skb);
5930 		break;
5931 	case WMI_DEBUG_PRINT_EVENTID:
5932 		ath10k_wmi_event_debug_print(ar, skb);
5933 		ath10k_wmi_queue_set_coverage_class_work(ar);
5934 		break;
5935 	case WMI_PDEV_QVIT_EVENTID:
5936 		ath10k_wmi_event_pdev_qvit(ar, skb);
5937 		break;
5938 	case WMI_WLAN_PROFILE_DATA_EVENTID:
5939 		ath10k_wmi_event_wlan_profile_data(ar, skb);
5940 		break;
5941 	case WMI_RTT_MEASUREMENT_REPORT_EVENTID:
5942 		ath10k_wmi_event_rtt_measurement_report(ar, skb);
5943 		break;
5944 	case WMI_TSF_MEASUREMENT_REPORT_EVENTID:
5945 		ath10k_wmi_event_tsf_measurement_report(ar, skb);
5946 		break;
5947 	case WMI_RTT_ERROR_REPORT_EVENTID:
5948 		ath10k_wmi_event_rtt_error_report(ar, skb);
5949 		break;
5950 	case WMI_WOW_WAKEUP_HOST_EVENTID:
5951 		ath10k_wmi_event_wow_wakeup_host(ar, skb);
5952 		break;
5953 	case WMI_DCS_INTERFERENCE_EVENTID:
5954 		ath10k_wmi_event_dcs_interference(ar, skb);
5955 		break;
5956 	case WMI_PDEV_TPC_CONFIG_EVENTID:
5957 		ath10k_wmi_event_pdev_tpc_config(ar, skb);
5958 		break;
5959 	case WMI_PDEV_FTM_INTG_EVENTID:
5960 		ath10k_wmi_event_pdev_ftm_intg(ar, skb);
5961 		break;
5962 	case WMI_GTK_OFFLOAD_STATUS_EVENTID:
5963 		ath10k_wmi_event_gtk_offload_status(ar, skb);
5964 		break;
5965 	case WMI_GTK_REKEY_FAIL_EVENTID:
5966 		ath10k_wmi_event_gtk_rekey_fail(ar, skb);
5967 		break;
5968 	case WMI_TX_DELBA_COMPLETE_EVENTID:
5969 		ath10k_wmi_event_delba_complete(ar, skb);
5970 		break;
5971 	case WMI_TX_ADDBA_COMPLETE_EVENTID:
5972 		ath10k_wmi_event_addba_complete(ar, skb);
5973 		break;
5974 	case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
5975 		ath10k_wmi_event_vdev_install_key_complete(ar, skb);
5976 		break;
5977 	case WMI_SERVICE_READY_EVENTID:
5978 		ath10k_wmi_event_service_ready(ar, skb);
5979 		return;
5980 	case WMI_READY_EVENTID:
5981 		ath10k_wmi_event_ready(ar, skb);
5982 		ath10k_wmi_queue_set_coverage_class_work(ar);
5983 		break;
5984 	case WMI_SERVICE_AVAILABLE_EVENTID:
5985 		ath10k_wmi_event_service_available(ar, skb);
5986 		break;
5987 	default:
5988 		ath10k_warn(ar, "Unknown eventid: %d\n", id);
5989 		break;
5990 	}
5991 
5992 out:
5993 	dev_kfree_skb(skb);
5994 }
5995 
ath10k_wmi_10_1_op_rx(struct ath10k * ar,struct sk_buff * skb)5996 static void ath10k_wmi_10_1_op_rx(struct ath10k *ar, struct sk_buff *skb)
5997 {
5998 	struct wmi_cmd_hdr *cmd_hdr;
5999 	enum wmi_10x_event_id id;
6000 	bool consumed;
6001 
6002 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
6003 	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
6004 
6005 	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
6006 		goto out;
6007 
6008 	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
6009 
6010 	consumed = ath10k_tm_event_wmi(ar, id, skb);
6011 
6012 	/* Ready event must be handled normally also in UTF mode so that we
6013 	 * know the UTF firmware has booted, others we are just bypass WMI
6014 	 * events to testmode.
6015 	 */
6016 	if (consumed && id != WMI_10X_READY_EVENTID) {
6017 		ath10k_dbg(ar, ATH10K_DBG_WMI,
6018 			   "wmi testmode consumed 0x%x\n", id);
6019 		goto out;
6020 	}
6021 
6022 	switch (id) {
6023 	case WMI_10X_MGMT_RX_EVENTID:
6024 		ath10k_wmi_event_mgmt_rx(ar, skb);
6025 		/* mgmt_rx() owns the skb now! */
6026 		return;
6027 	case WMI_10X_SCAN_EVENTID:
6028 		ath10k_wmi_event_scan(ar, skb);
6029 		ath10k_wmi_queue_set_coverage_class_work(ar);
6030 		break;
6031 	case WMI_10X_CHAN_INFO_EVENTID:
6032 		ath10k_wmi_event_chan_info(ar, skb);
6033 		break;
6034 	case WMI_10X_ECHO_EVENTID:
6035 		ath10k_wmi_event_echo(ar, skb);
6036 		break;
6037 	case WMI_10X_DEBUG_MESG_EVENTID:
6038 		ath10k_wmi_event_debug_mesg(ar, skb);
6039 		ath10k_wmi_queue_set_coverage_class_work(ar);
6040 		break;
6041 	case WMI_10X_UPDATE_STATS_EVENTID:
6042 		ath10k_wmi_event_update_stats(ar, skb);
6043 		break;
6044 	case WMI_10X_VDEV_START_RESP_EVENTID:
6045 		ath10k_wmi_event_vdev_start_resp(ar, skb);
6046 		ath10k_wmi_queue_set_coverage_class_work(ar);
6047 		break;
6048 	case WMI_10X_VDEV_STOPPED_EVENTID:
6049 		ath10k_wmi_event_vdev_stopped(ar, skb);
6050 		ath10k_wmi_queue_set_coverage_class_work(ar);
6051 		break;
6052 	case WMI_10X_PEER_STA_KICKOUT_EVENTID:
6053 		ath10k_wmi_event_peer_sta_kickout(ar, skb);
6054 		break;
6055 	case WMI_10X_HOST_SWBA_EVENTID:
6056 		ath10k_wmi_event_host_swba(ar, skb);
6057 		break;
6058 	case WMI_10X_TBTTOFFSET_UPDATE_EVENTID:
6059 		ath10k_wmi_event_tbttoffset_update(ar, skb);
6060 		break;
6061 	case WMI_10X_PHYERR_EVENTID:
6062 		ath10k_wmi_event_phyerr(ar, skb);
6063 		break;
6064 	case WMI_10X_ROAM_EVENTID:
6065 		ath10k_wmi_event_roam(ar, skb);
6066 		ath10k_wmi_queue_set_coverage_class_work(ar);
6067 		break;
6068 	case WMI_10X_PROFILE_MATCH:
6069 		ath10k_wmi_event_profile_match(ar, skb);
6070 		break;
6071 	case WMI_10X_DEBUG_PRINT_EVENTID:
6072 		ath10k_wmi_event_debug_print(ar, skb);
6073 		ath10k_wmi_queue_set_coverage_class_work(ar);
6074 		break;
6075 	case WMI_10X_PDEV_QVIT_EVENTID:
6076 		ath10k_wmi_event_pdev_qvit(ar, skb);
6077 		break;
6078 	case WMI_10X_WLAN_PROFILE_DATA_EVENTID:
6079 		ath10k_wmi_event_wlan_profile_data(ar, skb);
6080 		break;
6081 	case WMI_10X_RTT_MEASUREMENT_REPORT_EVENTID:
6082 		ath10k_wmi_event_rtt_measurement_report(ar, skb);
6083 		break;
6084 	case WMI_10X_TSF_MEASUREMENT_REPORT_EVENTID:
6085 		ath10k_wmi_event_tsf_measurement_report(ar, skb);
6086 		break;
6087 	case WMI_10X_RTT_ERROR_REPORT_EVENTID:
6088 		ath10k_wmi_event_rtt_error_report(ar, skb);
6089 		break;
6090 	case WMI_10X_WOW_WAKEUP_HOST_EVENTID:
6091 		ath10k_wmi_event_wow_wakeup_host(ar, skb);
6092 		break;
6093 	case WMI_10X_DCS_INTERFERENCE_EVENTID:
6094 		ath10k_wmi_event_dcs_interference(ar, skb);
6095 		break;
6096 	case WMI_10X_PDEV_TPC_CONFIG_EVENTID:
6097 		ath10k_wmi_event_pdev_tpc_config(ar, skb);
6098 		break;
6099 	case WMI_10X_INST_RSSI_STATS_EVENTID:
6100 		ath10k_wmi_event_inst_rssi_stats(ar, skb);
6101 		break;
6102 	case WMI_10X_VDEV_STANDBY_REQ_EVENTID:
6103 		ath10k_wmi_event_vdev_standby_req(ar, skb);
6104 		break;
6105 	case WMI_10X_VDEV_RESUME_REQ_EVENTID:
6106 		ath10k_wmi_event_vdev_resume_req(ar, skb);
6107 		break;
6108 	case WMI_10X_SERVICE_READY_EVENTID:
6109 		ath10k_wmi_event_service_ready(ar, skb);
6110 		return;
6111 	case WMI_10X_READY_EVENTID:
6112 		ath10k_wmi_event_ready(ar, skb);
6113 		ath10k_wmi_queue_set_coverage_class_work(ar);
6114 		break;
6115 	case WMI_10X_PDEV_UTF_EVENTID:
6116 		/* ignore utf events */
6117 		break;
6118 	default:
6119 		ath10k_warn(ar, "Unknown eventid: %d\n", id);
6120 		break;
6121 	}
6122 
6123 out:
6124 	dev_kfree_skb(skb);
6125 }
6126 
ath10k_wmi_10_2_op_rx(struct ath10k * ar,struct sk_buff * skb)6127 static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
6128 {
6129 	struct wmi_cmd_hdr *cmd_hdr;
6130 	enum wmi_10_2_event_id id;
6131 	bool consumed;
6132 
6133 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
6134 	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
6135 
6136 	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
6137 		goto out;
6138 
6139 	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
6140 
6141 	consumed = ath10k_tm_event_wmi(ar, id, skb);
6142 
6143 	/* Ready event must be handled normally also in UTF mode so that we
6144 	 * know the UTF firmware has booted, others we are just bypass WMI
6145 	 * events to testmode.
6146 	 */
6147 	if (consumed && id != WMI_10_2_READY_EVENTID) {
6148 		ath10k_dbg(ar, ATH10K_DBG_WMI,
6149 			   "wmi testmode consumed 0x%x\n", id);
6150 		goto out;
6151 	}
6152 
6153 	switch (id) {
6154 	case WMI_10_2_MGMT_RX_EVENTID:
6155 		ath10k_wmi_event_mgmt_rx(ar, skb);
6156 		/* mgmt_rx() owns the skb now! */
6157 		return;
6158 	case WMI_10_2_SCAN_EVENTID:
6159 		ath10k_wmi_event_scan(ar, skb);
6160 		ath10k_wmi_queue_set_coverage_class_work(ar);
6161 		break;
6162 	case WMI_10_2_CHAN_INFO_EVENTID:
6163 		ath10k_wmi_event_chan_info(ar, skb);
6164 		break;
6165 	case WMI_10_2_ECHO_EVENTID:
6166 		ath10k_wmi_event_echo(ar, skb);
6167 		break;
6168 	case WMI_10_2_DEBUG_MESG_EVENTID:
6169 		ath10k_wmi_event_debug_mesg(ar, skb);
6170 		ath10k_wmi_queue_set_coverage_class_work(ar);
6171 		break;
6172 	case WMI_10_2_UPDATE_STATS_EVENTID:
6173 		ath10k_wmi_event_update_stats(ar, skb);
6174 		break;
6175 	case WMI_10_2_VDEV_START_RESP_EVENTID:
6176 		ath10k_wmi_event_vdev_start_resp(ar, skb);
6177 		ath10k_wmi_queue_set_coverage_class_work(ar);
6178 		break;
6179 	case WMI_10_2_VDEV_STOPPED_EVENTID:
6180 		ath10k_wmi_event_vdev_stopped(ar, skb);
6181 		ath10k_wmi_queue_set_coverage_class_work(ar);
6182 		break;
6183 	case WMI_10_2_PEER_STA_KICKOUT_EVENTID:
6184 		ath10k_wmi_event_peer_sta_kickout(ar, skb);
6185 		break;
6186 	case WMI_10_2_HOST_SWBA_EVENTID:
6187 		ath10k_wmi_event_host_swba(ar, skb);
6188 		break;
6189 	case WMI_10_2_TBTTOFFSET_UPDATE_EVENTID:
6190 		ath10k_wmi_event_tbttoffset_update(ar, skb);
6191 		break;
6192 	case WMI_10_2_PHYERR_EVENTID:
6193 		ath10k_wmi_event_phyerr(ar, skb);
6194 		break;
6195 	case WMI_10_2_ROAM_EVENTID:
6196 		ath10k_wmi_event_roam(ar, skb);
6197 		ath10k_wmi_queue_set_coverage_class_work(ar);
6198 		break;
6199 	case WMI_10_2_PROFILE_MATCH:
6200 		ath10k_wmi_event_profile_match(ar, skb);
6201 		break;
6202 	case WMI_10_2_DEBUG_PRINT_EVENTID:
6203 		ath10k_wmi_event_debug_print(ar, skb);
6204 		ath10k_wmi_queue_set_coverage_class_work(ar);
6205 		break;
6206 	case WMI_10_2_PDEV_QVIT_EVENTID:
6207 		ath10k_wmi_event_pdev_qvit(ar, skb);
6208 		break;
6209 	case WMI_10_2_WLAN_PROFILE_DATA_EVENTID:
6210 		ath10k_wmi_event_wlan_profile_data(ar, skb);
6211 		break;
6212 	case WMI_10_2_RTT_MEASUREMENT_REPORT_EVENTID:
6213 		ath10k_wmi_event_rtt_measurement_report(ar, skb);
6214 		break;
6215 	case WMI_10_2_TSF_MEASUREMENT_REPORT_EVENTID:
6216 		ath10k_wmi_event_tsf_measurement_report(ar, skb);
6217 		break;
6218 	case WMI_10_2_RTT_ERROR_REPORT_EVENTID:
6219 		ath10k_wmi_event_rtt_error_report(ar, skb);
6220 		break;
6221 	case WMI_10_2_WOW_WAKEUP_HOST_EVENTID:
6222 		ath10k_wmi_event_wow_wakeup_host(ar, skb);
6223 		break;
6224 	case WMI_10_2_DCS_INTERFERENCE_EVENTID:
6225 		ath10k_wmi_event_dcs_interference(ar, skb);
6226 		break;
6227 	case WMI_10_2_PDEV_TPC_CONFIG_EVENTID:
6228 		ath10k_wmi_event_pdev_tpc_config(ar, skb);
6229 		break;
6230 	case WMI_10_2_INST_RSSI_STATS_EVENTID:
6231 		ath10k_wmi_event_inst_rssi_stats(ar, skb);
6232 		break;
6233 	case WMI_10_2_VDEV_STANDBY_REQ_EVENTID:
6234 		ath10k_wmi_event_vdev_standby_req(ar, skb);
6235 		ath10k_wmi_queue_set_coverage_class_work(ar);
6236 		break;
6237 	case WMI_10_2_VDEV_RESUME_REQ_EVENTID:
6238 		ath10k_wmi_event_vdev_resume_req(ar, skb);
6239 		ath10k_wmi_queue_set_coverage_class_work(ar);
6240 		break;
6241 	case WMI_10_2_SERVICE_READY_EVENTID:
6242 		ath10k_wmi_event_service_ready(ar, skb);
6243 		return;
6244 	case WMI_10_2_READY_EVENTID:
6245 		ath10k_wmi_event_ready(ar, skb);
6246 		ath10k_wmi_queue_set_coverage_class_work(ar);
6247 		break;
6248 	case WMI_10_2_PDEV_TEMPERATURE_EVENTID:
6249 		ath10k_wmi_event_temperature(ar, skb);
6250 		break;
6251 	case WMI_10_2_PDEV_BSS_CHAN_INFO_EVENTID:
6252 		ath10k_wmi_event_pdev_bss_chan_info(ar, skb);
6253 		break;
6254 	case WMI_10_2_RTT_KEEPALIVE_EVENTID:
6255 	case WMI_10_2_GPIO_INPUT_EVENTID:
6256 	case WMI_10_2_PEER_RATECODE_LIST_EVENTID:
6257 	case WMI_10_2_GENERIC_BUFFER_EVENTID:
6258 	case WMI_10_2_MCAST_BUF_RELEASE_EVENTID:
6259 	case WMI_10_2_MCAST_LIST_AGEOUT_EVENTID:
6260 	case WMI_10_2_WDS_PEER_EVENTID:
6261 		ath10k_dbg(ar, ATH10K_DBG_WMI,
6262 			   "received event id %d not implemented\n", id);
6263 		break;
6264 	case WMI_10_2_PEER_STA_PS_STATECHG_EVENTID:
6265 		ath10k_wmi_event_peer_sta_ps_state_chg(ar, skb);
6266 		break;
6267 	default:
6268 		ath10k_warn(ar, "Unknown eventid: %d\n", id);
6269 		break;
6270 	}
6271 
6272 out:
6273 	dev_kfree_skb(skb);
6274 }
6275 
ath10k_wmi_10_4_op_rx(struct ath10k * ar,struct sk_buff * skb)6276 static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
6277 {
6278 	struct wmi_cmd_hdr *cmd_hdr;
6279 	enum wmi_10_4_event_id id;
6280 	bool consumed;
6281 
6282 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
6283 	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
6284 
6285 	if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr)))
6286 		goto out;
6287 
6288 	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
6289 
6290 	consumed = ath10k_tm_event_wmi(ar, id, skb);
6291 
6292 	/* Ready event must be handled normally also in UTF mode so that we
6293 	 * know the UTF firmware has booted, others we are just bypass WMI
6294 	 * events to testmode.
6295 	 */
6296 	if (consumed && id != WMI_10_4_READY_EVENTID) {
6297 		ath10k_dbg(ar, ATH10K_DBG_WMI,
6298 			   "wmi testmode consumed 0x%x\n", id);
6299 		goto out;
6300 	}
6301 
6302 	switch (id) {
6303 	case WMI_10_4_MGMT_RX_EVENTID:
6304 		ath10k_wmi_event_mgmt_rx(ar, skb);
6305 		/* mgmt_rx() owns the skb now! */
6306 		return;
6307 	case WMI_10_4_ECHO_EVENTID:
6308 		ath10k_wmi_event_echo(ar, skb);
6309 		break;
6310 	case WMI_10_4_DEBUG_MESG_EVENTID:
6311 		ath10k_wmi_event_debug_mesg(ar, skb);
6312 		ath10k_wmi_queue_set_coverage_class_work(ar);
6313 		break;
6314 	case WMI_10_4_SERVICE_READY_EVENTID:
6315 		ath10k_wmi_event_service_ready(ar, skb);
6316 		return;
6317 	case WMI_10_4_SCAN_EVENTID:
6318 		ath10k_wmi_event_scan(ar, skb);
6319 		ath10k_wmi_queue_set_coverage_class_work(ar);
6320 		break;
6321 	case WMI_10_4_CHAN_INFO_EVENTID:
6322 		ath10k_wmi_event_chan_info(ar, skb);
6323 		break;
6324 	case WMI_10_4_PHYERR_EVENTID:
6325 		ath10k_wmi_event_phyerr(ar, skb);
6326 		break;
6327 	case WMI_10_4_READY_EVENTID:
6328 		ath10k_wmi_event_ready(ar, skb);
6329 		ath10k_wmi_queue_set_coverage_class_work(ar);
6330 		break;
6331 	case WMI_10_4_PEER_STA_KICKOUT_EVENTID:
6332 		ath10k_wmi_event_peer_sta_kickout(ar, skb);
6333 		break;
6334 	case WMI_10_4_ROAM_EVENTID:
6335 		ath10k_wmi_event_roam(ar, skb);
6336 		ath10k_wmi_queue_set_coverage_class_work(ar);
6337 		break;
6338 	case WMI_10_4_HOST_SWBA_EVENTID:
6339 		ath10k_wmi_event_host_swba(ar, skb);
6340 		break;
6341 	case WMI_10_4_TBTTOFFSET_UPDATE_EVENTID:
6342 		ath10k_wmi_event_tbttoffset_update(ar, skb);
6343 		break;
6344 	case WMI_10_4_DEBUG_PRINT_EVENTID:
6345 		ath10k_wmi_event_debug_print(ar, skb);
6346 		ath10k_wmi_queue_set_coverage_class_work(ar);
6347 		break;
6348 	case WMI_10_4_VDEV_START_RESP_EVENTID:
6349 		ath10k_wmi_event_vdev_start_resp(ar, skb);
6350 		ath10k_wmi_queue_set_coverage_class_work(ar);
6351 		break;
6352 	case WMI_10_4_VDEV_STOPPED_EVENTID:
6353 		ath10k_wmi_event_vdev_stopped(ar, skb);
6354 		ath10k_wmi_queue_set_coverage_class_work(ar);
6355 		break;
6356 	case WMI_10_4_WOW_WAKEUP_HOST_EVENTID:
6357 	case WMI_10_4_PEER_RATECODE_LIST_EVENTID:
6358 	case WMI_10_4_WDS_PEER_EVENTID:
6359 	case WMI_10_4_DEBUG_FATAL_CONDITION_EVENTID:
6360 		ath10k_dbg(ar, ATH10K_DBG_WMI,
6361 			   "received event id %d not implemented\n", id);
6362 		break;
6363 	case WMI_10_4_UPDATE_STATS_EVENTID:
6364 		ath10k_wmi_event_update_stats(ar, skb);
6365 		break;
6366 	case WMI_10_4_PDEV_TEMPERATURE_EVENTID:
6367 		ath10k_wmi_event_temperature(ar, skb);
6368 		break;
6369 	case WMI_10_4_PDEV_BSS_CHAN_INFO_EVENTID:
6370 		ath10k_wmi_event_pdev_bss_chan_info(ar, skb);
6371 		break;
6372 	case WMI_10_4_PDEV_TPC_CONFIG_EVENTID:
6373 		ath10k_wmi_event_pdev_tpc_config(ar, skb);
6374 		break;
6375 	case WMI_10_4_TDLS_PEER_EVENTID:
6376 		ath10k_wmi_handle_tdls_peer_event(ar, skb);
6377 		break;
6378 	case WMI_10_4_PDEV_TPC_TABLE_EVENTID:
6379 		ath10k_wmi_event_tpc_final_table(ar, skb);
6380 		break;
6381 	case WMI_10_4_DFS_STATUS_CHECK_EVENTID:
6382 		ath10k_wmi_event_dfs_status_check(ar, skb);
6383 		break;
6384 	case WMI_10_4_PEER_STA_PS_STATECHG_EVENTID:
6385 		ath10k_wmi_event_peer_sta_ps_state_chg(ar, skb);
6386 		break;
6387 	default:
6388 		ath10k_warn(ar, "Unknown eventid: %d\n", id);
6389 		break;
6390 	}
6391 
6392 out:
6393 	dev_kfree_skb(skb);
6394 }
6395 
ath10k_wmi_process_rx(struct ath10k * ar,struct sk_buff * skb)6396 static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
6397 {
6398 	int ret;
6399 
6400 	ret = ath10k_wmi_rx(ar, skb);
6401 	if (ret)
6402 		ath10k_warn(ar, "failed to process wmi rx: %d\n", ret);
6403 }
6404 
ath10k_wmi_connect(struct ath10k * ar)6405 int ath10k_wmi_connect(struct ath10k *ar)
6406 {
6407 	int status;
6408 	struct ath10k_htc_svc_conn_req conn_req;
6409 	struct ath10k_htc_svc_conn_resp conn_resp;
6410 
6411 	memset(&ar->wmi.svc_map, 0, sizeof(ar->wmi.svc_map));
6412 
6413 	memset(&conn_req, 0, sizeof(conn_req));
6414 	memset(&conn_resp, 0, sizeof(conn_resp));
6415 
6416 	/* these fields are the same for all service endpoints */
6417 	conn_req.ep_ops.ep_tx_complete = ath10k_wmi_htc_tx_complete;
6418 	conn_req.ep_ops.ep_rx_complete = ath10k_wmi_process_rx;
6419 	conn_req.ep_ops.ep_tx_credits = ath10k_wmi_op_ep_tx_credits;
6420 
6421 	/* connect to control service */
6422 	conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
6423 
6424 	status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
6425 	if (status) {
6426 		ath10k_warn(ar, "failed to connect to WMI CONTROL service status: %d\n",
6427 			    status);
6428 		return status;
6429 	}
6430 
6431 	ar->wmi.eid = conn_resp.eid;
6432 	return 0;
6433 }
6434 
6435 static struct sk_buff *
ath10k_wmi_op_gen_pdev_set_base_macaddr(struct ath10k * ar,const u8 macaddr[ETH_ALEN])6436 ath10k_wmi_op_gen_pdev_set_base_macaddr(struct ath10k *ar,
6437 					const u8 macaddr[ETH_ALEN])
6438 {
6439 	struct wmi_pdev_set_base_macaddr_cmd *cmd;
6440 	struct sk_buff *skb;
6441 
6442 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
6443 	if (!skb)
6444 		return ERR_PTR(-ENOMEM);
6445 
6446 	cmd = (struct wmi_pdev_set_base_macaddr_cmd *)skb->data;
6447 	ether_addr_copy(cmd->mac_addr.addr, macaddr);
6448 
6449 	ath10k_dbg(ar, ATH10K_DBG_WMI,
6450 		   "wmi pdev basemac %pM\n", macaddr);
6451 	return skb;
6452 }
6453 
6454 static struct sk_buff *
ath10k_wmi_op_gen_pdev_set_rd(struct ath10k * ar,u16 rd,u16 rd2g,u16 rd5g,u16 ctl2g,u16 ctl5g,enum wmi_dfs_region dfs_reg)6455 ath10k_wmi_op_gen_pdev_set_rd(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
6456 			      u16 ctl2g, u16 ctl5g,
6457 			      enum wmi_dfs_region dfs_reg)
6458 {
6459 	struct wmi_pdev_set_regdomain_cmd *cmd;
6460 	struct sk_buff *skb;
6461 
6462 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
6463 	if (!skb)
6464 		return ERR_PTR(-ENOMEM);
6465 
6466 	cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
6467 	cmd->reg_domain = __cpu_to_le32(rd);
6468 	cmd->reg_domain_2G = __cpu_to_le32(rd2g);
6469 	cmd->reg_domain_5G = __cpu_to_le32(rd5g);
6470 	cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
6471 	cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
6472 
6473 	ath10k_dbg(ar, ATH10K_DBG_WMI,
6474 		   "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n",
6475 		   rd, rd2g, rd5g, ctl2g, ctl5g);
6476 	return skb;
6477 }
6478 
6479 static struct sk_buff *
ath10k_wmi_10x_op_gen_pdev_set_rd(struct ath10k * ar,u16 rd,u16 rd2g,u16 rd5g,u16 ctl2g,u16 ctl5g,enum wmi_dfs_region dfs_reg)6480 ath10k_wmi_10x_op_gen_pdev_set_rd(struct ath10k *ar, u16 rd, u16 rd2g, u16
6481 				  rd5g, u16 ctl2g, u16 ctl5g,
6482 				  enum wmi_dfs_region dfs_reg)
6483 {
6484 	struct wmi_pdev_set_regdomain_cmd_10x *cmd;
6485 	struct sk_buff *skb;
6486 
6487 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
6488 	if (!skb)
6489 		return ERR_PTR(-ENOMEM);
6490 
6491 	cmd = (struct wmi_pdev_set_regdomain_cmd_10x *)skb->data;
6492 	cmd->reg_domain = __cpu_to_le32(rd);
6493 	cmd->reg_domain_2G = __cpu_to_le32(rd2g);
6494 	cmd->reg_domain_5G = __cpu_to_le32(rd5g);
6495 	cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
6496 	cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
6497 	cmd->dfs_domain = __cpu_to_le32(dfs_reg);
6498 
6499 	ath10k_dbg(ar, ATH10K_DBG_WMI,
6500 		   "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x dfs_region %x\n",
6501 		   rd, rd2g, rd5g, ctl2g, ctl5g, dfs_reg);
6502 	return skb;
6503 }
6504 
6505 static struct sk_buff *
ath10k_wmi_op_gen_pdev_suspend(struct ath10k * ar,u32 suspend_opt)6506 ath10k_wmi_op_gen_pdev_suspend(struct ath10k *ar, u32 suspend_opt)
6507 {
6508 	struct wmi_pdev_suspend_cmd *cmd;
6509 	struct sk_buff *skb;
6510 
6511 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
6512 	if (!skb)
6513 		return ERR_PTR(-ENOMEM);
6514 
6515 	cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
6516 	cmd->suspend_opt = __cpu_to_le32(suspend_opt);
6517 
6518 	return skb;
6519 }
6520 
6521 static struct sk_buff *
ath10k_wmi_op_gen_pdev_resume(struct ath10k * ar)6522 ath10k_wmi_op_gen_pdev_resume(struct ath10k *ar)
6523 {
6524 	struct sk_buff *skb;
6525 
6526 	skb = ath10k_wmi_alloc_skb(ar, 0);
6527 	if (!skb)
6528 		return ERR_PTR(-ENOMEM);
6529 
6530 	return skb;
6531 }
6532 
6533 static struct sk_buff *
ath10k_wmi_op_gen_pdev_set_param(struct ath10k * ar,u32 id,u32 value)6534 ath10k_wmi_op_gen_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
6535 {
6536 	struct wmi_pdev_set_param_cmd *cmd;
6537 	struct sk_buff *skb;
6538 
6539 	if (id == WMI_PDEV_PARAM_UNSUPPORTED) {
6540 		ath10k_warn(ar, "pdev param %d not supported by firmware\n",
6541 			    id);
6542 		return ERR_PTR(-EOPNOTSUPP);
6543 	}
6544 
6545 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
6546 	if (!skb)
6547 		return ERR_PTR(-ENOMEM);
6548 
6549 	cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
6550 	cmd->param_id    = __cpu_to_le32(id);
6551 	cmd->param_value = __cpu_to_le32(value);
6552 
6553 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n",
6554 		   id, value);
6555 	return skb;
6556 }
6557 
ath10k_wmi_put_host_mem_chunks(struct ath10k * ar,struct wmi_host_mem_chunks * chunks)6558 void ath10k_wmi_put_host_mem_chunks(struct ath10k *ar,
6559 				    struct wmi_host_mem_chunks *chunks)
6560 {
6561 	struct host_memory_chunk *chunk;
6562 	int i;
6563 
6564 	chunks->count = __cpu_to_le32(ar->wmi.num_mem_chunks);
6565 
6566 	for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
6567 		chunk = &chunks->items[i];
6568 		chunk->ptr = __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
6569 		chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len);
6570 		chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
6571 
6572 		ath10k_dbg(ar, ATH10K_DBG_WMI,
6573 			   "wmi chunk %d len %d requested, addr 0x%llx\n",
6574 			   i,
6575 			   ar->wmi.mem_chunks[i].len,
6576 			   (unsigned long long)ar->wmi.mem_chunks[i].paddr);
6577 	}
6578 }
6579 
ath10k_wmi_op_gen_init(struct ath10k * ar)6580 static struct sk_buff *ath10k_wmi_op_gen_init(struct ath10k *ar)
6581 {
6582 	struct wmi_init_cmd *cmd;
6583 	struct sk_buff *buf;
6584 	struct wmi_resource_config config = {};
6585 	u32 val;
6586 
6587 	config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS);
6588 	config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS);
6589 	config.num_offload_peers = __cpu_to_le32(TARGET_NUM_OFFLOAD_PEERS);
6590 
6591 	config.num_offload_reorder_bufs =
6592 		__cpu_to_le32(TARGET_NUM_OFFLOAD_REORDER_BUFS);
6593 
6594 	config.num_peer_keys = __cpu_to_le32(TARGET_NUM_PEER_KEYS);
6595 	config.num_tids = __cpu_to_le32(TARGET_NUM_TIDS);
6596 	config.ast_skid_limit = __cpu_to_le32(TARGET_AST_SKID_LIMIT);
6597 	config.tx_chain_mask = __cpu_to_le32(TARGET_TX_CHAIN_MASK);
6598 	config.rx_chain_mask = __cpu_to_le32(TARGET_RX_CHAIN_MASK);
6599 	config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
6600 	config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
6601 	config.rx_timeout_pri_be = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
6602 	config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_RX_TIMEOUT_HI_PRI);
6603 	config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
6604 	config.scan_max_pending_reqs =
6605 		__cpu_to_le32(TARGET_SCAN_MAX_PENDING_REQS);
6606 
6607 	config.bmiss_offload_max_vdev =
6608 		__cpu_to_le32(TARGET_BMISS_OFFLOAD_MAX_VDEV);
6609 
6610 	config.roam_offload_max_vdev =
6611 		__cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_VDEV);
6612 
6613 	config.roam_offload_max_ap_profiles =
6614 		__cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES);
6615 
6616 	config.num_mcast_groups = __cpu_to_le32(TARGET_NUM_MCAST_GROUPS);
6617 	config.num_mcast_table_elems =
6618 		__cpu_to_le32(TARGET_NUM_MCAST_TABLE_ELEMS);
6619 
6620 	config.mcast2ucast_mode = __cpu_to_le32(TARGET_MCAST2UCAST_MODE);
6621 	config.tx_dbg_log_size = __cpu_to_le32(TARGET_TX_DBG_LOG_SIZE);
6622 	config.num_wds_entries = __cpu_to_le32(TARGET_NUM_WDS_ENTRIES);
6623 	config.dma_burst_size = __cpu_to_le32(TARGET_DMA_BURST_SIZE);
6624 	config.mac_aggr_delim = __cpu_to_le32(TARGET_MAC_AGGR_DELIM);
6625 
6626 	val = TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
6627 	config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
6628 
6629 	config.vow_config = __cpu_to_le32(TARGET_VOW_CONFIG);
6630 
6631 	config.gtk_offload_max_vdev =
6632 		__cpu_to_le32(TARGET_GTK_OFFLOAD_MAX_VDEV);
6633 
6634 	config.num_msdu_desc = __cpu_to_le32(TARGET_NUM_MSDU_DESC);
6635 	config.max_frag_entries = __cpu_to_le32(TARGET_MAX_FRAG_ENTRIES);
6636 
6637 	buf = ath10k_wmi_alloc_skb(ar, struct_size(cmd, mem_chunks.items,
6638 						   ar->wmi.num_mem_chunks));
6639 	if (!buf)
6640 		return ERR_PTR(-ENOMEM);
6641 
6642 	cmd = (struct wmi_init_cmd *)buf->data;
6643 
6644 	memcpy(&cmd->resource_config, &config, sizeof(config));
6645 	ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
6646 
6647 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init\n");
6648 	return buf;
6649 }
6650 
ath10k_wmi_10_1_op_gen_init(struct ath10k * ar)6651 static struct sk_buff *ath10k_wmi_10_1_op_gen_init(struct ath10k *ar)
6652 {
6653 	struct wmi_init_cmd_10x *cmd;
6654 	struct sk_buff *buf;
6655 	struct wmi_resource_config_10x config = {};
6656 	u32 val;
6657 
6658 	config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
6659 	config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
6660 	config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
6661 	config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
6662 	config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
6663 	config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
6664 	config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
6665 	config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
6666 	config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
6667 	config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
6668 	config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
6669 	config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
6670 	config.scan_max_pending_reqs =
6671 		__cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
6672 
6673 	config.bmiss_offload_max_vdev =
6674 		__cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV);
6675 
6676 	config.roam_offload_max_vdev =
6677 		__cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV);
6678 
6679 	config.roam_offload_max_ap_profiles =
6680 		__cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES);
6681 
6682 	config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS);
6683 	config.num_mcast_table_elems =
6684 		__cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS);
6685 
6686 	config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
6687 	config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
6688 	config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
6689 	config.dma_burst_size = __cpu_to_le32(TARGET_10X_DMA_BURST_SIZE);
6690 	config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);
6691 
6692 	val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
6693 	config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
6694 
6695 	config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG);
6696 
6697 	config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
6698 	config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);
6699 
6700 	buf = ath10k_wmi_alloc_skb(ar, struct_size(cmd, mem_chunks.items,
6701 						   ar->wmi.num_mem_chunks));
6702 	if (!buf)
6703 		return ERR_PTR(-ENOMEM);
6704 
6705 	cmd = (struct wmi_init_cmd_10x *)buf->data;
6706 
6707 	memcpy(&cmd->resource_config, &config, sizeof(config));
6708 	ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
6709 
6710 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10x\n");
6711 	return buf;
6712 }
6713 
ath10k_wmi_10_2_op_gen_init(struct ath10k * ar)6714 static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
6715 {
6716 	struct wmi_init_cmd_10_2 *cmd;
6717 	struct sk_buff *buf;
6718 	struct wmi_resource_config_10x config = {};
6719 	u32 val, features;
6720 
6721 	config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
6722 	config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
6723 
6724 	if (ath10k_peer_stats_enabled(ar)) {
6725 		config.num_peers = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_PEERS);
6726 		config.num_tids = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_TIDS);
6727 	} else {
6728 		config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
6729 		config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
6730 	}
6731 
6732 	config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
6733 	config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
6734 	config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
6735 	config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
6736 	config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
6737 	config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
6738 	config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
6739 	config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
6740 
6741 	config.scan_max_pending_reqs =
6742 		__cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
6743 
6744 	config.bmiss_offload_max_vdev =
6745 		__cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV);
6746 
6747 	config.roam_offload_max_vdev =
6748 		__cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV);
6749 
6750 	config.roam_offload_max_ap_profiles =
6751 		__cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES);
6752 
6753 	config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS);
6754 	config.num_mcast_table_elems =
6755 		__cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS);
6756 
6757 	config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
6758 	config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
6759 	config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
6760 	config.dma_burst_size = __cpu_to_le32(TARGET_10_2_DMA_BURST_SIZE);
6761 	config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);
6762 
6763 	val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
6764 	config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
6765 
6766 	config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG);
6767 
6768 	config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
6769 	config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);
6770 
6771 	buf = ath10k_wmi_alloc_skb(ar, struct_size(cmd, mem_chunks.items,
6772 						   ar->wmi.num_mem_chunks));
6773 	if (!buf)
6774 		return ERR_PTR(-ENOMEM);
6775 
6776 	cmd = (struct wmi_init_cmd_10_2 *)buf->data;
6777 
6778 	features = WMI_10_2_RX_BATCH_MODE;
6779 
6780 	if (test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags) &&
6781 	    test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map))
6782 		features |= WMI_10_2_COEX_GPIO;
6783 
6784 	if (ath10k_peer_stats_enabled(ar))
6785 		features |= WMI_10_2_PEER_STATS;
6786 
6787 	if (test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map))
6788 		features |= WMI_10_2_BSS_CHAN_INFO;
6789 
6790 	cmd->resource_config.feature_mask = __cpu_to_le32(features);
6791 
6792 	memcpy(&cmd->resource_config.common, &config, sizeof(config));
6793 	ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
6794 
6795 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.2\n");
6796 	return buf;
6797 }
6798 
ath10k_wmi_10_4_op_gen_init(struct ath10k * ar)6799 static struct sk_buff *ath10k_wmi_10_4_op_gen_init(struct ath10k *ar)
6800 {
6801 	struct wmi_init_cmd_10_4 *cmd;
6802 	struct sk_buff *buf;
6803 	struct wmi_resource_config_10_4 config = {};
6804 
6805 	config.num_vdevs = __cpu_to_le32(ar->max_num_vdevs);
6806 	config.num_peers = __cpu_to_le32(ar->max_num_peers);
6807 	config.num_active_peers = __cpu_to_le32(ar->num_active_peers);
6808 	config.num_tids = __cpu_to_le32(ar->num_tids);
6809 
6810 	config.num_offload_peers = __cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_PEERS);
6811 	config.num_offload_reorder_buffs =
6812 			__cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS);
6813 	config.num_peer_keys  = __cpu_to_le32(TARGET_10_4_NUM_PEER_KEYS);
6814 	config.ast_skid_limit = __cpu_to_le32(TARGET_10_4_AST_SKID_LIMIT);
6815 	config.tx_chain_mask  = __cpu_to_le32(ar->hw_params.tx_chain_mask);
6816 	config.rx_chain_mask  = __cpu_to_le32(ar->hw_params.rx_chain_mask);
6817 
6818 	config.rx_timeout_pri[0] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
6819 	config.rx_timeout_pri[1] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
6820 	config.rx_timeout_pri[2] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
6821 	config.rx_timeout_pri[3] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_HI_PRI);
6822 
6823 	config.rx_decap_mode	    = __cpu_to_le32(ar->wmi.rx_decap_mode);
6824 	config.scan_max_pending_req = __cpu_to_le32(TARGET_10_4_SCAN_MAX_REQS);
6825 	config.bmiss_offload_max_vdev =
6826 			__cpu_to_le32(TARGET_10_4_BMISS_OFFLOAD_MAX_VDEV);
6827 	config.roam_offload_max_vdev  =
6828 			__cpu_to_le32(TARGET_10_4_ROAM_OFFLOAD_MAX_VDEV);
6829 	config.roam_offload_max_ap_profiles =
6830 			__cpu_to_le32(TARGET_10_4_ROAM_OFFLOAD_MAX_PROFILES);
6831 	config.num_mcast_groups = __cpu_to_le32(TARGET_10_4_NUM_MCAST_GROUPS);
6832 	config.num_mcast_table_elems =
6833 			__cpu_to_le32(TARGET_10_4_NUM_MCAST_TABLE_ELEMS);
6834 
6835 	config.mcast2ucast_mode = __cpu_to_le32(TARGET_10_4_MCAST2UCAST_MODE);
6836 	config.tx_dbg_log_size  = __cpu_to_le32(TARGET_10_4_TX_DBG_LOG_SIZE);
6837 	config.num_wds_entries  = __cpu_to_le32(TARGET_10_4_NUM_WDS_ENTRIES);
6838 	config.dma_burst_size   = __cpu_to_le32(TARGET_10_4_DMA_BURST_SIZE);
6839 	config.mac_aggr_delim   = __cpu_to_le32(TARGET_10_4_MAC_AGGR_DELIM);
6840 
6841 	config.rx_skip_defrag_timeout_dup_detection_check =
6842 	  __cpu_to_le32(TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK);
6843 
6844 	config.vow_config = __cpu_to_le32(TARGET_10_4_VOW_CONFIG);
6845 	config.gtk_offload_max_vdev =
6846 			__cpu_to_le32(TARGET_10_4_GTK_OFFLOAD_MAX_VDEV);
6847 	config.num_msdu_desc = __cpu_to_le32(ar->htt.max_num_pending_tx);
6848 	config.max_frag_entries = __cpu_to_le32(TARGET_10_4_11AC_TX_MAX_FRAGS);
6849 	config.max_peer_ext_stats =
6850 			__cpu_to_le32(TARGET_10_4_MAX_PEER_EXT_STATS);
6851 	config.smart_ant_cap = __cpu_to_le32(TARGET_10_4_SMART_ANT_CAP);
6852 
6853 	config.bk_minfree = __cpu_to_le32(TARGET_10_4_BK_MIN_FREE);
6854 	config.be_minfree = __cpu_to_le32(TARGET_10_4_BE_MIN_FREE);
6855 	config.vi_minfree = __cpu_to_le32(TARGET_10_4_VI_MIN_FREE);
6856 	config.vo_minfree = __cpu_to_le32(TARGET_10_4_VO_MIN_FREE);
6857 
6858 	config.rx_batchmode = __cpu_to_le32(TARGET_10_4_RX_BATCH_MODE);
6859 	config.tt_support =
6860 			__cpu_to_le32(TARGET_10_4_THERMAL_THROTTLING_CONFIG);
6861 	config.atf_config = __cpu_to_le32(TARGET_10_4_ATF_CONFIG);
6862 	config.iphdr_pad_config = __cpu_to_le32(TARGET_10_4_IPHDR_PAD_CONFIG);
6863 	config.qwrap_config = __cpu_to_le32(TARGET_10_4_QWRAP_CONFIG);
6864 
6865 	buf = ath10k_wmi_alloc_skb(ar, struct_size(cmd, mem_chunks.items,
6866 						   ar->wmi.num_mem_chunks));
6867 	if (!buf)
6868 		return ERR_PTR(-ENOMEM);
6869 
6870 	cmd = (struct wmi_init_cmd_10_4 *)buf->data;
6871 	memcpy(&cmd->resource_config, &config, sizeof(config));
6872 	ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
6873 
6874 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.4\n");
6875 	return buf;
6876 }
6877 
ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg * arg)6878 int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg)
6879 {
6880 	if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN)
6881 		return -EINVAL;
6882 	if (arg->n_channels > ARRAY_SIZE(arg->channels))
6883 		return -EINVAL;
6884 	if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID)
6885 		return -EINVAL;
6886 	if (arg->n_bssids > WLAN_SCAN_PARAMS_MAX_BSSID)
6887 		return -EINVAL;
6888 
6889 	return 0;
6890 }
6891 
6892 static size_t
ath10k_wmi_start_scan_tlvs_len(const struct wmi_start_scan_arg * arg)6893 ath10k_wmi_start_scan_tlvs_len(const struct wmi_start_scan_arg *arg)
6894 {
6895 	int len = 0;
6896 
6897 	if (arg->ie_len) {
6898 		len += sizeof(struct wmi_ie_data);
6899 		len += roundup(arg->ie_len, 4);
6900 	}
6901 
6902 	if (arg->n_channels) {
6903 		len += sizeof(struct wmi_chan_list);
6904 		len += sizeof(__le32) * arg->n_channels;
6905 	}
6906 
6907 	if (arg->n_ssids) {
6908 		len += sizeof(struct wmi_ssid_list);
6909 		len += sizeof(struct wmi_ssid) * arg->n_ssids;
6910 	}
6911 
6912 	if (arg->n_bssids) {
6913 		len += sizeof(struct wmi_bssid_list);
6914 		len += sizeof(struct wmi_mac_addr) * arg->n_bssids;
6915 	}
6916 
6917 	return len;
6918 }
6919 
ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common * cmn,const struct wmi_start_scan_arg * arg)6920 void ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn,
6921 				      const struct wmi_start_scan_arg *arg)
6922 {
6923 	u32 scan_id;
6924 	u32 scan_req_id;
6925 
6926 	scan_id  = WMI_HOST_SCAN_REQ_ID_PREFIX;
6927 	scan_id |= arg->scan_id;
6928 
6929 	scan_req_id  = WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
6930 	scan_req_id |= arg->scan_req_id;
6931 
6932 	cmn->scan_id            = __cpu_to_le32(scan_id);
6933 	cmn->scan_req_id        = __cpu_to_le32(scan_req_id);
6934 	cmn->vdev_id            = __cpu_to_le32(arg->vdev_id);
6935 	cmn->scan_priority      = __cpu_to_le32(arg->scan_priority);
6936 	cmn->notify_scan_events = __cpu_to_le32(arg->notify_scan_events);
6937 	cmn->dwell_time_active  = __cpu_to_le32(arg->dwell_time_active);
6938 	cmn->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive);
6939 	cmn->min_rest_time      = __cpu_to_le32(arg->min_rest_time);
6940 	cmn->max_rest_time      = __cpu_to_le32(arg->max_rest_time);
6941 	cmn->repeat_probe_time  = __cpu_to_le32(arg->repeat_probe_time);
6942 	cmn->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time);
6943 	cmn->idle_time          = __cpu_to_le32(arg->idle_time);
6944 	cmn->max_scan_time      = __cpu_to_le32(arg->max_scan_time);
6945 	cmn->probe_delay        = __cpu_to_le32(arg->probe_delay);
6946 	cmn->scan_ctrl_flags    = __cpu_to_le32(arg->scan_ctrl_flags);
6947 }
6948 
6949 static void
ath10k_wmi_put_start_scan_tlvs(struct wmi_start_scan_tlvs * tlvs,const struct wmi_start_scan_arg * arg)6950 ath10k_wmi_put_start_scan_tlvs(struct wmi_start_scan_tlvs *tlvs,
6951 			       const struct wmi_start_scan_arg *arg)
6952 {
6953 	struct wmi_ie_data *ie;
6954 	struct wmi_chan_list *channels;
6955 	struct wmi_ssid_list *ssids;
6956 	struct wmi_bssid_list *bssids;
6957 	void *ptr = tlvs->tlvs;
6958 	int i;
6959 
6960 	if (arg->n_channels) {
6961 		channels = ptr;
6962 		channels->tag = __cpu_to_le32(WMI_CHAN_LIST_TAG);
6963 		channels->num_chan = __cpu_to_le32(arg->n_channels);
6964 
6965 		for (i = 0; i < arg->n_channels; i++)
6966 			channels->channel_list[i].freq =
6967 				__cpu_to_le16(arg->channels[i]);
6968 
6969 		ptr += sizeof(*channels);
6970 		ptr += sizeof(__le32) * arg->n_channels;
6971 	}
6972 
6973 	if (arg->n_ssids) {
6974 		ssids = ptr;
6975 		ssids->tag = __cpu_to_le32(WMI_SSID_LIST_TAG);
6976 		ssids->num_ssids = __cpu_to_le32(arg->n_ssids);
6977 
6978 		for (i = 0; i < arg->n_ssids; i++) {
6979 			ssids->ssids[i].ssid_len =
6980 				__cpu_to_le32(arg->ssids[i].len);
6981 			memcpy(&ssids->ssids[i].ssid,
6982 			       arg->ssids[i].ssid,
6983 			       arg->ssids[i].len);
6984 		}
6985 
6986 		ptr += sizeof(*ssids);
6987 		ptr += sizeof(struct wmi_ssid) * arg->n_ssids;
6988 	}
6989 
6990 	if (arg->n_bssids) {
6991 		bssids = ptr;
6992 		bssids->tag = __cpu_to_le32(WMI_BSSID_LIST_TAG);
6993 		bssids->num_bssid = __cpu_to_le32(arg->n_bssids);
6994 
6995 		for (i = 0; i < arg->n_bssids; i++)
6996 			ether_addr_copy(bssids->bssid_list[i].addr,
6997 					arg->bssids[i].bssid);
6998 
6999 		ptr += sizeof(*bssids);
7000 		ptr += sizeof(struct wmi_mac_addr) * arg->n_bssids;
7001 	}
7002 
7003 	if (arg->ie_len) {
7004 		ie = ptr;
7005 		ie->tag = __cpu_to_le32(WMI_IE_TAG);
7006 		ie->ie_len = __cpu_to_le32(arg->ie_len);
7007 		memcpy(ie->ie_data, arg->ie, arg->ie_len);
7008 
7009 		ptr += sizeof(*ie);
7010 		ptr += roundup(arg->ie_len, 4);
7011 	}
7012 }
7013 
7014 static struct sk_buff *
ath10k_wmi_op_gen_start_scan(struct ath10k * ar,const struct wmi_start_scan_arg * arg)7015 ath10k_wmi_op_gen_start_scan(struct ath10k *ar,
7016 			     const struct wmi_start_scan_arg *arg)
7017 {
7018 	struct wmi_start_scan_cmd *cmd;
7019 	struct sk_buff *skb;
7020 	size_t len;
7021 	int ret;
7022 
7023 	ret = ath10k_wmi_start_scan_verify(arg);
7024 	if (ret)
7025 		return ERR_PTR(ret);
7026 
7027 	len = sizeof(*cmd) + ath10k_wmi_start_scan_tlvs_len(arg);
7028 	skb = ath10k_wmi_alloc_skb(ar, len);
7029 	if (!skb)
7030 		return ERR_PTR(-ENOMEM);
7031 
7032 	cmd = (struct wmi_start_scan_cmd *)skb->data;
7033 
7034 	ath10k_wmi_put_start_scan_common(&cmd->common, arg);
7035 	ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
7036 
7037 	cmd->burst_duration_ms = __cpu_to_le32(0);
7038 
7039 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi start scan\n");
7040 	return skb;
7041 }
7042 
7043 static struct sk_buff *
ath10k_wmi_10x_op_gen_start_scan(struct ath10k * ar,const struct wmi_start_scan_arg * arg)7044 ath10k_wmi_10x_op_gen_start_scan(struct ath10k *ar,
7045 				 const struct wmi_start_scan_arg *arg)
7046 {
7047 	struct wmi_10x_start_scan_cmd *cmd;
7048 	struct sk_buff *skb;
7049 	size_t len;
7050 	int ret;
7051 
7052 	ret = ath10k_wmi_start_scan_verify(arg);
7053 	if (ret)
7054 		return ERR_PTR(ret);
7055 
7056 	len = sizeof(*cmd) + ath10k_wmi_start_scan_tlvs_len(arg);
7057 	skb = ath10k_wmi_alloc_skb(ar, len);
7058 	if (!skb)
7059 		return ERR_PTR(-ENOMEM);
7060 
7061 	cmd = (struct wmi_10x_start_scan_cmd *)skb->data;
7062 
7063 	ath10k_wmi_put_start_scan_common(&cmd->common, arg);
7064 	ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
7065 
7066 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi 10x start scan\n");
7067 	return skb;
7068 }
7069 
ath10k_wmi_start_scan_init(struct ath10k * ar,struct wmi_start_scan_arg * arg)7070 void ath10k_wmi_start_scan_init(struct ath10k *ar,
7071 				struct wmi_start_scan_arg *arg)
7072 {
7073 	/* setup commonly used values */
7074 	arg->scan_req_id = 1;
7075 	arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
7076 	arg->dwell_time_active = 50;
7077 	arg->dwell_time_passive = 150;
7078 	arg->min_rest_time = 50;
7079 	arg->max_rest_time = 500;
7080 	arg->repeat_probe_time = 0;
7081 	arg->probe_spacing_time = 0;
7082 	arg->idle_time = 0;
7083 	arg->max_scan_time = 20000;
7084 	arg->probe_delay = 5;
7085 	arg->notify_scan_events = WMI_SCAN_EVENT_STARTED
7086 		| WMI_SCAN_EVENT_COMPLETED
7087 		| WMI_SCAN_EVENT_BSS_CHANNEL
7088 		| WMI_SCAN_EVENT_FOREIGN_CHANNEL
7089 		| WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT
7090 		| WMI_SCAN_EVENT_DEQUEUED;
7091 	arg->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT;
7092 	arg->n_bssids = 1;
7093 	arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF";
7094 }
7095 
7096 static struct sk_buff *
ath10k_wmi_op_gen_stop_scan(struct ath10k * ar,const struct wmi_stop_scan_arg * arg)7097 ath10k_wmi_op_gen_stop_scan(struct ath10k *ar,
7098 			    const struct wmi_stop_scan_arg *arg)
7099 {
7100 	struct wmi_stop_scan_cmd *cmd;
7101 	struct sk_buff *skb;
7102 	u32 scan_id;
7103 	u32 req_id;
7104 
7105 	if (arg->req_id > 0xFFF)
7106 		return ERR_PTR(-EINVAL);
7107 	if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
7108 		return ERR_PTR(-EINVAL);
7109 
7110 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7111 	if (!skb)
7112 		return ERR_PTR(-ENOMEM);
7113 
7114 	scan_id = arg->u.scan_id;
7115 	scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;
7116 
7117 	req_id = arg->req_id;
7118 	req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
7119 
7120 	cmd = (struct wmi_stop_scan_cmd *)skb->data;
7121 	cmd->req_type    = __cpu_to_le32(arg->req_type);
7122 	cmd->vdev_id     = __cpu_to_le32(arg->u.vdev_id);
7123 	cmd->scan_id     = __cpu_to_le32(scan_id);
7124 	cmd->scan_req_id = __cpu_to_le32(req_id);
7125 
7126 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7127 		   "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n",
7128 		   arg->req_id, arg->req_type, arg->u.scan_id);
7129 	return skb;
7130 }
7131 
7132 static struct sk_buff *
ath10k_wmi_op_gen_vdev_create(struct ath10k * ar,u32 vdev_id,enum wmi_vdev_type type,enum wmi_vdev_subtype subtype,const u8 macaddr[ETH_ALEN])7133 ath10k_wmi_op_gen_vdev_create(struct ath10k *ar, u32 vdev_id,
7134 			      enum wmi_vdev_type type,
7135 			      enum wmi_vdev_subtype subtype,
7136 			      const u8 macaddr[ETH_ALEN])
7137 {
7138 	struct wmi_vdev_create_cmd *cmd;
7139 	struct sk_buff *skb;
7140 
7141 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7142 	if (!skb)
7143 		return ERR_PTR(-ENOMEM);
7144 
7145 	cmd = (struct wmi_vdev_create_cmd *)skb->data;
7146 	cmd->vdev_id      = __cpu_to_le32(vdev_id);
7147 	cmd->vdev_type    = __cpu_to_le32(type);
7148 	cmd->vdev_subtype = __cpu_to_le32(subtype);
7149 	ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);
7150 
7151 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7152 		   "WMI vdev create: id %d type %d subtype %d macaddr %pM\n",
7153 		   vdev_id, type, subtype, macaddr);
7154 	return skb;
7155 }
7156 
7157 static struct sk_buff *
ath10k_wmi_op_gen_vdev_delete(struct ath10k * ar,u32 vdev_id)7158 ath10k_wmi_op_gen_vdev_delete(struct ath10k *ar, u32 vdev_id)
7159 {
7160 	struct wmi_vdev_delete_cmd *cmd;
7161 	struct sk_buff *skb;
7162 
7163 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7164 	if (!skb)
7165 		return ERR_PTR(-ENOMEM);
7166 
7167 	cmd = (struct wmi_vdev_delete_cmd *)skb->data;
7168 	cmd->vdev_id = __cpu_to_le32(vdev_id);
7169 
7170 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7171 		   "WMI vdev delete id %d\n", vdev_id);
7172 	return skb;
7173 }
7174 
7175 static struct sk_buff *
ath10k_wmi_op_gen_vdev_start(struct ath10k * ar,const struct wmi_vdev_start_request_arg * arg,bool restart)7176 ath10k_wmi_op_gen_vdev_start(struct ath10k *ar,
7177 			     const struct wmi_vdev_start_request_arg *arg,
7178 			     bool restart)
7179 {
7180 	struct wmi_vdev_start_request_cmd *cmd;
7181 	struct sk_buff *skb;
7182 	const char *cmdname;
7183 	u32 flags = 0;
7184 
7185 	if (WARN_ON(arg->hidden_ssid && !arg->ssid))
7186 		return ERR_PTR(-EINVAL);
7187 	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
7188 		return ERR_PTR(-EINVAL);
7189 
7190 	if (restart)
7191 		cmdname = "restart";
7192 	else
7193 		cmdname = "start";
7194 
7195 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7196 	if (!skb)
7197 		return ERR_PTR(-ENOMEM);
7198 
7199 	if (arg->hidden_ssid)
7200 		flags |= WMI_VDEV_START_HIDDEN_SSID;
7201 	if (arg->pmf_enabled)
7202 		flags |= WMI_VDEV_START_PMF_ENABLED;
7203 
7204 	cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
7205 	cmd->vdev_id         = __cpu_to_le32(arg->vdev_id);
7206 	cmd->disable_hw_ack  = __cpu_to_le32(arg->disable_hw_ack);
7207 	cmd->beacon_interval = __cpu_to_le32(arg->bcn_intval);
7208 	cmd->dtim_period     = __cpu_to_le32(arg->dtim_period);
7209 	cmd->flags           = __cpu_to_le32(flags);
7210 	cmd->bcn_tx_rate     = __cpu_to_le32(arg->bcn_tx_rate);
7211 	cmd->bcn_tx_power    = __cpu_to_le32(arg->bcn_tx_power);
7212 
7213 	if (arg->ssid) {
7214 		cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len);
7215 		memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
7216 	}
7217 
7218 	ath10k_wmi_put_wmi_channel(ar, &cmd->chan, &arg->channel);
7219 
7220 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7221 		   "wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, ch_flags: 0x%0X, max_power: %d\n",
7222 		   cmdname, arg->vdev_id,
7223 		   flags, arg->channel.freq, arg->channel.mode,
7224 		   cmd->chan.flags, arg->channel.max_power);
7225 
7226 	return skb;
7227 }
7228 
7229 static struct sk_buff *
ath10k_wmi_op_gen_vdev_stop(struct ath10k * ar,u32 vdev_id)7230 ath10k_wmi_op_gen_vdev_stop(struct ath10k *ar, u32 vdev_id)
7231 {
7232 	struct wmi_vdev_stop_cmd *cmd;
7233 	struct sk_buff *skb;
7234 
7235 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7236 	if (!skb)
7237 		return ERR_PTR(-ENOMEM);
7238 
7239 	cmd = (struct wmi_vdev_stop_cmd *)skb->data;
7240 	cmd->vdev_id = __cpu_to_le32(vdev_id);
7241 
7242 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id);
7243 	return skb;
7244 }
7245 
7246 static struct sk_buff *
ath10k_wmi_op_gen_vdev_up(struct ath10k * ar,u32 vdev_id,u32 aid,const u8 * bssid)7247 ath10k_wmi_op_gen_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
7248 			  const u8 *bssid)
7249 {
7250 	struct wmi_vdev_up_cmd *cmd;
7251 	struct sk_buff *skb;
7252 
7253 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7254 	if (!skb)
7255 		return ERR_PTR(-ENOMEM);
7256 
7257 	cmd = (struct wmi_vdev_up_cmd *)skb->data;
7258 	cmd->vdev_id       = __cpu_to_le32(vdev_id);
7259 	cmd->vdev_assoc_id = __cpu_to_le32(aid);
7260 	ether_addr_copy(cmd->vdev_bssid.addr, bssid);
7261 
7262 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7263 		   "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
7264 		   vdev_id, aid, bssid);
7265 	return skb;
7266 }
7267 
7268 static struct sk_buff *
ath10k_wmi_op_gen_vdev_down(struct ath10k * ar,u32 vdev_id)7269 ath10k_wmi_op_gen_vdev_down(struct ath10k *ar, u32 vdev_id)
7270 {
7271 	struct wmi_vdev_down_cmd *cmd;
7272 	struct sk_buff *skb;
7273 
7274 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7275 	if (!skb)
7276 		return ERR_PTR(-ENOMEM);
7277 
7278 	cmd = (struct wmi_vdev_down_cmd *)skb->data;
7279 	cmd->vdev_id = __cpu_to_le32(vdev_id);
7280 
7281 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7282 		   "wmi mgmt vdev down id 0x%x\n", vdev_id);
7283 	return skb;
7284 }
7285 
7286 static struct sk_buff *
ath10k_wmi_op_gen_vdev_set_param(struct ath10k * ar,u32 vdev_id,u32 param_id,u32 param_value)7287 ath10k_wmi_op_gen_vdev_set_param(struct ath10k *ar, u32 vdev_id,
7288 				 u32 param_id, u32 param_value)
7289 {
7290 	struct wmi_vdev_set_param_cmd *cmd;
7291 	struct sk_buff *skb;
7292 
7293 	if (param_id == WMI_VDEV_PARAM_UNSUPPORTED) {
7294 		ath10k_dbg(ar, ATH10K_DBG_WMI,
7295 			   "vdev param %d not supported by firmware\n",
7296 			    param_id);
7297 		return ERR_PTR(-EOPNOTSUPP);
7298 	}
7299 
7300 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7301 	if (!skb)
7302 		return ERR_PTR(-ENOMEM);
7303 
7304 	cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
7305 	cmd->vdev_id     = __cpu_to_le32(vdev_id);
7306 	cmd->param_id    = __cpu_to_le32(param_id);
7307 	cmd->param_value = __cpu_to_le32(param_value);
7308 
7309 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7310 		   "wmi vdev id 0x%x set param %d value %d\n",
7311 		   vdev_id, param_id, param_value);
7312 	return skb;
7313 }
7314 
7315 static struct sk_buff *
ath10k_wmi_op_gen_vdev_install_key(struct ath10k * ar,const struct wmi_vdev_install_key_arg * arg)7316 ath10k_wmi_op_gen_vdev_install_key(struct ath10k *ar,
7317 				   const struct wmi_vdev_install_key_arg *arg)
7318 {
7319 	struct wmi_vdev_install_key_cmd *cmd;
7320 	struct sk_buff *skb;
7321 
7322 	if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL)
7323 		return ERR_PTR(-EINVAL);
7324 	if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL)
7325 		return ERR_PTR(-EINVAL);
7326 
7327 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd) + arg->key_len);
7328 	if (!skb)
7329 		return ERR_PTR(-ENOMEM);
7330 
7331 	cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
7332 	cmd->vdev_id       = __cpu_to_le32(arg->vdev_id);
7333 	cmd->key_idx       = __cpu_to_le32(arg->key_idx);
7334 	cmd->key_flags     = __cpu_to_le32(arg->key_flags);
7335 	cmd->key_cipher    = __cpu_to_le32(arg->key_cipher);
7336 	cmd->key_len       = __cpu_to_le32(arg->key_len);
7337 	cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len);
7338 	cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len);
7339 
7340 	if (arg->macaddr)
7341 		ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
7342 	if (arg->key_data)
7343 		memcpy(cmd->key_data, arg->key_data, arg->key_len);
7344 
7345 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7346 		   "wmi vdev install key idx %d cipher %d len %d\n",
7347 		   arg->key_idx, arg->key_cipher, arg->key_len);
7348 	return skb;
7349 }
7350 
7351 static struct sk_buff *
ath10k_wmi_op_gen_vdev_spectral_conf(struct ath10k * ar,const struct wmi_vdev_spectral_conf_arg * arg)7352 ath10k_wmi_op_gen_vdev_spectral_conf(struct ath10k *ar,
7353 				     const struct wmi_vdev_spectral_conf_arg *arg)
7354 {
7355 	struct wmi_vdev_spectral_conf_cmd *cmd;
7356 	struct sk_buff *skb;
7357 
7358 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7359 	if (!skb)
7360 		return ERR_PTR(-ENOMEM);
7361 
7362 	cmd = (struct wmi_vdev_spectral_conf_cmd *)skb->data;
7363 	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
7364 	cmd->scan_count = __cpu_to_le32(arg->scan_count);
7365 	cmd->scan_period = __cpu_to_le32(arg->scan_period);
7366 	cmd->scan_priority = __cpu_to_le32(arg->scan_priority);
7367 	cmd->scan_fft_size = __cpu_to_le32(arg->scan_fft_size);
7368 	cmd->scan_gc_ena = __cpu_to_le32(arg->scan_gc_ena);
7369 	cmd->scan_restart_ena = __cpu_to_le32(arg->scan_restart_ena);
7370 	cmd->scan_noise_floor_ref = __cpu_to_le32(arg->scan_noise_floor_ref);
7371 	cmd->scan_init_delay = __cpu_to_le32(arg->scan_init_delay);
7372 	cmd->scan_nb_tone_thr = __cpu_to_le32(arg->scan_nb_tone_thr);
7373 	cmd->scan_str_bin_thr = __cpu_to_le32(arg->scan_str_bin_thr);
7374 	cmd->scan_wb_rpt_mode = __cpu_to_le32(arg->scan_wb_rpt_mode);
7375 	cmd->scan_rssi_rpt_mode = __cpu_to_le32(arg->scan_rssi_rpt_mode);
7376 	cmd->scan_rssi_thr = __cpu_to_le32(arg->scan_rssi_thr);
7377 	cmd->scan_pwr_format = __cpu_to_le32(arg->scan_pwr_format);
7378 	cmd->scan_rpt_mode = __cpu_to_le32(arg->scan_rpt_mode);
7379 	cmd->scan_bin_scale = __cpu_to_le32(arg->scan_bin_scale);
7380 	cmd->scan_dbm_adj = __cpu_to_le32(arg->scan_dbm_adj);
7381 	cmd->scan_chn_mask = __cpu_to_le32(arg->scan_chn_mask);
7382 
7383 	return skb;
7384 }
7385 
7386 static struct sk_buff *
ath10k_wmi_op_gen_vdev_spectral_enable(struct ath10k * ar,u32 vdev_id,u32 trigger,u32 enable)7387 ath10k_wmi_op_gen_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id,
7388 				       u32 trigger, u32 enable)
7389 {
7390 	struct wmi_vdev_spectral_enable_cmd *cmd;
7391 	struct sk_buff *skb;
7392 
7393 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7394 	if (!skb)
7395 		return ERR_PTR(-ENOMEM);
7396 
7397 	cmd = (struct wmi_vdev_spectral_enable_cmd *)skb->data;
7398 	cmd->vdev_id = __cpu_to_le32(vdev_id);
7399 	cmd->trigger_cmd = __cpu_to_le32(trigger);
7400 	cmd->enable_cmd = __cpu_to_le32(enable);
7401 
7402 	return skb;
7403 }
7404 
7405 static struct sk_buff *
ath10k_wmi_op_gen_peer_create(struct ath10k * ar,u32 vdev_id,const u8 peer_addr[ETH_ALEN],enum wmi_peer_type peer_type)7406 ath10k_wmi_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
7407 			      const u8 peer_addr[ETH_ALEN],
7408 			      enum wmi_peer_type peer_type)
7409 {
7410 	struct wmi_peer_create_cmd *cmd;
7411 	struct sk_buff *skb;
7412 
7413 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7414 	if (!skb)
7415 		return ERR_PTR(-ENOMEM);
7416 
7417 	cmd = (struct wmi_peer_create_cmd *)skb->data;
7418 	cmd->vdev_id = __cpu_to_le32(vdev_id);
7419 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
7420 	cmd->peer_type = __cpu_to_le32(peer_type);
7421 
7422 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7423 		   "wmi peer create vdev_id %d peer_addr %pM\n",
7424 		   vdev_id, peer_addr);
7425 	return skb;
7426 }
7427 
7428 static struct sk_buff *
ath10k_wmi_op_gen_peer_delete(struct ath10k * ar,u32 vdev_id,const u8 peer_addr[ETH_ALEN])7429 ath10k_wmi_op_gen_peer_delete(struct ath10k *ar, u32 vdev_id,
7430 			      const u8 peer_addr[ETH_ALEN])
7431 {
7432 	struct wmi_peer_delete_cmd *cmd;
7433 	struct sk_buff *skb;
7434 
7435 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7436 	if (!skb)
7437 		return ERR_PTR(-ENOMEM);
7438 
7439 	cmd = (struct wmi_peer_delete_cmd *)skb->data;
7440 	cmd->vdev_id = __cpu_to_le32(vdev_id);
7441 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
7442 
7443 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7444 		   "wmi peer delete vdev_id %d peer_addr %pM\n",
7445 		   vdev_id, peer_addr);
7446 	return skb;
7447 }
7448 
7449 static struct sk_buff *
ath10k_wmi_op_gen_peer_flush(struct ath10k * ar,u32 vdev_id,const u8 peer_addr[ETH_ALEN],u32 tid_bitmap)7450 ath10k_wmi_op_gen_peer_flush(struct ath10k *ar, u32 vdev_id,
7451 			     const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
7452 {
7453 	struct wmi_peer_flush_tids_cmd *cmd;
7454 	struct sk_buff *skb;
7455 
7456 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7457 	if (!skb)
7458 		return ERR_PTR(-ENOMEM);
7459 
7460 	cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
7461 	cmd->vdev_id         = __cpu_to_le32(vdev_id);
7462 	cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
7463 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
7464 
7465 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7466 		   "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n",
7467 		   vdev_id, peer_addr, tid_bitmap);
7468 	return skb;
7469 }
7470 
7471 static struct sk_buff *
ath10k_wmi_op_gen_peer_set_param(struct ath10k * ar,u32 vdev_id,const u8 * peer_addr,enum wmi_peer_param param_id,u32 param_value)7472 ath10k_wmi_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id,
7473 				 const u8 *peer_addr,
7474 				 enum wmi_peer_param param_id,
7475 				 u32 param_value)
7476 {
7477 	struct wmi_peer_set_param_cmd *cmd;
7478 	struct sk_buff *skb;
7479 
7480 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7481 	if (!skb)
7482 		return ERR_PTR(-ENOMEM);
7483 
7484 	cmd = (struct wmi_peer_set_param_cmd *)skb->data;
7485 	cmd->vdev_id     = __cpu_to_le32(vdev_id);
7486 	cmd->param_id    = __cpu_to_le32(param_id);
7487 	cmd->param_value = __cpu_to_le32(param_value);
7488 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
7489 
7490 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7491 		   "wmi vdev %d peer 0x%pM set param %d value %d\n",
7492 		   vdev_id, peer_addr, param_id, param_value);
7493 	return skb;
7494 }
7495 
7496 static struct sk_buff *
ath10k_wmi_op_gen_set_psmode(struct ath10k * ar,u32 vdev_id,enum wmi_sta_ps_mode psmode)7497 ath10k_wmi_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id,
7498 			     enum wmi_sta_ps_mode psmode)
7499 {
7500 	struct wmi_sta_powersave_mode_cmd *cmd;
7501 	struct sk_buff *skb;
7502 
7503 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7504 	if (!skb)
7505 		return ERR_PTR(-ENOMEM);
7506 
7507 	cmd = (struct wmi_sta_powersave_mode_cmd *)skb->data;
7508 	cmd->vdev_id     = __cpu_to_le32(vdev_id);
7509 	cmd->sta_ps_mode = __cpu_to_le32(psmode);
7510 
7511 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7512 		   "wmi set powersave id 0x%x mode %d\n",
7513 		   vdev_id, psmode);
7514 	return skb;
7515 }
7516 
7517 static struct sk_buff *
ath10k_wmi_op_gen_set_sta_ps(struct ath10k * ar,u32 vdev_id,enum wmi_sta_powersave_param param_id,u32 value)7518 ath10k_wmi_op_gen_set_sta_ps(struct ath10k *ar, u32 vdev_id,
7519 			     enum wmi_sta_powersave_param param_id,
7520 			     u32 value)
7521 {
7522 	struct wmi_sta_powersave_param_cmd *cmd;
7523 	struct sk_buff *skb;
7524 
7525 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7526 	if (!skb)
7527 		return ERR_PTR(-ENOMEM);
7528 
7529 	cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
7530 	cmd->vdev_id     = __cpu_to_le32(vdev_id);
7531 	cmd->param_id    = __cpu_to_le32(param_id);
7532 	cmd->param_value = __cpu_to_le32(value);
7533 
7534 	ath10k_dbg(ar, ATH10K_DBG_STA,
7535 		   "wmi sta ps param vdev_id 0x%x param %d value %d\n",
7536 		   vdev_id, param_id, value);
7537 	return skb;
7538 }
7539 
7540 static struct sk_buff *
ath10k_wmi_op_gen_set_ap_ps(struct ath10k * ar,u32 vdev_id,const u8 * mac,enum wmi_ap_ps_peer_param param_id,u32 value)7541 ath10k_wmi_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac,
7542 			    enum wmi_ap_ps_peer_param param_id, u32 value)
7543 {
7544 	struct wmi_ap_ps_peer_cmd *cmd;
7545 	struct sk_buff *skb;
7546 
7547 	if (!mac)
7548 		return ERR_PTR(-EINVAL);
7549 
7550 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7551 	if (!skb)
7552 		return ERR_PTR(-ENOMEM);
7553 
7554 	cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
7555 	cmd->vdev_id = __cpu_to_le32(vdev_id);
7556 	cmd->param_id = __cpu_to_le32(param_id);
7557 	cmd->param_value = __cpu_to_le32(value);
7558 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
7559 
7560 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7561 		   "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n",
7562 		   vdev_id, param_id, value, mac);
7563 	return skb;
7564 }
7565 
7566 static struct sk_buff *
ath10k_wmi_op_gen_scan_chan_list(struct ath10k * ar,const struct wmi_scan_chan_list_arg * arg)7567 ath10k_wmi_op_gen_scan_chan_list(struct ath10k *ar,
7568 				 const struct wmi_scan_chan_list_arg *arg)
7569 {
7570 	struct wmi_scan_chan_list_cmd *cmd;
7571 	struct sk_buff *skb;
7572 	struct wmi_channel_arg *ch;
7573 	struct wmi_channel *ci;
7574 	int i;
7575 
7576 	skb = ath10k_wmi_alloc_skb(ar, struct_size(cmd, chan_info, arg->n_channels));
7577 	if (!skb)
7578 		return ERR_PTR(-EINVAL);
7579 
7580 	cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
7581 	cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);
7582 
7583 	for (i = 0; i < arg->n_channels; i++) {
7584 		ch = &arg->channels[i];
7585 		ci = &cmd->chan_info[i];
7586 
7587 		ath10k_wmi_put_wmi_channel(ar, ci, ch);
7588 	}
7589 
7590 	return skb;
7591 }
7592 
7593 static void
ath10k_wmi_peer_assoc_fill(struct ath10k * ar,void * buf,const struct wmi_peer_assoc_complete_arg * arg)7594 ath10k_wmi_peer_assoc_fill(struct ath10k *ar, void *buf,
7595 			   const struct wmi_peer_assoc_complete_arg *arg)
7596 {
7597 	struct wmi_common_peer_assoc_complete_cmd *cmd = buf;
7598 
7599 	cmd->vdev_id            = __cpu_to_le32(arg->vdev_id);
7600 	cmd->peer_new_assoc     = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
7601 	cmd->peer_associd       = __cpu_to_le32(arg->peer_aid);
7602 	cmd->peer_flags         = __cpu_to_le32(arg->peer_flags);
7603 	cmd->peer_caps          = __cpu_to_le32(arg->peer_caps);
7604 	cmd->peer_listen_intval = __cpu_to_le32(arg->peer_listen_intval);
7605 	cmd->peer_ht_caps       = __cpu_to_le32(arg->peer_ht_caps);
7606 	cmd->peer_max_mpdu      = __cpu_to_le32(arg->peer_max_mpdu);
7607 	cmd->peer_mpdu_density  = __cpu_to_le32(arg->peer_mpdu_density);
7608 	cmd->peer_rate_caps     = __cpu_to_le32(arg->peer_rate_caps);
7609 	cmd->peer_nss           = __cpu_to_le32(arg->peer_num_spatial_streams);
7610 	cmd->peer_vht_caps      = __cpu_to_le32(arg->peer_vht_caps);
7611 	cmd->peer_phymode       = __cpu_to_le32(arg->peer_phymode);
7612 
7613 	ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
7614 
7615 	cmd->peer_legacy_rates.num_rates =
7616 		__cpu_to_le32(arg->peer_legacy_rates.num_rates);
7617 	memcpy(cmd->peer_legacy_rates.rates, arg->peer_legacy_rates.rates,
7618 	       arg->peer_legacy_rates.num_rates);
7619 
7620 	cmd->peer_ht_rates.num_rates =
7621 		__cpu_to_le32(arg->peer_ht_rates.num_rates);
7622 	memcpy(cmd->peer_ht_rates.rates, arg->peer_ht_rates.rates,
7623 	       arg->peer_ht_rates.num_rates);
7624 
7625 	cmd->peer_vht_rates.rx_max_rate =
7626 		__cpu_to_le32(arg->peer_vht_rates.rx_max_rate);
7627 	cmd->peer_vht_rates.rx_mcs_set =
7628 		__cpu_to_le32(arg->peer_vht_rates.rx_mcs_set);
7629 	cmd->peer_vht_rates.tx_max_rate =
7630 		__cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
7631 	cmd->peer_vht_rates.tx_mcs_set =
7632 		__cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
7633 }
7634 
7635 static void
ath10k_wmi_peer_assoc_fill_main(struct ath10k * ar,void * buf,const struct wmi_peer_assoc_complete_arg * arg)7636 ath10k_wmi_peer_assoc_fill_main(struct ath10k *ar, void *buf,
7637 				const struct wmi_peer_assoc_complete_arg *arg)
7638 {
7639 	struct wmi_main_peer_assoc_complete_cmd *cmd = buf;
7640 
7641 	ath10k_wmi_peer_assoc_fill(ar, buf, arg);
7642 	memset(cmd->peer_ht_info, 0, sizeof(cmd->peer_ht_info));
7643 }
7644 
7645 static void
ath10k_wmi_peer_assoc_fill_10_1(struct ath10k * ar,void * buf,const struct wmi_peer_assoc_complete_arg * arg)7646 ath10k_wmi_peer_assoc_fill_10_1(struct ath10k *ar, void *buf,
7647 				const struct wmi_peer_assoc_complete_arg *arg)
7648 {
7649 	ath10k_wmi_peer_assoc_fill(ar, buf, arg);
7650 }
7651 
7652 static void
ath10k_wmi_peer_assoc_fill_10_2(struct ath10k * ar,void * buf,const struct wmi_peer_assoc_complete_arg * arg)7653 ath10k_wmi_peer_assoc_fill_10_2(struct ath10k *ar, void *buf,
7654 				const struct wmi_peer_assoc_complete_arg *arg)
7655 {
7656 	struct wmi_10_2_peer_assoc_complete_cmd *cmd = buf;
7657 	int max_mcs, max_nss;
7658 	u32 info0;
7659 
7660 	/* TODO: Is using max values okay with firmware? */
7661 	max_mcs = 0xf;
7662 	max_nss = 0xf;
7663 
7664 	info0 = SM(max_mcs, WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX) |
7665 		SM(max_nss, WMI_PEER_ASSOC_INFO0_MAX_NSS);
7666 
7667 	ath10k_wmi_peer_assoc_fill(ar, buf, arg);
7668 	cmd->info0 = __cpu_to_le32(info0);
7669 }
7670 
7671 static void
ath10k_wmi_peer_assoc_fill_10_4(struct ath10k * ar,void * buf,const struct wmi_peer_assoc_complete_arg * arg)7672 ath10k_wmi_peer_assoc_fill_10_4(struct ath10k *ar, void *buf,
7673 				const struct wmi_peer_assoc_complete_arg *arg)
7674 {
7675 	struct wmi_10_4_peer_assoc_complete_cmd *cmd = buf;
7676 
7677 	ath10k_wmi_peer_assoc_fill_10_2(ar, buf, arg);
7678 	cmd->peer_bw_rxnss_override =
7679 		__cpu_to_le32(arg->peer_bw_rxnss_override);
7680 }
7681 
7682 static int
ath10k_wmi_peer_assoc_check_arg(const struct wmi_peer_assoc_complete_arg * arg)7683 ath10k_wmi_peer_assoc_check_arg(const struct wmi_peer_assoc_complete_arg *arg)
7684 {
7685 	if (arg->peer_mpdu_density > 16)
7686 		return -EINVAL;
7687 	if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
7688 		return -EINVAL;
7689 	if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
7690 		return -EINVAL;
7691 
7692 	return 0;
7693 }
7694 
7695 static struct sk_buff *
ath10k_wmi_op_gen_peer_assoc(struct ath10k * ar,const struct wmi_peer_assoc_complete_arg * arg)7696 ath10k_wmi_op_gen_peer_assoc(struct ath10k *ar,
7697 			     const struct wmi_peer_assoc_complete_arg *arg)
7698 {
7699 	size_t len = sizeof(struct wmi_main_peer_assoc_complete_cmd);
7700 	struct sk_buff *skb;
7701 	int ret;
7702 
7703 	ret = ath10k_wmi_peer_assoc_check_arg(arg);
7704 	if (ret)
7705 		return ERR_PTR(ret);
7706 
7707 	skb = ath10k_wmi_alloc_skb(ar, len);
7708 	if (!skb)
7709 		return ERR_PTR(-ENOMEM);
7710 
7711 	ath10k_wmi_peer_assoc_fill_main(ar, skb->data, arg);
7712 
7713 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7714 		   "wmi peer assoc vdev %d addr %pM (%s)\n",
7715 		   arg->vdev_id, arg->addr,
7716 		   arg->peer_reassoc ? "reassociate" : "new");
7717 	return skb;
7718 }
7719 
7720 static struct sk_buff *
ath10k_wmi_10_1_op_gen_peer_assoc(struct ath10k * ar,const struct wmi_peer_assoc_complete_arg * arg)7721 ath10k_wmi_10_1_op_gen_peer_assoc(struct ath10k *ar,
7722 				  const struct wmi_peer_assoc_complete_arg *arg)
7723 {
7724 	size_t len = sizeof(struct wmi_10_1_peer_assoc_complete_cmd);
7725 	struct sk_buff *skb;
7726 	int ret;
7727 
7728 	ret = ath10k_wmi_peer_assoc_check_arg(arg);
7729 	if (ret)
7730 		return ERR_PTR(ret);
7731 
7732 	skb = ath10k_wmi_alloc_skb(ar, len);
7733 	if (!skb)
7734 		return ERR_PTR(-ENOMEM);
7735 
7736 	ath10k_wmi_peer_assoc_fill_10_1(ar, skb->data, arg);
7737 
7738 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7739 		   "wmi peer assoc vdev %d addr %pM (%s)\n",
7740 		   arg->vdev_id, arg->addr,
7741 		   arg->peer_reassoc ? "reassociate" : "new");
7742 	return skb;
7743 }
7744 
7745 static struct sk_buff *
ath10k_wmi_10_2_op_gen_peer_assoc(struct ath10k * ar,const struct wmi_peer_assoc_complete_arg * arg)7746 ath10k_wmi_10_2_op_gen_peer_assoc(struct ath10k *ar,
7747 				  const struct wmi_peer_assoc_complete_arg *arg)
7748 {
7749 	size_t len = sizeof(struct wmi_10_2_peer_assoc_complete_cmd);
7750 	struct sk_buff *skb;
7751 	int ret;
7752 
7753 	ret = ath10k_wmi_peer_assoc_check_arg(arg);
7754 	if (ret)
7755 		return ERR_PTR(ret);
7756 
7757 	skb = ath10k_wmi_alloc_skb(ar, len);
7758 	if (!skb)
7759 		return ERR_PTR(-ENOMEM);
7760 
7761 	ath10k_wmi_peer_assoc_fill_10_2(ar, skb->data, arg);
7762 
7763 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7764 		   "wmi peer assoc vdev %d addr %pM (%s)\n",
7765 		   arg->vdev_id, arg->addr,
7766 		   arg->peer_reassoc ? "reassociate" : "new");
7767 	return skb;
7768 }
7769 
7770 static struct sk_buff *
ath10k_wmi_10_4_op_gen_peer_assoc(struct ath10k * ar,const struct wmi_peer_assoc_complete_arg * arg)7771 ath10k_wmi_10_4_op_gen_peer_assoc(struct ath10k *ar,
7772 				  const struct wmi_peer_assoc_complete_arg *arg)
7773 {
7774 	size_t len = sizeof(struct wmi_10_4_peer_assoc_complete_cmd);
7775 	struct sk_buff *skb;
7776 	int ret;
7777 
7778 	ret = ath10k_wmi_peer_assoc_check_arg(arg);
7779 	if (ret)
7780 		return ERR_PTR(ret);
7781 
7782 	skb = ath10k_wmi_alloc_skb(ar, len);
7783 	if (!skb)
7784 		return ERR_PTR(-ENOMEM);
7785 
7786 	ath10k_wmi_peer_assoc_fill_10_4(ar, skb->data, arg);
7787 
7788 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7789 		   "wmi peer assoc vdev %d addr %pM (%s)\n",
7790 		   arg->vdev_id, arg->addr,
7791 		   arg->peer_reassoc ? "reassociate" : "new");
7792 	return skb;
7793 }
7794 
7795 static struct sk_buff *
ath10k_wmi_10_2_op_gen_pdev_get_temperature(struct ath10k * ar)7796 ath10k_wmi_10_2_op_gen_pdev_get_temperature(struct ath10k *ar)
7797 {
7798 	struct sk_buff *skb;
7799 
7800 	skb = ath10k_wmi_alloc_skb(ar, 0);
7801 	if (!skb)
7802 		return ERR_PTR(-ENOMEM);
7803 
7804 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev get temperature\n");
7805 	return skb;
7806 }
7807 
7808 static struct sk_buff *
ath10k_wmi_10_2_op_gen_pdev_bss_chan_info(struct ath10k * ar,enum wmi_bss_survey_req_type type)7809 ath10k_wmi_10_2_op_gen_pdev_bss_chan_info(struct ath10k *ar,
7810 					  enum wmi_bss_survey_req_type type)
7811 {
7812 	struct wmi_pdev_chan_info_req_cmd *cmd;
7813 	struct sk_buff *skb;
7814 
7815 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7816 	if (!skb)
7817 		return ERR_PTR(-ENOMEM);
7818 
7819 	cmd = (struct wmi_pdev_chan_info_req_cmd *)skb->data;
7820 	cmd->type = __cpu_to_le32(type);
7821 
7822 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7823 		   "wmi pdev bss info request type %d\n", type);
7824 
7825 	return skb;
7826 }
7827 
7828 /* This function assumes the beacon is already DMA mapped */
7829 static struct sk_buff *
ath10k_wmi_op_gen_beacon_dma(struct ath10k * ar,u32 vdev_id,const void * bcn,size_t bcn_len,u32 bcn_paddr,bool dtim_zero,bool deliver_cab)7830 ath10k_wmi_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id, const void *bcn,
7831 			     size_t bcn_len, u32 bcn_paddr, bool dtim_zero,
7832 			     bool deliver_cab)
7833 {
7834 	struct wmi_bcn_tx_ref_cmd *cmd;
7835 	struct sk_buff *skb;
7836 	struct ieee80211_hdr *hdr;
7837 	u16 fc;
7838 
7839 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7840 	if (!skb)
7841 		return ERR_PTR(-ENOMEM);
7842 
7843 	hdr = (struct ieee80211_hdr *)bcn;
7844 	fc = le16_to_cpu(hdr->frame_control);
7845 
7846 	cmd = (struct wmi_bcn_tx_ref_cmd *)skb->data;
7847 	cmd->vdev_id = __cpu_to_le32(vdev_id);
7848 	cmd->data_len = __cpu_to_le32(bcn_len);
7849 	cmd->data_ptr = __cpu_to_le32(bcn_paddr);
7850 	cmd->msdu_id = 0;
7851 	cmd->frame_control = __cpu_to_le32(fc);
7852 	cmd->flags = 0;
7853 	cmd->antenna_mask = __cpu_to_le32(WMI_BCN_TX_REF_DEF_ANTENNA);
7854 
7855 	if (dtim_zero)
7856 		cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);
7857 
7858 	if (deliver_cab)
7859 		cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);
7860 
7861 	return skb;
7862 }
7863 
ath10k_wmi_set_wmm_param(struct wmi_wmm_params * params,const struct wmi_wmm_params_arg * arg)7864 void ath10k_wmi_set_wmm_param(struct wmi_wmm_params *params,
7865 			      const struct wmi_wmm_params_arg *arg)
7866 {
7867 	params->cwmin  = __cpu_to_le32(arg->cwmin);
7868 	params->cwmax  = __cpu_to_le32(arg->cwmax);
7869 	params->aifs   = __cpu_to_le32(arg->aifs);
7870 	params->txop   = __cpu_to_le32(arg->txop);
7871 	params->acm    = __cpu_to_le32(arg->acm);
7872 	params->no_ack = __cpu_to_le32(arg->no_ack);
7873 }
7874 
7875 static struct sk_buff *
ath10k_wmi_op_gen_pdev_set_wmm(struct ath10k * ar,const struct wmi_wmm_params_all_arg * arg)7876 ath10k_wmi_op_gen_pdev_set_wmm(struct ath10k *ar,
7877 			       const struct wmi_wmm_params_all_arg *arg)
7878 {
7879 	struct wmi_pdev_set_wmm_params *cmd;
7880 	struct sk_buff *skb;
7881 
7882 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7883 	if (!skb)
7884 		return ERR_PTR(-ENOMEM);
7885 
7886 	cmd = (struct wmi_pdev_set_wmm_params *)skb->data;
7887 	ath10k_wmi_set_wmm_param(&cmd->ac_be, &arg->ac_be);
7888 	ath10k_wmi_set_wmm_param(&cmd->ac_bk, &arg->ac_bk);
7889 	ath10k_wmi_set_wmm_param(&cmd->ac_vi, &arg->ac_vi);
7890 	ath10k_wmi_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);
7891 
7892 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set wmm params\n");
7893 	return skb;
7894 }
7895 
7896 static struct sk_buff *
ath10k_wmi_op_gen_request_stats(struct ath10k * ar,u32 stats_mask)7897 ath10k_wmi_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
7898 {
7899 	struct wmi_request_stats_cmd *cmd;
7900 	struct sk_buff *skb;
7901 
7902 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7903 	if (!skb)
7904 		return ERR_PTR(-ENOMEM);
7905 
7906 	cmd = (struct wmi_request_stats_cmd *)skb->data;
7907 	cmd->stats_id = __cpu_to_le32(stats_mask);
7908 
7909 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi request stats 0x%08x\n",
7910 		   stats_mask);
7911 	return skb;
7912 }
7913 
7914 static struct sk_buff *
ath10k_wmi_op_gen_force_fw_hang(struct ath10k * ar,enum wmi_force_fw_hang_type type,u32 delay_ms)7915 ath10k_wmi_op_gen_force_fw_hang(struct ath10k *ar,
7916 				enum wmi_force_fw_hang_type type, u32 delay_ms)
7917 {
7918 	struct wmi_force_fw_hang_cmd *cmd;
7919 	struct sk_buff *skb;
7920 
7921 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7922 	if (!skb)
7923 		return ERR_PTR(-ENOMEM);
7924 
7925 	cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
7926 	cmd->type = __cpu_to_le32(type);
7927 	cmd->delay_ms = __cpu_to_le32(delay_ms);
7928 
7929 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n",
7930 		   type, delay_ms);
7931 	return skb;
7932 }
7933 
7934 static struct sk_buff *
ath10k_wmi_op_gen_dbglog_cfg(struct ath10k * ar,u64 module_enable,u32 log_level)7935 ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
7936 			     u32 log_level)
7937 {
7938 	struct wmi_dbglog_cfg_cmd *cmd;
7939 	struct sk_buff *skb;
7940 	u32 cfg;
7941 
7942 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7943 	if (!skb)
7944 		return ERR_PTR(-ENOMEM);
7945 
7946 	cmd = (struct wmi_dbglog_cfg_cmd *)skb->data;
7947 
7948 	if (module_enable) {
7949 		cfg = SM(log_level,
7950 			 ATH10K_DBGLOG_CFG_LOG_LVL);
7951 	} else {
7952 		/* set back defaults, all modules with WARN level */
7953 		cfg = SM(ATH10K_DBGLOG_LEVEL_WARN,
7954 			 ATH10K_DBGLOG_CFG_LOG_LVL);
7955 		module_enable = ~0;
7956 	}
7957 
7958 	cmd->module_enable = __cpu_to_le32(module_enable);
7959 	cmd->module_valid = __cpu_to_le32(~0);
7960 	cmd->config_enable = __cpu_to_le32(cfg);
7961 	cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK);
7962 
7963 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7964 		   "wmi dbglog cfg modules %08x %08x config %08x %08x\n",
7965 		   __le32_to_cpu(cmd->module_enable),
7966 		   __le32_to_cpu(cmd->module_valid),
7967 		   __le32_to_cpu(cmd->config_enable),
7968 		   __le32_to_cpu(cmd->config_valid));
7969 	return skb;
7970 }
7971 
7972 static struct sk_buff *
ath10k_wmi_10_4_op_gen_dbglog_cfg(struct ath10k * ar,u64 module_enable,u32 log_level)7973 ath10k_wmi_10_4_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
7974 				  u32 log_level)
7975 {
7976 	struct wmi_10_4_dbglog_cfg_cmd *cmd;
7977 	struct sk_buff *skb;
7978 	u32 cfg;
7979 
7980 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7981 	if (!skb)
7982 		return ERR_PTR(-ENOMEM);
7983 
7984 	cmd = (struct wmi_10_4_dbglog_cfg_cmd *)skb->data;
7985 
7986 	if (module_enable) {
7987 		cfg = SM(log_level,
7988 			 ATH10K_DBGLOG_CFG_LOG_LVL);
7989 	} else {
7990 		/* set back defaults, all modules with WARN level */
7991 		cfg = SM(ATH10K_DBGLOG_LEVEL_WARN,
7992 			 ATH10K_DBGLOG_CFG_LOG_LVL);
7993 		module_enable = ~0;
7994 	}
7995 
7996 	cmd->module_enable = __cpu_to_le64(module_enable);
7997 	cmd->module_valid = __cpu_to_le64(~0);
7998 	cmd->config_enable = __cpu_to_le32(cfg);
7999 	cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK);
8000 
8001 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8002 		   "wmi dbglog cfg modules 0x%016llx 0x%016llx config %08x %08x\n",
8003 		   __le64_to_cpu(cmd->module_enable),
8004 		   __le64_to_cpu(cmd->module_valid),
8005 		   __le32_to_cpu(cmd->config_enable),
8006 		   __le32_to_cpu(cmd->config_valid));
8007 	return skb;
8008 }
8009 
8010 static struct sk_buff *
ath10k_wmi_op_gen_pktlog_enable(struct ath10k * ar,u32 ev_bitmap)8011 ath10k_wmi_op_gen_pktlog_enable(struct ath10k *ar, u32 ev_bitmap)
8012 {
8013 	struct wmi_pdev_pktlog_enable_cmd *cmd;
8014 	struct sk_buff *skb;
8015 
8016 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8017 	if (!skb)
8018 		return ERR_PTR(-ENOMEM);
8019 
8020 	ev_bitmap &= ATH10K_PKTLOG_ANY;
8021 
8022 	cmd = (struct wmi_pdev_pktlog_enable_cmd *)skb->data;
8023 	cmd->ev_bitmap = __cpu_to_le32(ev_bitmap);
8024 
8025 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi enable pktlog filter 0x%08x\n",
8026 		   ev_bitmap);
8027 	return skb;
8028 }
8029 
8030 static struct sk_buff *
ath10k_wmi_op_gen_pktlog_disable(struct ath10k * ar)8031 ath10k_wmi_op_gen_pktlog_disable(struct ath10k *ar)
8032 {
8033 	struct sk_buff *skb;
8034 
8035 	skb = ath10k_wmi_alloc_skb(ar, 0);
8036 	if (!skb)
8037 		return ERR_PTR(-ENOMEM);
8038 
8039 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi disable pktlog\n");
8040 	return skb;
8041 }
8042 
8043 static struct sk_buff *
ath10k_wmi_op_gen_pdev_set_quiet_mode(struct ath10k * ar,u32 period,u32 duration,u32 next_offset,u32 enabled)8044 ath10k_wmi_op_gen_pdev_set_quiet_mode(struct ath10k *ar, u32 period,
8045 				      u32 duration, u32 next_offset,
8046 				      u32 enabled)
8047 {
8048 	struct wmi_pdev_set_quiet_cmd *cmd;
8049 	struct sk_buff *skb;
8050 
8051 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8052 	if (!skb)
8053 		return ERR_PTR(-ENOMEM);
8054 
8055 	cmd = (struct wmi_pdev_set_quiet_cmd *)skb->data;
8056 	cmd->period = __cpu_to_le32(period);
8057 	cmd->duration = __cpu_to_le32(duration);
8058 	cmd->next_start = __cpu_to_le32(next_offset);
8059 	cmd->enabled = __cpu_to_le32(enabled);
8060 
8061 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8062 		   "wmi quiet param: period %u duration %u enabled %d\n",
8063 		   period, duration, enabled);
8064 	return skb;
8065 }
8066 
8067 static struct sk_buff *
ath10k_wmi_op_gen_addba_clear_resp(struct ath10k * ar,u32 vdev_id,const u8 * mac)8068 ath10k_wmi_op_gen_addba_clear_resp(struct ath10k *ar, u32 vdev_id,
8069 				   const u8 *mac)
8070 {
8071 	struct wmi_addba_clear_resp_cmd *cmd;
8072 	struct sk_buff *skb;
8073 
8074 	if (!mac)
8075 		return ERR_PTR(-EINVAL);
8076 
8077 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8078 	if (!skb)
8079 		return ERR_PTR(-ENOMEM);
8080 
8081 	cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
8082 	cmd->vdev_id = __cpu_to_le32(vdev_id);
8083 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
8084 
8085 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8086 		   "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n",
8087 		   vdev_id, mac);
8088 	return skb;
8089 }
8090 
8091 static struct sk_buff *
ath10k_wmi_op_gen_addba_send(struct ath10k * ar,u32 vdev_id,const u8 * mac,u32 tid,u32 buf_size)8092 ath10k_wmi_op_gen_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
8093 			     u32 tid, u32 buf_size)
8094 {
8095 	struct wmi_addba_send_cmd *cmd;
8096 	struct sk_buff *skb;
8097 
8098 	if (!mac)
8099 		return ERR_PTR(-EINVAL);
8100 
8101 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8102 	if (!skb)
8103 		return ERR_PTR(-ENOMEM);
8104 
8105 	cmd = (struct wmi_addba_send_cmd *)skb->data;
8106 	cmd->vdev_id = __cpu_to_le32(vdev_id);
8107 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
8108 	cmd->tid = __cpu_to_le32(tid);
8109 	cmd->buffersize = __cpu_to_le32(buf_size);
8110 
8111 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8112 		   "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
8113 		   vdev_id, mac, tid, buf_size);
8114 	return skb;
8115 }
8116 
8117 static struct sk_buff *
ath10k_wmi_op_gen_addba_set_resp(struct ath10k * ar,u32 vdev_id,const u8 * mac,u32 tid,u32 status)8118 ath10k_wmi_op_gen_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
8119 				 u32 tid, u32 status)
8120 {
8121 	struct wmi_addba_setresponse_cmd *cmd;
8122 	struct sk_buff *skb;
8123 
8124 	if (!mac)
8125 		return ERR_PTR(-EINVAL);
8126 
8127 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8128 	if (!skb)
8129 		return ERR_PTR(-ENOMEM);
8130 
8131 	cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
8132 	cmd->vdev_id = __cpu_to_le32(vdev_id);
8133 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
8134 	cmd->tid = __cpu_to_le32(tid);
8135 	cmd->statuscode = __cpu_to_le32(status);
8136 
8137 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8138 		   "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
8139 		   vdev_id, mac, tid, status);
8140 	return skb;
8141 }
8142 
8143 static struct sk_buff *
ath10k_wmi_op_gen_delba_send(struct ath10k * ar,u32 vdev_id,const u8 * mac,u32 tid,u32 initiator,u32 reason)8144 ath10k_wmi_op_gen_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
8145 			     u32 tid, u32 initiator, u32 reason)
8146 {
8147 	struct wmi_delba_send_cmd *cmd;
8148 	struct sk_buff *skb;
8149 
8150 	if (!mac)
8151 		return ERR_PTR(-EINVAL);
8152 
8153 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8154 	if (!skb)
8155 		return ERR_PTR(-ENOMEM);
8156 
8157 	cmd = (struct wmi_delba_send_cmd *)skb->data;
8158 	cmd->vdev_id = __cpu_to_le32(vdev_id);
8159 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
8160 	cmd->tid = __cpu_to_le32(tid);
8161 	cmd->initiator = __cpu_to_le32(initiator);
8162 	cmd->reasoncode = __cpu_to_le32(reason);
8163 
8164 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8165 		   "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
8166 		   vdev_id, mac, tid, initiator, reason);
8167 	return skb;
8168 }
8169 
8170 static struct sk_buff *
ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config(struct ath10k * ar,u32 param)8171 ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config(struct ath10k *ar, u32 param)
8172 {
8173 	struct wmi_pdev_get_tpc_config_cmd *cmd;
8174 	struct sk_buff *skb;
8175 
8176 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8177 	if (!skb)
8178 		return ERR_PTR(-ENOMEM);
8179 
8180 	cmd = (struct wmi_pdev_get_tpc_config_cmd *)skb->data;
8181 	cmd->param = __cpu_to_le32(param);
8182 
8183 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8184 		   "wmi pdev get tpc config param %d\n", param);
8185 	return skb;
8186 }
8187 
8188 static void
ath10k_wmi_fw_pdev_base_stats_fill(const struct ath10k_fw_stats_pdev * pdev,char * buf,u32 * length)8189 ath10k_wmi_fw_pdev_base_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
8190 				   char *buf, u32 *length)
8191 {
8192 	u32 len = *length;
8193 	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8194 
8195 	len += scnprintf(buf + len, buf_len - len, "\n");
8196 	len += scnprintf(buf + len, buf_len - len, "%30s\n",
8197 			"ath10k PDEV stats");
8198 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8199 			"=================");
8200 
8201 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8202 			"Channel noise floor", pdev->ch_noise_floor);
8203 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8204 			"Channel TX power", pdev->chan_tx_power);
8205 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8206 			"TX frame count", pdev->tx_frame_count);
8207 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8208 			"RX frame count", pdev->rx_frame_count);
8209 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8210 			"RX clear count", pdev->rx_clear_count);
8211 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8212 			"Cycle count", pdev->cycle_count);
8213 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8214 			"PHY error count", pdev->phy_err_count);
8215 
8216 	*length = len;
8217 }
8218 
8219 static void
ath10k_wmi_fw_pdev_extra_stats_fill(const struct ath10k_fw_stats_pdev * pdev,char * buf,u32 * length)8220 ath10k_wmi_fw_pdev_extra_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
8221 				    char *buf, u32 *length)
8222 {
8223 	u32 len = *length;
8224 	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8225 
8226 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8227 			"RTS bad count", pdev->rts_bad);
8228 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8229 			"RTS good count", pdev->rts_good);
8230 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8231 			"FCS bad count", pdev->fcs_bad);
8232 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8233 			"No beacon count", pdev->no_beacons);
8234 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8235 			"MIB int count", pdev->mib_int_count);
8236 
8237 	len += scnprintf(buf + len, buf_len - len, "\n");
8238 	*length = len;
8239 }
8240 
8241 static void
ath10k_wmi_fw_pdev_tx_stats_fill(const struct ath10k_fw_stats_pdev * pdev,char * buf,u32 * length)8242 ath10k_wmi_fw_pdev_tx_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
8243 				 char *buf, u32 *length)
8244 {
8245 	u32 len = *length;
8246 	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8247 
8248 	len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
8249 			 "ath10k PDEV TX stats");
8250 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8251 				 "=================");
8252 
8253 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8254 			 "HTT cookies queued", pdev->comp_queued);
8255 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8256 			 "HTT cookies disp.", pdev->comp_delivered);
8257 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8258 			 "MSDU queued", pdev->msdu_enqued);
8259 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8260 			 "MPDU queued", pdev->mpdu_enqued);
8261 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8262 			 "MSDUs dropped", pdev->wmm_drop);
8263 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8264 			 "Local enqued", pdev->local_enqued);
8265 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8266 			 "Local freed", pdev->local_freed);
8267 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8268 			 "HW queued", pdev->hw_queued);
8269 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8270 			 "PPDUs reaped", pdev->hw_reaped);
8271 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8272 			 "Num underruns", pdev->underrun);
8273 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8274 			 "PPDUs cleaned", pdev->tx_abort);
8275 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8276 			 "MPDUs requeued", pdev->mpdus_requeued);
8277 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8278 			 "Excessive retries", pdev->tx_ko);
8279 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8280 			 "HW rate", pdev->data_rc);
8281 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8282 			 "Sched self triggers", pdev->self_triggers);
8283 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8284 			 "Dropped due to SW retries",
8285 			 pdev->sw_retry_failure);
8286 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8287 			 "Illegal rate phy errors",
8288 			 pdev->illgl_rate_phy_err);
8289 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8290 			 "Pdev continuous xretry", pdev->pdev_cont_xretry);
8291 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8292 			 "TX timeout", pdev->pdev_tx_timeout);
8293 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8294 			 "PDEV resets", pdev->pdev_resets);
8295 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8296 			 "PHY underrun", pdev->phy_underrun);
8297 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8298 			 "MPDU is more than txop limit", pdev->txop_ovf);
8299 	*length = len;
8300 }
8301 
8302 static void
ath10k_wmi_fw_pdev_rx_stats_fill(const struct ath10k_fw_stats_pdev * pdev,char * buf,u32 * length)8303 ath10k_wmi_fw_pdev_rx_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
8304 				 char *buf, u32 *length)
8305 {
8306 	u32 len = *length;
8307 	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8308 
8309 	len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
8310 			 "ath10k PDEV RX stats");
8311 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8312 				 "=================");
8313 
8314 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8315 			 "Mid PPDU route change",
8316 			 pdev->mid_ppdu_route_change);
8317 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8318 			 "Tot. number of statuses", pdev->status_rcvd);
8319 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8320 			 "Extra frags on rings 0", pdev->r0_frags);
8321 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8322 			 "Extra frags on rings 1", pdev->r1_frags);
8323 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8324 			 "Extra frags on rings 2", pdev->r2_frags);
8325 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8326 			 "Extra frags on rings 3", pdev->r3_frags);
8327 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8328 			 "MSDUs delivered to HTT", pdev->htt_msdus);
8329 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8330 			 "MPDUs delivered to HTT", pdev->htt_mpdus);
8331 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8332 			 "MSDUs delivered to stack", pdev->loc_msdus);
8333 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8334 			 "MPDUs delivered to stack", pdev->loc_mpdus);
8335 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8336 			 "Oversized AMSDUs", pdev->oversize_amsdu);
8337 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8338 			 "PHY errors", pdev->phy_errs);
8339 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8340 			 "PHY errors drops", pdev->phy_err_drop);
8341 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8342 			 "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
8343 	*length = len;
8344 }
8345 
8346 static void
ath10k_wmi_fw_vdev_stats_fill(const struct ath10k_fw_stats_vdev * vdev,char * buf,u32 * length)8347 ath10k_wmi_fw_vdev_stats_fill(const struct ath10k_fw_stats_vdev *vdev,
8348 			      char *buf, u32 *length)
8349 {
8350 	u32 len = *length;
8351 	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8352 	int i;
8353 
8354 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8355 			"vdev id", vdev->vdev_id);
8356 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8357 			"beacon snr", vdev->beacon_snr);
8358 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8359 			"data snr", vdev->data_snr);
8360 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8361 			"num rx frames", vdev->num_rx_frames);
8362 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8363 			"num rts fail", vdev->num_rts_fail);
8364 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8365 			"num rts success", vdev->num_rts_success);
8366 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8367 			"num rx err", vdev->num_rx_err);
8368 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8369 			"num rx discard", vdev->num_rx_discard);
8370 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8371 			"num tx not acked", vdev->num_tx_not_acked);
8372 
8373 	for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames); i++)
8374 		len += scnprintf(buf + len, buf_len - len,
8375 				"%25s [%02d] %u\n",
8376 				"num tx frames", i,
8377 				vdev->num_tx_frames[i]);
8378 
8379 	for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_retries); i++)
8380 		len += scnprintf(buf + len, buf_len - len,
8381 				"%25s [%02d] %u\n",
8382 				"num tx frames retries", i,
8383 				vdev->num_tx_frames_retries[i]);
8384 
8385 	for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_failures); i++)
8386 		len += scnprintf(buf + len, buf_len - len,
8387 				"%25s [%02d] %u\n",
8388 				"num tx frames failures", i,
8389 				vdev->num_tx_frames_failures[i]);
8390 
8391 	for (i = 0 ; i < ARRAY_SIZE(vdev->tx_rate_history); i++)
8392 		len += scnprintf(buf + len, buf_len - len,
8393 				"%25s [%02d] 0x%08x\n",
8394 				"tx rate history", i,
8395 				vdev->tx_rate_history[i]);
8396 
8397 	for (i = 0 ; i < ARRAY_SIZE(vdev->beacon_rssi_history); i++)
8398 		len += scnprintf(buf + len, buf_len - len,
8399 				"%25s [%02d] %u\n",
8400 				"beacon rssi history", i,
8401 				vdev->beacon_rssi_history[i]);
8402 
8403 	len += scnprintf(buf + len, buf_len - len, "\n");
8404 	*length = len;
8405 }
8406 
8407 static void
ath10k_wmi_fw_peer_stats_fill(const struct ath10k_fw_stats_peer * peer,char * buf,u32 * length,bool extended_peer)8408 ath10k_wmi_fw_peer_stats_fill(const struct ath10k_fw_stats_peer *peer,
8409 			      char *buf, u32 *length, bool extended_peer)
8410 {
8411 	u32 len = *length;
8412 	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8413 
8414 	len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
8415 			"Peer MAC address", peer->peer_macaddr);
8416 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8417 			"Peer RSSI", peer->peer_rssi);
8418 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8419 			"Peer TX rate", peer->peer_tx_rate);
8420 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8421 			"Peer RX rate", peer->peer_rx_rate);
8422 	if (!extended_peer)
8423 		len += scnprintf(buf + len, buf_len - len, "%30s %llu\n",
8424 				"Peer RX duration", peer->rx_duration);
8425 
8426 	len += scnprintf(buf + len, buf_len - len, "\n");
8427 	*length = len;
8428 }
8429 
8430 static void
ath10k_wmi_fw_extd_peer_stats_fill(const struct ath10k_fw_extd_stats_peer * peer,char * buf,u32 * length)8431 ath10k_wmi_fw_extd_peer_stats_fill(const struct ath10k_fw_extd_stats_peer *peer,
8432 				   char *buf, u32 *length)
8433 {
8434 	u32 len = *length;
8435 	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8436 
8437 	len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
8438 			"Peer MAC address", peer->peer_macaddr);
8439 	len += scnprintf(buf + len, buf_len - len, "%30s %llu\n",
8440 			"Peer RX duration", peer->rx_duration);
8441 }
8442 
ath10k_wmi_main_op_fw_stats_fill(struct ath10k * ar,struct ath10k_fw_stats * fw_stats,char * buf)8443 void ath10k_wmi_main_op_fw_stats_fill(struct ath10k *ar,
8444 				      struct ath10k_fw_stats *fw_stats,
8445 				      char *buf)
8446 {
8447 	u32 len = 0;
8448 	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8449 	const struct ath10k_fw_stats_pdev *pdev;
8450 	const struct ath10k_fw_stats_vdev *vdev;
8451 	const struct ath10k_fw_stats_peer *peer;
8452 	size_t num_peers;
8453 	size_t num_vdevs;
8454 
8455 	spin_lock_bh(&ar->data_lock);
8456 
8457 	pdev = list_first_entry_or_null(&fw_stats->pdevs,
8458 					struct ath10k_fw_stats_pdev, list);
8459 	if (!pdev) {
8460 		ath10k_warn(ar, "failed to get pdev stats\n");
8461 		goto unlock;
8462 	}
8463 
8464 	num_peers = list_count_nodes(&fw_stats->peers);
8465 	num_vdevs = list_count_nodes(&fw_stats->vdevs);
8466 
8467 	ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
8468 	ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
8469 	ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
8470 
8471 	len += scnprintf(buf + len, buf_len - len, "\n");
8472 	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
8473 			 "ath10k VDEV stats", num_vdevs);
8474 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8475 				 "=================");
8476 
8477 	list_for_each_entry(vdev, &fw_stats->vdevs, list) {
8478 		ath10k_wmi_fw_vdev_stats_fill(vdev, buf, &len);
8479 	}
8480 
8481 	len += scnprintf(buf + len, buf_len - len, "\n");
8482 	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
8483 			 "ath10k PEER stats", num_peers);
8484 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8485 				 "=================");
8486 
8487 	list_for_each_entry(peer, &fw_stats->peers, list) {
8488 		ath10k_wmi_fw_peer_stats_fill(peer, buf, &len,
8489 					      fw_stats->extended);
8490 	}
8491 
8492 unlock:
8493 	spin_unlock_bh(&ar->data_lock);
8494 
8495 	if (len >= buf_len)
8496 		buf[len - 1] = 0;
8497 	else
8498 		buf[len] = 0;
8499 }
8500 
ath10k_wmi_10x_op_fw_stats_fill(struct ath10k * ar,struct ath10k_fw_stats * fw_stats,char * buf)8501 void ath10k_wmi_10x_op_fw_stats_fill(struct ath10k *ar,
8502 				     struct ath10k_fw_stats *fw_stats,
8503 				     char *buf)
8504 {
8505 	unsigned int len = 0;
8506 	unsigned int buf_len = ATH10K_FW_STATS_BUF_SIZE;
8507 	const struct ath10k_fw_stats_pdev *pdev;
8508 	const struct ath10k_fw_stats_vdev *vdev;
8509 	const struct ath10k_fw_stats_peer *peer;
8510 	size_t num_peers;
8511 	size_t num_vdevs;
8512 
8513 	spin_lock_bh(&ar->data_lock);
8514 
8515 	pdev = list_first_entry_or_null(&fw_stats->pdevs,
8516 					struct ath10k_fw_stats_pdev, list);
8517 	if (!pdev) {
8518 		ath10k_warn(ar, "failed to get pdev stats\n");
8519 		goto unlock;
8520 	}
8521 
8522 	num_peers = list_count_nodes(&fw_stats->peers);
8523 	num_vdevs = list_count_nodes(&fw_stats->vdevs);
8524 
8525 	ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
8526 	ath10k_wmi_fw_pdev_extra_stats_fill(pdev, buf, &len);
8527 	ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
8528 	ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
8529 
8530 	len += scnprintf(buf + len, buf_len - len, "\n");
8531 	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
8532 			 "ath10k VDEV stats", num_vdevs);
8533 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8534 				 "=================");
8535 
8536 	list_for_each_entry(vdev, &fw_stats->vdevs, list) {
8537 		ath10k_wmi_fw_vdev_stats_fill(vdev, buf, &len);
8538 	}
8539 
8540 	len += scnprintf(buf + len, buf_len - len, "\n");
8541 	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
8542 			 "ath10k PEER stats", num_peers);
8543 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8544 				 "=================");
8545 
8546 	list_for_each_entry(peer, &fw_stats->peers, list) {
8547 		ath10k_wmi_fw_peer_stats_fill(peer, buf, &len,
8548 					      fw_stats->extended);
8549 	}
8550 
8551 unlock:
8552 	spin_unlock_bh(&ar->data_lock);
8553 
8554 	if (len >= buf_len)
8555 		buf[len - 1] = 0;
8556 	else
8557 		buf[len] = 0;
8558 }
8559 
8560 static struct sk_buff *
ath10k_wmi_op_gen_pdev_enable_adaptive_cca(struct ath10k * ar,u8 enable,u32 detect_level,u32 detect_margin)8561 ath10k_wmi_op_gen_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
8562 					   u32 detect_level, u32 detect_margin)
8563 {
8564 	struct wmi_pdev_set_adaptive_cca_params *cmd;
8565 	struct sk_buff *skb;
8566 
8567 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8568 	if (!skb)
8569 		return ERR_PTR(-ENOMEM);
8570 
8571 	cmd = (struct wmi_pdev_set_adaptive_cca_params *)skb->data;
8572 	cmd->enable = __cpu_to_le32(enable);
8573 	cmd->cca_detect_level = __cpu_to_le32(detect_level);
8574 	cmd->cca_detect_margin = __cpu_to_le32(detect_margin);
8575 
8576 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8577 		   "wmi pdev set adaptive cca params enable:%d detection level:%d detection margin:%d\n",
8578 		   enable, detect_level, detect_margin);
8579 	return skb;
8580 }
8581 
8582 static void
ath10k_wmi_fw_vdev_stats_extd_fill(const struct ath10k_fw_stats_vdev_extd * vdev,char * buf,u32 * length)8583 ath10k_wmi_fw_vdev_stats_extd_fill(const struct ath10k_fw_stats_vdev_extd *vdev,
8584 				   char *buf, u32 *length)
8585 {
8586 	u32 len = *length;
8587 	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8588 	u32 val;
8589 
8590 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8591 			 "vdev id", vdev->vdev_id);
8592 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8593 			 "ppdu aggr count", vdev->ppdu_aggr_cnt);
8594 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8595 			 "ppdu noack", vdev->ppdu_noack);
8596 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8597 			 "mpdu queued", vdev->mpdu_queued);
8598 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8599 			 "ppdu nonaggr count", vdev->ppdu_nonaggr_cnt);
8600 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8601 			 "mpdu sw requeued", vdev->mpdu_sw_requeued);
8602 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8603 			 "mpdu success retry", vdev->mpdu_suc_retry);
8604 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8605 			 "mpdu success multitry", vdev->mpdu_suc_multitry);
8606 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8607 			 "mpdu fail retry", vdev->mpdu_fail_retry);
8608 	val = vdev->tx_ftm_suc;
8609 	if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
8610 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8611 				 "tx ftm success",
8612 				 MS(val, WMI_VDEV_STATS_FTM_COUNT));
8613 	val = vdev->tx_ftm_suc_retry;
8614 	if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
8615 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8616 				 "tx ftm success retry",
8617 				 MS(val, WMI_VDEV_STATS_FTM_COUNT));
8618 	val = vdev->tx_ftm_fail;
8619 	if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
8620 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8621 				 "tx ftm fail",
8622 				 MS(val, WMI_VDEV_STATS_FTM_COUNT));
8623 	val = vdev->rx_ftmr_cnt;
8624 	if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
8625 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8626 				 "rx ftm request count",
8627 				 MS(val, WMI_VDEV_STATS_FTM_COUNT));
8628 	val = vdev->rx_ftmr_dup_cnt;
8629 	if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
8630 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8631 				 "rx ftm request dup count",
8632 				 MS(val, WMI_VDEV_STATS_FTM_COUNT));
8633 	val = vdev->rx_iftmr_cnt;
8634 	if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
8635 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8636 				 "rx initial ftm req count",
8637 				 MS(val, WMI_VDEV_STATS_FTM_COUNT));
8638 	val = vdev->rx_iftmr_dup_cnt;
8639 	if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
8640 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8641 				 "rx initial ftm req dup cnt",
8642 				 MS(val, WMI_VDEV_STATS_FTM_COUNT));
8643 	len += scnprintf(buf + len, buf_len - len, "\n");
8644 
8645 	*length = len;
8646 }
8647 
ath10k_wmi_10_4_op_fw_stats_fill(struct ath10k * ar,struct ath10k_fw_stats * fw_stats,char * buf)8648 void ath10k_wmi_10_4_op_fw_stats_fill(struct ath10k *ar,
8649 				      struct ath10k_fw_stats *fw_stats,
8650 				      char *buf)
8651 {
8652 	u32 len = 0;
8653 	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8654 	const struct ath10k_fw_stats_pdev *pdev;
8655 	const struct ath10k_fw_stats_vdev_extd *vdev;
8656 	const struct ath10k_fw_stats_peer *peer;
8657 	const struct ath10k_fw_extd_stats_peer *extd_peer;
8658 	size_t num_peers;
8659 	size_t num_vdevs;
8660 
8661 	spin_lock_bh(&ar->data_lock);
8662 
8663 	pdev = list_first_entry_or_null(&fw_stats->pdevs,
8664 					struct ath10k_fw_stats_pdev, list);
8665 	if (!pdev) {
8666 		ath10k_warn(ar, "failed to get pdev stats\n");
8667 		goto unlock;
8668 	}
8669 
8670 	num_peers = list_count_nodes(&fw_stats->peers);
8671 	num_vdevs = list_count_nodes(&fw_stats->vdevs);
8672 
8673 	ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
8674 	ath10k_wmi_fw_pdev_extra_stats_fill(pdev, buf, &len);
8675 	ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
8676 
8677 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8678 			"HW paused", pdev->hw_paused);
8679 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8680 			"Seqs posted", pdev->seq_posted);
8681 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8682 			"Seqs failed queueing", pdev->seq_failed_queueing);
8683 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8684 			"Seqs completed", pdev->seq_completed);
8685 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8686 			"Seqs restarted", pdev->seq_restarted);
8687 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8688 			"MU Seqs posted", pdev->mu_seq_posted);
8689 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8690 			"MPDUs SW flushed", pdev->mpdus_sw_flush);
8691 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8692 			"MPDUs HW filtered", pdev->mpdus_hw_filter);
8693 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8694 			"MPDUs truncated", pdev->mpdus_truncated);
8695 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8696 			"MPDUs receive no ACK", pdev->mpdus_ack_failed);
8697 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8698 			"MPDUs expired", pdev->mpdus_expired);
8699 
8700 	ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
8701 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8702 			"Num Rx Overflow errors", pdev->rx_ovfl_errs);
8703 
8704 	len += scnprintf(buf + len, buf_len - len, "\n");
8705 	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
8706 			"ath10k VDEV stats", num_vdevs);
8707 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8708 				"=================");
8709 	list_for_each_entry(vdev, &fw_stats->vdevs, list) {
8710 		ath10k_wmi_fw_vdev_stats_extd_fill(vdev, buf, &len);
8711 	}
8712 
8713 	len += scnprintf(buf + len, buf_len - len, "\n");
8714 	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
8715 			"ath10k PEER stats", num_peers);
8716 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8717 				"=================");
8718 
8719 	list_for_each_entry(peer, &fw_stats->peers, list) {
8720 		ath10k_wmi_fw_peer_stats_fill(peer, buf, &len,
8721 					      fw_stats->extended);
8722 	}
8723 
8724 	if (fw_stats->extended) {
8725 		list_for_each_entry(extd_peer, &fw_stats->peers_extd, list) {
8726 			ath10k_wmi_fw_extd_peer_stats_fill(extd_peer, buf,
8727 							   &len);
8728 		}
8729 	}
8730 
8731 unlock:
8732 	spin_unlock_bh(&ar->data_lock);
8733 
8734 	if (len >= buf_len)
8735 		buf[len - 1] = 0;
8736 	else
8737 		buf[len] = 0;
8738 }
8739 
ath10k_wmi_op_get_vdev_subtype(struct ath10k * ar,enum wmi_vdev_subtype subtype)8740 int ath10k_wmi_op_get_vdev_subtype(struct ath10k *ar,
8741 				   enum wmi_vdev_subtype subtype)
8742 {
8743 	switch (subtype) {
8744 	case WMI_VDEV_SUBTYPE_NONE:
8745 		return WMI_VDEV_SUBTYPE_LEGACY_NONE;
8746 	case WMI_VDEV_SUBTYPE_P2P_DEVICE:
8747 		return WMI_VDEV_SUBTYPE_LEGACY_P2P_DEV;
8748 	case WMI_VDEV_SUBTYPE_P2P_CLIENT:
8749 		return WMI_VDEV_SUBTYPE_LEGACY_P2P_CLI;
8750 	case WMI_VDEV_SUBTYPE_P2P_GO:
8751 		return WMI_VDEV_SUBTYPE_LEGACY_P2P_GO;
8752 	case WMI_VDEV_SUBTYPE_PROXY_STA:
8753 		return WMI_VDEV_SUBTYPE_LEGACY_PROXY_STA;
8754 	case WMI_VDEV_SUBTYPE_MESH_11S:
8755 	case WMI_VDEV_SUBTYPE_MESH_NON_11S:
8756 		return -ENOTSUPP;
8757 	}
8758 	return -ENOTSUPP;
8759 }
8760 
ath10k_wmi_10_2_4_op_get_vdev_subtype(struct ath10k * ar,enum wmi_vdev_subtype subtype)8761 static int ath10k_wmi_10_2_4_op_get_vdev_subtype(struct ath10k *ar,
8762 						 enum wmi_vdev_subtype subtype)
8763 {
8764 	switch (subtype) {
8765 	case WMI_VDEV_SUBTYPE_NONE:
8766 		return WMI_VDEV_SUBTYPE_10_2_4_NONE;
8767 	case WMI_VDEV_SUBTYPE_P2P_DEVICE:
8768 		return WMI_VDEV_SUBTYPE_10_2_4_P2P_DEV;
8769 	case WMI_VDEV_SUBTYPE_P2P_CLIENT:
8770 		return WMI_VDEV_SUBTYPE_10_2_4_P2P_CLI;
8771 	case WMI_VDEV_SUBTYPE_P2P_GO:
8772 		return WMI_VDEV_SUBTYPE_10_2_4_P2P_GO;
8773 	case WMI_VDEV_SUBTYPE_PROXY_STA:
8774 		return WMI_VDEV_SUBTYPE_10_2_4_PROXY_STA;
8775 	case WMI_VDEV_SUBTYPE_MESH_11S:
8776 		return WMI_VDEV_SUBTYPE_10_2_4_MESH_11S;
8777 	case WMI_VDEV_SUBTYPE_MESH_NON_11S:
8778 		return -ENOTSUPP;
8779 	}
8780 	return -ENOTSUPP;
8781 }
8782 
ath10k_wmi_10_4_op_get_vdev_subtype(struct ath10k * ar,enum wmi_vdev_subtype subtype)8783 static int ath10k_wmi_10_4_op_get_vdev_subtype(struct ath10k *ar,
8784 					       enum wmi_vdev_subtype subtype)
8785 {
8786 	switch (subtype) {
8787 	case WMI_VDEV_SUBTYPE_NONE:
8788 		return WMI_VDEV_SUBTYPE_10_4_NONE;
8789 	case WMI_VDEV_SUBTYPE_P2P_DEVICE:
8790 		return WMI_VDEV_SUBTYPE_10_4_P2P_DEV;
8791 	case WMI_VDEV_SUBTYPE_P2P_CLIENT:
8792 		return WMI_VDEV_SUBTYPE_10_4_P2P_CLI;
8793 	case WMI_VDEV_SUBTYPE_P2P_GO:
8794 		return WMI_VDEV_SUBTYPE_10_4_P2P_GO;
8795 	case WMI_VDEV_SUBTYPE_PROXY_STA:
8796 		return WMI_VDEV_SUBTYPE_10_4_PROXY_STA;
8797 	case WMI_VDEV_SUBTYPE_MESH_11S:
8798 		return WMI_VDEV_SUBTYPE_10_4_MESH_11S;
8799 	case WMI_VDEV_SUBTYPE_MESH_NON_11S:
8800 		return WMI_VDEV_SUBTYPE_10_4_MESH_NON_11S;
8801 	}
8802 	return -ENOTSUPP;
8803 }
8804 
8805 static struct sk_buff *
ath10k_wmi_10_4_ext_resource_config(struct ath10k * ar,enum wmi_host_platform_type type,u32 fw_feature_bitmap)8806 ath10k_wmi_10_4_ext_resource_config(struct ath10k *ar,
8807 				    enum wmi_host_platform_type type,
8808 				    u32 fw_feature_bitmap)
8809 {
8810 	struct wmi_ext_resource_config_10_4_cmd *cmd;
8811 	struct sk_buff *skb;
8812 	u32 num_tdls_sleep_sta = 0;
8813 
8814 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8815 	if (!skb)
8816 		return ERR_PTR(-ENOMEM);
8817 
8818 	if (test_bit(WMI_SERVICE_TDLS_UAPSD_SLEEP_STA, ar->wmi.svc_map))
8819 		num_tdls_sleep_sta = TARGET_10_4_NUM_TDLS_SLEEP_STA;
8820 
8821 	cmd = (struct wmi_ext_resource_config_10_4_cmd *)skb->data;
8822 	cmd->host_platform_config = __cpu_to_le32(type);
8823 	cmd->fw_feature_bitmap = __cpu_to_le32(fw_feature_bitmap);
8824 	cmd->wlan_gpio_priority = __cpu_to_le32(ar->coex_gpio_pin);
8825 	cmd->coex_version = __cpu_to_le32(WMI_NO_COEX_VERSION_SUPPORT);
8826 	cmd->coex_gpio_pin1 = __cpu_to_le32(-1);
8827 	cmd->coex_gpio_pin2 = __cpu_to_le32(-1);
8828 	cmd->coex_gpio_pin3 = __cpu_to_le32(-1);
8829 	cmd->num_tdls_vdevs = __cpu_to_le32(TARGET_10_4_NUM_TDLS_VDEVS);
8830 	cmd->num_tdls_conn_table_entries = __cpu_to_le32(20);
8831 	cmd->max_tdls_concurrent_sleep_sta = __cpu_to_le32(num_tdls_sleep_sta);
8832 	cmd->max_tdls_concurrent_buffer_sta =
8833 			__cpu_to_le32(TARGET_10_4_NUM_TDLS_BUFFER_STA);
8834 
8835 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8836 		   "wmi ext resource config host type %d firmware feature bitmap %08x\n",
8837 		   type, fw_feature_bitmap);
8838 	return skb;
8839 }
8840 
8841 static struct sk_buff *
ath10k_wmi_10_4_gen_update_fw_tdls_state(struct ath10k * ar,u32 vdev_id,enum wmi_tdls_state state)8842 ath10k_wmi_10_4_gen_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
8843 					 enum wmi_tdls_state state)
8844 {
8845 	struct wmi_10_4_tdls_set_state_cmd *cmd;
8846 	struct sk_buff *skb;
8847 	u32 options = 0;
8848 
8849 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8850 	if (!skb)
8851 		return ERR_PTR(-ENOMEM);
8852 
8853 	if (test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, ar->wmi.svc_map) &&
8854 	    state == WMI_TDLS_ENABLE_ACTIVE)
8855 		state = WMI_TDLS_ENABLE_PASSIVE;
8856 
8857 	if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, ar->wmi.svc_map))
8858 		options |= WMI_TDLS_BUFFER_STA_EN;
8859 
8860 	cmd = (struct wmi_10_4_tdls_set_state_cmd *)skb->data;
8861 	cmd->vdev_id = __cpu_to_le32(vdev_id);
8862 	cmd->state = __cpu_to_le32(state);
8863 	cmd->notification_interval_ms = __cpu_to_le32(5000);
8864 	cmd->tx_discovery_threshold = __cpu_to_le32(100);
8865 	cmd->tx_teardown_threshold = __cpu_to_le32(5);
8866 	cmd->rssi_teardown_threshold = __cpu_to_le32(-75);
8867 	cmd->rssi_delta = __cpu_to_le32(-20);
8868 	cmd->tdls_options = __cpu_to_le32(options);
8869 	cmd->tdls_peer_traffic_ind_window = __cpu_to_le32(2);
8870 	cmd->tdls_peer_traffic_response_timeout_ms = __cpu_to_le32(5000);
8871 	cmd->tdls_puapsd_mask = __cpu_to_le32(0xf);
8872 	cmd->tdls_puapsd_inactivity_time_ms = __cpu_to_le32(0);
8873 	cmd->tdls_puapsd_rx_frame_threshold = __cpu_to_le32(10);
8874 	cmd->teardown_notification_ms = __cpu_to_le32(10);
8875 	cmd->tdls_peer_kickout_threshold = __cpu_to_le32(96);
8876 
8877 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi update fw tdls state %d for vdev %i\n",
8878 		   state, vdev_id);
8879 	return skb;
8880 }
8881 
ath10k_wmi_prepare_peer_qos(u8 uapsd_queues,u8 sp)8882 static u32 ath10k_wmi_prepare_peer_qos(u8 uapsd_queues, u8 sp)
8883 {
8884 	u32 peer_qos = 0;
8885 
8886 	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
8887 		peer_qos |= WMI_TDLS_PEER_QOS_AC_VO;
8888 	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
8889 		peer_qos |= WMI_TDLS_PEER_QOS_AC_VI;
8890 	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
8891 		peer_qos |= WMI_TDLS_PEER_QOS_AC_BK;
8892 	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
8893 		peer_qos |= WMI_TDLS_PEER_QOS_AC_BE;
8894 
8895 	peer_qos |= SM(sp, WMI_TDLS_PEER_SP);
8896 
8897 	return peer_qos;
8898 }
8899 
8900 static struct sk_buff *
ath10k_wmi_10_4_op_gen_pdev_get_tpc_table_cmdid(struct ath10k * ar,u32 param)8901 ath10k_wmi_10_4_op_gen_pdev_get_tpc_table_cmdid(struct ath10k *ar, u32 param)
8902 {
8903 	struct wmi_pdev_get_tpc_table_cmd *cmd;
8904 	struct sk_buff *skb;
8905 
8906 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8907 	if (!skb)
8908 		return ERR_PTR(-ENOMEM);
8909 
8910 	cmd = (struct wmi_pdev_get_tpc_table_cmd *)skb->data;
8911 	cmd->param = __cpu_to_le32(param);
8912 
8913 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8914 		   "wmi pdev get tpc table param:%d\n", param);
8915 	return skb;
8916 }
8917 
8918 static struct sk_buff *
ath10k_wmi_10_4_gen_tdls_peer_update(struct ath10k * ar,const struct wmi_tdls_peer_update_cmd_arg * arg,const struct wmi_tdls_peer_capab_arg * cap,const struct wmi_channel_arg * chan_arg)8919 ath10k_wmi_10_4_gen_tdls_peer_update(struct ath10k *ar,
8920 				     const struct wmi_tdls_peer_update_cmd_arg *arg,
8921 				     const struct wmi_tdls_peer_capab_arg *cap,
8922 				     const struct wmi_channel_arg *chan_arg)
8923 {
8924 	struct wmi_10_4_tdls_peer_update_cmd *cmd;
8925 	struct wmi_tdls_peer_capabilities *peer_cap;
8926 	struct wmi_channel *chan;
8927 	struct sk_buff *skb;
8928 	u32 peer_qos;
8929 	int len, chan_len;
8930 	int i;
8931 
8932 	/* tdls peer update cmd has place holder for one channel*/
8933 	chan_len = cap->peer_chan_len ? (cap->peer_chan_len - 1) : 0;
8934 
8935 	len = sizeof(*cmd) + chan_len * sizeof(*chan);
8936 
8937 	skb = ath10k_wmi_alloc_skb(ar, len);
8938 	if (!skb)
8939 		return ERR_PTR(-ENOMEM);
8940 
8941 	memset(skb->data, 0, sizeof(*cmd));
8942 
8943 	cmd = (struct wmi_10_4_tdls_peer_update_cmd *)skb->data;
8944 	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
8945 	ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
8946 	cmd->peer_state = __cpu_to_le32(arg->peer_state);
8947 
8948 	peer_qos = ath10k_wmi_prepare_peer_qos(cap->peer_uapsd_queues,
8949 					       cap->peer_max_sp);
8950 
8951 	peer_cap = &cmd->peer_capab;
8952 	peer_cap->peer_qos = __cpu_to_le32(peer_qos);
8953 	peer_cap->buff_sta_support = __cpu_to_le32(cap->buff_sta_support);
8954 	peer_cap->off_chan_support = __cpu_to_le32(cap->off_chan_support);
8955 	peer_cap->peer_curr_operclass = __cpu_to_le32(cap->peer_curr_operclass);
8956 	peer_cap->self_curr_operclass = __cpu_to_le32(cap->self_curr_operclass);
8957 	peer_cap->peer_chan_len = __cpu_to_le32(cap->peer_chan_len);
8958 	peer_cap->peer_operclass_len = __cpu_to_le32(cap->peer_operclass_len);
8959 
8960 	for (i = 0; i < WMI_TDLS_MAX_SUPP_OPER_CLASSES; i++)
8961 		peer_cap->peer_operclass[i] = cap->peer_operclass[i];
8962 
8963 	peer_cap->is_peer_responder = __cpu_to_le32(cap->is_peer_responder);
8964 	peer_cap->pref_offchan_num = __cpu_to_le32(cap->pref_offchan_num);
8965 	peer_cap->pref_offchan_bw = __cpu_to_le32(cap->pref_offchan_bw);
8966 
8967 	for (i = 0; i < cap->peer_chan_len; i++) {
8968 		chan = (struct wmi_channel *)&peer_cap->peer_chan_list[i];
8969 		ath10k_wmi_put_wmi_channel(ar, chan, &chan_arg[i]);
8970 	}
8971 
8972 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8973 		   "wmi tdls peer update vdev %i state %d n_chans %u\n",
8974 		   arg->vdev_id, arg->peer_state, cap->peer_chan_len);
8975 	return skb;
8976 }
8977 
8978 static struct sk_buff *
ath10k_wmi_10_4_gen_radar_found(struct ath10k * ar,const struct ath10k_radar_found_info * arg)8979 ath10k_wmi_10_4_gen_radar_found(struct ath10k *ar,
8980 				const struct ath10k_radar_found_info *arg)
8981 {
8982 	struct wmi_radar_found_info *cmd;
8983 	struct sk_buff *skb;
8984 
8985 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8986 	if (!skb)
8987 		return ERR_PTR(-ENOMEM);
8988 
8989 	cmd = (struct wmi_radar_found_info *)skb->data;
8990 	cmd->pri_min   = __cpu_to_le32(arg->pri_min);
8991 	cmd->pri_max   = __cpu_to_le32(arg->pri_max);
8992 	cmd->width_min = __cpu_to_le32(arg->width_min);
8993 	cmd->width_max = __cpu_to_le32(arg->width_max);
8994 	cmd->sidx_min  = __cpu_to_le32(arg->sidx_min);
8995 	cmd->sidx_max  = __cpu_to_le32(arg->sidx_max);
8996 
8997 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8998 		   "wmi radar found pri_min %d pri_max %d width_min %d width_max %d sidx_min %d sidx_max %d\n",
8999 		   arg->pri_min, arg->pri_max, arg->width_min,
9000 		   arg->width_max, arg->sidx_min, arg->sidx_max);
9001 	return skb;
9002 }
9003 
9004 static struct sk_buff *
ath10k_wmi_10_4_gen_per_peer_per_tid_cfg(struct ath10k * ar,const struct wmi_per_peer_per_tid_cfg_arg * arg)9005 ath10k_wmi_10_4_gen_per_peer_per_tid_cfg(struct ath10k *ar,
9006 					 const struct wmi_per_peer_per_tid_cfg_arg *arg)
9007 {
9008 	struct wmi_peer_per_tid_cfg_cmd *cmd;
9009 	struct sk_buff *skb;
9010 
9011 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
9012 	if (!skb)
9013 		return ERR_PTR(-ENOMEM);
9014 
9015 	memset(skb->data, 0, sizeof(*cmd));
9016 
9017 	cmd = (struct wmi_peer_per_tid_cfg_cmd *)skb->data;
9018 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
9019 	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_macaddr.addr);
9020 	cmd->tid = cpu_to_le32(arg->tid);
9021 	cmd->ack_policy = cpu_to_le32(arg->ack_policy);
9022 	cmd->aggr_control = cpu_to_le32(arg->aggr_control);
9023 	cmd->rate_control = cpu_to_le32(arg->rate_ctrl);
9024 	cmd->retry_count = cpu_to_le32(arg->retry_count);
9025 	cmd->rcode_flags = cpu_to_le32(arg->rcode_flags);
9026 	cmd->ext_tid_cfg_bitmap = cpu_to_le32(arg->ext_tid_cfg_bitmap);
9027 	cmd->rtscts_ctrl = cpu_to_le32(arg->rtscts_ctrl);
9028 
9029 	ath10k_dbg(ar, ATH10K_DBG_WMI,
9030 		   "wmi noack tid %d vdev id %d ack_policy %d aggr %u rate_ctrl %u rcflag %u retry_count %d rtscts %d ext_tid_cfg_bitmap %d mac_addr %pM\n",
9031 		   arg->tid, arg->vdev_id, arg->ack_policy, arg->aggr_control,
9032 		   arg->rate_ctrl, arg->rcode_flags, arg->retry_count,
9033 		   arg->rtscts_ctrl, arg->ext_tid_cfg_bitmap, arg->peer_macaddr.addr);
9034 	return skb;
9035 }
9036 
9037 static struct sk_buff *
ath10k_wmi_op_gen_echo(struct ath10k * ar,u32 value)9038 ath10k_wmi_op_gen_echo(struct ath10k *ar, u32 value)
9039 {
9040 	struct wmi_echo_cmd *cmd;
9041 	struct sk_buff *skb;
9042 
9043 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
9044 	if (!skb)
9045 		return ERR_PTR(-ENOMEM);
9046 
9047 	cmd = (struct wmi_echo_cmd *)skb->data;
9048 	cmd->value = cpu_to_le32(value);
9049 
9050 	ath10k_dbg(ar, ATH10K_DBG_WMI,
9051 		   "wmi echo value 0x%08x\n", value);
9052 	return skb;
9053 }
9054 
9055 int
ath10k_wmi_barrier(struct ath10k * ar)9056 ath10k_wmi_barrier(struct ath10k *ar)
9057 {
9058 	int ret;
9059 	int time_left;
9060 
9061 	spin_lock_bh(&ar->data_lock);
9062 	reinit_completion(&ar->wmi.barrier);
9063 	spin_unlock_bh(&ar->data_lock);
9064 
9065 	ret = ath10k_wmi_echo(ar, ATH10K_WMI_BARRIER_ECHO_ID);
9066 	if (ret) {
9067 		ath10k_warn(ar, "failed to submit wmi echo: %d\n", ret);
9068 		return ret;
9069 	}
9070 
9071 	time_left = wait_for_completion_timeout(&ar->wmi.barrier,
9072 						ATH10K_WMI_BARRIER_TIMEOUT_HZ);
9073 	if (!time_left)
9074 		return -ETIMEDOUT;
9075 
9076 	return 0;
9077 }
9078 
9079 static struct sk_buff *
ath10k_wmi_10_2_4_op_gen_bb_timing(struct ath10k * ar,const struct wmi_bb_timing_cfg_arg * arg)9080 ath10k_wmi_10_2_4_op_gen_bb_timing(struct ath10k *ar,
9081 				   const struct wmi_bb_timing_cfg_arg *arg)
9082 {
9083 	struct wmi_pdev_bb_timing_cfg_cmd *cmd;
9084 	struct sk_buff *skb;
9085 
9086 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
9087 	if (!skb)
9088 		return ERR_PTR(-ENOMEM);
9089 
9090 	cmd = (struct wmi_pdev_bb_timing_cfg_cmd *)skb->data;
9091 	cmd->bb_tx_timing = __cpu_to_le32(arg->bb_tx_timing);
9092 	cmd->bb_xpa_timing = __cpu_to_le32(arg->bb_xpa_timing);
9093 
9094 	ath10k_dbg(ar, ATH10K_DBG_WMI,
9095 		   "wmi pdev bb_tx_timing 0x%x bb_xpa_timing 0x%x\n",
9096 		   arg->bb_tx_timing, arg->bb_xpa_timing);
9097 	return skb;
9098 }
9099 
9100 static const struct wmi_ops wmi_ops = {
9101 	.rx = ath10k_wmi_op_rx,
9102 	.map_svc = wmi_main_svc_map,
9103 
9104 	.pull_scan = ath10k_wmi_op_pull_scan_ev,
9105 	.pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
9106 	.pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
9107 	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
9108 	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
9109 	.pull_swba = ath10k_wmi_op_pull_swba_ev,
9110 	.pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
9111 	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
9112 	.pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
9113 	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
9114 	.pull_fw_stats = ath10k_wmi_main_op_pull_fw_stats,
9115 	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
9116 	.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
9117 
9118 	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
9119 	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
9120 	.gen_pdev_set_rd = ath10k_wmi_op_gen_pdev_set_rd,
9121 	.gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
9122 	.gen_init = ath10k_wmi_op_gen_init,
9123 	.gen_start_scan = ath10k_wmi_op_gen_start_scan,
9124 	.gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
9125 	.gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
9126 	.gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
9127 	.gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
9128 	.gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
9129 	.gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
9130 	.gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
9131 	.gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
9132 	.gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
9133 	.gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
9134 	.gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
9135 	/* .gen_vdev_wmm_conf not implemented */
9136 	.gen_peer_create = ath10k_wmi_op_gen_peer_create,
9137 	.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
9138 	.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
9139 	.gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
9140 	.gen_peer_assoc = ath10k_wmi_op_gen_peer_assoc,
9141 	.gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
9142 	.gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
9143 	.gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
9144 	.gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
9145 	.gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
9146 	.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
9147 	.gen_request_stats = ath10k_wmi_op_gen_request_stats,
9148 	.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
9149 	.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
9150 	.gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
9151 	.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
9152 	.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
9153 	.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
9154 	/* .gen_pdev_get_temperature not implemented */
9155 	.gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
9156 	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
9157 	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
9158 	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
9159 	.fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
9160 	.get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
9161 	.gen_echo = ath10k_wmi_op_gen_echo,
9162 	/* .gen_bcn_tmpl not implemented */
9163 	/* .gen_prb_tmpl not implemented */
9164 	/* .gen_p2p_go_bcn_ie not implemented */
9165 	/* .gen_adaptive_qcs not implemented */
9166 	/* .gen_pdev_enable_adaptive_cca not implemented */
9167 };
9168 
9169 static const struct wmi_ops wmi_10_1_ops = {
9170 	.rx = ath10k_wmi_10_1_op_rx,
9171 	.map_svc = wmi_10x_svc_map,
9172 	.pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
9173 	.pull_fw_stats = ath10k_wmi_10x_op_pull_fw_stats,
9174 	.gen_init = ath10k_wmi_10_1_op_gen_init,
9175 	.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
9176 	.gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
9177 	.gen_peer_assoc = ath10k_wmi_10_1_op_gen_peer_assoc,
9178 	/* .gen_pdev_get_temperature not implemented */
9179 
9180 	/* shared with main branch */
9181 	.pull_scan = ath10k_wmi_op_pull_scan_ev,
9182 	.pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
9183 	.pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
9184 	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
9185 	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
9186 	.pull_swba = ath10k_wmi_op_pull_swba_ev,
9187 	.pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
9188 	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
9189 	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
9190 	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
9191 	.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
9192 
9193 	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
9194 	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
9195 	.gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
9196 	.gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
9197 	.gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
9198 	.gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
9199 	.gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
9200 	.gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
9201 	.gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
9202 	.gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
9203 	.gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
9204 	.gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
9205 	.gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
9206 	.gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
9207 	/* .gen_vdev_wmm_conf not implemented */
9208 	.gen_peer_create = ath10k_wmi_op_gen_peer_create,
9209 	.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
9210 	.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
9211 	.gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
9212 	.gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
9213 	.gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
9214 	.gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
9215 	.gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
9216 	.gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
9217 	.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
9218 	.gen_request_stats = ath10k_wmi_op_gen_request_stats,
9219 	.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
9220 	.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
9221 	.gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
9222 	.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
9223 	.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
9224 	.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
9225 	.gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
9226 	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
9227 	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
9228 	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
9229 	.fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
9230 	.get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
9231 	.gen_echo = ath10k_wmi_op_gen_echo,
9232 	/* .gen_bcn_tmpl not implemented */
9233 	/* .gen_prb_tmpl not implemented */
9234 	/* .gen_p2p_go_bcn_ie not implemented */
9235 	/* .gen_adaptive_qcs not implemented */
9236 	/* .gen_pdev_enable_adaptive_cca not implemented */
9237 };
9238 
9239 static const struct wmi_ops wmi_10_2_ops = {
9240 	.rx = ath10k_wmi_10_2_op_rx,
9241 	.pull_fw_stats = ath10k_wmi_10_2_op_pull_fw_stats,
9242 	.gen_init = ath10k_wmi_10_2_op_gen_init,
9243 	.gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
9244 	/* .gen_pdev_get_temperature not implemented */
9245 
9246 	/* shared with 10.1 */
9247 	.map_svc = wmi_10x_svc_map,
9248 	.pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
9249 	.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
9250 	.gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
9251 	.gen_echo = ath10k_wmi_op_gen_echo,
9252 
9253 	.pull_scan = ath10k_wmi_op_pull_scan_ev,
9254 	.pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
9255 	.pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
9256 	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
9257 	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
9258 	.pull_swba = ath10k_wmi_op_pull_swba_ev,
9259 	.pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
9260 	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
9261 	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
9262 	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
9263 	.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
9264 
9265 	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
9266 	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
9267 	.gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
9268 	.gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
9269 	.gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
9270 	.gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
9271 	.gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
9272 	.gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
9273 	.gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
9274 	.gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
9275 	.gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
9276 	.gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
9277 	.gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
9278 	.gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
9279 	/* .gen_vdev_wmm_conf not implemented */
9280 	.gen_peer_create = ath10k_wmi_op_gen_peer_create,
9281 	.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
9282 	.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
9283 	.gen_pdev_set_base_macaddr = ath10k_wmi_op_gen_pdev_set_base_macaddr,
9284 	.gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
9285 	.gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
9286 	.gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
9287 	.gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
9288 	.gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
9289 	.gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
9290 	.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
9291 	.gen_request_stats = ath10k_wmi_op_gen_request_stats,
9292 	.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
9293 	.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
9294 	.gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
9295 	.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
9296 	.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
9297 	.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
9298 	.gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
9299 	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
9300 	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
9301 	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
9302 	.fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
9303 	.get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
9304 	/* .gen_pdev_enable_adaptive_cca not implemented */
9305 };
9306 
9307 static const struct wmi_ops wmi_10_2_4_ops = {
9308 	.rx = ath10k_wmi_10_2_op_rx,
9309 	.pull_fw_stats = ath10k_wmi_10_2_4_op_pull_fw_stats,
9310 	.gen_init = ath10k_wmi_10_2_op_gen_init,
9311 	.gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
9312 	.gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature,
9313 	.gen_pdev_bss_chan_info_req = ath10k_wmi_10_2_op_gen_pdev_bss_chan_info,
9314 
9315 	/* shared with 10.1 */
9316 	.map_svc = wmi_10x_svc_map,
9317 	.pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
9318 	.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
9319 	.gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
9320 	.gen_echo = ath10k_wmi_op_gen_echo,
9321 
9322 	.pull_scan = ath10k_wmi_op_pull_scan_ev,
9323 	.pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
9324 	.pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
9325 	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
9326 	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
9327 	.pull_swba = ath10k_wmi_10_2_4_op_pull_swba_ev,
9328 	.pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
9329 	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
9330 	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
9331 	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
9332 	.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
9333 
9334 	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
9335 	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
9336 	.gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
9337 	.gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
9338 	.gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
9339 	.gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
9340 	.gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
9341 	.gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
9342 	.gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
9343 	.gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
9344 	.gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
9345 	.gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
9346 	.gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
9347 	.gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
9348 	.gen_peer_create = ath10k_wmi_op_gen_peer_create,
9349 	.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
9350 	.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
9351 	.gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
9352 	.gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
9353 	.gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
9354 	.gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
9355 	.gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
9356 	.gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
9357 	.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
9358 	.gen_request_stats = ath10k_wmi_op_gen_request_stats,
9359 	.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
9360 	.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
9361 	.gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
9362 	.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
9363 	.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
9364 	.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
9365 	.gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
9366 	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
9367 	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
9368 	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
9369 	.gen_pdev_get_tpc_config = ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config,
9370 	.fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
9371 	.gen_pdev_enable_adaptive_cca =
9372 		ath10k_wmi_op_gen_pdev_enable_adaptive_cca,
9373 	.get_vdev_subtype = ath10k_wmi_10_2_4_op_get_vdev_subtype,
9374 	.gen_bb_timing = ath10k_wmi_10_2_4_op_gen_bb_timing,
9375 	/* .gen_bcn_tmpl not implemented */
9376 	/* .gen_prb_tmpl not implemented */
9377 	/* .gen_p2p_go_bcn_ie not implemented */
9378 	/* .gen_adaptive_qcs not implemented */
9379 };
9380 
9381 static const struct wmi_ops wmi_10_4_ops = {
9382 	.rx = ath10k_wmi_10_4_op_rx,
9383 	.map_svc = wmi_10_4_svc_map,
9384 
9385 	.pull_fw_stats = ath10k_wmi_10_4_op_pull_fw_stats,
9386 	.pull_scan = ath10k_wmi_op_pull_scan_ev,
9387 	.pull_mgmt_rx = ath10k_wmi_10_4_op_pull_mgmt_rx_ev,
9388 	.pull_ch_info = ath10k_wmi_10_4_op_pull_ch_info_ev,
9389 	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
9390 	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
9391 	.pull_swba = ath10k_wmi_10_4_op_pull_swba_ev,
9392 	.pull_phyerr_hdr = ath10k_wmi_10_4_op_pull_phyerr_ev_hdr,
9393 	.pull_phyerr = ath10k_wmi_10_4_op_pull_phyerr_ev,
9394 	.pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
9395 	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
9396 	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
9397 	.pull_dfs_status_ev = ath10k_wmi_10_4_op_pull_dfs_status_ev,
9398 	.get_txbf_conf_scheme = ath10k_wmi_10_4_txbf_conf_scheme,
9399 
9400 	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
9401 	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
9402 	.gen_pdev_set_base_macaddr = ath10k_wmi_op_gen_pdev_set_base_macaddr,
9403 	.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
9404 	.gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
9405 	.gen_init = ath10k_wmi_10_4_op_gen_init,
9406 	.gen_start_scan = ath10k_wmi_op_gen_start_scan,
9407 	.gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
9408 	.gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
9409 	.gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
9410 	.gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
9411 	.gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
9412 	.gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
9413 	.gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
9414 	.gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
9415 	.gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
9416 	.gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
9417 	.gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
9418 	.gen_peer_create = ath10k_wmi_op_gen_peer_create,
9419 	.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
9420 	.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
9421 	.gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
9422 	.gen_peer_assoc = ath10k_wmi_10_4_op_gen_peer_assoc,
9423 	.gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
9424 	.gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
9425 	.gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
9426 	.gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
9427 	.gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
9428 	.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
9429 	.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
9430 	.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
9431 	.gen_dbglog_cfg = ath10k_wmi_10_4_op_gen_dbglog_cfg,
9432 	.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
9433 	.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
9434 	.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
9435 	.gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
9436 	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
9437 	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
9438 	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
9439 	.fw_stats_fill = ath10k_wmi_10_4_op_fw_stats_fill,
9440 	.ext_resource_config = ath10k_wmi_10_4_ext_resource_config,
9441 	.gen_update_fw_tdls_state = ath10k_wmi_10_4_gen_update_fw_tdls_state,
9442 	.gen_tdls_peer_update = ath10k_wmi_10_4_gen_tdls_peer_update,
9443 	.gen_pdev_get_tpc_table_cmdid =
9444 			ath10k_wmi_10_4_op_gen_pdev_get_tpc_table_cmdid,
9445 	.gen_radar_found = ath10k_wmi_10_4_gen_radar_found,
9446 	.gen_per_peer_per_tid_cfg = ath10k_wmi_10_4_gen_per_peer_per_tid_cfg,
9447 
9448 	/* shared with 10.2 */
9449 	.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
9450 	.gen_request_stats = ath10k_wmi_op_gen_request_stats,
9451 	.gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature,
9452 	.get_vdev_subtype = ath10k_wmi_10_4_op_get_vdev_subtype,
9453 	.gen_pdev_bss_chan_info_req = ath10k_wmi_10_2_op_gen_pdev_bss_chan_info,
9454 	.gen_echo = ath10k_wmi_op_gen_echo,
9455 	.gen_pdev_get_tpc_config = ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config,
9456 };
9457 
ath10k_wmi_attach(struct ath10k * ar)9458 int ath10k_wmi_attach(struct ath10k *ar)
9459 {
9460 	switch (ar->running_fw->fw_file.wmi_op_version) {
9461 	case ATH10K_FW_WMI_OP_VERSION_10_4:
9462 		ar->wmi.ops = &wmi_10_4_ops;
9463 		ar->wmi.cmd = &wmi_10_4_cmd_map;
9464 		ar->wmi.vdev_param = &wmi_10_4_vdev_param_map;
9465 		ar->wmi.pdev_param = &wmi_10_4_pdev_param_map;
9466 		ar->wmi.peer_param = &wmi_peer_param_map;
9467 		ar->wmi.peer_flags = &wmi_10_2_peer_flags_map;
9468 		ar->wmi_key_cipher = wmi_key_cipher_suites;
9469 		break;
9470 	case ATH10K_FW_WMI_OP_VERSION_10_2_4:
9471 		ar->wmi.cmd = &wmi_10_2_4_cmd_map;
9472 		ar->wmi.ops = &wmi_10_2_4_ops;
9473 		ar->wmi.vdev_param = &wmi_10_2_4_vdev_param_map;
9474 		ar->wmi.pdev_param = &wmi_10_2_4_pdev_param_map;
9475 		ar->wmi.peer_param = &wmi_peer_param_map;
9476 		ar->wmi.peer_flags = &wmi_10_2_peer_flags_map;
9477 		ar->wmi_key_cipher = wmi_key_cipher_suites;
9478 		break;
9479 	case ATH10K_FW_WMI_OP_VERSION_10_2:
9480 		ar->wmi.cmd = &wmi_10_2_cmd_map;
9481 		ar->wmi.ops = &wmi_10_2_ops;
9482 		ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
9483 		ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
9484 		ar->wmi.peer_param = &wmi_peer_param_map;
9485 		ar->wmi.peer_flags = &wmi_10_2_peer_flags_map;
9486 		ar->wmi_key_cipher = wmi_key_cipher_suites;
9487 		break;
9488 	case ATH10K_FW_WMI_OP_VERSION_10_1:
9489 		ar->wmi.cmd = &wmi_10x_cmd_map;
9490 		ar->wmi.ops = &wmi_10_1_ops;
9491 		ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
9492 		ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
9493 		ar->wmi.peer_param = &wmi_peer_param_map;
9494 		ar->wmi.peer_flags = &wmi_10x_peer_flags_map;
9495 		ar->wmi_key_cipher = wmi_key_cipher_suites;
9496 		break;
9497 	case ATH10K_FW_WMI_OP_VERSION_MAIN:
9498 		ar->wmi.cmd = &wmi_cmd_map;
9499 		ar->wmi.ops = &wmi_ops;
9500 		ar->wmi.vdev_param = &wmi_vdev_param_map;
9501 		ar->wmi.pdev_param = &wmi_pdev_param_map;
9502 		ar->wmi.peer_param = &wmi_peer_param_map;
9503 		ar->wmi.peer_flags = &wmi_peer_flags_map;
9504 		ar->wmi_key_cipher = wmi_key_cipher_suites;
9505 		break;
9506 	case ATH10K_FW_WMI_OP_VERSION_TLV:
9507 		ath10k_wmi_tlv_attach(ar);
9508 		ar->wmi_key_cipher = wmi_tlv_key_cipher_suites;
9509 		break;
9510 	case ATH10K_FW_WMI_OP_VERSION_UNSET:
9511 	case ATH10K_FW_WMI_OP_VERSION_MAX:
9512 		ath10k_err(ar, "unsupported WMI op version: %d\n",
9513 			   ar->running_fw->fw_file.wmi_op_version);
9514 		return -EINVAL;
9515 	}
9516 
9517 	init_completion(&ar->wmi.service_ready);
9518 	init_completion(&ar->wmi.unified_ready);
9519 	init_completion(&ar->wmi.barrier);
9520 	init_completion(&ar->wmi.radar_confirm);
9521 
9522 	INIT_WORK(&ar->svc_rdy_work, ath10k_wmi_event_service_ready_work);
9523 	INIT_WORK(&ar->radar_confirmation_work,
9524 		  ath10k_radar_confirmation_work);
9525 
9526 	if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF,
9527 		     ar->running_fw->fw_file.fw_features)) {
9528 		idr_init(&ar->wmi.mgmt_pending_tx);
9529 	}
9530 
9531 	return 0;
9532 }
9533 
ath10k_wmi_free_host_mem(struct ath10k * ar)9534 void ath10k_wmi_free_host_mem(struct ath10k *ar)
9535 {
9536 	int i;
9537 
9538 	/* free the host memory chunks requested by firmware */
9539 	for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
9540 		dma_free_coherent(ar->dev,
9541 				  ar->wmi.mem_chunks[i].len,
9542 				  ar->wmi.mem_chunks[i].vaddr,
9543 				  ar->wmi.mem_chunks[i].paddr);
9544 	}
9545 
9546 	ar->wmi.num_mem_chunks = 0;
9547 }
9548 
ath10k_wmi_mgmt_tx_clean_up_pending(int msdu_id,void * ptr,void * ctx)9549 static int ath10k_wmi_mgmt_tx_clean_up_pending(int msdu_id, void *ptr,
9550 					       void *ctx)
9551 {
9552 	struct ath10k_mgmt_tx_pkt_addr *pkt_addr = ptr;
9553 	struct ath10k *ar = ctx;
9554 	struct sk_buff *msdu;
9555 
9556 	ath10k_dbg(ar, ATH10K_DBG_WMI,
9557 		   "force cleanup mgmt msdu_id %u\n", msdu_id);
9558 
9559 	msdu = pkt_addr->vaddr;
9560 	dma_unmap_single(ar->dev, pkt_addr->paddr,
9561 			 msdu->len, DMA_TO_DEVICE);
9562 	ieee80211_free_txskb(ar->hw, msdu);
9563 	kfree(pkt_addr);
9564 
9565 	return 0;
9566 }
9567 
ath10k_wmi_detach(struct ath10k * ar)9568 void ath10k_wmi_detach(struct ath10k *ar)
9569 {
9570 	if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF,
9571 		     ar->running_fw->fw_file.fw_features)) {
9572 		spin_lock_bh(&ar->data_lock);
9573 		idr_for_each(&ar->wmi.mgmt_pending_tx,
9574 			     ath10k_wmi_mgmt_tx_clean_up_pending, ar);
9575 		idr_destroy(&ar->wmi.mgmt_pending_tx);
9576 		spin_unlock_bh(&ar->data_lock);
9577 	}
9578 
9579 	cancel_work_sync(&ar->svc_rdy_work);
9580 	dev_kfree_skb(ar->svc_rdy_skb);
9581 }
9582