// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
 */

#include <linux/skbuff.h>
#include <linux/ctype.h>

#include "core.h"
#include "htc.h"
#include "debug.h"
#include "wmi.h"
#include "wmi-tlv.h"
#include "mac.h"
#include "testmode.h"
#include "wmi-ops.h"
#include "p2p.h"
#include "hw.h"
#include "hif.h"
#include "txrx.h"

#define ATH10K_WMI_BARRIER_ECHO_ID 0xBA991E9
#define ATH10K_WMI_BARRIER_TIMEOUT_HZ (3 * HZ)
#define ATH10K_WMI_DFS_CONF_TIMEOUT_HZ (HZ / 6)

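/*
 * The maps below translate the driver's abstract WMI command IDs into the
 * numbering used by each supported firmware branch (main, 10.x, 10.2.4,
 * 10.4). Entries set to WMI_CMD_UNSUPPORTED mark commands that a given
 * branch does not implement.
 *
 * Illustrative use only (a sketch, not an exact call site in this file):
 *
 *	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
 *	...
 *	ret = ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
 *
 * ar->wmi.cmd points at one of these maps, selected when the WMI interface
 * is attached, and the send path is expected to reject IDs equal to
 * WMI_CMD_UNSUPPORTED instead of passing them to the firmware.
 */
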
/* MAIN WMI cmd track */
static struct wmi_cmd_map wmi_cmd_map = {
	.init_cmdid = WMI_INIT_CMDID,
	.start_scan_cmdid = WMI_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_SCAN_SCH_PRIO_TBL_CMDID,
	.scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_regdomain_cmdid = WMI_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_dscp_tid_map_cmdid = WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_PDEV_SEND_BCN_CMDID,
	.bcn_tmpl_cmdid = WMI_BCN_TMPL_CMDID,
	.bcn_filter_rx_cmdid = WMI_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_MGMT_TX_CMDID,
	.prb_tmpl_cmdid = WMI_PRB_TMPL_CMDID,
	.addba_clear_resp_cmdid = WMI_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold = WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_ROAM_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
	.ap_ps_peer_param_cmdid = WMI_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
	.peer_rate_retry_sched_cmdid = WMI_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
				WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
				WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
				WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
				WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
				WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid = WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid =
				WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
	.vdev_spectral_scan_enable_cmdid = WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_REQUEST_STATS_CMDID,
	.set_arp_ns_offload_cmdid = WMI_SET_ARP_NS_OFFLOAD_CMDID,
	.network_list_offload_config_cmdid =
				WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
	.gtk_offload_cmdid = WMI_GTK_OFFLOAD_CMDID,
	.csa_offload_enable_cmdid = WMI_CSA_OFFLOAD_ENABLE_CMDID,
	.csa_offload_chanswitch_cmdid = WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
	.chatter_set_mode_cmdid = WMI_CHATTER_SET_MODE_CMDID,
	.peer_tid_addba_cmdid = WMI_PEER_TID_ADDBA_CMDID,
	.peer_tid_delba_cmdid = WMI_PEER_TID_DELBA_CMDID,
	.sta_dtim_ps_method_cmdid = WMI_STA_DTIM_PS_METHOD_CMDID,
	.sta_uapsd_auto_trig_cmdid = WMI_STA_UAPSD_AUTO_TRIG_CMDID,
	.sta_keepalive_cmd = WMI_STA_KEEPALIVE_CMD,
	.echo_cmdid = WMI_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_PDEV_FTM_INTG_CMDID,
	.vdev_set_keepalive_cmdid = WMI_VDEV_SET_KEEPALIVE_CMDID,
	.vdev_get_keepalive_cmdid = WMI_VDEV_GET_KEEPALIVE_CMDID,
	.force_fw_hang_cmdid = WMI_FORCE_FW_HANG_CMDID,
	.gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID,
	.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_enable_adaptive_cca_cmdid = WMI_CMD_UNSUPPORTED,
	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
	.nan_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
	.mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
	.set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED,
	.radar_found_cmdid = WMI_CMD_UNSUPPORTED,
};

/* 10.X WMI cmd track */
static struct wmi_cmd_map wmi_10x_cmd_map = {
	.init_cmdid = WMI_10X_INIT_CMDID,
	.start_scan_cmdid = WMI_10X_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_10X_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_10X_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
	.scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_regdomain_cmdid = WMI_10X_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_10X_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_10X_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_10X_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_10X_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_10X_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_10X_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_10X_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_dscp_tid_map_cmdid = WMI_10X_PDEV_SET_DSCP_TID_MAP_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_10X_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_10X_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_10X_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_10X_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_10X_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_10X_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_10X_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_10X_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_10X_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_10X_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_10X_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_10X_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_10X_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_10X_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_10X_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_10X_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_10X_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_10X_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_10X_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_10X_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_10X_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_10X_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_10X_PDEV_SEND_BCN_CMDID,
	.bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.bcn_filter_rx_cmdid = WMI_10X_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_10X_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_10X_MGMT_TX_CMDID,
	.prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.addba_clear_resp_cmdid = WMI_10X_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_10X_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_10X_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_10X_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_10X_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_10X_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_10X_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_10X_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_10X_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_10X_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_10X_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_10X_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_10X_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_10X_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold =
				WMI_10X_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_10X_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_10X_OFL_SCAN_ADD_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_10X_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_10X_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_10X_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_10X_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_10X_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_10X_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
	.ap_ps_peer_param_cmdid = WMI_10X_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_rate_retry_sched_cmdid = WMI_10X_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_10X_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
				WMI_10X_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
				WMI_10X_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
				WMI_10X_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
				WMI_10X_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_10X_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_10X_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_10X_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_10X_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_10X_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_10X_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
				WMI_10X_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_10X_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid =
				WMI_10X_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_10X_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_10X_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid =
				WMI_10X_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
	.vdev_spectral_scan_enable_cmdid =
				WMI_10X_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_10X_REQUEST_STATS_CMDID,
	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
	.gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
	.echo_cmdid = WMI_10X_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_10X_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_10X_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_10X_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
	.gpio_config_cmdid = WMI_10X_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_10X_GPIO_OUTPUT_CMDID,
	.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_enable_adaptive_cca_cmdid = WMI_CMD_UNSUPPORTED,
	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
	.nan_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
	.mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
	.set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED,
	.radar_found_cmdid = WMI_CMD_UNSUPPORTED,
};

/* 10.2.4 WMI cmd track */
static struct wmi_cmd_map wmi_10_2_4_cmd_map = {
	.init_cmdid = WMI_10_2_INIT_CMDID,
	.start_scan_cmdid = WMI_10_2_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
	.scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_10_2_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_10_2_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_10_2_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_10_2_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_10_2_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_10_2_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_10_2_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_10_2_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_10_2_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_10_2_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_10_2_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_10_2_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_10_2_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_10_2_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_10_2_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_10_2_PDEV_SEND_BCN_CMDID,
	.bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.bcn_filter_rx_cmdid = WMI_10_2_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_10_2_MGMT_TX_CMDID,
	.prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.addba_clear_resp_cmdid = WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_10_2_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_10_2_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_10_2_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_10_2_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_10_2_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_10_2_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_10_2_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_10_2_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_10_2_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_10_2_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_10_2_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold =
				WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_10_2_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_10_2_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_10_2_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
	.ap_ps_peer_param_cmdid = WMI_10_2_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_rate_retry_sched_cmdid = WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
				WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
				WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
				WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
				WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_10_2_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_10_2_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_10_2_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_10_2_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
				WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_10_2_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid =
				WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_10_2_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_10_2_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid =
				WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
	.vdev_spectral_scan_enable_cmdid =
				WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_10_2_REQUEST_STATS_CMDID,
	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
	.gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
	.echo_cmdid = WMI_10_2_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_10_2_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_10_2_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_10_2_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
	.gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
	.pdev_get_temperature_cmdid = WMI_10_2_PDEV_GET_TEMPERATURE_CMDID,
	.pdev_enable_adaptive_cca_cmdid = WMI_10_2_SET_CCA_PARAMS,
	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
	.nan_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
	.mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
	.set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_bss_chan_info_request_cmdid =
		WMI_10_2_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
	.pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED,
	.radar_found_cmdid = WMI_CMD_UNSUPPORTED,
	.set_bb_timing_cmdid = WMI_10_2_PDEV_SET_BB_TIMING_CONFIG_CMDID,
};

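/*
 * 10.4 firmware exposes the largest command set of the branches handled
 * here, including TDLS, smart antenna, peer caching and BSS channel info
 * requests that the other maps leave as WMI_CMD_UNSUPPORTED.
 */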
/* 10.4 WMI cmd track */
static struct wmi_cmd_map wmi_10_4_cmd_map = {
	.init_cmdid = WMI_10_4_INIT_CMDID,
	.start_scan_cmdid = WMI_10_4_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_10_4_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_10_4_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_10_4_SCAN_SCH_PRIO_TBL_CMDID,
	.scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_regdomain_cmdid = WMI_10_4_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_10_4_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_10_4_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_10_4_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_10_4_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_10_4_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_10_4_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_10_4_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_dscp_tid_map_cmdid = WMI_10_4_PDEV_SET_DSCP_TID_MAP_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_10_4_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_10_4_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_10_4_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_10_4_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_10_4_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_10_4_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_10_4_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_10_4_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_10_4_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_10_4_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_10_4_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_10_4_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_10_4_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_10_4_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_10_4_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_10_4_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_10_4_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_10_4_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_10_4_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_10_4_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_10_4_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_10_4_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_10_4_PDEV_SEND_BCN_CMDID,
	.bcn_tmpl_cmdid = WMI_10_4_BCN_PRB_TMPL_CMDID,
	.bcn_filter_rx_cmdid = WMI_10_4_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_10_4_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_10_4_MGMT_TX_CMDID,
	.prb_tmpl_cmdid = WMI_10_4_PRB_TMPL_CMDID,
	.addba_clear_resp_cmdid = WMI_10_4_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_10_4_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_10_4_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_10_4_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_10_4_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_10_4_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_10_4_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_10_4_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_10_4_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_10_4_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_10_4_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_10_4_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_10_4_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_10_4_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold =
				WMI_10_4_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_10_4_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_10_4_OFL_SCAN_ADD_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_10_4_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_10_4_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_10_4_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_10_4_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_10_4_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_10_4_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_10_4_P2P_SET_VENDOR_IE_DATA_CMDID,
	.ap_ps_peer_param_cmdid = WMI_10_4_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_10_4_AP_PS_PEER_UAPSD_COEX_CMDID,
	.peer_rate_retry_sched_cmdid = WMI_10_4_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_10_4_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
				WMI_10_4_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
				WMI_10_4_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
				WMI_10_4_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
				WMI_10_4_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_10_4_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_10_4_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_10_4_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_10_4_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_10_4_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_10_4_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
				WMI_10_4_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_10_4_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid =
				WMI_10_4_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_10_4_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_10_4_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid =
				WMI_10_4_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
	.vdev_spectral_scan_enable_cmdid =
				WMI_10_4_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_10_4_REQUEST_STATS_CMDID,
	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
	.gtk_offload_cmdid = WMI_10_4_GTK_OFFLOAD_CMDID,
	.csa_offload_enable_cmdid = WMI_10_4_CSA_OFFLOAD_ENABLE_CMDID,
	.csa_offload_chanswitch_cmdid = WMI_10_4_CSA_OFFLOAD_CHANSWITCH_CMDID,
	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
	.echo_cmdid = WMI_10_4_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_10_4_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_10_4_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_10_4_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_keepalive_cmdid = WMI_10_4_VDEV_SET_KEEPALIVE_CMDID,
	.vdev_get_keepalive_cmdid = WMI_10_4_VDEV_GET_KEEPALIVE_CMDID,
	.force_fw_hang_cmdid = WMI_10_4_FORCE_FW_HANG_CMDID,
	.gpio_config_cmdid = WMI_10_4_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_10_4_GPIO_OUTPUT_CMDID,
	.pdev_get_temperature_cmdid = WMI_10_4_PDEV_GET_TEMPERATURE_CMDID,
	.vdev_set_wmm_params_cmdid = WMI_CMD_UNSUPPORTED,
	.adaptive_qcs_cmdid = WMI_CMD_UNSUPPORTED,
	.scan_update_request_cmdid = WMI_10_4_SCAN_UPDATE_REQUEST_CMDID,
	.vdev_standby_response_cmdid = WMI_10_4_VDEV_STANDBY_RESPONSE_CMDID,
	.vdev_resume_response_cmdid = WMI_10_4_VDEV_RESUME_RESPONSE_CMDID,
	.wlan_peer_caching_add_peer_cmdid =
			WMI_10_4_WLAN_PEER_CACHING_ADD_PEER_CMDID,
	.wlan_peer_caching_evict_peer_cmdid =
			WMI_10_4_WLAN_PEER_CACHING_EVICT_PEER_CMDID,
	.wlan_peer_caching_restore_peer_cmdid =
			WMI_10_4_WLAN_PEER_CACHING_RESTORE_PEER_CMDID,
	.wlan_peer_caching_print_all_peers_info_cmdid =
			WMI_10_4_WLAN_PEER_CACHING_PRINT_ALL_PEERS_INFO_CMDID,
	.peer_update_wds_entry_cmdid = WMI_10_4_PEER_UPDATE_WDS_ENTRY_CMDID,
	.peer_add_proxy_sta_entry_cmdid =
			WMI_10_4_PEER_ADD_PROXY_STA_ENTRY_CMDID,
	.rtt_keepalive_cmdid = WMI_10_4_RTT_KEEPALIVE_CMDID,
	.oem_req_cmdid = WMI_10_4_OEM_REQ_CMDID,
	.nan_cmdid = WMI_10_4_NAN_CMDID,
	.vdev_ratemask_cmdid = WMI_10_4_VDEV_RATEMASK_CMDID,
	.qboost_cfg_cmdid = WMI_10_4_QBOOST_CFG_CMDID,
	.pdev_smart_ant_enable_cmdid = WMI_10_4_PDEV_SMART_ANT_ENABLE_CMDID,
	.pdev_smart_ant_set_rx_antenna_cmdid =
			WMI_10_4_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID,
	.peer_smart_ant_set_tx_antenna_cmdid =
			WMI_10_4_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID,
	.peer_smart_ant_set_train_info_cmdid =
			WMI_10_4_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID,
	.peer_smart_ant_set_node_config_ops_cmdid =
			WMI_10_4_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID,
	.pdev_set_antenna_switch_table_cmdid =
			WMI_10_4_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID,
	.pdev_set_ctl_table_cmdid = WMI_10_4_PDEV_SET_CTL_TABLE_CMDID,
	.pdev_set_mimogain_table_cmdid = WMI_10_4_PDEV_SET_MIMOGAIN_TABLE_CMDID,
	.pdev_ratepwr_table_cmdid = WMI_10_4_PDEV_RATEPWR_TABLE_CMDID,
	.pdev_ratepwr_chainmsk_table_cmdid =
			WMI_10_4_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID,
	.pdev_fips_cmdid = WMI_10_4_PDEV_FIPS_CMDID,
	.tt_set_conf_cmdid = WMI_10_4_TT_SET_CONF_CMDID,
	.fwtest_cmdid = WMI_10_4_FWTEST_CMDID,
	.vdev_atf_request_cmdid = WMI_10_4_VDEV_ATF_REQUEST_CMDID,
	.peer_atf_request_cmdid = WMI_10_4_PEER_ATF_REQUEST_CMDID,
	.pdev_get_ani_cck_config_cmdid = WMI_10_4_PDEV_GET_ANI_CCK_CONFIG_CMDID,
	.pdev_get_ani_ofdm_config_cmdid =
			WMI_10_4_PDEV_GET_ANI_OFDM_CONFIG_CMDID,
	.pdev_reserve_ast_entry_cmdid = WMI_10_4_PDEV_RESERVE_AST_ENTRY_CMDID,
	.pdev_get_nfcal_power_cmdid = WMI_10_4_PDEV_GET_NFCAL_POWER_CMDID,
	.pdev_get_tpc_cmdid = WMI_10_4_PDEV_GET_TPC_CMDID,
	.pdev_get_ast_info_cmdid = WMI_10_4_PDEV_GET_AST_INFO_CMDID,
	.vdev_set_dscp_tid_map_cmdid = WMI_10_4_VDEV_SET_DSCP_TID_MAP_CMDID,
	.pdev_get_info_cmdid = WMI_10_4_PDEV_GET_INFO_CMDID,
	.vdev_get_info_cmdid = WMI_10_4_VDEV_GET_INFO_CMDID,
	.vdev_filter_neighbor_rx_packets_cmdid =
			WMI_10_4_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID,
	.mu_cal_start_cmdid = WMI_10_4_MU_CAL_START_CMDID,
	.set_cca_params_cmdid = WMI_10_4_SET_CCA_PARAMS_CMDID,
	.pdev_bss_chan_info_request_cmdid =
			WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
	.ext_resource_cfg_cmdid = WMI_10_4_EXT_RESOURCE_CFG_CMDID,
	.vdev_set_ie_cmdid = WMI_10_4_VDEV_SET_IE_CMDID,
	.set_lteu_config_cmdid = WMI_10_4_SET_LTEU_CONFIG_CMDID,
	.atf_ssid_grouping_request_cmdid =
			WMI_10_4_ATF_SSID_GROUPING_REQUEST_CMDID,
	.peer_atf_ext_request_cmdid = WMI_10_4_PEER_ATF_EXT_REQUEST_CMDID,
	.set_periodic_channel_stats_cfg_cmdid =
			WMI_10_4_SET_PERIODIC_CHANNEL_STATS_CONFIG,
	.peer_bwf_request_cmdid = WMI_10_4_PEER_BWF_REQUEST_CMDID,
	.btcoex_cfg_cmdid = WMI_10_4_BTCOEX_CFG_CMDID,
	.peer_tx_mu_txmit_count_cmdid = WMI_10_4_PEER_TX_MU_TXMIT_COUNT_CMDID,
	.peer_tx_mu_txmit_rstcnt_cmdid = WMI_10_4_PEER_TX_MU_TXMIT_RSTCNT_CMDID,
	.peer_gid_userpos_list_cmdid = WMI_10_4_PEER_GID_USERPOS_LIST_CMDID,
	.pdev_check_cal_version_cmdid = WMI_10_4_PDEV_CHECK_CAL_VERSION_CMDID,
	.coex_version_cfg_cmid = WMI_10_4_COEX_VERSION_CFG_CMID,
	.pdev_get_rx_filter_cmdid = WMI_10_4_PDEV_GET_RX_FILTER_CMDID,
	.pdev_extended_nss_cfg_cmdid = WMI_10_4_PDEV_EXTENDED_NSS_CFG_CMDID,
	.vdev_set_scan_nac_rssi_cmdid = WMI_10_4_VDEV_SET_SCAN_NAC_RSSI_CMDID,
	.prog_gpio_band_select_cmdid = WMI_10_4_PROG_GPIO_BAND_SELECT_CMDID,
	.config_smart_logging_cmdid = WMI_10_4_CONFIG_SMART_LOGGING_CMDID,
	.debug_fatal_condition_cmdid = WMI_10_4_DEBUG_FATAL_CONDITION_CMDID,
	.get_tsf_timer_cmdid = WMI_10_4_GET_TSF_TIMER_CMDID,
	.pdev_get_tpc_table_cmdid = WMI_10_4_PDEV_GET_TPC_TABLE_CMDID,
	.vdev_sifs_trigger_time_cmdid = WMI_10_4_VDEV_SIFS_TRIGGER_TIME_CMDID,
	.pdev_wds_entry_list_cmdid = WMI_10_4_PDEV_WDS_ENTRY_LIST_CMDID,
	.tdls_set_state_cmdid = WMI_10_4_TDLS_SET_STATE_CMDID,
	.tdls_peer_update_cmdid = WMI_10_4_TDLS_PEER_UPDATE_CMDID,
	.tdls_set_offchan_mode_cmdid = WMI_10_4_TDLS_SET_OFFCHAN_MODE_CMDID,
	.radar_found_cmdid = WMI_10_4_RADAR_FOUND_CMDID,
	.per_peer_per_tid_config_cmdid = WMI_10_4_PER_PEER_PER_TID_CONFIG_CMDID,
};

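/*
 * Peer parameter map: translates the driver's peer parameter identifiers
 * into the WMI_PEER_* values carried by the peer set-param command.
 */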
static struct wmi_peer_param_map wmi_peer_param_map = {
	.smps_state = WMI_PEER_SMPS_STATE,
	.ampdu = WMI_PEER_AMPDU,
	.authorize = WMI_PEER_AUTHORIZE,
	.chan_width = WMI_PEER_CHAN_WIDTH,
	.nss = WMI_PEER_NSS,
	.use_4addr = WMI_PEER_USE_4ADDR,
	.use_fixed_power = WMI_PEER_USE_FIXED_PWR,
	.debug = WMI_PEER_DEBUG,
	.phymode = WMI_PEER_PHYMODE,
	.dummy_var = WMI_PEER_DUMMY_VAR,
};

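/*
 * The VDEV parameter maps below play the same role for per-vdev settings;
 * WMI_VDEV_PARAM_UNSUPPORTED marks parameters a firmware branch lacks.
 *
 * Illustrative use only (a sketch of how callers are expected to consult
 * the map before issuing a set-param command):
 *
 *	u32 vdev_param = ar->wmi.vdev_param->rts_threshold;
 *
 *	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, rts);
 *
 * where the set-param path is expected to fail with -EOPNOTSUPP when the
 * map entry is WMI_VDEV_PARAM_UNSUPPORTED.
 */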
/* MAIN WMI VDEV param map */
static struct wmi_vdev_param_map wmi_vdev_param_map = {
	.rts_threshold = WMI_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_VDEV_PARAM_SLOT_TIME,
	.preamble = WMI_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
					WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_VDEV_PARAM_WDS,
	.atim_window = WMI_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_VDEV_PARAM_BMISS_COUNT_MAX,
	.bmiss_first_bcnt = WMI_VDEV_PARAM_BMISS_FIRST_BCNT,
	.bmiss_final_bcnt = WMI_VDEV_PARAM_BMISS_FINAL_BCNT,
	.feature_wmm = WMI_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection = WMI_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_VDEV_PARAM_SGI,
	.ldpc = WMI_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
			WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
			WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
			WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_VDEV_PARAM_AP_ENABLE_NAWDS,
	.mcast2ucast_set = WMI_VDEV_PARAM_UNSUPPORTED,
	.enable_rtscts = WMI_VDEV_PARAM_ENABLE_RTSCTS,
	.txbf = WMI_VDEV_PARAM_TXBF,
	.packet_powersave = WMI_VDEV_PARAM_PACKET_POWERSAVE,
	.drop_unencry = WMI_VDEV_PARAM_DROP_UNENCRY,
	.tx_encap_type = WMI_VDEV_PARAM_TX_ENCAP_TYPE,
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
					WMI_VDEV_PARAM_UNSUPPORTED,
	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
	.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
	.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
	.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
	.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
	.disable_4addr_src_lrn = WMI_VDEV_PARAM_UNSUPPORTED,
	.rtt_responder_role = WMI_VDEV_PARAM_UNSUPPORTED,
};

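/*
 * The 10.x and 10.2.4 VDEV parameter maps mark later additions such as
 * TXBF, packet powersave and the early-rx tuning knobs as
 * WMI_VDEV_PARAM_UNSUPPORTED.
 */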
/* 10.X WMI VDEV param map */
static struct wmi_vdev_param_map wmi_10x_vdev_param_map = {
	.rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME,
	.preamble = WMI_10X_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
				WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_10X_VDEV_PARAM_WDS,
	.atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
	.bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
	.bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
	.feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_10X_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_10X_VDEV_PARAM_SGI,
	.ldpc = WMI_10X_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_10X_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
	.mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
	.enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
	.txbf = WMI_VDEV_PARAM_UNSUPPORTED,
	.packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED,
	.drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED,
	.tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
		WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
	.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
	.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
	.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
	.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
	.disable_4addr_src_lrn = WMI_VDEV_PARAM_UNSUPPORTED,
	.rtt_responder_role = WMI_VDEV_PARAM_UNSUPPORTED,
};

static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
	.rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME,
	.preamble = WMI_10X_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
				WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_10X_VDEV_PARAM_WDS,
	.atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
	.bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
	.bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
	.feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_10X_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_10X_VDEV_PARAM_SGI,
	.ldpc = WMI_10X_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_10X_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
	.mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
	.enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
	.txbf = WMI_VDEV_PARAM_UNSUPPORTED,
	.packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED,
	.drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED,
	.tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
		WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
	.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
	.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
	.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
	.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
	.disable_4addr_src_lrn = WMI_VDEV_PARAM_UNSUPPORTED,
	.rtt_responder_role = WMI_VDEV_PARAM_UNSUPPORTED,
};

static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = {
	.rts_threshold = WMI_10_4_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_10_4_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_10_4_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_10_4_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_10_4_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_10_4_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_10_4_VDEV_PARAM_SLOT_TIME,
	.preamble = WMI_10_4_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_10_4_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_10_4_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_10_4_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_10_4_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_10_4_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
	       WMI_10_4_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_10_4_VDEV_PARAM_WDS,
	.atim_window = WMI_10_4_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_10_4_VDEV_PARAM_BMISS_COUNT_MAX,
	.bmiss_first_bcnt = WMI_10_4_VDEV_PARAM_BMISS_FIRST_BCNT,
	.bmiss_final_bcnt = WMI_10_4_VDEV_PARAM_BMISS_FINAL_BCNT,
	.feature_wmm = WMI_10_4_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_10_4_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_10_4_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection = WMI_10_4_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_10_4_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_10_4_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_10_4_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_10_4_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_10_4_VDEV_PARAM_SGI,
	.ldpc = WMI_10_4_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_10_4_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_10_4_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_10_4_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_10_4_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_10_4_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_10_4_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_10_4_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_10_4_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_10_4_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_10_4_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
	       WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
	       WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
	       WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_10_4_VDEV_PARAM_AP_ENABLE_NAWDS,
	.mcast2ucast_set = WMI_10_4_VDEV_PARAM_MCAST2UCAST_SET,
	.enable_rtscts = WMI_10_4_VDEV_PARAM_ENABLE_RTSCTS,
	.txbf = WMI_10_4_VDEV_PARAM_TXBF,
	.packet_powersave = WMI_10_4_VDEV_PARAM_PACKET_POWERSAVE,
	.drop_unencry = WMI_10_4_VDEV_PARAM_DROP_UNENCRY,
	.tx_encap_type = WMI_10_4_VDEV_PARAM_TX_ENCAP_TYPE,
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
	       WMI_10_4_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
	.rc_num_retries = WMI_10_4_VDEV_PARAM_RC_NUM_RETRIES,
	.cabq_maxdur = WMI_10_4_VDEV_PARAM_CABQ_MAXDUR,
	.mfptest_set = WMI_10_4_VDEV_PARAM_MFPTEST_SET,
	.rts_fixed_rate = WMI_10_4_VDEV_PARAM_RTS_FIXED_RATE,
	.vht_sgimask = WMI_10_4_VDEV_PARAM_VHT_SGIMASK,
	.vht80_ratemask = WMI_10_4_VDEV_PARAM_VHT80_RATEMASK,
	.early_rx_adjust_enable = WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE,
	.early_rx_tgt_bmiss_num = WMI_10_4_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM,
	.early_rx_bmiss_sample_cycle =
	       WMI_10_4_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE,
	.early_rx_slop_step = WMI_10_4_VDEV_PARAM_EARLY_RX_SLOP_STEP,
	.early_rx_init_slop = WMI_10_4_VDEV_PARAM_EARLY_RX_INIT_SLOP,
	.early_rx_adjust_pause = WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE,
	.proxy_sta = WMI_10_4_VDEV_PARAM_PROXY_STA,
	.meru_vc = WMI_10_4_VDEV_PARAM_MERU_VC,
	.rx_decap_type = WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE,
	.bw_nss_ratemask = WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK,
	.inc_tsf = WMI_10_4_VDEV_PARAM_TSF_INCREMENT,
	.dec_tsf = WMI_10_4_VDEV_PARAM_TSF_DECREMENT,
	.disable_4addr_src_lrn = WMI_10_4_VDEV_PARAM_DISABLE_4_ADDR_SRC_LRN,
	.rtt_responder_role = WMI_10_4_VDEV_PARAM_ENABLE_DISABLE_RTT_RESPONDER_ROLE,
};

1068 static struct wmi_pdev_param_map wmi_pdev_param_map = {
1069 	.tx_chain_mask = WMI_PDEV_PARAM_TX_CHAIN_MASK,
1070 	.rx_chain_mask = WMI_PDEV_PARAM_RX_CHAIN_MASK,
1071 	.txpower_limit2g = WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
1072 	.txpower_limit5g = WMI_PDEV_PARAM_TXPOWER_LIMIT5G,
1073 	.txpower_scale = WMI_PDEV_PARAM_TXPOWER_SCALE,
1074 	.beacon_gen_mode = WMI_PDEV_PARAM_BEACON_GEN_MODE,
1075 	.beacon_tx_mode = WMI_PDEV_PARAM_BEACON_TX_MODE,
1076 	.resmgr_offchan_mode = WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
1077 	.protection_mode = WMI_PDEV_PARAM_PROTECTION_MODE,
1078 	.dynamic_bw = WMI_PDEV_PARAM_DYNAMIC_BW,
1079 	.non_agg_sw_retry_th = WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
1080 	.agg_sw_retry_th = WMI_PDEV_PARAM_AGG_SW_RETRY_TH,
1081 	.sta_kickout_th = WMI_PDEV_PARAM_STA_KICKOUT_TH,
1082 	.ac_aggrsize_scaling = WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING,
1083 	.ltr_enable = WMI_PDEV_PARAM_LTR_ENABLE,
1084 	.ltr_ac_latency_be = WMI_PDEV_PARAM_LTR_AC_LATENCY_BE,
1085 	.ltr_ac_latency_bk = WMI_PDEV_PARAM_LTR_AC_LATENCY_BK,
1086 	.ltr_ac_latency_vi = WMI_PDEV_PARAM_LTR_AC_LATENCY_VI,
1087 	.ltr_ac_latency_vo = WMI_PDEV_PARAM_LTR_AC_LATENCY_VO,
1088 	.ltr_ac_latency_timeout = WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
1089 	.ltr_sleep_override = WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
1090 	.ltr_rx_override = WMI_PDEV_PARAM_LTR_RX_OVERRIDE,
1091 	.ltr_tx_activity_timeout = WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
1092 	.l1ss_enable = WMI_PDEV_PARAM_L1SS_ENABLE,
1093 	.dsleep_enable = WMI_PDEV_PARAM_DSLEEP_ENABLE,
1094 	.pcielp_txbuf_flush = WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
1095 	.pcielp_txbuf_watermark = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
1096 	.pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
1097 	.pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
1098 	.pdev_stats_update_period = WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
1099 	.vdev_stats_update_period = WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
1100 	.peer_stats_update_period = WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
1101 	.bcnflt_stats_update_period = WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
1102 	.pmf_qos = WMI_PDEV_PARAM_PMF_QOS,
1103 	.arp_ac_override = WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
1104 	.dcs = WMI_PDEV_PARAM_DCS,
1105 	.ani_enable = WMI_PDEV_PARAM_ANI_ENABLE,
1106 	.ani_poll_period = WMI_PDEV_PARAM_ANI_POLL_PERIOD,
1107 	.ani_listen_period = WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
1108 	.ani_ofdm_level = WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
1109 	.ani_cck_level = WMI_PDEV_PARAM_ANI_CCK_LEVEL,
1110 	.dyntxchain = WMI_PDEV_PARAM_DYNTXCHAIN,
1111 	.proxy_sta = WMI_PDEV_PARAM_PROXY_STA,
1112 	.idle_ps_config = WMI_PDEV_PARAM_IDLE_PS_CONFIG,
1113 	.power_gating_sleep = WMI_PDEV_PARAM_POWER_GATING_SLEEP,
1114 	.fast_channel_reset = WMI_PDEV_PARAM_UNSUPPORTED,
1115 	.burst_dur = WMI_PDEV_PARAM_UNSUPPORTED,
1116 	.burst_enable = WMI_PDEV_PARAM_UNSUPPORTED,
1117 	.cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
1118 	.aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
1119 	.rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1120 	.smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
1121 	.igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
1122 	.igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
1123 	.antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
1124 	.rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
1125 	.set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
1126 	.proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1127 	.set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1128 	.set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
1129 	.remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
1130 	.peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
1131 	.igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
1132 	.block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
1133 	.set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1134 	.set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1135 	.set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1136 	.txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1137 	.set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1138 	.set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1139 	.en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
1140 	.mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
1141 	.noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
1142 	.noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
1143 	.dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
1144 	.set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
1145 	.atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
1146 	.atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
1147 	.ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
1148 	.mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
1149 	.sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
1150 	.signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
1151 	.signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
1152 	.enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
1153 	.enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
1154 	.cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
1155 	.rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
1156 	.pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
1157 	.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
1158 	.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1159 	.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1160 	.enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED,
1161 };
1162 
1163 static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
1164 	.tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
1165 	.rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
1166 	.txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
1167 	.txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
1168 	.txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
1169 	.beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
1170 	.beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
1171 	.resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
1172 	.protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE,
1173 	.dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW,
1174 	.non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
1175 	.agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
1176 	.sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
1177 	.ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
1178 	.ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE,
1179 	.ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
1180 	.ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
1181 	.ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
1182 	.ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
1183 	.ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
1184 	.ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
1185 	.ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
1186 	.ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
1187 	.l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE,
1188 	.dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
1189 	.pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED,
1190 	.pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED,
1191 	.pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED,
1192 	.pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED,
1193 	.pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
1194 	.vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
1195 	.peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
1196 	.bcnflt_stats_update_period =
1197 				WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
1198 	.pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
1199 	.arp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
1200 	.dcs = WMI_10X_PDEV_PARAM_DCS,
1201 	.ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
1202 	.ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
1203 	.ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
1204 	.ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
1205 	.ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
1206 	.dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN,
1207 	.proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED,
1208 	.idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED,
1209 	.power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED,
1210 	.fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
1211 	.burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
1212 	.burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
1213 	.cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
1214 	.aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
1215 	.rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1216 	.smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
1217 	.igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
1218 	.igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
1219 	.antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
1220 	.rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
1221 	.set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
1222 	.proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1223 	.set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1224 	.set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
1225 	.remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
1226 	.peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
1227 	.igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
1228 	.block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
1229 	.set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1230 	.set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1231 	.set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1232 	.txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1233 	.set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1234 	.set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1235 	.en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
1236 	.mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
1237 	.noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
1238 	.noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
1239 	.dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
1240 	.set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
1241 	.atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
1242 	.atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
1243 	.ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
1244 	.mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
1245 	.sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
1246 	.signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
1247 	.signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
1248 	.enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
1249 	.enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
1250 	.cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
1251 	.rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
1252 	.pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
1253 	.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
1254 	.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1255 	.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1256 	.enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED,
1257 };
1258 
1259 static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = {
1260 	.tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
1261 	.rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
1262 	.txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
1263 	.txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
1264 	.txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
1265 	.beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
1266 	.beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
1267 	.resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
1268 	.protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE,
1269 	.dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW,
1270 	.non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
1271 	.agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
1272 	.sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
1273 	.ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
1274 	.ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE,
1275 	.ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
1276 	.ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
1277 	.ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
1278 	.ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
1279 	.ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
1280 	.ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
1281 	.ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
1282 	.ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
1283 	.l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE,
1284 	.dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
1285 	.pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED,
1286 	.pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED,
1287 	.pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED,
1288 	.pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED,
1289 	.pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
1290 	.vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
1291 	.peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
1292 	.bcnflt_stats_update_period =
1293 				WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
1294 	.pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
1295 	.arp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
1296 	.dcs = WMI_10X_PDEV_PARAM_DCS,
1297 	.ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
1298 	.ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
1299 	.ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
1300 	.ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
1301 	.ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
1302 	.dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN,
1303 	.proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED,
1304 	.idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED,
1305 	.power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED,
1306 	.fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
1307 	.burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
1308 	.burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
1309 	.cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
1310 	.aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
1311 	.rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1312 	.smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
1313 	.igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
1314 	.igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
1315 	.antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
1316 	.rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
1317 	.set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
1318 	.proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1319 	.set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1320 	.set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
1321 	.remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
1322 	.peer_sta_ps_statechg_enable =
1323 				WMI_10X_PDEV_PARAM_PEER_STA_PS_STATECHG_ENABLE,
1324 	.igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
1325 	.block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
1326 	.set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1327 	.set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1328 	.set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1329 	.txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1330 	.set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1331 	.set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1332 	.en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
1333 	.mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
1334 	.noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
1335 	.noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
1336 	.dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
1337 	.set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
1338 	.atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
1339 	.atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
1340 	.ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
1341 	.mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
1342 	.sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
1343 	.signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
1344 	.signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
1345 	.enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
1346 	.enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
1347 	.cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
1348 	.rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
1349 	.pdev_reset = WMI_10X_PDEV_PARAM_PDEV_RESET,
1350 	.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
1351 	.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1352 	.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1353 	.enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED,
1354 };
1355 
1356 /* firmware 10.2 specific mappings */
1357 static struct wmi_cmd_map wmi_10_2_cmd_map = {
1358 	.init_cmdid = WMI_10_2_INIT_CMDID,
1359 	.start_scan_cmdid = WMI_10_2_START_SCAN_CMDID,
1360 	.stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID,
1361 	.scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID,
1362 	.scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
1363 	.scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
1364 	.pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
1365 	.pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID,
1366 	.pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID,
1367 	.pdev_pktlog_enable_cmdid = WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
1368 	.pdev_pktlog_disable_cmdid = WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
1369 	.pdev_set_wmm_params_cmdid = WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
1370 	.pdev_set_ht_cap_ie_cmdid = WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
1371 	.pdev_set_vht_cap_ie_cmdid = WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
1372 	.pdev_set_quiet_mode_cmdid = WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
1373 	.pdev_green_ap_ps_enable_cmdid = WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
1374 	.pdev_get_tpc_config_cmdid = WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
1375 	.pdev_set_base_macaddr_cmdid = WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
1376 	.vdev_create_cmdid = WMI_10_2_VDEV_CREATE_CMDID,
1377 	.vdev_delete_cmdid = WMI_10_2_VDEV_DELETE_CMDID,
1378 	.vdev_start_request_cmdid = WMI_10_2_VDEV_START_REQUEST_CMDID,
1379 	.vdev_restart_request_cmdid = WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
1380 	.vdev_up_cmdid = WMI_10_2_VDEV_UP_CMDID,
1381 	.vdev_stop_cmdid = WMI_10_2_VDEV_STOP_CMDID,
1382 	.vdev_down_cmdid = WMI_10_2_VDEV_DOWN_CMDID,
1383 	.vdev_set_param_cmdid = WMI_10_2_VDEV_SET_PARAM_CMDID,
1384 	.vdev_install_key_cmdid = WMI_10_2_VDEV_INSTALL_KEY_CMDID,
1385 	.peer_create_cmdid = WMI_10_2_PEER_CREATE_CMDID,
1386 	.peer_delete_cmdid = WMI_10_2_PEER_DELETE_CMDID,
1387 	.peer_flush_tids_cmdid = WMI_10_2_PEER_FLUSH_TIDS_CMDID,
1388 	.peer_set_param_cmdid = WMI_10_2_PEER_SET_PARAM_CMDID,
1389 	.peer_assoc_cmdid = WMI_10_2_PEER_ASSOC_CMDID,
1390 	.peer_add_wds_entry_cmdid = WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
1391 	.peer_remove_wds_entry_cmdid = WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
1392 	.peer_mcast_group_cmdid = WMI_10_2_PEER_MCAST_GROUP_CMDID,
1393 	.bcn_tx_cmdid = WMI_10_2_BCN_TX_CMDID,
1394 	.pdev_send_bcn_cmdid = WMI_10_2_PDEV_SEND_BCN_CMDID,
1395 	.bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
1396 	.bcn_filter_rx_cmdid = WMI_10_2_BCN_FILTER_RX_CMDID,
1397 	.prb_req_filter_rx_cmdid = WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
1398 	.mgmt_tx_cmdid = WMI_10_2_MGMT_TX_CMDID,
1399 	.prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
1400 	.addba_clear_resp_cmdid = WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
1401 	.addba_send_cmdid = WMI_10_2_ADDBA_SEND_CMDID,
1402 	.addba_status_cmdid = WMI_10_2_ADDBA_STATUS_CMDID,
1403 	.delba_send_cmdid = WMI_10_2_DELBA_SEND_CMDID,
1404 	.addba_set_resp_cmdid = WMI_10_2_ADDBA_SET_RESP_CMDID,
1405 	.send_singleamsdu_cmdid = WMI_10_2_SEND_SINGLEAMSDU_CMDID,
1406 	.sta_powersave_mode_cmdid = WMI_10_2_STA_POWERSAVE_MODE_CMDID,
1407 	.sta_powersave_param_cmdid = WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
1408 	.sta_mimo_ps_mode_cmdid = WMI_10_2_STA_MIMO_PS_MODE_CMDID,
1409 	.pdev_dfs_enable_cmdid = WMI_10_2_PDEV_DFS_ENABLE_CMDID,
1410 	.pdev_dfs_disable_cmdid = WMI_10_2_PDEV_DFS_DISABLE_CMDID,
1411 	.roam_scan_mode = WMI_10_2_ROAM_SCAN_MODE,
1412 	.roam_scan_rssi_threshold = WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
1413 	.roam_scan_period = WMI_10_2_ROAM_SCAN_PERIOD,
1414 	.roam_scan_rssi_change_threshold =
1415 				WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
1416 	.roam_ap_profile = WMI_10_2_ROAM_AP_PROFILE,
1417 	.ofl_scan_add_ap_profile = WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
1418 	.ofl_scan_remove_ap_profile = WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
1419 	.ofl_scan_period = WMI_10_2_OFL_SCAN_PERIOD,
1420 	.p2p_dev_set_device_info = WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
1421 	.p2p_dev_set_discoverability = WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
1422 	.p2p_go_set_beacon_ie = WMI_10_2_P2P_GO_SET_BEACON_IE,
1423 	.p2p_go_set_probe_resp_ie = WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
1424 	.p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
1425 	.ap_ps_peer_param_cmdid = WMI_10_2_AP_PS_PEER_PARAM_CMDID,
1426 	.ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
1427 	.peer_rate_retry_sched_cmdid = WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
1428 	.wlan_profile_trigger_cmdid = WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
1429 	.wlan_profile_set_hist_intvl_cmdid =
1430 				WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
1431 	.wlan_profile_get_profile_data_cmdid =
1432 				WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
1433 	.wlan_profile_enable_profile_id_cmdid =
1434 				WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
1435 	.wlan_profile_list_profile_id_cmdid =
1436 				WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
1437 	.pdev_suspend_cmdid = WMI_10_2_PDEV_SUSPEND_CMDID,
1438 	.pdev_resume_cmdid = WMI_10_2_PDEV_RESUME_CMDID,
1439 	.add_bcn_filter_cmdid = WMI_10_2_ADD_BCN_FILTER_CMDID,
1440 	.rmv_bcn_filter_cmdid = WMI_10_2_RMV_BCN_FILTER_CMDID,
1441 	.wow_add_wake_pattern_cmdid = WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
1442 	.wow_del_wake_pattern_cmdid = WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
1443 	.wow_enable_disable_wake_event_cmdid =
1444 				WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
1445 	.wow_enable_cmdid = WMI_10_2_WOW_ENABLE_CMDID,
1446 	.wow_hostwakeup_from_sleep_cmdid =
1447 				WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
1448 	.rtt_measreq_cmdid = WMI_10_2_RTT_MEASREQ_CMDID,
1449 	.rtt_tsf_cmdid = WMI_10_2_RTT_TSF_CMDID,
1450 	.vdev_spectral_scan_configure_cmdid =
1451 				WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
1452 	.vdev_spectral_scan_enable_cmdid =
1453 				WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
1454 	.request_stats_cmdid = WMI_10_2_REQUEST_STATS_CMDID,
1455 	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
1456 	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
1457 	.gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
1458 	.csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
1459 	.csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
1460 	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
1461 	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
1462 	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
1463 	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
1464 	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
1465 	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
1466 	.echo_cmdid = WMI_10_2_ECHO_CMDID,
1467 	.pdev_utf_cmdid = WMI_10_2_PDEV_UTF_CMDID,
1468 	.dbglog_cfg_cmdid = WMI_10_2_DBGLOG_CFG_CMDID,
1469 	.pdev_qvit_cmdid = WMI_10_2_PDEV_QVIT_CMDID,
1470 	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
1471 	.vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
1472 	.vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
1473 	.force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
1474 	.gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
1475 	.gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
1476 	.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
1477 	.pdev_enable_adaptive_cca_cmdid = WMI_CMD_UNSUPPORTED,
1478 	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
1479 	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
1480 	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
1481 	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
1482 	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
1483 	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
1484 	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
1485 	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
1486 	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
1487 	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
1488 	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
1489 	.nan_cmdid = WMI_CMD_UNSUPPORTED,
1490 	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
1491 	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
1492 	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
1493 	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
1494 	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
1495 	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
1496 	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
1497 	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
1498 	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
1499 	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
1500 	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
1501 	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
1502 	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
1503 	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
1504 	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
1505 	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
1506 	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
1507 	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
1508 	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
1509 	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
1510 	.pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED,
1511 	.radar_found_cmdid = WMI_CMD_UNSUPPORTED,
1512 };
1513 
1514 static struct wmi_pdev_param_map wmi_10_4_pdev_param_map = {
1515 	.tx_chain_mask = WMI_10_4_PDEV_PARAM_TX_CHAIN_MASK,
1516 	.rx_chain_mask = WMI_10_4_PDEV_PARAM_RX_CHAIN_MASK,
1517 	.txpower_limit2g = WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT2G,
1518 	.txpower_limit5g = WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT5G,
1519 	.txpower_scale = WMI_10_4_PDEV_PARAM_TXPOWER_SCALE,
1520 	.beacon_gen_mode = WMI_10_4_PDEV_PARAM_BEACON_GEN_MODE,
1521 	.beacon_tx_mode = WMI_10_4_PDEV_PARAM_BEACON_TX_MODE,
1522 	.resmgr_offchan_mode = WMI_10_4_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
1523 	.protection_mode = WMI_10_4_PDEV_PARAM_PROTECTION_MODE,
1524 	.dynamic_bw = WMI_10_4_PDEV_PARAM_DYNAMIC_BW,
1525 	.non_agg_sw_retry_th = WMI_10_4_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
1526 	.agg_sw_retry_th = WMI_10_4_PDEV_PARAM_AGG_SW_RETRY_TH,
1527 	.sta_kickout_th = WMI_10_4_PDEV_PARAM_STA_KICKOUT_TH,
1528 	.ac_aggrsize_scaling = WMI_10_4_PDEV_PARAM_AC_AGGRSIZE_SCALING,
1529 	.ltr_enable = WMI_10_4_PDEV_PARAM_LTR_ENABLE,
1530 	.ltr_ac_latency_be = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BE,
1531 	.ltr_ac_latency_bk = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BK,
1532 	.ltr_ac_latency_vi = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VI,
1533 	.ltr_ac_latency_vo = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VO,
1534 	.ltr_ac_latency_timeout = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
1535 	.ltr_sleep_override = WMI_10_4_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
1536 	.ltr_rx_override = WMI_10_4_PDEV_PARAM_LTR_RX_OVERRIDE,
1537 	.ltr_tx_activity_timeout = WMI_10_4_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
1538 	.l1ss_enable = WMI_10_4_PDEV_PARAM_L1SS_ENABLE,
1539 	.dsleep_enable = WMI_10_4_PDEV_PARAM_DSLEEP_ENABLE,
1540 	.pcielp_txbuf_flush = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
1541 	.pcielp_txbuf_watermark = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
1542 	.pcielp_txbuf_tmo_en = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
1543 	.pcielp_txbuf_tmo_value = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
1544 	.pdev_stats_update_period =
1545 			WMI_10_4_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
1546 	.vdev_stats_update_period =
1547 			WMI_10_4_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
1548 	.peer_stats_update_period =
1549 			WMI_10_4_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
1550 	.bcnflt_stats_update_period =
1551 			WMI_10_4_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
1552 	.pmf_qos = WMI_10_4_PDEV_PARAM_PMF_QOS,
1553 	.arp_ac_override = WMI_10_4_PDEV_PARAM_ARP_AC_OVERRIDE,
1554 	.dcs = WMI_10_4_PDEV_PARAM_DCS,
1555 	.ani_enable = WMI_10_4_PDEV_PARAM_ANI_ENABLE,
1556 	.ani_poll_period = WMI_10_4_PDEV_PARAM_ANI_POLL_PERIOD,
1557 	.ani_listen_period = WMI_10_4_PDEV_PARAM_ANI_LISTEN_PERIOD,
1558 	.ani_ofdm_level = WMI_10_4_PDEV_PARAM_ANI_OFDM_LEVEL,
1559 	.ani_cck_level = WMI_10_4_PDEV_PARAM_ANI_CCK_LEVEL,
1560 	.dyntxchain = WMI_10_4_PDEV_PARAM_DYNTXCHAIN,
1561 	.proxy_sta = WMI_10_4_PDEV_PARAM_PROXY_STA,
1562 	.idle_ps_config = WMI_10_4_PDEV_PARAM_IDLE_PS_CONFIG,
1563 	.power_gating_sleep = WMI_10_4_PDEV_PARAM_POWER_GATING_SLEEP,
1564 	.fast_channel_reset = WMI_10_4_PDEV_PARAM_FAST_CHANNEL_RESET,
1565 	.burst_dur = WMI_10_4_PDEV_PARAM_BURST_DUR,
1566 	.burst_enable = WMI_10_4_PDEV_PARAM_BURST_ENABLE,
1567 	.cal_period = WMI_10_4_PDEV_PARAM_CAL_PERIOD,
1568 	.aggr_burst = WMI_10_4_PDEV_PARAM_AGGR_BURST,
1569 	.rx_decap_mode = WMI_10_4_PDEV_PARAM_RX_DECAP_MODE,
1570 	.smart_antenna_default_antenna =
1571 			WMI_10_4_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA,
1572 	.igmpmld_override = WMI_10_4_PDEV_PARAM_IGMPMLD_OVERRIDE,
1573 	.igmpmld_tid = WMI_10_4_PDEV_PARAM_IGMPMLD_TID,
1574 	.antenna_gain = WMI_10_4_PDEV_PARAM_ANTENNA_GAIN,
1575 	.rx_filter = WMI_10_4_PDEV_PARAM_RX_FILTER,
1576 	.set_mcast_to_ucast_tid = WMI_10_4_PDEV_SET_MCAST_TO_UCAST_TID,
1577 	.proxy_sta_mode = WMI_10_4_PDEV_PARAM_PROXY_STA_MODE,
1578 	.set_mcast2ucast_mode = WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_MODE,
1579 	.set_mcast2ucast_buffer = WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_BUFFER,
1580 	.remove_mcast2ucast_buffer =
1581 			WMI_10_4_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
1582 	.peer_sta_ps_statechg_enable =
1583 			WMI_10_4_PDEV_PEER_STA_PS_STATECHG_ENABLE,
1584 	.igmpmld_ac_override = WMI_10_4_PDEV_PARAM_IGMPMLD_AC_OVERRIDE,
1585 	.block_interbss = WMI_10_4_PDEV_PARAM_BLOCK_INTERBSS,
1586 	.set_disable_reset_cmdid = WMI_10_4_PDEV_PARAM_SET_DISABLE_RESET_CMDID,
1587 	.set_msdu_ttl_cmdid = WMI_10_4_PDEV_PARAM_SET_MSDU_TTL_CMDID,
1588 	.set_ppdu_duration_cmdid = WMI_10_4_PDEV_PARAM_SET_PPDU_DURATION_CMDID,
1589 	.txbf_sound_period_cmdid = WMI_10_4_PDEV_PARAM_TXBF_SOUND_PERIOD_CMDID,
1590 	.set_promisc_mode_cmdid = WMI_10_4_PDEV_PARAM_SET_PROMISC_MODE_CMDID,
1591 	.set_burst_mode_cmdid = WMI_10_4_PDEV_PARAM_SET_BURST_MODE_CMDID,
1592 	.en_stats = WMI_10_4_PDEV_PARAM_EN_STATS,
1593 	.mu_group_policy = WMI_10_4_PDEV_PARAM_MU_GROUP_POLICY,
1594 	.noise_detection = WMI_10_4_PDEV_PARAM_NOISE_DETECTION,
1595 	.noise_threshold = WMI_10_4_PDEV_PARAM_NOISE_THRESHOLD,
1596 	.dpd_enable = WMI_10_4_PDEV_PARAM_DPD_ENABLE,
1597 	.set_mcast_bcast_echo = WMI_10_4_PDEV_PARAM_SET_MCAST_BCAST_ECHO,
1598 	.atf_strict_sch = WMI_10_4_PDEV_PARAM_ATF_STRICT_SCH,
1599 	.atf_sched_duration = WMI_10_4_PDEV_PARAM_ATF_SCHED_DURATION,
1600 	.ant_plzn = WMI_10_4_PDEV_PARAM_ANT_PLZN,
1601 	.mgmt_retry_limit = WMI_10_4_PDEV_PARAM_MGMT_RETRY_LIMIT,
1602 	.sensitivity_level = WMI_10_4_PDEV_PARAM_SENSITIVITY_LEVEL,
1603 	.signed_txpower_2g = WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_2G,
1604 	.signed_txpower_5g = WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_5G,
1605 	.enable_per_tid_amsdu = WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMSDU,
1606 	.enable_per_tid_ampdu = WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMPDU,
1607 	.cca_threshold = WMI_10_4_PDEV_PARAM_CCA_THRESHOLD,
1608 	.rts_fixed_rate = WMI_10_4_PDEV_PARAM_RTS_FIXED_RATE,
1609 	.pdev_reset = WMI_10_4_PDEV_PARAM_PDEV_RESET,
1610 	.wapi_mbssid_offset = WMI_10_4_PDEV_PARAM_WAPI_MBSSID_OFFSET,
1611 	.arp_srcaddr = WMI_10_4_PDEV_PARAM_ARP_SRCADDR,
1612 	.arp_dstaddr = WMI_10_4_PDEV_PARAM_ARP_DSTADDR,
1613 	.enable_btcoex = WMI_10_4_PDEV_PARAM_ENABLE_BTCOEX,
1614 };
1615 
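/* Map the generic WMI_CIPHER_* values used by the driver onto the codes
 * each firmware ABI expects. The main WMI table below is an identity
 * mapping; the TLV variant that follows uses its own numbering.
 */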
1616 static const u8 wmi_key_cipher_suites[] = {
1617 	[WMI_CIPHER_NONE] = WMI_CIPHER_NONE,
1618 	[WMI_CIPHER_WEP] = WMI_CIPHER_WEP,
1619 	[WMI_CIPHER_TKIP] = WMI_CIPHER_TKIP,
1620 	[WMI_CIPHER_AES_OCB] = WMI_CIPHER_AES_OCB,
1621 	[WMI_CIPHER_AES_CCM] = WMI_CIPHER_AES_CCM,
1622 	[WMI_CIPHER_WAPI] = WMI_CIPHER_WAPI,
1623 	[WMI_CIPHER_CKIP] = WMI_CIPHER_CKIP,
1624 	[WMI_CIPHER_AES_CMAC] = WMI_CIPHER_AES_CMAC,
1625 	[WMI_CIPHER_AES_GCM] = WMI_CIPHER_AES_GCM,
1626 };
1627 
1628 static const u8 wmi_tlv_key_cipher_suites[] = {
1629 	[WMI_CIPHER_NONE] = WMI_TLV_CIPHER_NONE,
1630 	[WMI_CIPHER_WEP] = WMI_TLV_CIPHER_WEP,
1631 	[WMI_CIPHER_TKIP] = WMI_TLV_CIPHER_TKIP,
1632 	[WMI_CIPHER_AES_OCB] = WMI_TLV_CIPHER_AES_OCB,
1633 	[WMI_CIPHER_AES_CCM] = WMI_TLV_CIPHER_AES_CCM,
1634 	[WMI_CIPHER_WAPI] = WMI_TLV_CIPHER_WAPI,
1635 	[WMI_CIPHER_CKIP] = WMI_TLV_CIPHER_CKIP,
1636 	[WMI_CIPHER_AES_CMAC] = WMI_TLV_CIPHER_AES_CMAC,
1637 	[WMI_CIPHER_AES_GCM] = WMI_TLV_CIPHER_AES_GCM,
1638 };
1639 
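/* Peer flag bit positions differ between firmware branches, so each
 * branch carries its own wmi_peer_flags_map translating the generic
 * peer flags into the matching WMI_*_PEER_* bits.
 */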
1640 static const struct wmi_peer_flags_map wmi_peer_flags_map = {
1641 	.auth = WMI_PEER_AUTH,
1642 	.qos = WMI_PEER_QOS,
1643 	.need_ptk_4_way = WMI_PEER_NEED_PTK_4_WAY,
1644 	.need_gtk_2_way = WMI_PEER_NEED_GTK_2_WAY,
1645 	.apsd = WMI_PEER_APSD,
1646 	.ht = WMI_PEER_HT,
1647 	.bw40 = WMI_PEER_40MHZ,
1648 	.stbc = WMI_PEER_STBC,
1649 	.ldbc = WMI_PEER_LDPC,
1650 	.dyn_mimops = WMI_PEER_DYN_MIMOPS,
1651 	.static_mimops = WMI_PEER_STATIC_MIMOPS,
1652 	.spatial_mux = WMI_PEER_SPATIAL_MUX,
1653 	.vht = WMI_PEER_VHT,
1654 	.bw80 = WMI_PEER_80MHZ,
1655 	.vht_2g = WMI_PEER_VHT_2G,
1656 	.pmf = WMI_PEER_PMF,
1657 	.bw160 = WMI_PEER_160MHZ,
1658 };
1659 
1660 static const struct wmi_peer_flags_map wmi_10x_peer_flags_map = {
1661 	.auth = WMI_10X_PEER_AUTH,
1662 	.qos = WMI_10X_PEER_QOS,
1663 	.need_ptk_4_way = WMI_10X_PEER_NEED_PTK_4_WAY,
1664 	.need_gtk_2_way = WMI_10X_PEER_NEED_GTK_2_WAY,
1665 	.apsd = WMI_10X_PEER_APSD,
1666 	.ht = WMI_10X_PEER_HT,
1667 	.bw40 = WMI_10X_PEER_40MHZ,
1668 	.stbc = WMI_10X_PEER_STBC,
1669 	.ldbc = WMI_10X_PEER_LDPC,
1670 	.dyn_mimops = WMI_10X_PEER_DYN_MIMOPS,
1671 	.static_mimops = WMI_10X_PEER_STATIC_MIMOPS,
1672 	.spatial_mux = WMI_10X_PEER_SPATIAL_MUX,
1673 	.vht = WMI_10X_PEER_VHT,
1674 	.bw80 = WMI_10X_PEER_80MHZ,
1675 	.bw160 = WMI_10X_PEER_160MHZ,
1676 };
1677 
1678 static const struct wmi_peer_flags_map wmi_10_2_peer_flags_map = {
1679 	.auth = WMI_10_2_PEER_AUTH,
1680 	.qos = WMI_10_2_PEER_QOS,
1681 	.need_ptk_4_way = WMI_10_2_PEER_NEED_PTK_4_WAY,
1682 	.need_gtk_2_way = WMI_10_2_PEER_NEED_GTK_2_WAY,
1683 	.apsd = WMI_10_2_PEER_APSD,
1684 	.ht = WMI_10_2_PEER_HT,
1685 	.bw40 = WMI_10_2_PEER_40MHZ,
1686 	.stbc = WMI_10_2_PEER_STBC,
1687 	.ldbc = WMI_10_2_PEER_LDPC,
1688 	.dyn_mimops = WMI_10_2_PEER_DYN_MIMOPS,
1689 	.static_mimops = WMI_10_2_PEER_STATIC_MIMOPS,
1690 	.spatial_mux = WMI_10_2_PEER_SPATIAL_MUX,
1691 	.vht = WMI_10_2_PEER_VHT,
1692 	.bw80 = WMI_10_2_PEER_80MHZ,
1693 	.vht_2g = WMI_10_2_PEER_VHT_2G,
1694 	.pmf = WMI_10_2_PEER_PMF,
1695 	.bw160 = WMI_10_2_PEER_160MHZ,
1696 };
1697 
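/* Translate a host wmi_channel_arg into the firmware's wmi_channel
 * layout: frequency, center frequencies, channel flags and power
 * limits. For VHT80+80 and VHT160 the secondary segment is also
 * checked against the regulatory radar flag and, if needed, tagged
 * with WMI_CHAN_FLAG_DFS_CFREQ2.
 */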
1698 void ath10k_wmi_put_wmi_channel(struct ath10k *ar, struct wmi_channel *ch,
1699 				const struct wmi_channel_arg *arg)
1700 {
1701 	u32 flags = 0;
1702 	struct ieee80211_channel *chan = NULL;
1703 
1704 	memset(ch, 0, sizeof(*ch));
1705 
1706 	if (arg->passive)
1707 		flags |= WMI_CHAN_FLAG_PASSIVE;
1708 	if (arg->allow_ibss)
1709 		flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED;
1710 	if (arg->allow_ht)
1711 		flags |= WMI_CHAN_FLAG_ALLOW_HT;
1712 	if (arg->allow_vht)
1713 		flags |= WMI_CHAN_FLAG_ALLOW_VHT;
1714 	if (arg->ht40plus)
1715 		flags |= WMI_CHAN_FLAG_HT40_PLUS;
1716 	if (arg->chan_radar)
1717 		flags |= WMI_CHAN_FLAG_DFS;
1718 
1719 	ch->band_center_freq2 = 0;
1720 	ch->mhz = __cpu_to_le32(arg->freq);
1721 	ch->band_center_freq1 = __cpu_to_le32(arg->band_center_freq1);
1722 	if (arg->mode == MODE_11AC_VHT80_80) {
1723 		ch->band_center_freq2 = __cpu_to_le32(arg->band_center_freq2);
1724 		chan = ieee80211_get_channel(ar->hw->wiphy,
1725 					     arg->band_center_freq2 - 10);
1726 	}
1727 
1728 	if (arg->mode == MODE_11AC_VHT160) {
1729 		u32 band_center_freq1;
1730 		u32 band_center_freq2;
1731 
1732 		if (arg->freq > arg->band_center_freq1) {
1733 			band_center_freq1 = arg->band_center_freq1 + 40;
1734 			band_center_freq2 = arg->band_center_freq1 - 40;
1735 		} else {
1736 			band_center_freq1 = arg->band_center_freq1 - 40;
1737 			band_center_freq2 = arg->band_center_freq1 + 40;
1738 		}
1739 
1740 		ch->band_center_freq1 =
1741 					__cpu_to_le32(band_center_freq1);
1742 		/* Subtract 10 to get a defined 5 GHz channel frequency */
1743 		chan = ieee80211_get_channel(ar->hw->wiphy,
1744 					     band_center_freq2 - 10);
1745 		/* The center frequency of the entire VHT160 */
1746 		ch->band_center_freq2 = __cpu_to_le32(arg->band_center_freq1);
1747 	}
1748 
1749 	if (chan && chan->flags & IEEE80211_CHAN_RADAR)
1750 		flags |= WMI_CHAN_FLAG_DFS_CFREQ2;
1751 
1752 	ch->min_power = arg->min_power;
1753 	ch->max_power = arg->max_power;
1754 	ch->reg_power = arg->max_reg_power;
1755 	ch->antenna_max = arg->max_antenna_gain;
1756 	ch->max_tx_power = arg->max_power;
1757 
1758 	/* mode & flags share storage */
1759 	ch->mode = arg->mode;
1760 	ch->flags |= __cpu_to_le32(flags);
1761 }
1762 
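/* Block until the firmware's service ready event arrives or
 * WMI_SERVICE_READY_TIMEOUT_HZ expires; the polling fallback below
 * covers the PCI interrupt corner case described inline.
 */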
1763 int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
1764 {
1765 	unsigned long time_left, i;
1766 
1767 	time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
1768 						WMI_SERVICE_READY_TIMEOUT_HZ);
1769 	if (!time_left) {
1770 		/* Sometimes the PCI HIF doesn't receive an interrupt
1771 		 * for the service ready message even though the buffer
1772 		 * was completed. A PCIe sniffer shows that this is
1773 		 * because the corresponding CE ring doesn't fire it.
1774 		 * Work around this by polling the CE rings once.
1775 		 */
1776 		ath10k_warn(ar, "failed to receive service ready completion, polling..\n");
1777 
1778 		for (i = 0; i < CE_COUNT; i++)
1779 			ath10k_hif_send_complete_check(ar, i, 1);
1780 
1781 		time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
1782 							WMI_SERVICE_READY_TIMEOUT_HZ);
1783 		if (!time_left) {
1784 			ath10k_warn(ar, "polling timed out\n");
1785 			return -ETIMEDOUT;
1786 		}
1787 
1788 		ath10k_warn(ar, "service ready completion received, continuing normally\n");
1789 	}
1790 
1791 	return 0;
1792 }
1793 
1794 int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
1795 {
1796 	unsigned long time_left;
1797 
1798 	time_left = wait_for_completion_timeout(&ar->wmi.unified_ready,
1799 						WMI_UNIFIED_READY_TIMEOUT_HZ);
1800 	if (!time_left)
1801 		return -ETIMEDOUT;
1802 	return 0;
1803 }
1804 
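/* Allocate a WMI command buffer: the length is rounded up to a 4-byte
 * multiple, WMI_SKB_HEADROOM is reserved for headers prepended later,
 * and the payload area is zeroed.
 */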
1805 struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len)
1806 {
1807 	struct sk_buff *skb;
1808 	u32 round_len = roundup(len, 4);
1809 
1810 	skb = ath10k_htc_alloc_skb(ar, WMI_SKB_HEADROOM + round_len);
1811 	if (!skb)
1812 		return NULL;
1813 
1814 	skb_reserve(skb, WMI_SKB_HEADROOM);
1815 	if (!IS_ALIGNED((unsigned long)skb->data, 4))
1816 		ath10k_warn(ar, "Unaligned WMI skb\n");
1817 
1818 	skb_put(skb, round_len);
1819 	memset(skb->data, 0, round_len);
1820 
1821 	return skb;
1822 }
1823 
1824 static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
1825 {
1826 	dev_kfree_skb(skb);
1827 }
1828 
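/* Prepend the wmi_cmd_hdr carrying cmd_id and hand the buffer to HTC
 * without waiting for TX credits. On failure the header is pulled back
 * off so the caller may retry with the same skb.
 */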
1829 int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
1830 			       u32 cmd_id)
1831 {
1832 	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
1833 	struct wmi_cmd_hdr *cmd_hdr;
1834 	int ret;
1835 	u32 cmd = 0;
1836 
1837 	if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
1838 		return -ENOMEM;
1839 
1840 	cmd |= SM(cmd_id, WMI_CMD_HDR_CMD_ID);
1841 
1842 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
1843 	cmd_hdr->cmd_id = __cpu_to_le32(cmd);
1844 
1845 	memset(skb_cb, 0, sizeof(*skb_cb));
1846 	trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len);
1847 	ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
1848 
1849 	if (ret)
1850 		goto err_pull;
1851 
1852 	return 0;
1853 
1854 err_pull:
1855 	skb_pull(skb, sizeof(struct wmi_cmd_hdr));
1856 	return ret;
1857 }
1858 
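/* Push a scheduled beacon for one vif by reference (no copy). The
 * beacon state moves SCHEDULED -> SENDING -> SENT, and ar->data_lock
 * is dropped around the actual WMI send.
 */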
1859 static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif)
1860 {
1861 	struct ath10k *ar = arvif->ar;
1862 	struct ath10k_skb_cb *cb;
1863 	struct sk_buff *bcn;
1864 	bool dtim_zero;
1865 	bool deliver_cab;
1866 	int ret;
1867 
1868 	spin_lock_bh(&ar->data_lock);
1869 
1870 	bcn = arvif->beacon;
1871 
1872 	if (!bcn)
1873 		goto unlock;
1874 
1875 	cb = ATH10K_SKB_CB(bcn);
1876 
1877 	switch (arvif->beacon_state) {
1878 	case ATH10K_BEACON_SENDING:
1879 	case ATH10K_BEACON_SENT:
1880 		break;
1881 	case ATH10K_BEACON_SCHEDULED:
1882 		arvif->beacon_state = ATH10K_BEACON_SENDING;
1883 		spin_unlock_bh(&ar->data_lock);
1884 
1885 		dtim_zero = !!(cb->flags & ATH10K_SKB_F_DTIM_ZERO);
1886 		deliver_cab = !!(cb->flags & ATH10K_SKB_F_DELIVER_CAB);
1887 		ret = ath10k_wmi_beacon_send_ref_nowait(arvif->ar,
1888 							arvif->vdev_id,
1889 							bcn->data, bcn->len,
1890 							cb->paddr,
1891 							dtim_zero,
1892 							deliver_cab);
1893 
1894 		spin_lock_bh(&ar->data_lock);
1895 
1896 		if (ret == 0)
1897 			arvif->beacon_state = ATH10K_BEACON_SENT;
1898 		else
1899 			arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
1900 	}
1901 
1902 unlock:
1903 	spin_unlock_bh(&ar->data_lock);
1904 }
1905 
1906 static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
1907 				       struct ieee80211_vif *vif)
1908 {
1909 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
1910 
1911 	ath10k_wmi_tx_beacon_nowait(arvif);
1912 }
1913 
1914 static void ath10k_wmi_tx_beacons_nowait(struct ath10k *ar)
1915 {
1916 	ieee80211_iterate_active_interfaces_atomic(ar->hw,
1917 						   ATH10K_ITER_NORMAL_FLAGS,
1918 						   ath10k_wmi_tx_beacons_iter,
1919 						   NULL);
1920 }
1921 
1922 static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar)
1923 {
1924 	/* try to send pending beacons first. they take priority */
1925 	ath10k_wmi_tx_beacons_nowait(ar);
1926 
1927 	wake_up(&ar->wmi.tx_credits_wq);
1928 }
1929 
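/* Blocking WMI send: keeps retrying ath10k_wmi_cmd_send_nowait() while
 * HTC is out of TX credits (-EAGAIN), flushing pending beacons first,
 * for up to 3 seconds. The skb is consumed on error and hardware
 * recovery is started if the command times out.
 *
 * Illustrative call pattern (a minimal sketch, not taken from this
 * file; the chosen gen op and cmdid field are only examples):
 *
 *	skb = ath10k_wmi_op_gen_mgmt_tx(ar, msdu);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->mgmt_tx_cmdid);
 */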
1930 int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
1931 {
1932 	int ret = -EOPNOTSUPP;
1933 
1934 	might_sleep();
1935 
1936 	if (cmd_id == WMI_CMD_UNSUPPORTED) {
1937 		ath10k_warn(ar, "wmi command %d is not supported by firmware\n",
1938 			    cmd_id);
1939 		return ret;
1940 	}
1941 
1942 	wait_event_timeout(ar->wmi.tx_credits_wq, ({
1943 		/* try to send pending beacons first. they take priority */
1944 		ath10k_wmi_tx_beacons_nowait(ar);
1945 
1946 		ret = ath10k_wmi_cmd_send_nowait(ar, skb, cmd_id);
1947 
1948 		if (ret && test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
1949 			ret = -ESHUTDOWN;
1950 
1951 		(ret != -EAGAIN);
1952 	}), 3 * HZ);
1953 
1954 	if (ret)
1955 		dev_kfree_skb_any(skb);
1956 
1957 	if (ret == -EAGAIN) {
1958 		ath10k_warn(ar, "wmi command %d timeout, restarting hardware\n",
1959 			    cmd_id);
1960 		ath10k_core_start_recovery(ar);
1961 	}
1962 
1963 	return ret;
1964 }
1965 
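/* Build a WMI_MGMT_TX command around the given management frame. For
 * protected robust management frames (action/deauth/disassoc) the
 * declared buffer length is enlarged by the MMIE or CCMP/GCMP MIC
 * size, and the command length is rounded up to 4 bytes.
 */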
1966 static struct sk_buff *
1967 ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
1968 {
1969 	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
1970 	struct ath10k_vif *arvif;
1971 	struct wmi_mgmt_tx_cmd *cmd;
1972 	struct ieee80211_hdr *hdr;
1973 	struct sk_buff *skb;
1974 	int len;
1975 	u32 vdev_id;
1976 	u32 buf_len = msdu->len;
1977 	u16 fc;
1978 	const u8 *peer_addr;
1979 
1980 	hdr = (struct ieee80211_hdr *)msdu->data;
1981 	fc = le16_to_cpu(hdr->frame_control);
1982 
1983 	if (cb->vif) {
1984 		arvif = (void *)cb->vif->drv_priv;
1985 		vdev_id = arvif->vdev_id;
1986 	} else {
1987 		vdev_id = 0;
1988 	}
1989 
1990 	if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control)))
1991 		return ERR_PTR(-EINVAL);
1992 
1993 	len = sizeof(cmd->hdr) + msdu->len;
1994 
1995 	if ((ieee80211_is_action(hdr->frame_control) ||
1996 	     ieee80211_is_deauth(hdr->frame_control) ||
1997 	     ieee80211_is_disassoc(hdr->frame_control)) &&
1998 	     ieee80211_has_protected(hdr->frame_control)) {
1999 		peer_addr = hdr->addr1;
2000 		if (is_multicast_ether_addr(peer_addr)) {
2001 			len += sizeof(struct ieee80211_mmie_16);
2002 			buf_len += sizeof(struct ieee80211_mmie_16);
2003 		} else {
2004 			if (cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP ||
2005 			    cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP_256) {
2006 				len += IEEE80211_GCMP_MIC_LEN;
2007 				buf_len += IEEE80211_GCMP_MIC_LEN;
2008 			} else {
2009 				len += IEEE80211_CCMP_MIC_LEN;
2010 				buf_len += IEEE80211_CCMP_MIC_LEN;
2011 			}
2012 		}
2013 	}
2014 
2015 	len = round_up(len, 4);
2016 
2017 	skb = ath10k_wmi_alloc_skb(ar, len);
2018 	if (!skb)
2019 		return ERR_PTR(-ENOMEM);
2020 
2021 	cmd = (struct wmi_mgmt_tx_cmd *)skb->data;
2022 
2023 	cmd->hdr.vdev_id = __cpu_to_le32(vdev_id);
2024 	cmd->hdr.tx_rate = 0;
2025 	cmd->hdr.tx_power = 0;
2026 	cmd->hdr.buf_len = __cpu_to_le32(buf_len);
2027 
2028 	ether_addr_copy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr));
2029 	memcpy(cmd->buf, msdu->data, msdu->len);
2030 
2031 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %pK len %d ftype %02x stype %02x\n",
2032 		   msdu, skb->len, fc & IEEE80211_FCTL_FTYPE,
2033 		   fc & IEEE80211_FCTL_STYPE);
2034 	trace_ath10k_tx_hdr(ar, skb->data, skb->len);
2035 	trace_ath10k_tx_payload(ar, skb->data, skb->len);
2036 
2037 	return skb;
2038 }
2039 
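/* The handlers below advance ar->scan.state in response to firmware
 * scan events. All of them require ar->data_lock to be held and warn
 * when an event arrives in a state it is not expected in.
 */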
2040 static void ath10k_wmi_event_scan_started(struct ath10k *ar)
2041 {
2042 	lockdep_assert_held(&ar->data_lock);
2043 
2044 	switch (ar->scan.state) {
2045 	case ATH10K_SCAN_IDLE:
2046 	case ATH10K_SCAN_RUNNING:
2047 	case ATH10K_SCAN_ABORTING:
2048 		ath10k_warn(ar, "received scan started event in an invalid scan state: %s (%d)\n",
2049 			    ath10k_scan_state_str(ar->scan.state),
2050 			    ar->scan.state);
2051 		break;
2052 	case ATH10K_SCAN_STARTING:
2053 		ar->scan.state = ATH10K_SCAN_RUNNING;
2054 
2055 		if (ar->scan.is_roc)
2056 			ieee80211_ready_on_channel(ar->hw);
2057 
2058 		complete(&ar->scan.started);
2059 		break;
2060 	}
2061 }
2062 
2063 static void ath10k_wmi_event_scan_start_failed(struct ath10k *ar)
2064 {
2065 	lockdep_assert_held(&ar->data_lock);
2066 
2067 	switch (ar->scan.state) {
2068 	case ATH10K_SCAN_IDLE:
2069 	case ATH10K_SCAN_RUNNING:
2070 	case ATH10K_SCAN_ABORTING:
2071 		ath10k_warn(ar, "received scan start failed event in an invalid scan state: %s (%d)\n",
2072 			    ath10k_scan_state_str(ar->scan.state),
2073 			    ar->scan.state);
2074 		break;
2075 	case ATH10K_SCAN_STARTING:
2076 		complete(&ar->scan.started);
2077 		__ath10k_scan_finish(ar);
2078 		break;
2079 	}
2080 }
2081 
2082 static void ath10k_wmi_event_scan_completed(struct ath10k *ar)
2083 {
2084 	lockdep_assert_held(&ar->data_lock);
2085 
2086 	switch (ar->scan.state) {
2087 	case ATH10K_SCAN_IDLE:
2088 	case ATH10K_SCAN_STARTING:
2089 		/* One suspected reason a scan can complete while still starting
2090 		 * is that firmware fails to deliver all scan events to the
2091 		 * host, e.g. when the transport pipe is full. This has been
2092 		 * observed with spectral scan phyerr events starving the wmi
2093 		 * transport pipe. In such a case the "scan completed" event
2094 		 * should be (and is) ignored by the host as it may just be the
2095 		 * firmware's scan state machine recovering.
2096 		 */
2097 		ath10k_warn(ar, "received scan completed event in an invalid scan state: %s (%d)\n",
2098 			    ath10k_scan_state_str(ar->scan.state),
2099 			    ar->scan.state);
2100 		break;
2101 	case ATH10K_SCAN_RUNNING:
2102 	case ATH10K_SCAN_ABORTING:
2103 		__ath10k_scan_finish(ar);
2104 		break;
2105 	}
2106 }
2107 
2108 static void ath10k_wmi_event_scan_bss_chan(struct ath10k *ar)
2109 {
2110 	lockdep_assert_held(&ar->data_lock);
2111 
2112 	switch (ar->scan.state) {
2113 	case ATH10K_SCAN_IDLE:
2114 	case ATH10K_SCAN_STARTING:
2115 		ath10k_warn(ar, "received scan bss chan event in an invalid scan state: %s (%d)\n",
2116 			    ath10k_scan_state_str(ar->scan.state),
2117 			    ar->scan.state);
2118 		break;
2119 	case ATH10K_SCAN_RUNNING:
2120 	case ATH10K_SCAN_ABORTING:
2121 		ar->scan_channel = NULL;
2122 		break;
2123 	}
2124 }
2125 
2126 static void ath10k_wmi_event_scan_foreign_chan(struct ath10k *ar, u32 freq)
2127 {
2128 	lockdep_assert_held(&ar->data_lock);
2129 
2130 	switch (ar->scan.state) {
2131 	case ATH10K_SCAN_IDLE:
2132 	case ATH10K_SCAN_STARTING:
2133 		ath10k_warn(ar, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
2134 			    ath10k_scan_state_str(ar->scan.state),
2135 			    ar->scan.state);
2136 		break;
2137 	case ATH10K_SCAN_RUNNING:
2138 	case ATH10K_SCAN_ABORTING:
2139 		ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
2140 
2141 		if (ar->scan.is_roc && ar->scan.roc_freq == freq)
2142 			complete(&ar->scan.on_channel);
2143 		break;
2144 	}
2145 }
2146 
2147 static const char *
2148 ath10k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
2149 			       enum wmi_scan_completion_reason reason)
2150 {
2151 	switch (type) {
2152 	case WMI_SCAN_EVENT_STARTED:
2153 		return "started";
2154 	case WMI_SCAN_EVENT_COMPLETED:
2155 		switch (reason) {
2156 		case WMI_SCAN_REASON_COMPLETED:
2157 			return "completed";
2158 		case WMI_SCAN_REASON_CANCELLED:
2159 			return "completed [cancelled]";
2160 		case WMI_SCAN_REASON_PREEMPTED:
2161 			return "completed [preempted]";
2162 		case WMI_SCAN_REASON_TIMEDOUT:
2163 			return "completed [timedout]";
2164 		case WMI_SCAN_REASON_INTERNAL_FAILURE:
2165 			return "completed [internal err]";
2166 		case WMI_SCAN_REASON_MAX:
2167 			break;
2168 		}
2169 		return "completed [unknown]";
2170 	case WMI_SCAN_EVENT_BSS_CHANNEL:
2171 		return "bss channel";
2172 	case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
2173 		return "foreign channel";
2174 	case WMI_SCAN_EVENT_DEQUEUED:
2175 		return "dequeued";
2176 	case WMI_SCAN_EVENT_PREEMPTED:
2177 		return "preempted";
2178 	case WMI_SCAN_EVENT_START_FAILED:
2179 		return "start failed";
2180 	case WMI_SCAN_EVENT_RESTARTED:
2181 		return "restarted";
2182 	case WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT:
2183 		return "foreign channel exit";
2184 	default:
2185 		return "unknown";
2186 	}
2187 }
2188 
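/* Copy the scan event fields out of the skb as-is (still little
 * endian); ath10k_wmi_event_scan() converts them to CPU order.
 */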
2189 static int ath10k_wmi_op_pull_scan_ev(struct ath10k *ar, struct sk_buff *skb,
2190 				      struct wmi_scan_ev_arg *arg)
2191 {
2192 	struct wmi_scan_event *ev = (void *)skb->data;
2193 
2194 	if (skb->len < sizeof(*ev))
2195 		return -EPROTO;
2196 
2197 	skb_pull(skb, sizeof(*ev));
2198 	arg->event_type = ev->event_type;
2199 	arg->reason = ev->reason;
2200 	arg->channel_freq = ev->channel_freq;
2201 	arg->scan_req_id = ev->scan_req_id;
2202 	arg->scan_id = ev->scan_id;
2203 	arg->vdev_id = ev->vdev_id;
2204 
2205 	return 0;
2206 }
2207 
2208 int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
2209 {
2210 	struct wmi_scan_ev_arg arg = {};
2211 	enum wmi_scan_event_type event_type;
2212 	enum wmi_scan_completion_reason reason;
2213 	u32 freq;
2214 	u32 req_id;
2215 	u32 scan_id;
2216 	u32 vdev_id;
2217 	int ret;
2218 
2219 	ret = ath10k_wmi_pull_scan(ar, skb, &arg);
2220 	if (ret) {
2221 		ath10k_warn(ar, "failed to parse scan event: %d\n", ret);
2222 		return ret;
2223 	}
2224 
2225 	event_type = __le32_to_cpu(arg.event_type);
2226 	reason = __le32_to_cpu(arg.reason);
2227 	freq = __le32_to_cpu(arg.channel_freq);
2228 	req_id = __le32_to_cpu(arg.scan_req_id);
2229 	scan_id = __le32_to_cpu(arg.scan_id);
2230 	vdev_id = __le32_to_cpu(arg.vdev_id);
2231 
2232 	spin_lock_bh(&ar->data_lock);
2233 
2234 	ath10k_dbg(ar, ATH10K_DBG_WMI,
2235 		   "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
2236 		   ath10k_wmi_event_scan_type_str(event_type, reason),
2237 		   event_type, reason, freq, req_id, scan_id, vdev_id,
2238 		   ath10k_scan_state_str(ar->scan.state), ar->scan.state);
2239 
2240 	switch (event_type) {
2241 	case WMI_SCAN_EVENT_STARTED:
2242 		ath10k_wmi_event_scan_started(ar);
2243 		break;
2244 	case WMI_SCAN_EVENT_COMPLETED:
2245 		ath10k_wmi_event_scan_completed(ar);
2246 		break;
2247 	case WMI_SCAN_EVENT_BSS_CHANNEL:
2248 		ath10k_wmi_event_scan_bss_chan(ar);
2249 		break;
2250 	case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
2251 		ath10k_wmi_event_scan_foreign_chan(ar, freq);
2252 		break;
2253 	case WMI_SCAN_EVENT_START_FAILED:
2254 		ath10k_warn(ar, "received scan start failure event\n");
2255 		ath10k_wmi_event_scan_start_failed(ar);
2256 		break;
2257 	case WMI_SCAN_EVENT_DEQUEUED:
2258 	case WMI_SCAN_EVENT_PREEMPTED:
2259 	case WMI_SCAN_EVENT_RESTARTED:
2260 	case WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT:
2261 	default:
2262 		break;
2263 	}
2264 
2265 	spin_unlock_bh(&ar->data_lock);
2266 	return 0;
2267 }
2268 
2269 /* If keys are configured, HW decrypts all frames
2270  * with protected bit set. Mark such frames as decrypted.
2271  */
2272 static void ath10k_wmi_handle_wep_reauth(struct ath10k *ar,
2273 					 struct sk_buff *skb,
2274 					 struct ieee80211_rx_status *status)
2275 {
2276 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2277 	unsigned int hdrlen;
2278 	bool peer_key;
2279 	u8 *addr, keyidx;
2280 
2281 	if (!ieee80211_is_auth(hdr->frame_control) ||
2282 	    !ieee80211_has_protected(hdr->frame_control))
2283 		return;
2284 
2285 	hdrlen = ieee80211_hdrlen(hdr->frame_control);
2286 	if (skb->len < (hdrlen + IEEE80211_WEP_IV_LEN))
2287 		return;
2288 
2289 	keyidx = skb->data[hdrlen + (IEEE80211_WEP_IV_LEN - 1)] >> WEP_KEYID_SHIFT;
2290 	addr = ieee80211_get_SA(hdr);
2291 
2292 	spin_lock_bh(&ar->data_lock);
2293 	peer_key = ath10k_mac_is_peer_wep_key_set(ar, addr, keyidx);
2294 	spin_unlock_bh(&ar->data_lock);
2295 
2296 	if (peer_key) {
2297 		ath10k_dbg(ar, ATH10K_DBG_MAC,
2298 			   "mac wep key present for peer %pM\n", addr);
2299 		status->flag |= RX_FLAG_DECRYPTED;
2300 	}
2301 }
2302 
2303 static int ath10k_wmi_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb,
2304 					 struct wmi_mgmt_rx_ev_arg *arg)
2305 {
2306 	struct wmi_mgmt_rx_event_v1 *ev_v1;
2307 	struct wmi_mgmt_rx_event_v2 *ev_v2;
2308 	struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
2309 	struct wmi_mgmt_rx_ext_info *ext_info;
2310 	size_t pull_len;
2311 	u32 msdu_len;
2312 	u32 len;
2313 
2314 	if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX,
2315 		     ar->running_fw->fw_file.fw_features)) {
2316 		ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
2317 		ev_hdr = &ev_v2->hdr.v1;
2318 		pull_len = sizeof(*ev_v2);
2319 	} else {
2320 		ev_v1 = (struct wmi_mgmt_rx_event_v1 *)skb->data;
2321 		ev_hdr = &ev_v1->hdr;
2322 		pull_len = sizeof(*ev_v1);
2323 	}
2324 
2325 	if (skb->len < pull_len)
2326 		return -EPROTO;
2327 
2328 	skb_pull(skb, pull_len);
2329 	arg->channel = ev_hdr->channel;
2330 	arg->buf_len = ev_hdr->buf_len;
2331 	arg->status = ev_hdr->status;
2332 	arg->snr = ev_hdr->snr;
2333 	arg->phy_mode = ev_hdr->phy_mode;
2334 	arg->rate = ev_hdr->rate;
2335 
2336 	msdu_len = __le32_to_cpu(arg->buf_len);
2337 	if (skb->len < msdu_len)
2338 		return -EPROTO;
2339 
2340 	if (le32_to_cpu(arg->status) & WMI_RX_STATUS_EXT_INFO) {
2341 		len = ALIGN(le32_to_cpu(arg->buf_len), 4);
2342 		ext_info = (struct wmi_mgmt_rx_ext_info *)(skb->data + len);
2343 		memcpy(&arg->ext_info, ext_info,
2344 		       sizeof(struct wmi_mgmt_rx_ext_info));
2345 	}
2346 	/* The WMI buffer might have ended up padded to 4 bytes because of an
2347 	 * HTC trailer carrying a credit update. Trim the excess garbage.
2348 	 */
2349 	skb_trim(skb, msdu_len);
2350 
2351 	return 0;
2352 }
2353 
2354 static int ath10k_wmi_10_4_op_pull_mgmt_rx_ev(struct ath10k *ar,
2355 					      struct sk_buff *skb,
2356 					      struct wmi_mgmt_rx_ev_arg *arg)
2357 {
2358 	struct wmi_10_4_mgmt_rx_event *ev;
2359 	struct wmi_10_4_mgmt_rx_hdr *ev_hdr;
2360 	size_t pull_len;
2361 	u32 msdu_len;
2362 	struct wmi_mgmt_rx_ext_info *ext_info;
2363 	u32 len;
2364 
2365 	ev = (struct wmi_10_4_mgmt_rx_event *)skb->data;
2366 	ev_hdr = &ev->hdr;
2367 	pull_len = sizeof(*ev);
2368 
2369 	if (skb->len < pull_len)
2370 		return -EPROTO;
2371 
2372 	skb_pull(skb, pull_len);
2373 	arg->channel = ev_hdr->channel;
2374 	arg->buf_len = ev_hdr->buf_len;
2375 	arg->status = ev_hdr->status;
2376 	arg->snr = ev_hdr->snr;
2377 	arg->phy_mode = ev_hdr->phy_mode;
2378 	arg->rate = ev_hdr->rate;
2379 
2380 	msdu_len = __le32_to_cpu(arg->buf_len);
2381 	if (skb->len < msdu_len)
2382 		return -EPROTO;
2383 
2384 	if (le32_to_cpu(arg->status) & WMI_RX_STATUS_EXT_INFO) {
2385 		len = ALIGN(le32_to_cpu(arg->buf_len), 4);
2386 		ext_info = (struct wmi_mgmt_rx_ext_info *)(skb->data + len);
2387 		memcpy(&arg->ext_info, ext_info,
2388 		       sizeof(struct wmi_mgmt_rx_ext_info));
2389 	}
2390 
2391 	/* Make sure bytes added for padding are removed. */
2392 	skb_trim(skb, msdu_len);
2393 
2394 	return 0;
2395 }
2396 
2397 static bool ath10k_wmi_rx_is_decrypted(struct ath10k *ar,
2398 				       struct ieee80211_hdr *hdr)
2399 {
2400 	if (!ieee80211_has_protected(hdr->frame_control))
2401 		return false;
2402 
2403 	/* FW delivers WEP Shared Auth frame with Protected Bit set and
2404 	 * encrypted payload. However in case of PMF it delivers decrypted
2405 	 * frames with Protected Bit set.
2406 	 */
2407 	if (ieee80211_is_auth(hdr->frame_control))
2408 		return false;
2409 
2410 	/* qca99x0 based FW delivers broadcast or multicast management frames
2411 	 * (ex: group privacy action frames in mesh) as encrypted payload.
2412 	 */
2413 	if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) &&
2414 	    ar->hw_params.sw_decrypt_mcast_mgmt)
2415 		return false;
2416 
2417 	return true;
2418 }
2419 
2420 static int
2421 wmi_process_mgmt_tx_comp(struct ath10k *ar, struct mgmt_tx_compl_params *param)
2422 {
2423 	struct ath10k_mgmt_tx_pkt_addr *pkt_addr;
2424 	struct ath10k_wmi *wmi = &ar->wmi;
2425 	struct ieee80211_tx_info *info;
2426 	struct sk_buff *msdu;
2427 	int ret;
2428 
2429 	spin_lock_bh(&ar->data_lock);
2430 
2431 	pkt_addr = idr_find(&wmi->mgmt_pending_tx, param->desc_id);
2432 	if (!pkt_addr) {
2433 		ath10k_warn(ar, "received mgmt tx completion for invalid msdu_id: %d\n",
2434 			    param->desc_id);
2435 		ret = -ENOENT;
2436 		goto out;
2437 	}
2438 
2439 	msdu = pkt_addr->vaddr;
2440 	dma_unmap_single(ar->dev, pkt_addr->paddr,
2441 			 msdu->len, DMA_TO_DEVICE);
2442 	info = IEEE80211_SKB_CB(msdu);
2443 
2444 	if (param->status) {
2445 		info->flags &= ~IEEE80211_TX_STAT_ACK;
2446 	} else {
2447 		info->flags |= IEEE80211_TX_STAT_ACK;
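		/* The reported ack RSSI is an offset from the noise floor;
		 * adding ATH10K_DEFAULT_NOISE_FLOOR converts it to the dBm
		 * value mac80211 expects in ack_signal.
		 */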
2448 		info->status.ack_signal = ATH10K_DEFAULT_NOISE_FLOOR +
2449 					  param->ack_rssi;
2450 		info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
2451 	}
2452 
2453 	ieee80211_tx_status_irqsafe(ar->hw, msdu);
2454 
2455 	ret = 0;
2456 
2457 out:
2458 	idr_remove(&wmi->mgmt_pending_tx, param->desc_id);
2459 	spin_unlock_bh(&ar->data_lock);
2460 	return ret;
2461 }
2462 
2463 int ath10k_wmi_event_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb)
2464 {
2465 	struct wmi_tlv_mgmt_tx_compl_ev_arg arg;
2466 	struct mgmt_tx_compl_params param;
2467 	int ret;
2468 
2469 	ret = ath10k_wmi_pull_mgmt_tx_compl(ar, skb, &arg);
2470 	if (ret) {
2471 		ath10k_warn(ar, "failed to parse mgmt comp event: %d\n", ret);
2472 		return ret;
2473 	}
2474 
2475 	memset(&param, 0, sizeof(struct mgmt_tx_compl_params));
2476 	param.desc_id = __le32_to_cpu(arg.desc_id);
2477 	param.status = __le32_to_cpu(arg.status);
2478 
2479 	if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
2480 		param.ack_rssi = __le32_to_cpu(arg.ack_rssi);
2481 
2482 	wmi_process_mgmt_tx_comp(ar, &param);
2483 
2484 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv event mgmt tx completion\n");
2485 
2486 	return 0;
2487 }
2488 
2489 int ath10k_wmi_event_mgmt_tx_bundle_compl(struct ath10k *ar, struct sk_buff *skb)
2490 {
2491 	struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg arg;
2492 	struct mgmt_tx_compl_params param;
2493 	u32 num_reports;
2494 	int i, ret;
2495 
2496 	ret = ath10k_wmi_pull_mgmt_tx_bundle_compl(ar, skb, &arg);
2497 	if (ret) {
2498 		ath10k_warn(ar, "failed to parse bundle mgmt compl event: %d\n", ret);
2499 		return ret;
2500 	}
2501 
2502 	num_reports = __le32_to_cpu(arg.num_reports);
2503 
2504 	for (i = 0; i < num_reports; i++) {
2505 		memset(&param, 0, sizeof(struct mgmt_tx_compl_params));
2506 		param.desc_id = __le32_to_cpu(arg.desc_ids[i]);
2507 		param.status = __le32_to_cpu(arg.status[i]);
2508 
2509 		if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
2510 			param.ack_rssi = __le32_to_cpu(arg.ack_rssi[i]);
2511 		wmi_process_mgmt_tx_comp(ar, &param);
2512 	}
2513 
2514 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv event bundle mgmt tx completion\n");
2515 
2516 	return 0;
2517 }
2518 
2519 int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
2520 {
2521 	struct wmi_mgmt_rx_ev_arg arg = {};
2522 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2523 	struct ieee80211_hdr *hdr;
2524 	struct ieee80211_supported_band *sband;
2525 	u32 rx_status;
2526 	u32 channel;
2527 	u32 phy_mode;
2528 	u32 snr, rssi;
2529 	u32 rate;
2530 	u16 fc;
2531 	int ret, i;
2532 
2533 	ret = ath10k_wmi_pull_mgmt_rx(ar, skb, &arg);
2534 	if (ret) {
2535 		ath10k_warn(ar, "failed to parse mgmt rx event: %d\n", ret);
2536 		dev_kfree_skb(skb);
2537 		return ret;
2538 	}
2539 
2540 	channel = __le32_to_cpu(arg.channel);
2541 	rx_status = __le32_to_cpu(arg.status);
2542 	snr = __le32_to_cpu(arg.snr);
2543 	phy_mode = __le32_to_cpu(arg.phy_mode);
2544 	rate = __le32_to_cpu(arg.rate);
2545 
2546 	memset(status, 0, sizeof(*status));
2547 
2548 	ath10k_dbg(ar, ATH10K_DBG_MGMT,
2549 		   "event mgmt rx status %08x\n", rx_status);
2550 
2551 	if ((test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) ||
2552 	    (rx_status & (WMI_RX_STATUS_ERR_DECRYPT |
2553 	    WMI_RX_STATUS_ERR_KEY_CACHE_MISS | WMI_RX_STATUS_ERR_CRC))) {
2554 		dev_kfree_skb(skb);
2555 		return 0;
2556 	}
2557 
2558 	if (rx_status & WMI_RX_STATUS_ERR_MIC)
2559 		status->flag |= RX_FLAG_MMIC_ERROR;
2560 
2561 	if (rx_status & WMI_RX_STATUS_EXT_INFO) {
2562 		status->mactime =
2563 			__le64_to_cpu(arg.ext_info.rx_mac_timestamp);
2564 		status->flag |= RX_FLAG_MACTIME_END;
2565 	}
2566 	/* Hardware can Rx CCK rates on 5GHz. In that case phy_mode is set to
2567 	 * MODE_11B. This means phy_mode is not a reliable source for the band
2568 	 * of mgmt rx.
2569 	 */
2570 	if (channel >= 1 && channel <= 14) {
2571 		status->band = NL80211_BAND_2GHZ;
2572 	} else if (channel >= 36 && channel <= ATH10K_MAX_5G_CHAN) {
2573 		status->band = NL80211_BAND_5GHZ;
2574 	} else {
2575 		/* Shouldn't happen unless list of advertised channels to
2576 		 * mac80211 has been changed.
2577 		 */
2578 		WARN_ON_ONCE(1);
2579 		dev_kfree_skb(skb);
2580 		return 0;
2581 	}
2582 
2583 	if (phy_mode == MODE_11B && status->band == NL80211_BAND_5GHZ)
2584 		ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
2585 
2586 	sband = &ar->mac.sbands[status->band];
2587 
2588 	status->freq = ieee80211_channel_to_frequency(channel, status->band);
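	/* Firmware reports SNR in dB above the noise floor; add the default
	 * noise floor to get an absolute signal level in dBm.
	 */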
2589 	status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
2590 
2591 	BUILD_BUG_ON(ARRAY_SIZE(status->chain_signal) != ARRAY_SIZE(arg.rssi));
2592 
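	/* Per-chain RSSI is converted the same way as the SNR above; only
	 * chains with a valid, non-zero report are marked in status->chains.
	 */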
2593 	for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
2594 		status->chains &= ~BIT(i);
2595 		rssi = __le32_to_cpu(arg.rssi[i]);
2596 		ath10k_dbg(ar, ATH10K_DBG_MGMT, "mgmt rssi[%d]:%d\n", i, rssi);
2597 
2598 		if (rssi != ATH10K_INVALID_RSSI && rssi != 0) {
2599 			status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR + rssi;
2600 			status->chains |= BIT(i);
2601 		}
2602 	}
2603 
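	/* The WMI rate is reported in kbit/s (e.g. 6 Mbit/s arrives as 6000);
	 * dividing by 100 gives the 100 kbit/s units used by the mac80211
	 * bitrate tables.
	 */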
2604 	status->rate_idx = ath10k_mac_bitrate_to_idx(sband, rate / 100);
2605 
2606 	hdr = (struct ieee80211_hdr *)skb->data;
2607 	fc = le16_to_cpu(hdr->frame_control);
2608 
2609 	/* Firmware is guaranteed to report all essential management frames via
2610 	 * WMI while it can deliver some extra via HTT. Since there can be
2611 	 * duplicates split the reporting wrt monitor/sniffing.
2612 	 */
2613 	status->flag |= RX_FLAG_SKIP_MONITOR;
2614 
2615 	ath10k_wmi_handle_wep_reauth(ar, skb, status);
2616 
2617 	if (ath10k_wmi_rx_is_decrypted(ar, hdr)) {
2618 		status->flag |= RX_FLAG_DECRYPTED;
2619 
2620 		if (!ieee80211_is_action(hdr->frame_control) &&
2621 		    !ieee80211_is_deauth(hdr->frame_control) &&
2622 		    !ieee80211_is_disassoc(hdr->frame_control)) {
2623 			status->flag |= RX_FLAG_IV_STRIPPED |
2624 					RX_FLAG_MMIC_STRIPPED;
2625 			hdr->frame_control = __cpu_to_le16(fc &
2626 					~IEEE80211_FCTL_PROTECTED);
2627 		}
2628 	}
2629 
2630 	if (ieee80211_is_beacon(hdr->frame_control))
2631 		ath10k_mac_handle_beacon(ar, skb);
2632 
2633 	if (ieee80211_is_beacon(hdr->frame_control) ||
2634 	    ieee80211_is_probe_resp(hdr->frame_control))
2635 		status->boottime_ns = ktime_get_boottime_ns();
2636 
2637 	ath10k_dbg(ar, ATH10K_DBG_MGMT,
2638 		   "event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
2639 		   skb, skb->len,
2640 		   fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
2641 
2642 	ath10k_dbg(ar, ATH10K_DBG_MGMT,
2643 		   "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
2644 		   status->freq, status->band, status->signal,
2645 		   status->rate_idx);
2646 
2647 	ieee80211_rx_ni(ar->hw, skb);
2648 
2649 	return 0;
2650 }
2651 
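/* Map a center frequency to its index in ar->survey[], which is laid out as
 * the concatenation of all channels advertised to mac80211, band by band.
 */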
2652 static int freq_to_idx(struct ath10k *ar, int freq)
2653 {
2654 	struct ieee80211_supported_band *sband;
2655 	int band, ch, idx = 0;
2656 
2657 	for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
2658 		sband = ar->hw->wiphy->bands[band];
2659 		if (!sband)
2660 			continue;
2661 
2662 		for (ch = 0; ch < sband->n_channels; ch++, idx++)
2663 			if (sband->channels[ch].center_freq == freq)
2664 				goto exit;
2665 	}
2666 
2667 exit:
2668 	return idx;
2669 }
2670 
2671 static int ath10k_wmi_op_pull_ch_info_ev(struct ath10k *ar, struct sk_buff *skb,
2672 					 struct wmi_ch_info_ev_arg *arg)
2673 {
2674 	struct wmi_chan_info_event *ev = (void *)skb->data;
2675 
2676 	if (skb->len < sizeof(*ev))
2677 		return -EPROTO;
2678 
2679 	skb_pull(skb, sizeof(*ev));
2680 	arg->err_code = ev->err_code;
2681 	arg->freq = ev->freq;
2682 	arg->cmd_flags = ev->cmd_flags;
2683 	arg->noise_floor = ev->noise_floor;
2684 	arg->rx_clear_count = ev->rx_clear_count;
2685 	arg->cycle_count = ev->cycle_count;
2686 
2687 	return 0;
2688 }
2689 
2690 static int ath10k_wmi_10_4_op_pull_ch_info_ev(struct ath10k *ar,
2691 					      struct sk_buff *skb,
2692 					      struct wmi_ch_info_ev_arg *arg)
2693 {
2694 	struct wmi_10_4_chan_info_event *ev = (void *)skb->data;
2695 
2696 	if (skb->len < sizeof(*ev))
2697 		return -EPROTO;
2698 
2699 	skb_pull(skb, sizeof(*ev));
2700 	arg->err_code = ev->err_code;
2701 	arg->freq = ev->freq;
2702 	arg->cmd_flags = ev->cmd_flags;
2703 	arg->noise_floor = ev->noise_floor;
2704 	arg->rx_clear_count = ev->rx_clear_count;
2705 	arg->cycle_count = ev->cycle_count;
2706 	arg->chan_tx_pwr_range = ev->chan_tx_pwr_range;
2707 	arg->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
2708 	arg->rx_frame_count = ev->rx_frame_count;
2709 
2710 	return 0;
2711 }
2712 
2713 /*
2714  * Handle the channel info event for firmware which only sends one
2715  * chan_info event per scanned channel.
2716  */
2717 static void ath10k_wmi_event_chan_info_unpaired(struct ath10k *ar,
2718 						struct chan_info_params *params)
2719 {
2720 	struct survey_info *survey;
2721 	int idx;
2722 
2723 	if (params->cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) {
2724 		ath10k_dbg(ar, ATH10K_DBG_WMI, "chan info report completed\n");
2725 		return;
2726 	}
2727 
2728 	idx = freq_to_idx(ar, params->freq);
2729 	if (idx >= ARRAY_SIZE(ar->survey)) {
2730 		ath10k_warn(ar, "chan info: invalid frequency %d (idx %d out of bounds)\n",
2731 			    params->freq, idx);
2732 		return;
2733 	}
2734 
2735 	survey = &ar->survey[idx];
2736 
2737 	if (!params->mac_clk_mhz)
2738 		return;
2739 
2740 	memset(survey, 0, sizeof(*survey));
2741 
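	/* cycle_count and rx_clear_count are in MAC clock cycles; dividing by
	 * the MAC clock rate in MHz gives microseconds, and a further /1000
	 * gives the milliseconds the survey API expects (e.g. 200,000,000
	 * cycles at a hypothetical 100 MHz clock is 2000 ms).
	 */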
2742 	survey->noise = params->noise_floor;
2743 	survey->time = (params->cycle_count / params->mac_clk_mhz) / 1000;
2744 	survey->time_busy = (params->rx_clear_count / params->mac_clk_mhz) / 1000;
2745 	survey->filled |= SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME |
2746 			  SURVEY_INFO_TIME_BUSY;
2747 }
2748 
2749 /*
2750  * Handle the channel info event for firmware which sends chan_info
2751  * event in pairs (start and stop events) for every scanned channel.
2752  */
2753 static void ath10k_wmi_event_chan_info_paired(struct ath10k *ar,
2754 					      struct chan_info_params *params)
2755 {
2756 	struct survey_info *survey;
2757 	int idx;
2758 
2759 	idx = freq_to_idx(ar, params->freq);
2760 	if (idx >= ARRAY_SIZE(ar->survey)) {
2761 		ath10k_warn(ar, "chan info: invalid frequency %d (idx %d out of bounds)\n",
2762 			    params->freq, idx);
2763 		return;
2764 	}
2765 
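	/* The first event of a pair snapshots the counters below; the
	 * COMPLETE event that follows fills the survey from the delta
	 * between that snapshot and the current counters.
	 */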
2766 	if (params->cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) {
2767 		if (ar->ch_info_can_report_survey) {
2768 			survey = &ar->survey[idx];
2769 			survey->noise = params->noise_floor;
2770 			survey->filled = SURVEY_INFO_NOISE_DBM;
2771 
2772 			ath10k_hw_fill_survey_time(ar,
2773 						   survey,
2774 						   params->cycle_count,
2775 						   params->rx_clear_count,
2776 						   ar->survey_last_cycle_count,
2777 						   ar->survey_last_rx_clear_count);
2778 		}
2779 
2780 		ar->ch_info_can_report_survey = false;
2781 	} else {
2782 		ar->ch_info_can_report_survey = true;
2783 	}
2784 
2785 	if (!(params->cmd_flags & WMI_CHAN_INFO_FLAG_PRE_COMPLETE)) {
2786 		ar->survey_last_rx_clear_count = params->rx_clear_count;
2787 		ar->survey_last_cycle_count = params->cycle_count;
2788 	}
2789 }
2790 
2791 void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
2792 {
2793 	struct chan_info_params ch_info_param;
2794 	struct wmi_ch_info_ev_arg arg = {};
2795 	int ret;
2796 
2797 	ret = ath10k_wmi_pull_ch_info(ar, skb, &arg);
2798 	if (ret) {
2799 		ath10k_warn(ar, "failed to parse chan info event: %d\n", ret);
2800 		return;
2801 	}
2802 
2803 	ch_info_param.err_code = __le32_to_cpu(arg.err_code);
2804 	ch_info_param.freq = __le32_to_cpu(arg.freq);
2805 	ch_info_param.cmd_flags = __le32_to_cpu(arg.cmd_flags);
2806 	ch_info_param.noise_floor = __le32_to_cpu(arg.noise_floor);
2807 	ch_info_param.rx_clear_count = __le32_to_cpu(arg.rx_clear_count);
2808 	ch_info_param.cycle_count = __le32_to_cpu(arg.cycle_count);
2809 	ch_info_param.mac_clk_mhz = __le32_to_cpu(arg.mac_clk_mhz);
2810 
2811 	ath10k_dbg(ar, ATH10K_DBG_WMI,
2812 		   "chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n",
2813 		   ch_info_param.err_code, ch_info_param.freq, ch_info_param.cmd_flags,
2814 		   ch_info_param.noise_floor, ch_info_param.rx_clear_count,
2815 		   ch_info_param.cycle_count);
2816 
2817 	spin_lock_bh(&ar->data_lock);
2818 
2819 	switch (ar->scan.state) {
2820 	case ATH10K_SCAN_IDLE:
2821 	case ATH10K_SCAN_STARTING:
2822 		ath10k_dbg(ar, ATH10K_DBG_WMI, "received chan info event without a scan request, ignoring\n");
2823 		goto exit;
2824 	case ATH10K_SCAN_RUNNING:
2825 	case ATH10K_SCAN_ABORTING:
2826 		break;
2827 	}
2828 
2829 	if (test_bit(ATH10K_FW_FEATURE_SINGLE_CHAN_INFO_PER_CHANNEL,
2830 		     ar->running_fw->fw_file.fw_features))
2831 		ath10k_wmi_event_chan_info_unpaired(ar, &ch_info_param);
2832 	else
2833 		ath10k_wmi_event_chan_info_paired(ar, &ch_info_param);
2834 
2835 exit:
2836 	spin_unlock_bh(&ar->data_lock);
2837 }
2838 
2839 void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
2840 {
2841 	struct wmi_echo_ev_arg arg = {};
2842 	int ret;
2843 
2844 	ret = ath10k_wmi_pull_echo_ev(ar, skb, &arg);
2845 	if (ret) {
2846 		ath10k_warn(ar, "failed to parse echo: %d\n", ret);
2847 		return;
2848 	}
2849 
2850 	ath10k_dbg(ar, ATH10K_DBG_WMI,
2851 		   "wmi event echo value 0x%08x\n",
2852 		   le32_to_cpu(arg.value));
2853 
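	/* The echo event doubles as a command barrier: a reply carrying the
	 * magic ATH10K_WMI_BARRIER_ECHO_ID wakes up anyone waiting on
	 * ar->wmi.barrier.
	 */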
2854 	if (le32_to_cpu(arg.value) == ATH10K_WMI_BARRIER_ECHO_ID)
2855 		complete(&ar->wmi.barrier);
2856 }
2857 
2858 int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
2859 {
2860 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event debug mesg len %d\n",
2861 		   skb->len);
2862 
2863 	trace_ath10k_wmi_dbglog(ar, skb->data, skb->len);
2864 
2865 	return 0;
2866 }
2867 
2868 void ath10k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src,
2869 				     struct ath10k_fw_stats_pdev *dst)
2870 {
2871 	dst->ch_noise_floor = __le32_to_cpu(src->chan_nf);
2872 	dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count);
2873 	dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count);
2874 	dst->rx_clear_count = __le32_to_cpu(src->rx_clear_count);
2875 	dst->cycle_count = __le32_to_cpu(src->cycle_count);
2876 	dst->phy_err_count = __le32_to_cpu(src->phy_err_count);
2877 	dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr);
2878 }
2879 
2880 void ath10k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
2881 				   struct ath10k_fw_stats_pdev *dst)
2882 {
2883 	dst->comp_queued = __le32_to_cpu(src->comp_queued);
2884 	dst->comp_delivered = __le32_to_cpu(src->comp_delivered);
2885 	dst->msdu_enqued = __le32_to_cpu(src->msdu_enqued);
2886 	dst->mpdu_enqued = __le32_to_cpu(src->mpdu_enqued);
2887 	dst->wmm_drop = __le32_to_cpu(src->wmm_drop);
2888 	dst->local_enqued = __le32_to_cpu(src->local_enqued);
2889 	dst->local_freed = __le32_to_cpu(src->local_freed);
2890 	dst->hw_queued = __le32_to_cpu(src->hw_queued);
2891 	dst->hw_reaped = __le32_to_cpu(src->hw_reaped);
2892 	dst->underrun = __le32_to_cpu(src->underrun);
2893 	dst->tx_abort = __le32_to_cpu(src->tx_abort);
2894 	dst->mpdus_requeued = __le32_to_cpu(src->mpdus_requeued);
2895 	dst->tx_ko = __le32_to_cpu(src->tx_ko);
2896 	dst->data_rc = __le32_to_cpu(src->data_rc);
2897 	dst->self_triggers = __le32_to_cpu(src->self_triggers);
2898 	dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure);
2899 	dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err);
2900 	dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry);
2901 	dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout);
2902 	dst->pdev_resets = __le32_to_cpu(src->pdev_resets);
2903 	dst->phy_underrun = __le32_to_cpu(src->phy_underrun);
2904 	dst->txop_ovf = __le32_to_cpu(src->txop_ovf);
2905 }
2906 
2907 static void
2908 ath10k_wmi_10_4_pull_pdev_stats_tx(const struct wmi_10_4_pdev_stats_tx *src,
2909 				   struct ath10k_fw_stats_pdev *dst)
2910 {
2911 	dst->comp_queued = __le32_to_cpu(src->comp_queued);
2912 	dst->comp_delivered = __le32_to_cpu(src->comp_delivered);
2913 	dst->msdu_enqued = __le32_to_cpu(src->msdu_enqued);
2914 	dst->mpdu_enqued = __le32_to_cpu(src->mpdu_enqued);
2915 	dst->wmm_drop = __le32_to_cpu(src->wmm_drop);
2916 	dst->local_enqued = __le32_to_cpu(src->local_enqued);
2917 	dst->local_freed = __le32_to_cpu(src->local_freed);
2918 	dst->hw_queued = __le32_to_cpu(src->hw_queued);
2919 	dst->hw_reaped = __le32_to_cpu(src->hw_reaped);
2920 	dst->underrun = __le32_to_cpu(src->underrun);
2921 	dst->tx_abort = __le32_to_cpu(src->tx_abort);
2922 	dst->mpdus_requeued = __le32_to_cpu(src->mpdus_requeued);
2923 	dst->tx_ko = __le32_to_cpu(src->tx_ko);
2924 	dst->data_rc = __le32_to_cpu(src->data_rc);
2925 	dst->self_triggers = __le32_to_cpu(src->self_triggers);
2926 	dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure);
2927 	dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err);
2928 	dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry);
2929 	dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout);
2930 	dst->pdev_resets = __le32_to_cpu(src->pdev_resets);
2931 	dst->phy_underrun = __le32_to_cpu(src->phy_underrun);
2932 	dst->txop_ovf = __le32_to_cpu(src->txop_ovf);
2933 	dst->hw_paused = __le32_to_cpu(src->hw_paused);
2934 	dst->seq_posted = __le32_to_cpu(src->seq_posted);
2935 	dst->seq_failed_queueing =
2936 		__le32_to_cpu(src->seq_failed_queueing);
2937 	dst->seq_completed = __le32_to_cpu(src->seq_completed);
2938 	dst->seq_restarted = __le32_to_cpu(src->seq_restarted);
2939 	dst->mu_seq_posted = __le32_to_cpu(src->mu_seq_posted);
2940 	dst->mpdus_sw_flush = __le32_to_cpu(src->mpdus_sw_flush);
2941 	dst->mpdus_hw_filter = __le32_to_cpu(src->mpdus_hw_filter);
2942 	dst->mpdus_truncated = __le32_to_cpu(src->mpdus_truncated);
2943 	dst->mpdus_ack_failed = __le32_to_cpu(src->mpdus_ack_failed);
2945 	dst->mpdus_expired = __le32_to_cpu(src->mpdus_expired);
2946 }
2947 
2948 void ath10k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
2949 				   struct ath10k_fw_stats_pdev *dst)
2950 {
2951 	dst->mid_ppdu_route_change = __le32_to_cpu(src->mid_ppdu_route_change);
2952 	dst->status_rcvd = __le32_to_cpu(src->status_rcvd);
2953 	dst->r0_frags = __le32_to_cpu(src->r0_frags);
2954 	dst->r1_frags = __le32_to_cpu(src->r1_frags);
2955 	dst->r2_frags = __le32_to_cpu(src->r2_frags);
2956 	dst->r3_frags = __le32_to_cpu(src->r3_frags);
2957 	dst->htt_msdus = __le32_to_cpu(src->htt_msdus);
2958 	dst->htt_mpdus = __le32_to_cpu(src->htt_mpdus);
2959 	dst->loc_msdus = __le32_to_cpu(src->loc_msdus);
2960 	dst->loc_mpdus = __le32_to_cpu(src->loc_mpdus);
2961 	dst->oversize_amsdu = __le32_to_cpu(src->oversize_amsdu);
2962 	dst->phy_errs = __le32_to_cpu(src->phy_errs);
2963 	dst->phy_err_drop = __le32_to_cpu(src->phy_err_drop);
2964 	dst->mpdu_errs = __le32_to_cpu(src->mpdu_errs);
2965 }
2966 
2967 void ath10k_wmi_pull_pdev_stats_extra(const struct wmi_pdev_stats_extra *src,
2968 				      struct ath10k_fw_stats_pdev *dst)
2969 {
2970 	dst->ack_rx_bad = __le32_to_cpu(src->ack_rx_bad);
2971 	dst->rts_bad = __le32_to_cpu(src->rts_bad);
2972 	dst->rts_good = __le32_to_cpu(src->rts_good);
2973 	dst->fcs_bad = __le32_to_cpu(src->fcs_bad);
2974 	dst->no_beacons = __le32_to_cpu(src->no_beacons);
2975 	dst->mib_int_count = __le32_to_cpu(src->mib_int_count);
2976 }
2977 
2978 void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src,
2979 				struct ath10k_fw_stats_peer *dst)
2980 {
2981 	ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
2982 	dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
2983 	dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
2984 }
2985 
2986 static void
2987 ath10k_wmi_10_4_pull_peer_stats(const struct wmi_10_4_peer_stats *src,
2988 				struct ath10k_fw_stats_peer *dst)
2989 {
2990 	ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
2991 	dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
2992 	dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
2993 	dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
2994 }
2995 
2996 static void
2997 ath10k_wmi_10_4_pull_vdev_stats(const struct wmi_vdev_stats_extd *src,
2998 				struct ath10k_fw_stats_vdev_extd *dst)
2999 {
3000 	dst->vdev_id = __le32_to_cpu(src->vdev_id);
3001 	dst->ppdu_aggr_cnt = __le32_to_cpu(src->ppdu_aggr_cnt);
3002 	dst->ppdu_noack = __le32_to_cpu(src->ppdu_noack);
3003 	dst->mpdu_queued = __le32_to_cpu(src->mpdu_queued);
3004 	dst->ppdu_nonaggr_cnt = __le32_to_cpu(src->ppdu_nonaggr_cnt);
3005 	dst->mpdu_sw_requeued = __le32_to_cpu(src->mpdu_sw_requeued);
3006 	dst->mpdu_suc_retry = __le32_to_cpu(src->mpdu_suc_retry);
3007 	dst->mpdu_suc_multitry = __le32_to_cpu(src->mpdu_suc_multitry);
3008 	dst->mpdu_fail_retry = __le32_to_cpu(src->mpdu_fail_retry);
3009 	dst->tx_ftm_suc = __le32_to_cpu(src->tx_ftm_suc);
3010 	dst->tx_ftm_suc_retry = __le32_to_cpu(src->tx_ftm_suc_retry);
3011 	dst->tx_ftm_fail = __le32_to_cpu(src->tx_ftm_fail);
3012 	dst->rx_ftmr_cnt = __le32_to_cpu(src->rx_ftmr_cnt);
3013 	dst->rx_ftmr_dup_cnt = __le32_to_cpu(src->rx_ftmr_dup_cnt);
3014 	dst->rx_iftmr_cnt = __le32_to_cpu(src->rx_iftmr_cnt);
3015 	dst->rx_iftmr_dup_cnt = __le32_to_cpu(src->rx_iftmr_dup_cnt);
3016 }
3017 
3018 static int ath10k_wmi_main_op_pull_fw_stats(struct ath10k *ar,
3019 					    struct sk_buff *skb,
3020 					    struct ath10k_fw_stats *stats)
3021 {
3022 	const struct wmi_stats_event *ev = (void *)skb->data;
3023 	u32 num_pdev_stats, num_peer_stats;
3024 	int i;
3025 
3026 	if (!skb_pull(skb, sizeof(*ev)))
3027 		return -EPROTO;
3028 
3029 	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
3030 	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
3031 
3032 	for (i = 0; i < num_pdev_stats; i++) {
3033 		const struct wmi_pdev_stats *src;
3034 		struct ath10k_fw_stats_pdev *dst;
3035 
3036 		src = (void *)skb->data;
3037 		if (!skb_pull(skb, sizeof(*src)))
3038 			return -EPROTO;
3039 
3040 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3041 		if (!dst)
3042 			continue;
3043 
3044 		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
3045 		ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
3046 		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
3047 
3048 		list_add_tail(&dst->list, &stats->pdevs);
3049 	}
3050 
3051 	/* fw doesn't implement vdev stats */
3052 
3053 	for (i = 0; i < num_peer_stats; i++) {
3054 		const struct wmi_peer_stats *src;
3055 		struct ath10k_fw_stats_peer *dst;
3056 
3057 		src = (void *)skb->data;
3058 		if (!skb_pull(skb, sizeof(*src)))
3059 			return -EPROTO;
3060 
3061 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3062 		if (!dst)
3063 			continue;
3064 
3065 		ath10k_wmi_pull_peer_stats(src, dst);
3066 		list_add_tail(&dst->list, &stats->peers);
3067 	}
3068 
3069 	return 0;
3070 }
3071 
3072 static int ath10k_wmi_10x_op_pull_fw_stats(struct ath10k *ar,
3073 					   struct sk_buff *skb,
3074 					   struct ath10k_fw_stats *stats)
3075 {
3076 	const struct wmi_stats_event *ev = (void *)skb->data;
3077 	u32 num_pdev_stats, num_peer_stats;
3078 	int i;
3079 
3080 	if (!skb_pull(skb, sizeof(*ev)))
3081 		return -EPROTO;
3082 
3083 	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
3084 	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
3085 
3086 	for (i = 0; i < num_pdev_stats; i++) {
3087 		const struct wmi_10x_pdev_stats *src;
3088 		struct ath10k_fw_stats_pdev *dst;
3089 
3090 		src = (void *)skb->data;
3091 		if (!skb_pull(skb, sizeof(*src)))
3092 			return -EPROTO;
3093 
3094 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3095 		if (!dst)
3096 			continue;
3097 
3098 		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
3099 		ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
3100 		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
3101 		ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
3102 
3103 		list_add_tail(&dst->list, &stats->pdevs);
3104 	}
3105 
3106 	/* fw doesn't implement vdev stats */
3107 
3108 	for (i = 0; i < num_peer_stats; i++) {
3109 		const struct wmi_10x_peer_stats *src;
3110 		struct ath10k_fw_stats_peer *dst;
3111 
3112 		src = (void *)skb->data;
3113 		if (!skb_pull(skb, sizeof(*src)))
3114 			return -EPROTO;
3115 
3116 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3117 		if (!dst)
3118 			continue;
3119 
3120 		ath10k_wmi_pull_peer_stats(&src->old, dst);
3121 
3122 		dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
3123 
3124 		list_add_tail(&dst->list, &stats->peers);
3125 	}
3126 
3127 	return 0;
3128 }
3129 
3130 static int ath10k_wmi_10_2_op_pull_fw_stats(struct ath10k *ar,
3131 					    struct sk_buff *skb,
3132 					    struct ath10k_fw_stats *stats)
3133 {
3134 	const struct wmi_10_2_stats_event *ev = (void *)skb->data;
3135 	u32 num_pdev_stats;
3136 	u32 num_pdev_ext_stats;
3137 	u32 num_peer_stats;
3138 	int i;
3139 
3140 	if (!skb_pull(skb, sizeof(*ev)))
3141 		return -EPROTO;
3142 
3143 	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
3144 	num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
3145 	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
3146 
3147 	for (i = 0; i < num_pdev_stats; i++) {
3148 		const struct wmi_10_2_pdev_stats *src;
3149 		struct ath10k_fw_stats_pdev *dst;
3150 
3151 		src = (void *)skb->data;
3152 		if (!skb_pull(skb, sizeof(*src)))
3153 			return -EPROTO;
3154 
3155 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3156 		if (!dst)
3157 			continue;
3158 
3159 		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
3160 		ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
3161 		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
3162 		ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
3163 		/* FIXME: expose 10.2 specific values */
3164 
3165 		list_add_tail(&dst->list, &stats->pdevs);
3166 	}
3167 
3168 	for (i = 0; i < num_pdev_ext_stats; i++) {
3169 		const struct wmi_10_2_pdev_ext_stats *src;
3170 
3171 		src = (void *)skb->data;
3172 		if (!skb_pull(skb, sizeof(*src)))
3173 			return -EPROTO;
3174 
3175 		/* FIXME: expose values to userspace
3176 		 *
3177 		 * Note: Even though this loop seems to do nothing it is
3178 		 * required to parse following sub-structures properly.
3179 		 */
3180 	}
3181 
3182 	/* fw doesn't implement vdev stats */
3183 
3184 	for (i = 0; i < num_peer_stats; i++) {
3185 		const struct wmi_10_2_peer_stats *src;
3186 		struct ath10k_fw_stats_peer *dst;
3187 
3188 		src = (void *)skb->data;
3189 		if (!skb_pull(skb, sizeof(*src)))
3190 			return -EPROTO;
3191 
3192 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3193 		if (!dst)
3194 			continue;
3195 
3196 		ath10k_wmi_pull_peer_stats(&src->old, dst);
3197 
3198 		dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
3199 		/* FIXME: expose 10.2 specific values */
3200 
3201 		list_add_tail(&dst->list, &stats->peers);
3202 	}
3203 
3204 	return 0;
3205 }
3206 
3207 static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar,
3208 					      struct sk_buff *skb,
3209 					      struct ath10k_fw_stats *stats)
3210 {
3211 	const struct wmi_10_2_stats_event *ev = (void *)skb->data;
3212 	u32 num_pdev_stats;
3213 	u32 num_pdev_ext_stats;
3214 	u32 num_peer_stats;
3215 	int i;
3216 
3217 	if (!skb_pull(skb, sizeof(*ev)))
3218 		return -EPROTO;
3219 
3220 	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
3221 	num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
3222 	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
3223 
3224 	for (i = 0; i < num_pdev_stats; i++) {
3225 		const struct wmi_10_2_pdev_stats *src;
3226 		struct ath10k_fw_stats_pdev *dst;
3227 
3228 		src = (void *)skb->data;
3229 		if (!skb_pull(skb, sizeof(*src)))
3230 			return -EPROTO;
3231 
3232 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3233 		if (!dst)
3234 			continue;
3235 
3236 		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
3237 		ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
3238 		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
3239 		ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
3240 		/* FIXME: expose 10.2 specific values */
3241 
3242 		list_add_tail(&dst->list, &stats->pdevs);
3243 	}
3244 
3245 	for (i = 0; i < num_pdev_ext_stats; i++) {
3246 		const struct wmi_10_2_pdev_ext_stats *src;
3247 
3248 		src = (void *)skb->data;
3249 		if (!skb_pull(skb, sizeof(*src)))
3250 			return -EPROTO;
3251 
3252 		/* FIXME: expose values to userspace
3253 		 *
3254 		 * Note: Even though this loop seems to do nothing it is
3255 		 * required to parse following sub-structures properly.
3256 		 */
3257 	}
3258 
3259 	/* fw doesn't implement vdev stats */
3260 
3261 	for (i = 0; i < num_peer_stats; i++) {
3262 		const struct wmi_10_2_4_ext_peer_stats *src;
3263 		struct ath10k_fw_stats_peer *dst;
3264 		int stats_len;
3265 
3266 		if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
3267 			stats_len = sizeof(struct wmi_10_2_4_ext_peer_stats);
3268 		else
3269 			stats_len = sizeof(struct wmi_10_2_4_peer_stats);
3270 
3271 		src = (void *)skb->data;
3272 		if (!skb_pull(skb, stats_len))
3273 			return -EPROTO;
3274 
3275 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3276 		if (!dst)
3277 			continue;
3278 
3279 		ath10k_wmi_pull_peer_stats(&src->common.old, dst);
3280 
3281 		dst->peer_rx_rate = __le32_to_cpu(src->common.peer_rx_rate);
3282 
3283 		if (ath10k_peer_stats_enabled(ar))
3284 			dst->rx_duration = __le32_to_cpu(src->rx_duration);
3285 		/* FIXME: expose 10.2 specific values */
3286 
3287 		list_add_tail(&dst->list, &stats->peers);
3288 	}
3289 
3290 	return 0;
3291 }
3292 
3293 static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar,
3294 					    struct sk_buff *skb,
3295 					    struct ath10k_fw_stats *stats)
3296 {
3297 	const struct wmi_10_2_stats_event *ev = (void *)skb->data;
3298 	u32 num_pdev_stats;
3299 	u32 num_pdev_ext_stats;
3300 	u32 num_vdev_stats;
3301 	u32 num_peer_stats;
3302 	u32 num_bcnflt_stats;
3303 	u32 stats_id;
3304 	int i;
3305 
3306 	if (!skb_pull(skb, sizeof(*ev)))
3307 		return -EPROTO;
3308 
3309 	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
3310 	num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
3311 	num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
3312 	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
3313 	num_bcnflt_stats = __le32_to_cpu(ev->num_bcnflt_stats);
3314 	stats_id = __le32_to_cpu(ev->stats_id);
3315 
3316 	for (i = 0; i < num_pdev_stats; i++) {
3317 		const struct wmi_10_4_pdev_stats *src;
3318 		struct ath10k_fw_stats_pdev *dst;
3319 
3320 		src = (void *)skb->data;
3321 		if (!skb_pull(skb, sizeof(*src)))
3322 			return -EPROTO;
3323 
3324 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3325 		if (!dst)
3326 			continue;
3327 
3328 		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
3329 		ath10k_wmi_10_4_pull_pdev_stats_tx(&src->tx, dst);
3330 		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
3331 		dst->rx_ovfl_errs = __le32_to_cpu(src->rx_ovfl_errs);
3332 		ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
3333 
3334 		list_add_tail(&dst->list, &stats->pdevs);
3335 	}
3336 
3337 	for (i = 0; i < num_pdev_ext_stats; i++) {
3338 		const struct wmi_10_2_pdev_ext_stats *src;
3339 
3340 		src = (void *)skb->data;
3341 		if (!skb_pull(skb, sizeof(*src)))
3342 			return -EPROTO;
3343 
3344 		/* FIXME: expose values to userspace
3345 		 *
3346 		 * Note: Even though this loop seems to do nothing it is
3347 		 * required to parse following sub-structures properly.
3348 		 */
3349 	}
3350 
3351 	for (i = 0; i < num_vdev_stats; i++) {
3352 		const struct wmi_vdev_stats *src;
3353 
3354 		/* Ignore vdev stats here as it has only vdev id. Actual vdev
3355 		 * stats will be retrieved from vdev extended stats.
3356 		 */
3357 		src = (void *)skb->data;
3358 		if (!skb_pull(skb, sizeof(*src)))
3359 			return -EPROTO;
3360 	}
3361 
3362 	for (i = 0; i < num_peer_stats; i++) {
3363 		const struct wmi_10_4_peer_stats *src;
3364 		struct ath10k_fw_stats_peer *dst;
3365 
3366 		src = (void *)skb->data;
3367 		if (!skb_pull(skb, sizeof(*src)))
3368 			return -EPROTO;
3369 
3370 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3371 		if (!dst)
3372 			continue;
3373 
3374 		ath10k_wmi_10_4_pull_peer_stats(src, dst);
3375 		list_add_tail(&dst->list, &stats->peers);
3376 	}
3377 
3378 	for (i = 0; i < num_bcnflt_stats; i++) {
3379 		const struct wmi_10_4_bss_bcn_filter_stats *src;
3380 
3381 		src = (void *)skb->data;
3382 		if (!skb_pull(skb, sizeof(*src)))
3383 			return -EPROTO;
3384 
3385 		/* FIXME: expose values to userspace
3386 		 *
3387 		 * Note: Even though this loop seems to do nothing it is
3388 		 * required to parse following sub-structures properly.
3389 		 */
3390 	}
3391 
3392 	if (stats_id & WMI_10_4_STAT_PEER_EXTD) {
3393 		stats->extended = true;
3394 
3395 		for (i = 0; i < num_peer_stats; i++) {
3396 			const struct wmi_10_4_peer_extd_stats *src;
3397 			struct ath10k_fw_extd_stats_peer *dst;
3398 
3399 			src = (void *)skb->data;
3400 			if (!skb_pull(skb, sizeof(*src)))
3401 				return -EPROTO;
3402 
3403 			dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3404 			if (!dst)
3405 				continue;
3406 
3407 			ether_addr_copy(dst->peer_macaddr,
3408 					src->peer_macaddr.addr);
3409 			dst->rx_duration = __le32_to_cpu(src->rx_duration);
3410 			list_add_tail(&dst->list, &stats->peers_extd);
3411 		}
3412 	}
3413 
3414 	if (stats_id & WMI_10_4_STAT_VDEV_EXTD) {
3415 		for (i = 0; i < num_vdev_stats; i++) {
3416 			const struct wmi_vdev_stats_extd *src;
3417 			struct ath10k_fw_stats_vdev_extd *dst;
3418 
3419 			src = (void *)skb->data;
3420 			if (!skb_pull(skb, sizeof(*src)))
3421 				return -EPROTO;
3422 
3423 			dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3424 			if (!dst)
3425 				continue;
3426 			ath10k_wmi_10_4_pull_vdev_stats(src, dst);
3427 			list_add_tail(&dst->list, &stats->vdevs);
3428 		}
3429 	}
3430 
3431 	return 0;
3432 }
3433 
3434 void ath10k_wmi_event_update_stats(struct ath10k *ar, struct sk_buff *skb)
3435 {
3436 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n");
3437 	ath10k_debug_fw_stats_process(ar, skb);
3438 }
3439 
3440 static int
3441 ath10k_wmi_op_pull_vdev_start_ev(struct ath10k *ar, struct sk_buff *skb,
3442 				 struct wmi_vdev_start_ev_arg *arg)
3443 {
3444 	struct wmi_vdev_start_response_event *ev = (void *)skb->data;
3445 
3446 	if (skb->len < sizeof(*ev))
3447 		return -EPROTO;
3448 
3449 	skb_pull(skb, sizeof(*ev));
3450 	arg->vdev_id = ev->vdev_id;
3451 	arg->req_id = ev->req_id;
3452 	arg->resp_type = ev->resp_type;
3453 	arg->status = ev->status;
3454 
3455 	return 0;
3456 }
3457 
3458 void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, struct sk_buff *skb)
3459 {
3460 	struct wmi_vdev_start_ev_arg arg = {};
3461 	int ret;
3462 	u32 status;
3463 
3464 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n");
3465 
3466 	ar->last_wmi_vdev_start_status = 0;
3467 
3468 	ret = ath10k_wmi_pull_vdev_start(ar, skb, &arg);
3469 	if (ret) {
3470 		ath10k_warn(ar, "failed to parse vdev start event: %d\n", ret);
3471 		ar->last_wmi_vdev_start_status = ret;
3472 		goto out;
3473 	}
3474 
3475 	status = __le32_to_cpu(arg.status);
3476 	if (WARN_ON_ONCE(status)) {
3477 		ath10k_warn(ar, "vdev-start-response reports status error: %d (%s)\n",
3478 			    status, (status == WMI_VDEV_START_CHAN_INVALID) ?
3479 			    "chan-invalid" : "unknown");
3480 		/* Setup is done one way or another though, so we should still
3481 		 * do the completion, so don't return here.
3482 		 */
3483 		ar->last_wmi_vdev_start_status = -EINVAL;
3484 	}
3485 
3486 out:
3487 	complete(&ar->vdev_setup_done);
3488 }
3489 
3490 void ath10k_wmi_event_vdev_stopped(struct ath10k *ar, struct sk_buff *skb)
3491 {
3492 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n");
3493 	complete(&ar->vdev_setup_done);
3494 }
3495 
3496 static int
3497 ath10k_wmi_op_pull_peer_kick_ev(struct ath10k *ar, struct sk_buff *skb,
3498 				struct wmi_peer_kick_ev_arg *arg)
3499 {
3500 	struct wmi_peer_sta_kickout_event *ev = (void *)skb->data;
3501 
3502 	if (skb->len < sizeof(*ev))
3503 		return -EPROTO;
3504 
3505 	skb_pull(skb, sizeof(*ev));
3506 	arg->mac_addr = ev->peer_macaddr.addr;
3507 
3508 	return 0;
3509 }
3510 
3511 void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, struct sk_buff *skb)
3512 {
3513 	struct wmi_peer_kick_ev_arg arg = {};
3514 	struct ieee80211_sta *sta;
3515 	int ret;
3516 
3517 	ret = ath10k_wmi_pull_peer_kick(ar, skb, &arg);
3518 	if (ret) {
3519 		ath10k_warn(ar, "failed to parse peer kickout event: %d\n",
3520 			    ret);
3521 		return;
3522 	}
3523 
3524 	ath10k_dbg(ar, ATH10K_DBG_STA, "wmi event peer sta kickout %pM\n",
3525 		   arg.mac_addr);
3526 
3527 	rcu_read_lock();
3528 
3529 	sta = ieee80211_find_sta_by_ifaddr(ar->hw, arg.mac_addr, NULL);
3530 	if (!sta) {
3531 		ath10k_warn(ar, "Spurious quick kickout for STA %pM\n",
3532 			    arg.mac_addr);
3533 		goto exit;
3534 	}
3535 
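	/* The count passed here is nominal; reporting lost frames simply
	 * pushes a low-ack/packet-loss notification up the stack so the
	 * unresponsive station can be torn down.
	 */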
3536 	ieee80211_report_low_ack(sta, 10);
3537 
3538 exit:
3539 	rcu_read_unlock();
3540 }
3541 
3542 /*
3543  * FIXME
3544  *
3545  * We don't report to mac80211 sleep state of connected
3546  * stations. Due to this mac80211 can't fill in TIM IE
3547  * correctly.
3548  *
3549  * I know of no way of getting nullfunc frames that contain
3550  * sleep transition from connected stations - these do not
3551  * seem to be sent from the target to the host. There also
3552  * doesn't seem to be a dedicated event for that. So the
3553  * only way left to do this would be to read tim_bitmap
3554  * during SWBA.
3555  *
3556  * We could probably try using tim_bitmap from SWBA to tell
3557  * mac80211 which stations are asleep and which are not. The
3558  * problem here is calling mac80211 functions so many times
3559  * could take too long and make us miss the time to submit
3560  * the beacon to the target.
3561  *
3562  * So as a workaround we try to extend the TIM IE if there
3563  * is unicast buffered for stations with aid > 7 and fill it
3564  * in ourselves.
3565  */
3566 static void ath10k_wmi_update_tim(struct ath10k *ar,
3567 				  struct ath10k_vif *arvif,
3568 				  struct sk_buff *bcn,
3569 				  const struct wmi_tim_info_arg *tim_info)
3570 {
3571 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data;
3572 	struct ieee80211_tim_ie *tim;
3573 	u8 *ies, *ie;
3574 	u8 ie_len, pvm_len;
3575 	__le32 t;
3576 	u32 v, tim_len;
3577 
3578 	/* When FW reports 0 in tim_len, ensure at least first byte
3579 	 * in tim_bitmap is considered for pvm calculation.
3580 	 */
3581 	tim_len = tim_info->tim_len ? __le32_to_cpu(tim_info->tim_len) : 1;
3582 
3583 	/* if next SWBA has no tim_changed the tim_bitmap is garbage.
3584 	 * we must copy the bitmap upon change and reuse it later
3585 	 */
3586 	if (__le32_to_cpu(tim_info->tim_changed)) {
3587 		int i;
3588 
3589 		if (sizeof(arvif->u.ap.tim_bitmap) < tim_len) {
3590 			ath10k_warn(ar, "SWBA TIM field is too big (%u), truncated it to %zu",
3591 				    tim_len, sizeof(arvif->u.ap.tim_bitmap));
3592 			tim_len = sizeof(arvif->u.ap.tim_bitmap);
3593 		}
3594 
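		/* The firmware TIM bitmap arrives as __le32 words; unpack it
		 * into bytes, least-significant octet first.
		 */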
3595 		for (i = 0; i < tim_len; i++) {
3596 			t = tim_info->tim_bitmap[i / 4];
3597 			v = __le32_to_cpu(t);
3598 			arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF;
3599 		}
3600 
3601 		/* FW reports either length 0 or a length based on the max
3602 		 * supported stations, so we calculate this on our own.
3603 		 */
3604 		arvif->u.ap.tim_len = 0;
3605 		for (i = 0; i < tim_len; i++)
3606 			if (arvif->u.ap.tim_bitmap[i])
3607 				arvif->u.ap.tim_len = i;
3608 
3609 		arvif->u.ap.tim_len++;
3610 	}
3611 
3612 	ies = bcn->data;
3613 	ies += ieee80211_hdrlen(hdr->frame_control);
3614 	ies += 12; /* fixed parameters */
3615 
3616 	ie = (u8 *)cfg80211_find_ie(WLAN_EID_TIM, ies,
3617 				    (u8 *)skb_tail_pointer(bcn) - ies);
3618 	if (!ie) {
3619 		if (arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
3620 			ath10k_warn(ar, "no tim ie found\n");
3621 		return;
3622 	}
3623 
3624 	tim = (void *)ie + 2;
3625 	ie_len = ie[1];
3626 	pvm_len = ie_len - 3; /* exclude dtim count, dtim period, bmap ctl */
3627 
3628 	if (pvm_len < arvif->u.ap.tim_len) {
3629 		int expand_size = tim_len - pvm_len;
3630 		int move_size = skb_tail_pointer(bcn) - (ie + 2 + ie_len);
3631 		void *next_ie = ie + 2 + ie_len;
3632 
3633 		if (skb_put(bcn, expand_size)) {
3634 			memmove(next_ie + expand_size, next_ie, move_size);
3635 
3636 			ie[1] += expand_size;
3637 			ie_len += expand_size;
3638 			pvm_len += expand_size;
3639 		} else {
3640 			ath10k_warn(ar, "tim expansion failed\n");
3641 		}
3642 	}
3643 
3644 	if (pvm_len > tim_len) {
3645 		ath10k_warn(ar, "tim pvm length is too great (%d)\n", pvm_len);
3646 		return;
3647 	}
3648 
3649 	tim->bitmap_ctrl = !!__le32_to_cpu(tim_info->tim_mcast);
3650 	memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len);
3651 
3652 	if (tim->dtim_count == 0) {
3653 		ATH10K_SKB_CB(bcn)->flags |= ATH10K_SKB_F_DTIM_ZERO;
3654 
3655 		if (__le32_to_cpu(tim_info->tim_mcast) == 1)
3656 			ATH10K_SKB_CB(bcn)->flags |= ATH10K_SKB_F_DELIVER_CAB;
3657 	}
3658 
3659 	ath10k_dbg(ar, ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n",
3660 		   tim->dtim_count, tim->dtim_period,
3661 		   tim->bitmap_ctrl, pvm_len);
3662 }
3663 
3664 static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif,
3665 				  struct sk_buff *bcn,
3666 				  const struct wmi_p2p_noa_info *noa)
3667 {
3668 	if (!arvif->vif->p2p)
3669 		return;
3670 
3671 	ath10k_dbg(ar, ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed);
3672 
3673 	if (noa->changed & WMI_P2P_NOA_CHANGED_BIT)
3674 		ath10k_p2p_noa_update(arvif, noa);
3675 
3676 	if (arvif->u.ap.noa_data)
3677 		if (!pskb_expand_head(bcn, 0, arvif->u.ap.noa_len, GFP_ATOMIC))
3678 			skb_put_data(bcn, arvif->u.ap.noa_data,
3679 				     arvif->u.ap.noa_len);
3680 }
3681 
3682 static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb,
3683 				      struct wmi_swba_ev_arg *arg)
3684 {
3685 	struct wmi_host_swba_event *ev = (void *)skb->data;
3686 	u32 map;
3687 	size_t i;
3688 
3689 	if (skb->len < sizeof(*ev))
3690 		return -EPROTO;
3691 
3692 	skb_pull(skb, sizeof(*ev));
3693 	arg->vdev_map = ev->vdev_map;
3694 
3695 	for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
3696 		if (!(map & BIT(0)))
3697 			continue;
3698 
3699 		/* If this happens there were some changes in firmware and
3700 		 * ath10k should update the max size of tim_info array.
3701 		 */
3702 		if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
3703 			break;
3704 
3705 		if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) >
3706 		     sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) {
3707 			ath10k_warn(ar, "refusing to parse invalid swba structure\n");
3708 			return -EPROTO;
3709 		}
3710 
3711 		arg->tim_info[i].tim_len = ev->bcn_info[i].tim_info.tim_len;
3712 		arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast;
3713 		arg->tim_info[i].tim_bitmap =
3714 				ev->bcn_info[i].tim_info.tim_bitmap;
3715 		arg->tim_info[i].tim_changed =
3716 				ev->bcn_info[i].tim_info.tim_changed;
3717 		arg->tim_info[i].tim_num_ps_pending =
3718 				ev->bcn_info[i].tim_info.tim_num_ps_pending;
3719 
3720 		arg->noa_info[i] = &ev->bcn_info[i].p2p_noa_info;
3721 		i++;
3722 	}
3723 
3724 	return 0;
3725 }
3726 
3727 static int ath10k_wmi_10_2_4_op_pull_swba_ev(struct ath10k *ar,
3728 					     struct sk_buff *skb,
3729 					     struct wmi_swba_ev_arg *arg)
3730 {
3731 	struct wmi_10_2_4_host_swba_event *ev = (void *)skb->data;
3732 	u32 map;
3733 	size_t i;
3734 
3735 	if (skb->len < sizeof(*ev))
3736 		return -EPROTO;
3737 
3738 	skb_pull(skb, sizeof(*ev));
3739 	arg->vdev_map = ev->vdev_map;
3740 
3741 	for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
3742 		if (!(map & BIT(0)))
3743 			continue;
3744 
3745 		/* If this happens there were some changes in firmware and
3746 		 * ath10k should update the max size of tim_info array.
3747 		 */
3748 		if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
3749 			break;
3750 
3751 		if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) >
3752 		     sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) {
3753 			ath10k_warn(ar, "refusing to parse invalid swba structure\n");
3754 			return -EPROTO;
3755 		}
3756 
3757 		arg->tim_info[i].tim_len = ev->bcn_info[i].tim_info.tim_len;
3758 		arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast;
3759 		arg->tim_info[i].tim_bitmap =
3760 				ev->bcn_info[i].tim_info.tim_bitmap;
3761 		arg->tim_info[i].tim_changed =
3762 				ev->bcn_info[i].tim_info.tim_changed;
3763 		arg->tim_info[i].tim_num_ps_pending =
3764 				ev->bcn_info[i].tim_info.tim_num_ps_pending;
3765 		i++;
3766 	}
3767 
3768 	return 0;
3769 }
3770 
3771 static int ath10k_wmi_10_4_op_pull_swba_ev(struct ath10k *ar,
3772 					   struct sk_buff *skb,
3773 					   struct wmi_swba_ev_arg *arg)
3774 {
3775 	struct wmi_10_4_host_swba_event *ev = (void *)skb->data;
3776 	u32 map, tim_len;
3777 	size_t i;
3778 
3779 	if (skb->len < sizeof(*ev))
3780 		return -EPROTO;
3781 
3782 	skb_pull(skb, sizeof(*ev));
3783 	arg->vdev_map = ev->vdev_map;
3784 
3785 	for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
3786 		if (!(map & BIT(0)))
3787 			continue;
3788 
3789 		/* If this happens there were some changes in firmware and
3790 		 * ath10k should update the max size of tim_info array.
3791 		 */
3792 		if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
3793 			break;
3794 
3795 		if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) >
3796 		      sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) {
3797 			ath10k_warn(ar, "refusing to parse invalid swba structure\n");
3798 			return -EPROTO;
3799 		}
3800 
3801 		tim_len = __le32_to_cpu(ev->bcn_info[i].tim_info.tim_len);
3802 		if (tim_len) {
3803 			/* Exclude 4 byte guard length */
3804 			tim_len -= 4;
3805 			arg->tim_info[i].tim_len = __cpu_to_le32(tim_len);
3806 		} else {
3807 			arg->tim_info[i].tim_len = 0;
3808 		}
3809 
3810 		arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast;
3811 		arg->tim_info[i].tim_bitmap =
3812 				ev->bcn_info[i].tim_info.tim_bitmap;
3813 		arg->tim_info[i].tim_changed =
3814 				ev->bcn_info[i].tim_info.tim_changed;
3815 		arg->tim_info[i].tim_num_ps_pending =
3816 				ev->bcn_info[i].tim_info.tim_num_ps_pending;
3817 
3818 		/* 10.4 firmware doesn't have p2p support. notice of absence
3819 		 * info can be ignored for now.
3820 		 */
3821 
3822 		i++;
3823 	}
3824 
3825 	return 0;
3826 }
3827 
3828 static enum wmi_txbf_conf ath10k_wmi_10_4_txbf_conf_scheme(struct ath10k *ar)
3829 {
3830 	return WMI_TXBF_CONF_BEFORE_ASSOC;
3831 }
3832 
3833 void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
3834 {
3835 	struct wmi_swba_ev_arg arg = {};
3836 	u32 map;
3837 	int i = -1;
3838 	const struct wmi_tim_info_arg *tim_info;
3839 	const struct wmi_p2p_noa_info *noa_info;
3840 	struct ath10k_vif *arvif;
3841 	struct sk_buff *bcn;
3842 	dma_addr_t paddr;
3843 	int ret, vdev_id = 0;
3844 
3845 	ret = ath10k_wmi_pull_swba(ar, skb, &arg);
3846 	if (ret) {
3847 		ath10k_warn(ar, "failed to parse swba event: %d\n", ret);
3848 		return;
3849 	}
3850 
3851 	map = __le32_to_cpu(arg.vdev_map);
3852 
3853 	ath10k_dbg(ar, ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n",
3854 		   map);
3855 
3856 	for (; map; map >>= 1, vdev_id++) {
3857 		if (!(map & 0x1))
3858 			continue;
3859 
3860 		i++;
3861 
3862 		if (i >= WMI_MAX_AP_VDEV) {
3863 			ath10k_warn(ar, "swba has corrupted vdev map\n");
3864 			break;
3865 		}
3866 
3867 		tim_info = &arg.tim_info[i];
3868 		noa_info = arg.noa_info[i];
3869 
3870 		ath10k_dbg(ar, ATH10K_DBG_MGMT,
3871 			   "mgmt event bcn_info %d tim_len %d mcast %d changed %d num_ps_pending %d bitmap 0x%08x%08x%08x%08x\n",
3872 			   i,
3873 			   __le32_to_cpu(tim_info->tim_len),
3874 			   __le32_to_cpu(tim_info->tim_mcast),
3875 			   __le32_to_cpu(tim_info->tim_changed),
3876 			   __le32_to_cpu(tim_info->tim_num_ps_pending),
3877 			   __le32_to_cpu(tim_info->tim_bitmap[3]),
3878 			   __le32_to_cpu(tim_info->tim_bitmap[2]),
3879 			   __le32_to_cpu(tim_info->tim_bitmap[1]),
3880 			   __le32_to_cpu(tim_info->tim_bitmap[0]));
3881 
3882 		/* TODO: Only first 4 word from tim_bitmap is dumped.
3883 		 * Extend debug code to dump full tim_bitmap.
3884 		 */
3885 
3886 		arvif = ath10k_get_arvif(ar, vdev_id);
3887 		if (arvif == NULL) {
3888 			ath10k_warn(ar, "no vif for vdev_id %d found\n",
3889 				    vdev_id);
3890 			continue;
3891 		}
3892 
3893 		/* mac80211 would have already asked us to stop beaconing and
3894 		 * bring the vdev down, so continue in that case
3895 		 */
3896 		if (!arvif->is_up)
3897 			continue;
3898 
3899 		/* There are no completions for beacons so wait for next SWBA
3900 		 * before telling mac80211 to decrement CSA counter
3901 		 *
3902 		 * Once CSA counter is completed stop sending beacons until
3903 		 * actual channel switch is done
3904 		 */
3905 		if (arvif->vif->bss_conf.csa_active &&
3906 		    ieee80211_beacon_cntdwn_is_complete(arvif->vif)) {
3907 			ieee80211_csa_finish(arvif->vif);
3908 			continue;
3909 		}
3910 
3911 		bcn = ieee80211_beacon_get(ar->hw, arvif->vif, 0);
3912 		if (!bcn) {
3913 			ath10k_warn(ar, "could not get mac80211 beacon\n");
3914 			continue;
3915 		}
3916 
3917 		ath10k_tx_h_seq_no(arvif->vif, bcn);
3918 		ath10k_wmi_update_tim(ar, arvif, bcn, tim_info);
3919 		ath10k_wmi_update_noa(ar, arvif, bcn, noa_info);
3920 
3921 		spin_lock_bh(&ar->data_lock);
3922 
3923 		if (arvif->beacon) {
3924 			switch (arvif->beacon_state) {
3925 			case ATH10K_BEACON_SENT:
3926 				break;
3927 			case ATH10K_BEACON_SCHEDULED:
3928 				ath10k_warn(ar, "SWBA overrun on vdev %d, skipped old beacon\n",
3929 					    arvif->vdev_id);
3930 				break;
3931 			case ATH10K_BEACON_SENDING:
3932 				ath10k_warn(ar, "SWBA overrun on vdev %d, skipped new beacon\n",
3933 					    arvif->vdev_id);
3934 				dev_kfree_skb(bcn);
3935 				goto skip;
3936 			}
3937 
3938 			ath10k_mac_vif_beacon_free(arvif);
3939 		}
3940 
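		/* Either DMA-map the freshly built beacon or, when a
		 * pre-allocated beacon buffer exists, copy the beacon into it
		 * and reuse its existing mapping.
		 */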
3941 		if (!arvif->beacon_buf) {
3942 			paddr = dma_map_single(arvif->ar->dev, bcn->data,
3943 					       bcn->len, DMA_TO_DEVICE);
3944 			ret = dma_mapping_error(arvif->ar->dev, paddr);
3945 			if (ret) {
3946 				ath10k_warn(ar, "failed to map beacon: %d\n",
3947 					    ret);
3948 				dev_kfree_skb_any(bcn);
3949 				goto skip;
3950 			}
3951 
3952 			ATH10K_SKB_CB(bcn)->paddr = paddr;
3953 		} else {
3954 			if (bcn->len > IEEE80211_MAX_FRAME_LEN) {
3955 				ath10k_warn(ar, "trimming beacon %d -> %d bytes!\n",
3956 					    bcn->len, IEEE80211_MAX_FRAME_LEN);
3957 				skb_trim(bcn, IEEE80211_MAX_FRAME_LEN);
3958 			}
3959 			memcpy(arvif->beacon_buf, bcn->data, bcn->len);
3960 			ATH10K_SKB_CB(bcn)->paddr = arvif->beacon_paddr;
3961 		}
3962 
3963 		arvif->beacon = bcn;
3964 		arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
3965 
3966 		trace_ath10k_tx_hdr(ar, bcn->data, bcn->len);
3967 		trace_ath10k_tx_payload(ar, bcn->data, bcn->len);
3968 
3969 skip:
3970 		spin_unlock_bh(&ar->data_lock);
3971 	}
3972 
3973 	ath10k_wmi_tx_beacons_nowait(ar);
3974 }
3975 
3976 void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb)
3977 {
3978 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n");
3979 }
3980 
3981 static void ath10k_radar_detected(struct ath10k *ar)
3982 {
3983 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs radar detected\n");
3984 	ATH10K_DFS_STAT_INC(ar, radar_detected);
3985 
3986 	/* Control radar events reporting in debugfs file
3987 	 * dfs_block_radar_events
3988 	 */
3989 	if (ar->dfs_block_radar_events)
3990 		ath10k_info(ar, "DFS Radar detected, but ignored as requested\n");
3991 	else
3992 		ieee80211_radar_detected(ar->hw);
3993 }
3994 
3995 static void ath10k_radar_confirmation_work(struct work_struct *work)
3996 {
3997 	struct ath10k *ar = container_of(work, struct ath10k,
3998 					 radar_confirmation_work);
3999 	struct ath10k_radar_found_info radar_info;
4000 	int ret, time_left;
4001 
4002 	reinit_completion(&ar->wmi.radar_confirm);
4003 
4004 	spin_lock_bh(&ar->data_lock);
4005 	memcpy(&radar_info, &ar->last_radar_info, sizeof(radar_info));
4006 	spin_unlock_bh(&ar->data_lock);
4007 
4008 	ret = ath10k_wmi_report_radar_found(ar, &radar_info);
4009 	if (ret) {
4010 		ath10k_warn(ar, "failed to send radar found %d\n", ret);
4011 		goto wait_complete;
4012 	}
4013 
4014 	time_left = wait_for_completion_timeout(&ar->wmi.radar_confirm,
4015 						ATH10K_WMI_DFS_CONF_TIMEOUT_HZ);
4016 	if (time_left) {
4017 		/* DFS Confirmation status event received and
4018 		 * necessary action completed.
4019 		 */
4020 		goto wait_complete;
4021 	} else {
4022 		/* DFS confirmation event not received from FW. Considering this
4023 		 * as real radar.
4024 		 */
4025 		ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4026 			   "dfs confirmation not received from fw, considering as radar\n");
4027 		goto radar_detected;
4028 	}
4029 
4030 radar_detected:
4031 	ath10k_radar_detected(ar);
4032 
4033 	/* Reset state to allow sending confirmation on consecutive radar
4034 	 * detections, unless radar confirmation is disabled/stopped.
4035 	 */
4036 wait_complete:
4037 	spin_lock_bh(&ar->data_lock);
4038 	if (ar->radar_conf_state != ATH10K_RADAR_CONFIRMATION_STOPPED)
4039 		ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_IDLE;
4040 	spin_unlock_bh(&ar->data_lock);
4041 }
4042 
4043 static void ath10k_dfs_radar_report(struct ath10k *ar,
4044 				    struct wmi_phyerr_ev_arg *phyerr,
4045 				    const struct phyerr_radar_report *rr,
4046 				    u64 tsf)
4047 {
4048 	u32 reg0, reg1, tsf32l;
4049 	struct ieee80211_channel *ch;
4050 	struct pulse_event pe;
4051 	struct radar_detector_specs rs;
4052 	u64 tsf64;
4053 	u8 rssi, width;
4054 	struct ath10k_radar_found_info *radar_info;
4055 
4056 	reg0 = __le32_to_cpu(rr->reg0);
4057 	reg1 = __le32_to_cpu(rr->reg1);
4058 
4059 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4060 		   "wmi phyerr radar report chirp %d max_width %d agc_total_gain %d pulse_delta_diff %d\n",
4061 		   MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP),
4062 		   MS(reg0, RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH),
4063 		   MS(reg0, RADAR_REPORT_REG0_AGC_TOTAL_GAIN),
4064 		   MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_DIFF));
4065 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4066 		   "wmi phyerr radar report pulse_delta_peak %d pulse_sidx %d fft_valid %d agc_mb_gain %d subchan_mask %d\n",
4067 		   MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_PEAK),
4068 		   MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX),
4069 		   MS(reg1, RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID),
4070 		   MS(reg1, RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN),
4071 		   MS(reg1, RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK));
4072 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4073 		   "wmi phyerr radar report pulse_tsf_offset 0x%X pulse_dur: %d\n",
4074 		   MS(reg1, RADAR_REPORT_REG1_PULSE_TSF_OFFSET),
4075 		   MS(reg1, RADAR_REPORT_REG1_PULSE_DUR));
4076 
4077 	if (!ar->dfs_detector)
4078 		return;
4079 
4080 	spin_lock_bh(&ar->data_lock);
4081 	ch = ar->rx_channel;
4082 
4083 	/* fetch target operating channel during channel change */
4084 	if (!ch)
4085 		ch = ar->tgt_oper_chan;
4086 
4087 	spin_unlock_bh(&ar->data_lock);
4088 
4089 	if (!ch) {
4090 		ath10k_warn(ar, "failed to derive channel for radar pulse, treating as radar\n");
4091 		goto radar_detected;
4092 	}
4093 
4094 	/* report event to DFS pattern detector */
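	/* Rebuild a full 64-bit timestamp for the pulse: keep the upper 32
	 * bits of the event-level TSF and substitute the per-pulse lower
	 * 32 bits reported in the phyerr header.
	 */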
4095 	tsf32l = phyerr->tsf_timestamp;
4096 	tsf64 = tsf & (~0xFFFFFFFFULL);
4097 	tsf64 |= tsf32l;
4098 
4099 	width = MS(reg1, RADAR_REPORT_REG1_PULSE_DUR);
4100 	rssi = phyerr->rssi_combined;
4101 
4102 	/* The hardware stores this as an 8 bit signed value,
4103 	 * so clamp it to zero if it is negative.
4104 	 */
4105 	if (rssi & 0x80)
4106 		rssi = 0;
4107 
4108 	pe.ts = tsf64;
4109 	pe.freq = ch->center_freq;
4110 	pe.width = width;
4111 	pe.rssi = rssi;
4112 	pe.chirp = (MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP) != 0);
4113 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4114 		   "dfs add pulse freq: %d, width: %d, rssi %d, tsf: %llX\n",
4115 		   pe.freq, pe.width, pe.rssi, pe.ts);
4116 
4117 	ATH10K_DFS_STAT_INC(ar, pulses_detected);
4118 
4119 	if (!ar->dfs_detector->add_pulse(ar->dfs_detector, &pe, &rs)) {
4120 		ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4121 			   "dfs no pulse pattern detected, yet\n");
4122 		return;
4123 	}
4124 
4125 	if ((test_bit(WMI_SERVICE_HOST_DFS_CHECK_SUPPORT, ar->wmi.svc_map)) &&
4126 	    ar->dfs_detector->region == NL80211_DFS_FCC) {
4127 		/* Consecutive radar indications need not be
4128 		 * sent to the firmware until we get confirmation
4129 		 * for the previously detected radar.
4130 		 */
4131 		spin_lock_bh(&ar->data_lock);
4132 		if (ar->radar_conf_state != ATH10K_RADAR_CONFIRMATION_IDLE) {
4133 			spin_unlock_bh(&ar->data_lock);
4134 			return;
4135 		}
4136 		ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_INPROGRESS;
4137 		radar_info = &ar->last_radar_info;
4138 
4139 		radar_info->pri_min = rs.pri_min;
4140 		radar_info->pri_max = rs.pri_max;
4141 		radar_info->width_min = rs.width_min;
4142 		radar_info->width_max = rs.width_max;
4143 		/*TODO Find sidx_min and sidx_max */
4144 		radar_info->sidx_min = MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX);
4145 		radar_info->sidx_max = MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX);
4146 
4147 		ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4148 			   "sending wmi radar found cmd pri_min %d pri_max %d width_min %d width_max %d sidx_min %d sidx_max %d\n",
4149 			   radar_info->pri_min, radar_info->pri_max,
4150 			   radar_info->width_min, radar_info->width_max,
4151 			   radar_info->sidx_min, radar_info->sidx_max);
4152 		ieee80211_queue_work(ar->hw, &ar->radar_confirmation_work);
4153 		spin_unlock_bh(&ar->data_lock);
4154 		return;
4155 	}
4156 
4157 radar_detected:
4158 	ath10k_radar_detected(ar);
4159 }
4160 
4161 static int ath10k_dfs_fft_report(struct ath10k *ar,
4162 				 struct wmi_phyerr_ev_arg *phyerr,
4163 				 const struct phyerr_fft_report *fftr,
4164 				 u64 tsf)
4165 {
4166 	u32 reg0, reg1;
4167 	u8 rssi, peak_mag;
4168 
4169 	reg0 = __le32_to_cpu(fftr->reg0);
4170 	reg1 = __le32_to_cpu(fftr->reg1);
4171 	rssi = phyerr->rssi_combined;
4172 
4173 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4174 		   "wmi phyerr fft report total_gain_db %d base_pwr_db %d fft_chn_idx %d peak_sidx %d\n",
4175 		   MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB),
4176 		   MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB),
4177 		   MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX),
4178 		   MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX));
4179 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4180 		   "wmi phyerr fft report rel_pwr_db %d avgpwr_db %d peak_mag %d num_store_bin %d\n",
4181 		   MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB),
4182 		   MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB),
4183 		   MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG),
4184 		   MS(reg1, SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB));
4185 
4186 	peak_mag = MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG);
4187 
4188 	/* false event detection */
4189 	if (rssi == DFS_RSSI_POSSIBLY_FALSE &&
4190 	    peak_mag < 2 * DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE) {
4191 		ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs false pulse detected\n");
4192 		ATH10K_DFS_STAT_INC(ar, pulses_discarded);
4193 		return -EINVAL;
4194 	}
4195 
4196 	return 0;
4197 }
4198 
4199 void ath10k_wmi_event_dfs(struct ath10k *ar,
4200 			  struct wmi_phyerr_ev_arg *phyerr,
4201 			  u64 tsf)
4202 {
4203 	int buf_len, tlv_len, res, i = 0;
4204 	const struct phyerr_tlv *tlv;
4205 	const struct phyerr_radar_report *rr;
4206 	const struct phyerr_fft_report *fftr;
4207 	const u8 *tlv_buf;
4208 
4209 	buf_len = phyerr->buf_len;
4210 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4211 		   "wmi event dfs err_code %d rssi %d tsfl 0x%X tsf64 0x%llX len %d\n",
4212 		   phyerr->phy_err_code, phyerr->rssi_combined,
4213 		   phyerr->tsf_timestamp, tsf, buf_len);
4214 
4215 	/* Skip event if DFS disabled */
4216 	if (!IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED))
4217 		return;
4218 
4219 	ATH10K_DFS_STAT_INC(ar, pulses_total);
4220 
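	/* The phyerr payload is a sequence of TLVs: a phyerr_tlv header
	 * (tag + length) followed by tag-specific data; walk it until the
	 * buffer is exhausted, validating lengths as we go.
	 */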
4221 	while (i < buf_len) {
4222 		if (i + sizeof(*tlv) > buf_len) {
4223 			ath10k_warn(ar, "too short buf for tlv header (%d)\n",
4224 				    i);
4225 			return;
4226 		}
4227 
4228 		tlv = (struct phyerr_tlv *)&phyerr->buf[i];
4229 		tlv_len = __le16_to_cpu(tlv->len);
4230 		tlv_buf = &phyerr->buf[i + sizeof(*tlv)];
4231 		ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4232 			   "wmi event dfs tlv_len %d tlv_tag 0x%02X tlv_sig 0x%02X\n",
4233 			   tlv_len, tlv->tag, tlv->sig);
4234 
4235 		switch (tlv->tag) {
4236 		case PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY:
4237 			if (i + sizeof(*tlv) + sizeof(*rr) > buf_len) {
4238 				ath10k_warn(ar, "too short radar pulse summary (%d)\n",
4239 					    i);
4240 				return;
4241 			}
4242 
4243 			rr = (struct phyerr_radar_report *)tlv_buf;
4244 			ath10k_dfs_radar_report(ar, phyerr, rr, tsf);
4245 			break;
4246 		case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
4247 			if (i + sizeof(*tlv) + sizeof(*fftr) > buf_len) {
4248 				ath10k_warn(ar, "too short fft report (%d)\n",
4249 					    i);
4250 				return;
4251 			}
4252 
4253 			fftr = (struct phyerr_fft_report *)tlv_buf;
4254 			res = ath10k_dfs_fft_report(ar, phyerr, fftr, tsf);
4255 			if (res)
4256 				return;
4257 			break;
4258 		}
4259 
4260 		i += sizeof(*tlv) + tlv_len;
4261 	}
4262 }
4263 
4264 void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
4265 				    struct wmi_phyerr_ev_arg *phyerr,
4266 				    u64 tsf)
4267 {
4268 	int buf_len, tlv_len, res, i = 0;
4269 	struct phyerr_tlv *tlv;
4270 	const void *tlv_buf;
4271 	const struct phyerr_fft_report *fftr;
4272 	size_t fftr_len;
4273 
4274 	buf_len = phyerr->buf_len;
4275 
4276 	while (i < buf_len) {
4277 		if (i + sizeof(*tlv) > buf_len) {
4278 			ath10k_warn(ar, "failed to parse phyerr tlv header at byte %d\n",
4279 				    i);
4280 			return;
4281 		}
4282 
4283 		tlv = (struct phyerr_tlv *)&phyerr->buf[i];
4284 		tlv_len = __le16_to_cpu(tlv->len);
4285 		tlv_buf = &phyerr->buf[i + sizeof(*tlv)];
4286 
4287 		if (i + sizeof(*tlv) + tlv_len > buf_len) {
4288 			ath10k_warn(ar, "failed to parse phyerr tlv payload at byte %d\n",
4289 				    i);
4290 			return;
4291 		}
4292 
4293 		switch (tlv->tag) {
4294 		case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
4295 			if (sizeof(*fftr) > tlv_len) {
4296 				ath10k_warn(ar, "failed to parse fft report at byte %d\n",
4297 					    i);
4298 				return;
4299 			}
4300 
4301 			fftr_len = tlv_len - sizeof(*fftr);
4302 			fftr = tlv_buf;
4303 			res = ath10k_spectral_process_fft(ar, phyerr,
4304 							  fftr, fftr_len,
4305 							  tsf);
4306 			if (res < 0) {
4307 				ath10k_dbg(ar, ATH10K_DBG_WMI, "failed to process fft report: %d\n",
4308 					   res);
4309 				return;
4310 			}
4311 			break;
4312 		}
4313 
4314 		i += sizeof(*tlv) + tlv_len;
4315 	}
4316 }
4317 
4318 static int ath10k_wmi_op_pull_phyerr_ev_hdr(struct ath10k *ar,
4319 					    struct sk_buff *skb,
4320 					    struct wmi_phyerr_hdr_arg *arg)
4321 {
4322 	struct wmi_phyerr_event *ev = (void *)skb->data;
4323 
4324 	if (skb->len < sizeof(*ev))
4325 		return -EPROTO;
4326 
4327 	arg->num_phyerrs = __le32_to_cpu(ev->num_phyerrs);
4328 	arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32);
4329 	arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32);
4330 	arg->buf_len = skb->len - sizeof(*ev);
4331 	arg->phyerrs = ev->phyerrs;
4332 
4333 	return 0;
4334 }
4335 
4336 static int ath10k_wmi_10_4_op_pull_phyerr_ev_hdr(struct ath10k *ar,
4337 						 struct sk_buff *skb,
4338 						 struct wmi_phyerr_hdr_arg *arg)
4339 {
4340 	struct wmi_10_4_phyerr_event *ev = (void *)skb->data;
4341 
4342 	if (skb->len < sizeof(*ev))
4343 		return -EPROTO;
4344 
4345 	/* 10.4 firmware always reports only one phyerr */
4346 	arg->num_phyerrs = 1;
4347 
4348 	arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32);
4349 	arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32);
4350 	arg->buf_len = skb->len;
4351 	arg->phyerrs = skb->data;
4352 
4353 	return 0;
4354 }
4355 
4356 int ath10k_wmi_op_pull_phyerr_ev(struct ath10k *ar,
4357 				 const void *phyerr_buf,
4358 				 int left_len,
4359 				 struct wmi_phyerr_ev_arg *arg)
4360 {
4361 	const struct wmi_phyerr *phyerr = phyerr_buf;
4362 	int i;
4363 
4364 	if (left_len < sizeof(*phyerr)) {
4365 		ath10k_warn(ar, "wrong phyerr event head len %d (need: >=%zd)\n",
4366 			    left_len, sizeof(*phyerr));
4367 		return -EINVAL;
4368 	}
4369 
4370 	arg->tsf_timestamp = __le32_to_cpu(phyerr->tsf_timestamp);
4371 	arg->freq1 = __le16_to_cpu(phyerr->freq1);
4372 	arg->freq2 = __le16_to_cpu(phyerr->freq2);
4373 	arg->rssi_combined = phyerr->rssi_combined;
4374 	arg->chan_width_mhz = phyerr->chan_width_mhz;
4375 	arg->buf_len = __le32_to_cpu(phyerr->buf_len);
4376 	arg->buf = phyerr->buf;
4377 	arg->hdr_len = sizeof(*phyerr);
4378 
4379 	for (i = 0; i < 4; i++)
4380 		arg->nf_chains[i] = __le16_to_cpu(phyerr->nf_chains[i]);
4381 
4382 	switch (phyerr->phy_err_code) {
4383 	case PHY_ERROR_GEN_SPECTRAL_SCAN:
4384 		arg->phy_err_code = PHY_ERROR_SPECTRAL_SCAN;
4385 		break;
4386 	case PHY_ERROR_GEN_FALSE_RADAR_EXT:
4387 		arg->phy_err_code = PHY_ERROR_FALSE_RADAR_EXT;
4388 		break;
4389 	case PHY_ERROR_GEN_RADAR:
4390 		arg->phy_err_code = PHY_ERROR_RADAR;
4391 		break;
4392 	default:
4393 		arg->phy_err_code = PHY_ERROR_UNKNOWN;
4394 		break;
4395 	}
4396 
4397 	return 0;
4398 }
4399 
4400 static int ath10k_wmi_10_4_op_pull_phyerr_ev(struct ath10k *ar,
4401 					     const void *phyerr_buf,
4402 					     int left_len,
4403 					     struct wmi_phyerr_ev_arg *arg)
4404 {
4405 	const struct wmi_10_4_phyerr_event *phyerr = phyerr_buf;
4406 	u32 phy_err_mask;
4407 	int i;
4408 
4409 	if (left_len < sizeof(*phyerr)) {
4410 		ath10k_warn(ar, "wrong phyerr event head len %d (need: >=%zd)\n",
4411 			    left_len, sizeof(*phyerr));
4412 		return -EINVAL;
4413 	}
4414 
4415 	arg->tsf_timestamp = __le32_to_cpu(phyerr->tsf_timestamp);
4416 	arg->freq1 = __le16_to_cpu(phyerr->freq1);
4417 	arg->freq2 = __le16_to_cpu(phyerr->freq2);
4418 	arg->rssi_combined = phyerr->rssi_combined;
4419 	arg->chan_width_mhz = phyerr->chan_width_mhz;
4420 	arg->buf_len = __le32_to_cpu(phyerr->buf_len);
4421 	arg->buf = phyerr->buf;
4422 	arg->hdr_len = sizeof(*phyerr);
4423 
4424 	for (i = 0; i < 4; i++)
4425 		arg->nf_chains[i] = __le16_to_cpu(phyerr->nf_chains[i]);
4426 
4427 	phy_err_mask = __le32_to_cpu(phyerr->phy_err_mask[0]);
4428 
4429 	if (phy_err_mask & PHY_ERROR_10_4_SPECTRAL_SCAN_MASK)
4430 		arg->phy_err_code = PHY_ERROR_SPECTRAL_SCAN;
4431 	else if (phy_err_mask & PHY_ERROR_10_4_RADAR_MASK)
4432 		arg->phy_err_code = PHY_ERROR_RADAR;
4433 	else
4434 		arg->phy_err_code = PHY_ERROR_UNKNOWN;
4435 
4436 	return 0;
4437 }
4438 
4439 void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
4440 {
4441 	struct wmi_phyerr_hdr_arg hdr_arg = {};
4442 	struct wmi_phyerr_ev_arg phyerr_arg = {};
4443 	const void *phyerr;
4444 	u32 count, i, buf_len, phy_err_code;
4445 	u64 tsf;
4446 	int left_len, ret;
4447 
4448 	ATH10K_DFS_STAT_INC(ar, phy_errors);
4449 
4450 	ret = ath10k_wmi_pull_phyerr_hdr(ar, skb, &hdr_arg);
4451 	if (ret) {
4452 		ath10k_warn(ar, "failed to parse phyerr event hdr: %d\n", ret);
4453 		return;
4454 	}
4455 
4456 	/* Check number of included events */
4457 	count = hdr_arg.num_phyerrs;
4458 
4459 	left_len = hdr_arg.buf_len;
4460 
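	/* Combine the upper and lower 32-bit TSF words from the event
	 * header into a single 64-bit timestamp.
	 */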
4461 	tsf = hdr_arg.tsf_u32;
4462 	tsf <<= 32;
4463 	tsf |= hdr_arg.tsf_l32;
4464 
4465 	ath10k_dbg(ar, ATH10K_DBG_WMI,
4466 		   "wmi event phyerr count %d tsf64 0x%llX\n",
4467 		   count, tsf);
4468 
4469 	phyerr = hdr_arg.phyerrs;
4470 	for (i = 0; i < count; i++) {
4471 		ret = ath10k_wmi_pull_phyerr(ar, phyerr, left_len, &phyerr_arg);
4472 		if (ret) {
4473 			ath10k_warn(ar, "failed to parse phyerr event (%d)\n",
4474 				    i);
4475 			return;
4476 		}
4477 
4478 		left_len -= phyerr_arg.hdr_len;
4479 		buf_len = phyerr_arg.buf_len;
4480 		phy_err_code = phyerr_arg.phy_err_code;
4481 
4482 		if (left_len < buf_len) {
4483 			ath10k_warn(ar, "single event (%d) wrong buf len\n", i);
4484 			return;
4485 		}
4486 
4487 		left_len -= buf_len;
4488 
4489 		switch (phy_err_code) {
4490 		case PHY_ERROR_RADAR:
4491 			ath10k_wmi_event_dfs(ar, &phyerr_arg, tsf);
4492 			break;
4493 		case PHY_ERROR_SPECTRAL_SCAN:
4494 			ath10k_wmi_event_spectral_scan(ar, &phyerr_arg, tsf);
4495 			break;
4496 		case PHY_ERROR_FALSE_RADAR_EXT:
4497 			ath10k_wmi_event_dfs(ar, &phyerr_arg, tsf);
4498 			ath10k_wmi_event_spectral_scan(ar, &phyerr_arg, tsf);
4499 			break;
4500 		default:
4501 			break;
4502 		}
4503 
4504 		phyerr = phyerr + phyerr_arg.hdr_len + buf_len;
4505 	}
4506 }
4507 
4508 static int
4509 ath10k_wmi_10_4_op_pull_dfs_status_ev(struct ath10k *ar, struct sk_buff *skb,
4510 				      struct wmi_dfs_status_ev_arg *arg)
4511 {
4512 	struct wmi_dfs_status_ev_arg *ev = (void *)skb->data;
4513 
4514 	if (skb->len < sizeof(*ev))
4515 		return -EPROTO;
4516 
4517 	arg->status = ev->status;
4518 
4519 	return 0;
4520 }
4521 
4522 static void
4523 ath10k_wmi_event_dfs_status_check(struct ath10k *ar, struct sk_buff *skb)
4524 {
4525 	struct wmi_dfs_status_ev_arg status_arg = {};
4526 	int ret;
4527 
4528 	ret = ath10k_wmi_pull_dfs_status(ar, skb, &status_arg);
4529 
4530 	if (ret) {
4531 		ath10k_warn(ar, "failed to parse dfs status event: %d\n", ret);
4532 		return;
4533 	}
4534 
4535 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4536 		   "dfs status event received from fw: %d\n",
4537 		   status_arg.status);
4538 
4539 	/* Even in case of radar detection failure we follow the same
4540 	 * behaviour as if radar is detected i.e to switch to a different
4541 	 * behaviour as if radar is detected, i.e. to switch to a different
4542 	 */
4543 	if (status_arg.status == WMI_HW_RADAR_DETECTED ||
4544 	    status_arg.status == WMI_RADAR_DETECTION_FAIL)
4545 		ath10k_radar_detected(ar);
4546 	complete(&ar->wmi.radar_confirm);
4547 }
4548 
4549 void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
4550 {
4551 	struct wmi_roam_ev_arg arg = {};
4552 	int ret;
4553 	u32 vdev_id;
4554 	u32 reason;
4555 	s32 rssi;
4556 
4557 	ret = ath10k_wmi_pull_roam_ev(ar, skb, &arg);
4558 	if (ret) {
4559 		ath10k_warn(ar, "failed to parse roam event: %d\n", ret);
4560 		return;
4561 	}
4562 
4563 	vdev_id = __le32_to_cpu(arg.vdev_id);
4564 	reason = __le32_to_cpu(arg.reason);
4565 	rssi = __le32_to_cpu(arg.rssi);
4566 	rssi += WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT;
4567 
4568 	ath10k_dbg(ar, ATH10K_DBG_WMI,
4569 		   "wmi roam event vdev %u reason 0x%08x rssi %d\n",
4570 		   vdev_id, reason, rssi);
4571 
4572 	if (reason >= WMI_ROAM_REASON_MAX)
4573 		ath10k_warn(ar, "ignoring unknown roam event reason %d on vdev %i\n",
4574 			    reason, vdev_id);
4575 
4576 	switch (reason) {
4577 	case WMI_ROAM_REASON_BEACON_MISS:
4578 		ath10k_mac_handle_beacon_miss(ar, vdev_id);
4579 		break;
4580 	case WMI_ROAM_REASON_BETTER_AP:
4581 	case WMI_ROAM_REASON_LOW_RSSI:
4582 	case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
4583 	case WMI_ROAM_REASON_HO_FAILED:
4584 		ath10k_warn(ar, "ignoring not implemented roam event reason %d on vdev %i\n",
4585 			    reason, vdev_id);
4586 		break;
4587 	}
4588 }
4589 
4590 void ath10k_wmi_event_profile_match(struct ath10k *ar, struct sk_buff *skb)
4591 {
4592 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n");
4593 }
4594 
4595 void ath10k_wmi_event_debug_print(struct ath10k *ar, struct sk_buff *skb)
4596 {
4597 	char buf[101], c;
4598 	int i;
4599 
4600 	for (i = 0; i < sizeof(buf) - 1; i++) {
4601 		if (i >= skb->len)
4602 			break;
4603 
4604 		c = skb->data[i];
4605 
4606 		if (c == '\0')
4607 			break;
4608 
4609 		if (isascii(c) && isprint(c))
4610 			buf[i] = c;
4611 		else
4612 			buf[i] = '.';
4613 	}
4614 
4615 	if (i == sizeof(buf) - 1)
4616 		ath10k_warn(ar, "wmi debug print truncated: %d\n", skb->len);
4617 
4618 	/* for some reason the debug prints end with \n, remove that */
4619 	if (skb->data[i - 1] == '\n')
4620 		i--;
4621 
4622 	/* the last byte is always reserved for the null character */
4623 	buf[i] = '\0';
4624 
4625 	ath10k_dbg(ar, ATH10K_DBG_WMI_PRINT, "wmi print '%s'\n", buf);
4626 }
4627 
4628 void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb)
4629 {
4630 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n");
4631 }
4632 
4633 void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar, struct sk_buff *skb)
4634 {
4635 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n");
4636 }
4637 
4638 void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar,
4639 					     struct sk_buff *skb)
4640 {
4641 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n");
4642 }
4643 
4644 void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar,
4645 					     struct sk_buff *skb)
4646 {
4647 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n");
4648 }
4649 
4650 void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, struct sk_buff *skb)
4651 {
4652 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n");
4653 }
4654 
4655 void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, struct sk_buff *skb)
4656 {
4657 	struct wmi_wow_ev_arg ev = {};
4658 	int ret;
4659 
4660 	complete(&ar->wow.wakeup_completed);
4661 
4662 	ret = ath10k_wmi_pull_wow_event(ar, skb, &ev);
4663 	if (ret) {
4664 		ath10k_warn(ar, "failed to parse wow wakeup event: %d\n", ret);
4665 		return;
4666 	}
4667 
4668 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wow wakeup host reason %s\n",
4669 		   wow_reason(ev.wake_reason));
4670 }
4671 
4672 void ath10k_wmi_event_dcs_interference(struct ath10k *ar, struct sk_buff *skb)
4673 {
4674 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n");
4675 }
4676 
4677 static u8 ath10k_tpc_config_get_rate(struct ath10k *ar,
4678 				     struct wmi_pdev_tpc_config_event *ev,
4679 				     u32 rate_idx, u32 num_chains,
4680 				     u32 rate_code, u8 type)
4681 {
4682 	u8 tpc, num_streams, preamble, ch, stm_idx;
4683 
4684 	num_streams = ATH10K_HW_NSS(rate_code);
4685 	preamble = ATH10K_HW_PREAMBLE(rate_code);
4686 	ch = num_chains - 1;
4687 
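	/* Start from the per-rate target power, capped by the regulatory
	 * maximum for this chain count; the STBC/TXBF/CDD tables below may
	 * reduce it further when more chains than streams are in use.
	 */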
4688 	tpc = min_t(u8, ev->rates_array[rate_idx], ev->max_reg_allow_pow[ch]);
4689 
4690 	if (__le32_to_cpu(ev->num_tx_chain) <= 1)
4691 		goto out;
4692 
4693 	if (preamble == WMI_RATE_PREAMBLE_CCK)
4694 		goto out;
4695 
4696 	stm_idx = num_streams - 1;
4697 	if (num_chains <= num_streams)
4698 		goto out;
4699 
4700 	switch (type) {
4701 	case WMI_TPC_TABLE_TYPE_STBC:
4702 		tpc = min_t(u8, tpc,
4703 			    ev->max_reg_allow_pow_agstbc[ch - 1][stm_idx]);
4704 		break;
4705 	case WMI_TPC_TABLE_TYPE_TXBF:
4706 		tpc = min_t(u8, tpc,
4707 			    ev->max_reg_allow_pow_agtxbf[ch - 1][stm_idx]);
4708 		break;
4709 	case WMI_TPC_TABLE_TYPE_CDD:
4710 		tpc = min_t(u8, tpc,
4711 			    ev->max_reg_allow_pow_agcdd[ch - 1][stm_idx]);
4712 		break;
4713 	default:
4714 		ath10k_warn(ar, "unknown wmi tpc table type: %d\n", type);
4715 		tpc = 0;
4716 		break;
4717 	}
4718 
4719 out:
4720 	return tpc;
4721 }
4722 
4723 static void ath10k_tpc_config_disp_tables(struct ath10k *ar,
4724 					  struct wmi_pdev_tpc_config_event *ev,
4725 					  struct ath10k_tpc_stats *tpc_stats,
4726 					  u8 *rate_code, u16 *pream_table, u8 type)
4727 {
4728 	u32 i, j, pream_idx, flags;
4729 	u8 tpc[WMI_TPC_TX_N_CHAIN];
4730 	char tpc_value[WMI_TPC_TX_N_CHAIN * WMI_TPC_BUF_SIZE];
4731 	char buff[WMI_TPC_BUF_SIZE];
4732 
4733 	flags = __le32_to_cpu(ev->flags);
4734 
4735 	switch (type) {
4736 	case WMI_TPC_TABLE_TYPE_CDD:
4737 		if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_CDD)) {
4738 			ath10k_dbg(ar, ATH10K_DBG_WMI, "CDD not supported\n");
4739 			tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
4740 			return;
4741 		}
4742 		break;
4743 	case WMI_TPC_TABLE_TYPE_STBC:
4744 		if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_STBC)) {
4745 			ath10k_dbg(ar, ATH10K_DBG_WMI, "STBC not supported\n");
4746 			tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
4747 			return;
4748 		}
4749 		break;
4750 	case WMI_TPC_TABLE_TYPE_TXBF:
4751 		if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_TXBF)) {
4752 			ath10k_dbg(ar, ATH10K_DBG_WMI, "TXBF not supported\n");
4753 			tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
4754 			return;
4755 		}
4756 		break;
4757 	default:
4758 		ath10k_dbg(ar, ATH10K_DBG_WMI,
4759 			   "invalid table type in wmi tpc event: %d\n", type);
4760 		return;
4761 	}
4762 
4763 	pream_idx = 0;
4764 	for (i = 0; i < tpc_stats->rate_max; i++) {
4765 		memset(tpc_value, 0, sizeof(tpc_value));
4766 		memset(buff, 0, sizeof(buff));
4767 		if (i == pream_table[pream_idx])
4768 			pream_idx++;
4769 
4770 		for (j = 0; j < tpc_stats->num_tx_chain; j++) {
4771 			tpc[j] = ath10k_tpc_config_get_rate(ar, ev, i, j + 1,
4772 							    rate_code[i],
4773 							    type);
4774 			snprintf(buff, sizeof(buff), "%8d ", tpc[j]);
4775 			strlcat(tpc_value, buff, sizeof(tpc_value));
4776 		}
4777 		tpc_stats->tpc_table[type].pream_idx[i] = pream_idx;
4778 		tpc_stats->tpc_table[type].rate_code[i] = rate_code[i];
4779 		memcpy(tpc_stats->tpc_table[type].tpc_value[i],
4780 		       tpc_value, sizeof(tpc_value));
4781 	}
4782 }
4783 
4784 void ath10k_wmi_tpc_config_get_rate_code(u8 *rate_code, u16 *pream_table,
4785 					 u32 num_tx_chain)
4786 {
4787 	u32 i, j, pream_idx;
4788 	u8 rate_idx;
4789 
4790 	/* Create the rate code table based on the chains supported */
4791 	rate_idx = 0;
4792 	pream_idx = 0;
4793 
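	/* The table is filled as consecutive blocks (CCK, OFDM, then
	 * per-chain HT20, HT40, VHT20, VHT40 and VHT80); after each block
	 * pream_table[] records the index at which the next block begins.
	 */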
4794 	/* Fill CCK rate code */
4795 	for (i = 0; i < 4; i++) {
4796 		rate_code[rate_idx] =
4797 			ATH10K_HW_RATECODE(i, 0, WMI_RATE_PREAMBLE_CCK);
4798 		rate_idx++;
4799 	}
4800 	pream_table[pream_idx] = rate_idx;
4801 	pream_idx++;
4802 
4803 	/* Fill OFDM rate code */
4804 	for (i = 0; i < 8; i++) {
4805 		rate_code[rate_idx] =
4806 			ATH10K_HW_RATECODE(i, 0, WMI_RATE_PREAMBLE_OFDM);
4807 		rate_idx++;
4808 	}
4809 	pream_table[pream_idx] = rate_idx;
4810 	pream_idx++;
4811 
4812 	/* Fill HT20 rate code */
4813 	for (i = 0; i < num_tx_chain; i++) {
4814 		for (j = 0; j < 8; j++) {
4815 			rate_code[rate_idx] =
4816 			ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_HT);
4817 			rate_idx++;
4818 		}
4819 	}
4820 	pream_table[pream_idx] = rate_idx;
4821 	pream_idx++;
4822 
4823 	/* Fill HT40 rate code */
4824 	for (i = 0; i < num_tx_chain; i++) {
4825 		for (j = 0; j < 8; j++) {
4826 			rate_code[rate_idx] =
4827 			ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_HT);
4828 			rate_idx++;
4829 		}
4830 	}
4831 	pream_table[pream_idx] = rate_idx;
4832 	pream_idx++;
4833 
4834 	/* Fill VHT20 rate code */
4835 	for (i = 0; i < num_tx_chain; i++) {
4836 		for (j = 0; j < 10; j++) {
4837 			rate_code[rate_idx] =
4838 			ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT);
4839 			rate_idx++;
4840 		}
4841 	}
4842 	pream_table[pream_idx] = rate_idx;
4843 	pream_idx++;
4844 
4845 	/* Fill VHT40 rate code */
4846 	for (i = 0; i < num_tx_chain; i++) {
4847 		for (j = 0; j < 10; j++) {
4848 			rate_code[rate_idx] =
4849 			ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT);
4850 			rate_idx++;
4851 		}
4852 	}
4853 	pream_table[pream_idx] = rate_idx;
4854 	pream_idx++;
4855 
4856 	/* Fill VHT80 rate code */
4857 	for (i = 0; i < num_tx_chain; i++) {
4858 		for (j = 0; j < 10; j++) {
4859 			rate_code[rate_idx] =
4860 			ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT);
4861 			rate_idx++;
4862 		}
4863 	}
4864 	pream_table[pream_idx] = rate_idx;
4865 	pream_idx++;
4866 
4867 	rate_code[rate_idx++] =
4868 		ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_CCK);
4869 	rate_code[rate_idx++] =
4870 		ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM);
4871 	rate_code[rate_idx++] =
4872 		ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_CCK);
4873 	rate_code[rate_idx++] =
4874 		ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM);
4875 	rate_code[rate_idx++] =
4876 		ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM);
4877 
4878 	pream_table[pream_idx] = ATH10K_TPC_PREAM_TABLE_END;
4879 }
4880 
4881 void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
4882 {
4883 	u32 num_tx_chain, rate_max;
4884 	u8 rate_code[WMI_TPC_RATE_MAX];
4885 	u16 pream_table[WMI_TPC_PREAM_TABLE_MAX];
4886 	struct wmi_pdev_tpc_config_event *ev;
4887 	struct ath10k_tpc_stats *tpc_stats;
4888 
4889 	ev = (struct wmi_pdev_tpc_config_event *)skb->data;
4890 
4891 	num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
4892 
4893 	if (num_tx_chain > WMI_TPC_TX_N_CHAIN) {
4894 		ath10k_warn(ar, "number of tx chains %d is greater than the TPC configured tx chains %d\n",
4895 			    num_tx_chain, WMI_TPC_TX_N_CHAIN);
4896 		return;
4897 	}
4898 
4899 	rate_max = __le32_to_cpu(ev->rate_max);
4900 	if (rate_max > WMI_TPC_RATE_MAX) {
4901 		ath10k_warn(ar, "number of rates %d is greater than the TPC configured rate limit %d\n",
4902 			    rate_max, WMI_TPC_RATE_MAX);
4903 		rate_max = WMI_TPC_RATE_MAX;
4904 	}
4905 
4906 	tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC);
4907 	if (!tpc_stats)
4908 		return;
4909 
4910 	ath10k_wmi_tpc_config_get_rate_code(rate_code, pream_table,
4911 					    num_tx_chain);
4912 
4913 	tpc_stats->chan_freq = __le32_to_cpu(ev->chan_freq);
4914 	tpc_stats->phy_mode = __le32_to_cpu(ev->phy_mode);
4915 	tpc_stats->ctl = __le32_to_cpu(ev->ctl);
4916 	tpc_stats->reg_domain = __le32_to_cpu(ev->reg_domain);
4917 	tpc_stats->twice_antenna_gain = a_sle32_to_cpu(ev->twice_antenna_gain);
4918 	tpc_stats->twice_antenna_reduction =
4919 		__le32_to_cpu(ev->twice_antenna_reduction);
4920 	tpc_stats->power_limit = __le32_to_cpu(ev->power_limit);
4921 	tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power);
4922 	tpc_stats->num_tx_chain = num_tx_chain;
4923 	tpc_stats->rate_max = rate_max;
4924 
4925 	ath10k_tpc_config_disp_tables(ar, ev, tpc_stats,
4926 				      rate_code, pream_table,
4927 				      WMI_TPC_TABLE_TYPE_CDD);
4928 	ath10k_tpc_config_disp_tables(ar, ev,  tpc_stats,
4929 				      rate_code, pream_table,
4930 				      WMI_TPC_TABLE_TYPE_STBC);
4931 	ath10k_tpc_config_disp_tables(ar, ev, tpc_stats,
4932 				      rate_code, pream_table,
4933 				      WMI_TPC_TABLE_TYPE_TXBF);
4934 
4935 	ath10k_debug_tpc_stats_process(ar, tpc_stats);
4936 
4937 	ath10k_dbg(ar, ATH10K_DBG_WMI,
4938 		   "wmi event tpc config channel %d mode %d ctl %d regd %d gain %d %d limit %d max_power %d tx_chains %d rates %d\n",
4939 		   __le32_to_cpu(ev->chan_freq),
4940 		   __le32_to_cpu(ev->phy_mode),
4941 		   __le32_to_cpu(ev->ctl),
4942 		   __le32_to_cpu(ev->reg_domain),
4943 		   a_sle32_to_cpu(ev->twice_antenna_gain),
4944 		   __le32_to_cpu(ev->twice_antenna_reduction),
4945 		   __le32_to_cpu(ev->power_limit),
4946 		   __le32_to_cpu(ev->twice_max_rd_power) / 2,
4947 		   __le32_to_cpu(ev->num_tx_chain),
4948 		   __le32_to_cpu(ev->rate_max));
4949 }
4950 
4951 static u8
4952 ath10k_wmi_tpc_final_get_rate(struct ath10k *ar,
4953 			      struct wmi_pdev_tpc_final_table_event *ev,
4954 			      u32 rate_idx, u32 num_chains,
4955 			      u32 rate_code, u8 type, u32 pream_idx)
4956 {
4957 	u8 tpc, num_streams, preamble, ch, stm_idx;
4958 	s8 pow_agcdd, pow_agstbc, pow_agtxbf;
4959 	int pream;
4960 
4961 	num_streams = ATH10K_HW_NSS(rate_code);
4962 	preamble = ATH10K_HW_PREAMBLE(rate_code);
4963 	ch = num_chains - 1;
4964 	stm_idx = num_streams - 1;
4965 	pream = -1;
4966 
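	/* Map the preamble index onto a row of the CTL power table used
	 * below; the mapping differs between the 2.4 GHz and 5 GHz bands.
	 */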
4967 	if (__le32_to_cpu(ev->chan_freq) <= 2483) {
4968 		switch (pream_idx) {
4969 		case WMI_TPC_PREAM_2GHZ_CCK:
4970 			pream = 0;
4971 			break;
4972 		case WMI_TPC_PREAM_2GHZ_OFDM:
4973 			pream = 1;
4974 			break;
4975 		case WMI_TPC_PREAM_2GHZ_HT20:
4976 		case WMI_TPC_PREAM_2GHZ_VHT20:
4977 			pream = 2;
4978 			break;
4979 		case WMI_TPC_PREAM_2GHZ_HT40:
4980 		case WMI_TPC_PREAM_2GHZ_VHT40:
4981 			pream = 3;
4982 			break;
4983 		case WMI_TPC_PREAM_2GHZ_VHT80:
4984 			pream = 4;
4985 			break;
4986 		default:
4987 			pream = -1;
4988 			break;
4989 		}
4990 	}
4991 
4992 	if (__le32_to_cpu(ev->chan_freq) >= 5180) {
4993 		switch (pream_idx) {
4994 		case WMI_TPC_PREAM_5GHZ_OFDM:
4995 			pream = 0;
4996 			break;
4997 		case WMI_TPC_PREAM_5GHZ_HT20:
4998 		case WMI_TPC_PREAM_5GHZ_VHT20:
4999 			pream = 1;
5000 			break;
5001 		case WMI_TPC_PREAM_5GHZ_HT40:
5002 		case WMI_TPC_PREAM_5GHZ_VHT40:
5003 			pream = 2;
5004 			break;
5005 		case WMI_TPC_PREAM_5GHZ_VHT80:
5006 			pream = 3;
5007 			break;
5008 		case WMI_TPC_PREAM_5GHZ_HTCUP:
5009 			pream = 4;
5010 			break;
5011 		default:
5012 			pream = -1;
5013 			break;
5014 		}
5015 	}
5016 
5017 	if (pream == -1) {
5018 		ath10k_warn(ar, "unknown wmi tpc final index and frequency: %u, %u\n",
5019 			    pream_idx, __le32_to_cpu(ev->chan_freq));
5020 		tpc = 0;
5021 		goto out;
5022 	}
5023 
5024 	if (pream == 4)
5025 		tpc = min_t(u8, ev->rates_array[rate_idx],
5026 			    ev->max_reg_allow_pow[ch]);
5027 	else
5028 		tpc = min_t(u8, min_t(u8, ev->rates_array[rate_idx],
5029 				      ev->max_reg_allow_pow[ch]),
5030 			    ev->ctl_power_table[0][pream][stm_idx]);
5031 
5032 	if (__le32_to_cpu(ev->num_tx_chain) <= 1)
5033 		goto out;
5034 
5035 	if (preamble == WMI_RATE_PREAMBLE_CCK)
5036 		goto out;
5037 
5038 	if (num_chains <= num_streams)
5039 		goto out;
5040 
5041 	switch (type) {
5042 	case WMI_TPC_TABLE_TYPE_STBC:
5043 		pow_agstbc = ev->max_reg_allow_pow_agstbc[ch - 1][stm_idx];
5044 		if (pream == 4)
5045 			tpc = min_t(u8, tpc, pow_agstbc);
5046 		else
5047 			tpc = min_t(u8, min_t(u8, tpc, pow_agstbc),
5048 				    ev->ctl_power_table[0][pream][stm_idx]);
5049 		break;
5050 	case WMI_TPC_TABLE_TYPE_TXBF:
5051 		pow_agtxbf = ev->max_reg_allow_pow_agtxbf[ch - 1][stm_idx];
5052 		if (pream == 4)
5053 			tpc = min_t(u8, tpc, pow_agtxbf);
5054 		else
5055 			tpc = min_t(u8, min_t(u8, tpc, pow_agtxbf),
5056 				    ev->ctl_power_table[1][pream][stm_idx]);
5057 		break;
5058 	case WMI_TPC_TABLE_TYPE_CDD:
5059 		pow_agcdd = ev->max_reg_allow_pow_agcdd[ch - 1][stm_idx];
5060 		if (pream == 4)
5061 			tpc = min_t(u8, tpc, pow_agcdd);
5062 		else
5063 			tpc = min_t(u8, min_t(u8, tpc, pow_agcdd),
5064 				    ev->ctl_power_table[0][pream][stm_idx]);
5065 		break;
5066 	default:
5067 		ath10k_warn(ar, "unknown wmi tpc final table type: %d\n", type);
5068 		tpc = 0;
5069 		break;
5070 	}
5071 
5072 out:
5073 	return tpc;
5074 }
5075 
5076 static void
5077 ath10k_wmi_tpc_stats_final_disp_tables(struct ath10k *ar,
5078 				       struct wmi_pdev_tpc_final_table_event *ev,
5079 				       struct ath10k_tpc_stats_final *tpc_stats,
5080 				       u8 *rate_code, u16 *pream_table, u8 type)
5081 {
5082 	u32 i, j, pream_idx, flags;
5083 	u8 tpc[WMI_TPC_TX_N_CHAIN];
5084 	char tpc_value[WMI_TPC_TX_N_CHAIN * WMI_TPC_BUF_SIZE];
5085 	char buff[WMI_TPC_BUF_SIZE];
5086 
5087 	flags = __le32_to_cpu(ev->flags);
5088 
5089 	switch (type) {
5090 	case WMI_TPC_TABLE_TYPE_CDD:
5091 		if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_CDD)) {
5092 			ath10k_dbg(ar, ATH10K_DBG_WMI, "CDD not supported\n");
5093 			tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
5094 			return;
5095 		}
5096 		break;
5097 	case WMI_TPC_TABLE_TYPE_STBC:
5098 		if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_STBC)) {
5099 			ath10k_dbg(ar, ATH10K_DBG_WMI, "STBC not supported\n");
5100 			tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
5101 			return;
5102 		}
5103 		break;
5104 	case WMI_TPC_TABLE_TYPE_TXBF:
5105 		if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_TXBF)) {
5106 			ath10k_dbg(ar, ATH10K_DBG_WMI, "TXBF not supported\n");
5107 			tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
5108 			return;
5109 		}
5110 		break;
5111 	default:
5112 		ath10k_dbg(ar, ATH10K_DBG_WMI,
5113 			   "invalid table type in wmi tpc event: %d\n", type);
5114 		return;
5115 	}
5116 
5117 	pream_idx = 0;
5118 	for (i = 0; i < tpc_stats->rate_max; i++) {
5119 		memset(tpc_value, 0, sizeof(tpc_value));
5120 		memset(buff, 0, sizeof(buff));
5121 		if (i == pream_table[pream_idx])
5122 			pream_idx++;
5123 
5124 		for (j = 0; j < tpc_stats->num_tx_chain; j++) {
5125 			tpc[j] = ath10k_wmi_tpc_final_get_rate(ar, ev, i, j + 1,
5126 							       rate_code[i],
5127 							       type, pream_idx);
5128 			snprintf(buff, sizeof(buff), "%8d ", tpc[j]);
5129 			strlcat(tpc_value, buff, sizeof(tpc_value));
5130 		}
5131 		tpc_stats->tpc_table_final[type].pream_idx[i] = pream_idx;
5132 		tpc_stats->tpc_table_final[type].rate_code[i] = rate_code[i];
5133 		memcpy(tpc_stats->tpc_table_final[type].tpc_value[i],
5134 		       tpc_value, sizeof(tpc_value));
5135 	}
5136 }
5137 
5138 void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb)
5139 {
5140 	u32 num_tx_chain, rate_max;
5141 	u8 rate_code[WMI_TPC_FINAL_RATE_MAX];
5142 	u16 pream_table[WMI_TPC_PREAM_TABLE_MAX];
5143 	struct wmi_pdev_tpc_final_table_event *ev;
5144 	struct ath10k_tpc_stats_final *tpc_stats;
5145 
5146 	ev = (struct wmi_pdev_tpc_final_table_event *)skb->data;
5147 
5148 	num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
5149 	if (num_tx_chain > WMI_TPC_TX_N_CHAIN) {
5150 		ath10k_warn(ar, "number of tx chains %d is greater than the TPC final configured tx chains %d\n",
5151 			    num_tx_chain, WMI_TPC_TX_N_CHAIN);
5152 		return;
5153 	}
5154 
5155 	rate_max = __le32_to_cpu(ev->rate_max);
5156 	if (rate_max > WMI_TPC_FINAL_RATE_MAX) {
5157 		ath10k_warn(ar, "number of rates %d is greater than the TPC final configured rate limit %d\n",
5158 			    rate_max, WMI_TPC_FINAL_RATE_MAX);
5159 		rate_max = WMI_TPC_FINAL_RATE_MAX;
5160 	}
5161 
5162 	tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC);
5163 	if (!tpc_stats)
5164 		return;
5165 
5166 	ath10k_wmi_tpc_config_get_rate_code(rate_code, pream_table,
5167 					    num_tx_chain);
5168 
5169 	tpc_stats->chan_freq = __le32_to_cpu(ev->chan_freq);
5170 	tpc_stats->phy_mode = __le32_to_cpu(ev->phy_mode);
5171 	tpc_stats->ctl = __le32_to_cpu(ev->ctl);
5172 	tpc_stats->reg_domain = __le32_to_cpu(ev->reg_domain);
5173 	tpc_stats->twice_antenna_gain = a_sle32_to_cpu(ev->twice_antenna_gain);
5174 	tpc_stats->twice_antenna_reduction =
5175 		__le32_to_cpu(ev->twice_antenna_reduction);
5176 	tpc_stats->power_limit = __le32_to_cpu(ev->power_limit);
5177 	tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power);
5178 	tpc_stats->num_tx_chain = num_tx_chain;
5179 	tpc_stats->rate_max = rate_max;
5180 
5181 	ath10k_wmi_tpc_stats_final_disp_tables(ar, ev, tpc_stats,
5182 					       rate_code, pream_table,
5183 					       WMI_TPC_TABLE_TYPE_CDD);
5184 	ath10k_wmi_tpc_stats_final_disp_tables(ar, ev,  tpc_stats,
5185 					       rate_code, pream_table,
5186 					       WMI_TPC_TABLE_TYPE_STBC);
5187 	ath10k_wmi_tpc_stats_final_disp_tables(ar, ev, tpc_stats,
5188 					       rate_code, pream_table,
5189 					       WMI_TPC_TABLE_TYPE_TXBF);
5190 
5191 	ath10k_debug_tpc_stats_final_process(ar, tpc_stats);
5192 
5193 	ath10k_dbg(ar, ATH10K_DBG_WMI,
5194 		   "wmi event tpc final table channel %d mode %d ctl %d regd %d gain %d %d limit %d max_power %d tx_chains %d rates %d\n",
5195 		   __le32_to_cpu(ev->chan_freq),
5196 		   __le32_to_cpu(ev->phy_mode),
5197 		   __le32_to_cpu(ev->ctl),
5198 		   __le32_to_cpu(ev->reg_domain),
5199 		   a_sle32_to_cpu(ev->twice_antenna_gain),
5200 		   __le32_to_cpu(ev->twice_antenna_reduction),
5201 		   __le32_to_cpu(ev->power_limit),
5202 		   __le32_to_cpu(ev->twice_max_rd_power) / 2,
5203 		   __le32_to_cpu(ev->num_tx_chain),
5204 		   __le32_to_cpu(ev->rate_max));
5205 }
5206 
5207 static void
5208 ath10k_wmi_handle_tdls_peer_event(struct ath10k *ar, struct sk_buff *skb)
5209 {
5210 	struct wmi_tdls_peer_event *ev;
5211 	struct ath10k_peer *peer;
5212 	struct ath10k_vif *arvif;
5213 	int vdev_id;
5214 	int peer_status;
5215 	int peer_reason;
5216 	u8 reason;
5217 
5218 	if (skb->len < sizeof(*ev)) {
5219 		ath10k_err(ar, "received tdls peer event with invalid size (%d bytes)\n",
5220 			   skb->len);
5221 		return;
5222 	}
5223 
5224 	ev = (struct wmi_tdls_peer_event *)skb->data;
5225 	vdev_id = __le32_to_cpu(ev->vdev_id);
5226 	peer_status = __le32_to_cpu(ev->peer_status);
5227 	peer_reason = __le32_to_cpu(ev->peer_reason);
5228 
5229 	spin_lock_bh(&ar->data_lock);
5230 	peer = ath10k_peer_find(ar, vdev_id, ev->peer_macaddr.addr);
5231 	spin_unlock_bh(&ar->data_lock);
5232 
5233 	if (!peer) {
5234 		ath10k_warn(ar, "failed to find peer entry for %pM\n",
5235 			    ev->peer_macaddr.addr);
5236 		return;
5237 	}
5238 
5239 	switch (peer_status) {
5240 	case WMI_TDLS_SHOULD_TEARDOWN:
5241 		switch (peer_reason) {
5242 		case WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT:
5243 		case WMI_TDLS_TEARDOWN_REASON_NO_RESPONSE:
5244 		case WMI_TDLS_TEARDOWN_REASON_RSSI:
5245 			reason = WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE;
5246 			break;
5247 		default:
5248 			reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED;
5249 			break;
5250 		}
5251 
5252 		arvif = ath10k_get_arvif(ar, vdev_id);
5253 		if (!arvif) {
5254 			ath10k_warn(ar, "received tdls peer event for invalid vdev id %u\n",
5255 				    vdev_id);
5256 			return;
5257 		}
5258 
5259 		ieee80211_tdls_oper_request(arvif->vif, ev->peer_macaddr.addr,
5260 					    NL80211_TDLS_TEARDOWN, reason,
5261 					    GFP_ATOMIC);
5262 
5263 		ath10k_dbg(ar, ATH10K_DBG_WMI,
5264 			   "received tdls teardown event for peer %pM reason %u\n",
5265 			   ev->peer_macaddr.addr, peer_reason);
5266 		break;
5267 	default:
5268 		ath10k_dbg(ar, ATH10K_DBG_WMI,
5269 			   "received unknown tdls peer event %u\n",
5270 			   peer_status);
5271 		break;
5272 	}
5273 }
5274 
5275 static void
5276 ath10k_wmi_event_peer_sta_ps_state_chg(struct ath10k *ar, struct sk_buff *skb)
5277 {
5278 	struct wmi_peer_sta_ps_state_chg_event *ev;
5279 	struct ieee80211_sta *sta;
5280 	struct ath10k_sta *arsta;
5281 	u8 peer_addr[ETH_ALEN];
5282 
5283 	lockdep_assert_held(&ar->data_lock);
5284 
5285 	ev = (struct wmi_peer_sta_ps_state_chg_event *)skb->data;
5286 	ether_addr_copy(peer_addr, ev->peer_macaddr.addr);
5287 
5288 	rcu_read_lock();
5289 
5290 	sta = ieee80211_find_sta_by_ifaddr(ar->hw, peer_addr, NULL);
5291 
5292 	if (!sta) {
5293 		ath10k_warn(ar, "failed to find station entry %pM\n",
5294 			    peer_addr);
5295 		goto exit;
5296 	}
5297 
5298 	arsta = (struct ath10k_sta *)sta->drv_priv;
5299 	arsta->peer_ps_state = __le32_to_cpu(ev->peer_ps_state);
5300 
5301 exit:
5302 	rcu_read_unlock();
5303 }
5304 
5305 void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb)
5306 {
5307 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n");
5308 }
5309 
5310 void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar, struct sk_buff *skb)
5311 {
5312 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n");
5313 }
5314 
5315 void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar, struct sk_buff *skb)
5316 {
5317 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n");
5318 }
5319 
5320 void ath10k_wmi_event_delba_complete(struct ath10k *ar, struct sk_buff *skb)
5321 {
5322 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n");
5323 }
5324 
5325 void ath10k_wmi_event_addba_complete(struct ath10k *ar, struct sk_buff *skb)
5326 {
5327 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n");
5328 }
5329 
5330 void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
5331 						struct sk_buff *skb)
5332 {
5333 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n");
5334 }
5335 
5336 void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar, struct sk_buff *skb)
5337 {
5338 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n");
5339 }
5340 
5341 void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, struct sk_buff *skb)
5342 {
5343 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n");
5344 }
5345 
5346 void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb)
5347 {
5348 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n");
5349 }
5350 
5351 static int ath10k_wmi_alloc_chunk(struct ath10k *ar, u32 req_id,
5352 				  u32 num_units, u32 unit_len)
5353 {
5354 	dma_addr_t paddr;
5355 	u32 pool_size;
5356 	int idx = ar->wmi.num_mem_chunks;
5357 	void *vaddr;
5358 
5359 	pool_size = num_units * round_up(unit_len, 4);
5360 	vaddr = dma_alloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL);
5361 
5362 	if (!vaddr)
5363 		return -ENOMEM;
5364 
5365 	ar->wmi.mem_chunks[idx].vaddr = vaddr;
5366 	ar->wmi.mem_chunks[idx].paddr = paddr;
5367 	ar->wmi.mem_chunks[idx].len = pool_size;
5368 	ar->wmi.mem_chunks[idx].req_id = req_id;
5369 	ar->wmi.num_mem_chunks++;
5370 
5371 	return num_units;
5372 }
5373 
5374 static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
5375 				     u32 num_units, u32 unit_len)
5376 {
5377 	int ret;
5378 
5379 	while (num_units) {
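	/* Allocate coherent memory chunks until the requested number of
	 * units has been covered; each successful call reports how many
	 * units its chunk provides.
	 */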
5380 		ret = ath10k_wmi_alloc_chunk(ar, req_id, num_units, unit_len);
5381 		if (ret < 0)
5382 			return ret;
5383 
5384 		num_units -= ret;
5385 	}
5386 
5387 	return 0;
5388 }
5389 
5390 static bool
5391 ath10k_wmi_is_host_mem_allocated(struct ath10k *ar,
5392 				 const struct wlan_host_mem_req **mem_reqs,
5393 				 u32 num_mem_reqs)
5394 {
5395 	u32 req_id, num_units, unit_size, num_unit_info;
5396 	u32 pool_size;
5397 	int i, j;
5398 	bool found;
5399 
5400 	if (ar->wmi.num_mem_chunks != num_mem_reqs)
5401 		return false;
5402 
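	/* Recompute the expected pool size for every firmware request and
	 * make sure an already-allocated chunk with a matching request id
	 * and size exists.
	 */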
5403 	for (i = 0; i < num_mem_reqs; ++i) {
5404 		req_id = __le32_to_cpu(mem_reqs[i]->req_id);
5405 		num_units = __le32_to_cpu(mem_reqs[i]->num_units);
5406 		unit_size = __le32_to_cpu(mem_reqs[i]->unit_size);
5407 		num_unit_info = __le32_to_cpu(mem_reqs[i]->num_unit_info);
5408 
5409 		if (num_unit_info & NUM_UNITS_IS_NUM_ACTIVE_PEERS) {
5410 			if (ar->num_active_peers)
5411 				num_units = ar->num_active_peers + 1;
5412 			else
5413 				num_units = ar->max_num_peers + 1;
5414 		} else if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) {
5415 			num_units = ar->max_num_peers + 1;
5416 		} else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) {
5417 			num_units = ar->max_num_vdevs + 1;
5418 		}
5419 
5420 		found = false;
5421 		for (j = 0; j < ar->wmi.num_mem_chunks; j++) {
5422 			if (ar->wmi.mem_chunks[j].req_id == req_id) {
5423 				pool_size = num_units * round_up(unit_size, 4);
5424 				if (ar->wmi.mem_chunks[j].len == pool_size) {
5425 					found = true;
5426 					break;
5427 				}
5428 			}
5429 		}
5430 		if (!found)
5431 			return false;
5432 	}
5433 
5434 	return true;
5435 }
5436 
5437 static int
5438 ath10k_wmi_main_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
5439 				   struct wmi_svc_rdy_ev_arg *arg)
5440 {
5441 	struct wmi_service_ready_event *ev;
5442 	size_t i, n;
5443 
5444 	if (skb->len < sizeof(*ev))
5445 		return -EPROTO;
5446 
5447 	ev = (void *)skb->data;
5448 	skb_pull(skb, sizeof(*ev));
5449 	arg->min_tx_power = ev->hw_min_tx_power;
5450 	arg->max_tx_power = ev->hw_max_tx_power;
5451 	arg->ht_cap = ev->ht_cap_info;
5452 	arg->vht_cap = ev->vht_cap_info;
5453 	arg->vht_supp_mcs = ev->vht_supp_mcs;
5454 	arg->sw_ver0 = ev->sw_version;
5455 	arg->sw_ver1 = ev->sw_version_1;
5456 	arg->phy_capab = ev->phy_capability;
5457 	arg->num_rf_chains = ev->num_rf_chains;
5458 	arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
5459 	arg->low_2ghz_chan = ev->hal_reg_capabilities.low_2ghz_chan;
5460 	arg->high_2ghz_chan = ev->hal_reg_capabilities.high_2ghz_chan;
5461 	arg->low_5ghz_chan = ev->hal_reg_capabilities.low_5ghz_chan;
5462 	arg->high_5ghz_chan = ev->hal_reg_capabilities.high_5ghz_chan;
5463 	arg->num_mem_reqs = ev->num_mem_reqs;
5464 	arg->service_map = ev->wmi_service_bitmap;
5465 	arg->service_map_len = sizeof(ev->wmi_service_bitmap);
5466 
5467 	n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
5468 		  ARRAY_SIZE(arg->mem_reqs));
5469 	for (i = 0; i < n; i++)
5470 		arg->mem_reqs[i] = &ev->mem_reqs[i];
5471 
5472 	if (skb->len <
5473 	    __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0]))
5474 		return -EPROTO;
5475 
5476 	return 0;
5477 }
5478 
5479 static int
5480 ath10k_wmi_10x_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
5481 				  struct wmi_svc_rdy_ev_arg *arg)
5482 {
5483 	struct wmi_10x_service_ready_event *ev;
5484 	int i, n;
5485 
5486 	if (skb->len < sizeof(*ev))
5487 		return -EPROTO;
5488 
5489 	ev = (void *)skb->data;
5490 	skb_pull(skb, sizeof(*ev));
5491 	arg->min_tx_power = ev->hw_min_tx_power;
5492 	arg->max_tx_power = ev->hw_max_tx_power;
5493 	arg->ht_cap = ev->ht_cap_info;
5494 	arg->vht_cap = ev->vht_cap_info;
5495 	arg->vht_supp_mcs = ev->vht_supp_mcs;
5496 	arg->sw_ver0 = ev->sw_version;
5497 	arg->phy_capab = ev->phy_capability;
5498 	arg->num_rf_chains = ev->num_rf_chains;
5499 	arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
5500 	arg->low_2ghz_chan = ev->hal_reg_capabilities.low_2ghz_chan;
5501 	arg->high_2ghz_chan = ev->hal_reg_capabilities.high_2ghz_chan;
5502 	arg->low_5ghz_chan = ev->hal_reg_capabilities.low_5ghz_chan;
5503 	arg->high_5ghz_chan = ev->hal_reg_capabilities.high_5ghz_chan;
5504 	arg->num_mem_reqs = ev->num_mem_reqs;
5505 	arg->service_map = ev->wmi_service_bitmap;
5506 	arg->service_map_len = sizeof(ev->wmi_service_bitmap);
5507 
5508 	/* Deliberately skipping ev->sys_cap_info as WMI and WMI-TLV have
5509 	 * different values. We would need a translation to handle that,
5510 	 * but as we don't currently need anything from sys_cap_info from
5511 	 * WMI interface (only from WMI-TLV) safest it to skip it.
5512 	 * the WMI interface (only from WMI-TLV), it is safest to skip it.
5513 
5514 	n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
5515 		  ARRAY_SIZE(arg->mem_reqs));
5516 	for (i = 0; i < n; i++)
5517 		arg->mem_reqs[i] = &ev->mem_reqs[i];
5518 
5519 	if (skb->len <
5520 	    __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0]))
5521 		return -EPROTO;
5522 
5523 	return 0;
5524 }
5525 
5526 static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
5527 {
5528 	struct ath10k *ar = container_of(work, struct ath10k, svc_rdy_work);
5529 	struct sk_buff *skb = ar->svc_rdy_skb;
5530 	struct wmi_svc_rdy_ev_arg arg = {};
5531 	u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
5532 	int ret;
5533 	bool allocated;
5534 
5535 	if (!skb) {
5536 		ath10k_warn(ar, "invalid service ready event skb\n");
5537 		return;
5538 	}
5539 
5540 	ret = ath10k_wmi_pull_svc_rdy(ar, skb, &arg);
5541 	if (ret) {
5542 		ath10k_warn(ar, "failed to parse service ready: %d\n", ret);
5543 		return;
5544 	}
5545 
5546 	ath10k_wmi_map_svc(ar, arg.service_map, ar->wmi.svc_map,
5547 			   arg.service_map_len);
5548 
5549 	ar->hw_min_tx_power = __le32_to_cpu(arg.min_tx_power);
5550 	ar->hw_max_tx_power = __le32_to_cpu(arg.max_tx_power);
5551 	ar->ht_cap_info = __le32_to_cpu(arg.ht_cap);
5552 	ar->vht_cap_info = __le32_to_cpu(arg.vht_cap);
5553 	ar->vht_supp_mcs = __le32_to_cpu(arg.vht_supp_mcs);
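	/* The firmware version is packed into two 32-bit words: sw_ver0
	 * carries major (top 8 bits) and minor (low 24 bits), sw_ver1
	 * carries release (top 16 bits) and build (low 16 bits).
	 */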
5554 	ar->fw_version_major =
5555 		(__le32_to_cpu(arg.sw_ver0) & 0xff000000) >> 24;
5556 	ar->fw_version_minor = (__le32_to_cpu(arg.sw_ver0) & 0x00ffffff);
5557 	ar->fw_version_release =
5558 		(__le32_to_cpu(arg.sw_ver1) & 0xffff0000) >> 16;
5559 	ar->fw_version_build = (__le32_to_cpu(arg.sw_ver1) & 0x0000ffff);
5560 	ar->phy_capability = __le32_to_cpu(arg.phy_capab);
5561 	ar->num_rf_chains = __le32_to_cpu(arg.num_rf_chains);
5562 	ar->hw_eeprom_rd = __le32_to_cpu(arg.eeprom_rd);
5563 	ar->low_2ghz_chan = __le32_to_cpu(arg.low_2ghz_chan);
5564 	ar->high_2ghz_chan = __le32_to_cpu(arg.high_2ghz_chan);
5565 	ar->low_5ghz_chan = __le32_to_cpu(arg.low_5ghz_chan);
5566 	ar->high_5ghz_chan = __le32_to_cpu(arg.high_5ghz_chan);
5567 	ar->sys_cap_info = __le32_to_cpu(arg.sys_cap_info);
5568 
5569 	ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
5570 			arg.service_map, arg.service_map_len);
5571 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi sys_cap_info 0x%x\n",
5572 		   ar->sys_cap_info);
5573 
5574 	if (ar->num_rf_chains > ar->max_spatial_stream) {
5575 		ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n",
5576 			    ar->num_rf_chains, ar->max_spatial_stream);
5577 		ar->num_rf_chains = ar->max_spatial_stream;
5578 	}
5579 
5580 	if (!ar->cfg_tx_chainmask) {
5581 		ar->cfg_tx_chainmask = (1 << ar->num_rf_chains) - 1;
5582 		ar->cfg_rx_chainmask = (1 << ar->num_rf_chains) - 1;
5583 	}
5584 
5585 	if (strlen(ar->hw->wiphy->fw_version) == 0) {
5586 		snprintf(ar->hw->wiphy->fw_version,
5587 			 sizeof(ar->hw->wiphy->fw_version),
5588 			 "%u.%u.%u.%u",
5589 			 ar->fw_version_major,
5590 			 ar->fw_version_minor,
5591 			 ar->fw_version_release,
5592 			 ar->fw_version_build);
5593 	}
5594 
5595 	num_mem_reqs = __le32_to_cpu(arg.num_mem_reqs);
5596 	if (num_mem_reqs > WMI_MAX_MEM_REQS) {
5597 		ath10k_warn(ar, "requested memory chunks number (%d) exceeds the limit\n",
5598 			    num_mem_reqs);
5599 		return;
5600 	}
5601 
5602 	if (test_bit(WMI_SERVICE_PEER_CACHING, ar->wmi.svc_map)) {
5603 		if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
5604 			     ar->running_fw->fw_file.fw_features))
5605 			ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS_PFC +
5606 					       ar->max_num_vdevs;
5607 		else
5608 			ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS +
5609 					       ar->max_num_vdevs;
5610 
5611 		ar->max_num_peers = TARGET_10_4_NUM_QCACHE_PEERS_MAX +
5612 				    ar->max_num_vdevs;
5613 		ar->num_tids = ar->num_active_peers * 2;
5614 		ar->max_num_stations = TARGET_10_4_NUM_QCACHE_PEERS_MAX;
5615 	}
5616 
5617 	/* TODO: Adjust max peer count for cases like WMI_SERVICE_RATECTRL_CACHE
5618 	 * and WMI_SERVICE_IRAM_TIDS, etc.
5619 	 */
5620 
5621 	allocated = ath10k_wmi_is_host_mem_allocated(ar, arg.mem_reqs,
5622 						     num_mem_reqs);
5623 	if (allocated)
5624 		goto skip_mem_alloc;
5625 
5626 	/* Either this event is received during boot time or there is a change
5627 	 * in memory requirement from firmware when compared to last request.
5628 	 * Free any old memory and do a fresh allocation based on the current
5629 	 * memory requirement.
5630 	 */
5631 	ath10k_wmi_free_host_mem(ar);
5632 
5633 	for (i = 0; i < num_mem_reqs; ++i) {
5634 		req_id = __le32_to_cpu(arg.mem_reqs[i]->req_id);
5635 		num_units = __le32_to_cpu(arg.mem_reqs[i]->num_units);
5636 		unit_size = __le32_to_cpu(arg.mem_reqs[i]->unit_size);
5637 		num_unit_info = __le32_to_cpu(arg.mem_reqs[i]->num_unit_info);
5638 
5639 		if (num_unit_info & NUM_UNITS_IS_NUM_ACTIVE_PEERS) {
5640 			if (ar->num_active_peers)
5641 				num_units = ar->num_active_peers + 1;
5642 			else
5643 				num_units = ar->max_num_peers + 1;
5644 		} else if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) {
5645 			/* Number of units to allocate is the number of
5646 			 * peers plus 1 extra for the self peer on the
5647 			 * target. This needs to stay in sync, otherwise
5648 			 * host and target can diverge.
5649 			 */
5650 			num_units = ar->max_num_peers + 1;
5651 		} else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) {
5652 			num_units = ar->max_num_vdevs + 1;
5653 		}
5654 
5655 		ath10k_dbg(ar, ATH10K_DBG_WMI,
5656 			   "wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n",
5657 			   req_id,
5658 			   __le32_to_cpu(arg.mem_reqs[i]->num_units),
5659 			   num_unit_info,
5660 			   unit_size,
5661 			   num_units);
5662 
5663 		ret = ath10k_wmi_alloc_host_mem(ar, req_id, num_units,
5664 						unit_size);
5665 		if (ret)
5666 			return;
5667 	}
5668 
5669 skip_mem_alloc:
5670 	ath10k_dbg(ar, ATH10K_DBG_WMI,
5671 		   "wmi event service ready min_tx_power 0x%08x max_tx_power 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_mcs 0x%08x sw_ver0 0x%08x sw_ver1 0x%08x fw_build 0x%08x phy_capab 0x%08x num_rf_chains 0x%08x eeprom_rd 0x%08x low_2ghz_chan %d high_2ghz_chan %d low_5ghz_chan %d high_5ghz_chan %d num_mem_reqs 0x%08x\n",
5672 		   __le32_to_cpu(arg.min_tx_power),
5673 		   __le32_to_cpu(arg.max_tx_power),
5674 		   __le32_to_cpu(arg.ht_cap),
5675 		   __le32_to_cpu(arg.vht_cap),
5676 		   __le32_to_cpu(arg.vht_supp_mcs),
5677 		   __le32_to_cpu(arg.sw_ver0),
5678 		   __le32_to_cpu(arg.sw_ver1),
5679 		   __le32_to_cpu(arg.fw_build),
5680 		   __le32_to_cpu(arg.phy_capab),
5681 		   __le32_to_cpu(arg.num_rf_chains),
5682 		   __le32_to_cpu(arg.eeprom_rd),
5683 		   __le32_to_cpu(arg.low_2ghz_chan),
5684 		   __le32_to_cpu(arg.high_2ghz_chan),
5685 		   __le32_to_cpu(arg.low_5ghz_chan),
5686 		   __le32_to_cpu(arg.high_5ghz_chan),
5687 		   __le32_to_cpu(arg.num_mem_reqs));
5688 
5689 	dev_kfree_skb(skb);
5690 	ar->svc_rdy_skb = NULL;
5691 	complete(&ar->wmi.service_ready);
5692 }
5693 
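/* SERVICE_READY processing is deferred to the auxiliary workqueue; the
 * worker above consumes and frees the skb once parsing is done.
 */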
5694 void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
5695 {
5696 	ar->svc_rdy_skb = skb;
5697 	queue_work(ar->workqueue_aux, &ar->svc_rdy_work);
5698 }
5699 
5700 static int ath10k_wmi_op_pull_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
5701 				     struct wmi_rdy_ev_arg *arg)
5702 {
5703 	struct wmi_ready_event *ev = (void *)skb->data;
5704 
5705 	if (skb->len < sizeof(*ev))
5706 		return -EPROTO;
5707 
5708 	skb_pull(skb, sizeof(*ev));
5709 	arg->sw_version = ev->sw_version;
5710 	arg->abi_version = ev->abi_version;
5711 	arg->status = ev->status;
5712 	arg->mac_addr = ev->mac_addr.addr;
5713 
5714 	return 0;
5715 }
5716 
5717 static int ath10k_wmi_op_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
5718 				      struct wmi_roam_ev_arg *arg)
5719 {
5720 	struct wmi_roam_ev *ev = (void *)skb->data;
5721 
5722 	if (skb->len < sizeof(*ev))
5723 		return -EPROTO;
5724 
5725 	skb_pull(skb, sizeof(*ev));
5726 	arg->vdev_id = ev->vdev_id;
5727 	arg->reason = ev->reason;
5728 
5729 	return 0;
5730 }
5731 
5732 static int ath10k_wmi_op_pull_echo_ev(struct ath10k *ar,
5733 				      struct sk_buff *skb,
5734 				      struct wmi_echo_ev_arg *arg)
5735 {
5736 	struct wmi_echo_event *ev = (void *)skb->data;
5737 
5738 	arg->value = ev->value;
5739 
5740 	return 0;
5741 }
5742 
5743 int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
5744 {
5745 	struct wmi_rdy_ev_arg arg = {};
5746 	int ret;
5747 
5748 	ret = ath10k_wmi_pull_rdy(ar, skb, &arg);
5749 	if (ret) {
5750 		ath10k_warn(ar, "failed to parse ready event: %d\n", ret);
5751 		return ret;
5752 	}
5753 
5754 	ath10k_dbg(ar, ATH10K_DBG_WMI,
5755 		   "wmi event ready sw_version 0x%08x abi_version %u mac_addr %pM status %d\n",
5756 		   __le32_to_cpu(arg.sw_version),
5757 		   __le32_to_cpu(arg.abi_version),
5758 		   arg.mac_addr,
5759 		   __le32_to_cpu(arg.status));
5760 
5761 	if (is_zero_ether_addr(ar->mac_addr))
5762 		ether_addr_copy(ar->mac_addr, arg.mac_addr);
5763 	complete(&ar->wmi.unified_ready);
5764 	return 0;
5765 }
5766 
5767 void ath10k_wmi_event_service_available(struct ath10k *ar, struct sk_buff *skb)
5768 {
5769 	int ret;
5770 	struct wmi_svc_avail_ev_arg arg = {};
5771 
5772 	ret = ath10k_wmi_pull_svc_avail(ar, skb, &arg);
5773 	if (ret) {
5774 		ath10k_warn(ar, "failed to parse service available event: %d\n",
5775 			    ret);
5776 	}
5777 
5778 	/*
5779 	 * Initialization of "arg.service_map_ext_valid" to ZERO is necessary
5780 	 * for the logic below to work.
5781 	 */
5782 	if (arg.service_map_ext_valid)
5783 		ath10k_wmi_map_svc_ext(ar, arg.service_map_ext, ar->wmi.svc_map,
5784 				       __le32_to_cpu(arg.service_map_ext_len));
5785 }
5786 
5787 static int ath10k_wmi_event_temperature(struct ath10k *ar, struct sk_buff *skb)
5788 {
5789 	const struct wmi_pdev_temperature_event *ev;
5790 
5791 	ev = (struct wmi_pdev_temperature_event *)skb->data;
5792 	if (WARN_ON(skb->len < sizeof(*ev)))
5793 		return -EPROTO;
5794 
5795 	ath10k_thermal_event_temperature(ar, __le32_to_cpu(ev->temperature));
5796 	return 0;
5797 }
5798 
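/* Converts the firmware-reported BSS channel cycle counters into survey
 * times (scaled by the channel counter frequency), updates the matching
 * ar->survey entry under data_lock and completes bss_survey_done.
 */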
5799 static int ath10k_wmi_event_pdev_bss_chan_info(struct ath10k *ar,
5800 					       struct sk_buff *skb)
5801 {
5802 	struct wmi_pdev_bss_chan_info_event *ev;
5803 	struct survey_info *survey;
5804 	u64 busy, total, tx, rx, rx_bss;
5805 	u32 freq, noise_floor;
5806 	u32 cc_freq_hz = ar->hw_params.channel_counters_freq_hz;
5807 	int idx;
5808 
5809 	ev = (struct wmi_pdev_bss_chan_info_event *)skb->data;
5810 	if (WARN_ON(skb->len < sizeof(*ev)))
5811 		return -EPROTO;
5812 
5813 	freq        = __le32_to_cpu(ev->freq);
5814 	noise_floor = __le32_to_cpu(ev->noise_floor);
5815 	busy        = __le64_to_cpu(ev->cycle_busy);
5816 	total       = __le64_to_cpu(ev->cycle_total);
5817 	tx          = __le64_to_cpu(ev->cycle_tx);
5818 	rx          = __le64_to_cpu(ev->cycle_rx);
5819 	rx_bss      = __le64_to_cpu(ev->cycle_rx_bss);
5820 
5821 	ath10k_dbg(ar, ATH10K_DBG_WMI,
5822 		   "wmi event pdev bss chan info:\n freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n",
5823 		   freq, noise_floor, busy, total, tx, rx, rx_bss);
5824 
5825 	spin_lock_bh(&ar->data_lock);
5826 	idx = freq_to_idx(ar, freq);
5827 	if (idx >= ARRAY_SIZE(ar->survey)) {
5828 		ath10k_warn(ar, "bss chan info: invalid frequency %d (idx %d out of bounds)\n",
5829 			    freq, idx);
5830 		goto exit;
5831 	}
5832 
5833 	survey = &ar->survey[idx];
5834 
5835 	survey->noise     = noise_floor;
5836 	survey->time      = div_u64(total, cc_freq_hz);
5837 	survey->time_busy = div_u64(busy, cc_freq_hz);
5838 	survey->time_rx   = div_u64(rx_bss, cc_freq_hz);
5839 	survey->time_tx   = div_u64(tx, cc_freq_hz);
5840 	survey->filled   |= (SURVEY_INFO_NOISE_DBM |
5841 			     SURVEY_INFO_TIME |
5842 			     SURVEY_INFO_TIME_BUSY |
5843 			     SURVEY_INFO_TIME_RX |
5844 			     SURVEY_INFO_TIME_TX);
5845 exit:
5846 	spin_unlock_bh(&ar->data_lock);
5847 	complete(&ar->bss_survey_done);
5848 	return 0;
5849 }
5850 
5851 static inline void ath10k_wmi_queue_set_coverage_class_work(struct ath10k *ar)
5852 {
5853 	if (ar->hw_params.hw_ops->set_coverage_class) {
5854 		spin_lock_bh(&ar->data_lock);
5855 
5856 		/* This call only ensures that the modified coverage class
5857 		 * persists in case the firmware sets the registers back to
5858 		 * their default value. So calling it is only necessary if the
5859 		 * coverage class has a non-zero value.
5860 		 */
5861 		if (ar->fw_coverage.coverage_class)
5862 			queue_work(ar->workqueue, &ar->set_coverage_class_work);
5863 
5864 		spin_unlock_bh(&ar->data_lock);
5865 	}
5866 }
5867 
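/* Main-firmware WMI event dispatcher. Handlers that take ownership of the
 * skb (mgmt rx, service ready) return directly; every other path falls
 * through to the dev_kfree_skb() at the end.
 */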
5868 static void ath10k_wmi_op_rx(struct ath10k *ar, struct sk_buff *skb)
5869 {
5870 	struct wmi_cmd_hdr *cmd_hdr;
5871 	enum wmi_event_id id;
5872 
5873 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
5874 	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
5875 
5876 	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
5877 		goto out;
5878 
5879 	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
5880 
5881 	switch (id) {
5882 	case WMI_MGMT_RX_EVENTID:
5883 		ath10k_wmi_event_mgmt_rx(ar, skb);
5884 		/* mgmt_rx() owns the skb now! */
5885 		return;
5886 	case WMI_SCAN_EVENTID:
5887 		ath10k_wmi_event_scan(ar, skb);
5888 		ath10k_wmi_queue_set_coverage_class_work(ar);
5889 		break;
5890 	case WMI_CHAN_INFO_EVENTID:
5891 		ath10k_wmi_event_chan_info(ar, skb);
5892 		break;
5893 	case WMI_ECHO_EVENTID:
5894 		ath10k_wmi_event_echo(ar, skb);
5895 		break;
5896 	case WMI_DEBUG_MESG_EVENTID:
5897 		ath10k_wmi_event_debug_mesg(ar, skb);
5898 		ath10k_wmi_queue_set_coverage_class_work(ar);
5899 		break;
5900 	case WMI_UPDATE_STATS_EVENTID:
5901 		ath10k_wmi_event_update_stats(ar, skb);
5902 		break;
5903 	case WMI_VDEV_START_RESP_EVENTID:
5904 		ath10k_wmi_event_vdev_start_resp(ar, skb);
5905 		ath10k_wmi_queue_set_coverage_class_work(ar);
5906 		break;
5907 	case WMI_VDEV_STOPPED_EVENTID:
5908 		ath10k_wmi_event_vdev_stopped(ar, skb);
5909 		ath10k_wmi_queue_set_coverage_class_work(ar);
5910 		break;
5911 	case WMI_PEER_STA_KICKOUT_EVENTID:
5912 		ath10k_wmi_event_peer_sta_kickout(ar, skb);
5913 		break;
5914 	case WMI_HOST_SWBA_EVENTID:
5915 		ath10k_wmi_event_host_swba(ar, skb);
5916 		break;
5917 	case WMI_TBTTOFFSET_UPDATE_EVENTID:
5918 		ath10k_wmi_event_tbttoffset_update(ar, skb);
5919 		break;
5920 	case WMI_PHYERR_EVENTID:
5921 		ath10k_wmi_event_phyerr(ar, skb);
5922 		break;
5923 	case WMI_ROAM_EVENTID:
5924 		ath10k_wmi_event_roam(ar, skb);
5925 		ath10k_wmi_queue_set_coverage_class_work(ar);
5926 		break;
5927 	case WMI_PROFILE_MATCH:
5928 		ath10k_wmi_event_profile_match(ar, skb);
5929 		break;
5930 	case WMI_DEBUG_PRINT_EVENTID:
5931 		ath10k_wmi_event_debug_print(ar, skb);
5932 		ath10k_wmi_queue_set_coverage_class_work(ar);
5933 		break;
5934 	case WMI_PDEV_QVIT_EVENTID:
5935 		ath10k_wmi_event_pdev_qvit(ar, skb);
5936 		break;
5937 	case WMI_WLAN_PROFILE_DATA_EVENTID:
5938 		ath10k_wmi_event_wlan_profile_data(ar, skb);
5939 		break;
5940 	case WMI_RTT_MEASUREMENT_REPORT_EVENTID:
5941 		ath10k_wmi_event_rtt_measurement_report(ar, skb);
5942 		break;
5943 	case WMI_TSF_MEASUREMENT_REPORT_EVENTID:
5944 		ath10k_wmi_event_tsf_measurement_report(ar, skb);
5945 		break;
5946 	case WMI_RTT_ERROR_REPORT_EVENTID:
5947 		ath10k_wmi_event_rtt_error_report(ar, skb);
5948 		break;
5949 	case WMI_WOW_WAKEUP_HOST_EVENTID:
5950 		ath10k_wmi_event_wow_wakeup_host(ar, skb);
5951 		break;
5952 	case WMI_DCS_INTERFERENCE_EVENTID:
5953 		ath10k_wmi_event_dcs_interference(ar, skb);
5954 		break;
5955 	case WMI_PDEV_TPC_CONFIG_EVENTID:
5956 		ath10k_wmi_event_pdev_tpc_config(ar, skb);
5957 		break;
5958 	case WMI_PDEV_FTM_INTG_EVENTID:
5959 		ath10k_wmi_event_pdev_ftm_intg(ar, skb);
5960 		break;
5961 	case WMI_GTK_OFFLOAD_STATUS_EVENTID:
5962 		ath10k_wmi_event_gtk_offload_status(ar, skb);
5963 		break;
5964 	case WMI_GTK_REKEY_FAIL_EVENTID:
5965 		ath10k_wmi_event_gtk_rekey_fail(ar, skb);
5966 		break;
5967 	case WMI_TX_DELBA_COMPLETE_EVENTID:
5968 		ath10k_wmi_event_delba_complete(ar, skb);
5969 		break;
5970 	case WMI_TX_ADDBA_COMPLETE_EVENTID:
5971 		ath10k_wmi_event_addba_complete(ar, skb);
5972 		break;
5973 	case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
5974 		ath10k_wmi_event_vdev_install_key_complete(ar, skb);
5975 		break;
5976 	case WMI_SERVICE_READY_EVENTID:
5977 		ath10k_wmi_event_service_ready(ar, skb);
5978 		return;
5979 	case WMI_READY_EVENTID:
5980 		ath10k_wmi_event_ready(ar, skb);
5981 		ath10k_wmi_queue_set_coverage_class_work(ar);
5982 		break;
5983 	case WMI_SERVICE_AVAILABLE_EVENTID:
5984 		ath10k_wmi_event_service_available(ar, skb);
5985 		break;
5986 	default:
5987 		ath10k_warn(ar, "Unknown eventid: %d\n", id);
5988 		break;
5989 	}
5990 
5991 out:
5992 	dev_kfree_skb(skb);
5993 }
5994 
5995 static void ath10k_wmi_10_1_op_rx(struct ath10k *ar, struct sk_buff *skb)
5996 {
5997 	struct wmi_cmd_hdr *cmd_hdr;
5998 	enum wmi_10x_event_id id;
5999 	bool consumed;
6000 
6001 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
6002 	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
6003 
6004 	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
6005 		goto out;
6006 
6007 	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
6008 
6009 	consumed = ath10k_tm_event_wmi(ar, id, skb);
6010 
6011 	/* The ready event must be handled normally also in UTF mode so that
6012 	 * we know the UTF firmware has booted; all other WMI events are
6013 	 * simply passed through to testmode.
6014 	 */
6015 	if (consumed && id != WMI_10X_READY_EVENTID) {
6016 		ath10k_dbg(ar, ATH10K_DBG_WMI,
6017 			   "wmi testmode consumed 0x%x\n", id);
6018 		goto out;
6019 	}
6020 
6021 	switch (id) {
6022 	case WMI_10X_MGMT_RX_EVENTID:
6023 		ath10k_wmi_event_mgmt_rx(ar, skb);
6024 		/* mgmt_rx() owns the skb now! */
6025 		return;
6026 	case WMI_10X_SCAN_EVENTID:
6027 		ath10k_wmi_event_scan(ar, skb);
6028 		ath10k_wmi_queue_set_coverage_class_work(ar);
6029 		break;
6030 	case WMI_10X_CHAN_INFO_EVENTID:
6031 		ath10k_wmi_event_chan_info(ar, skb);
6032 		break;
6033 	case WMI_10X_ECHO_EVENTID:
6034 		ath10k_wmi_event_echo(ar, skb);
6035 		break;
6036 	case WMI_10X_DEBUG_MESG_EVENTID:
6037 		ath10k_wmi_event_debug_mesg(ar, skb);
6038 		ath10k_wmi_queue_set_coverage_class_work(ar);
6039 		break;
6040 	case WMI_10X_UPDATE_STATS_EVENTID:
6041 		ath10k_wmi_event_update_stats(ar, skb);
6042 		break;
6043 	case WMI_10X_VDEV_START_RESP_EVENTID:
6044 		ath10k_wmi_event_vdev_start_resp(ar, skb);
6045 		ath10k_wmi_queue_set_coverage_class_work(ar);
6046 		break;
6047 	case WMI_10X_VDEV_STOPPED_EVENTID:
6048 		ath10k_wmi_event_vdev_stopped(ar, skb);
6049 		ath10k_wmi_queue_set_coverage_class_work(ar);
6050 		break;
6051 	case WMI_10X_PEER_STA_KICKOUT_EVENTID:
6052 		ath10k_wmi_event_peer_sta_kickout(ar, skb);
6053 		break;
6054 	case WMI_10X_HOST_SWBA_EVENTID:
6055 		ath10k_wmi_event_host_swba(ar, skb);
6056 		break;
6057 	case WMI_10X_TBTTOFFSET_UPDATE_EVENTID:
6058 		ath10k_wmi_event_tbttoffset_update(ar, skb);
6059 		break;
6060 	case WMI_10X_PHYERR_EVENTID:
6061 		ath10k_wmi_event_phyerr(ar, skb);
6062 		break;
6063 	case WMI_10X_ROAM_EVENTID:
6064 		ath10k_wmi_event_roam(ar, skb);
6065 		ath10k_wmi_queue_set_coverage_class_work(ar);
6066 		break;
6067 	case WMI_10X_PROFILE_MATCH:
6068 		ath10k_wmi_event_profile_match(ar, skb);
6069 		break;
6070 	case WMI_10X_DEBUG_PRINT_EVENTID:
6071 		ath10k_wmi_event_debug_print(ar, skb);
6072 		ath10k_wmi_queue_set_coverage_class_work(ar);
6073 		break;
6074 	case WMI_10X_PDEV_QVIT_EVENTID:
6075 		ath10k_wmi_event_pdev_qvit(ar, skb);
6076 		break;
6077 	case WMI_10X_WLAN_PROFILE_DATA_EVENTID:
6078 		ath10k_wmi_event_wlan_profile_data(ar, skb);
6079 		break;
6080 	case WMI_10X_RTT_MEASUREMENT_REPORT_EVENTID:
6081 		ath10k_wmi_event_rtt_measurement_report(ar, skb);
6082 		break;
6083 	case WMI_10X_TSF_MEASUREMENT_REPORT_EVENTID:
6084 		ath10k_wmi_event_tsf_measurement_report(ar, skb);
6085 		break;
6086 	case WMI_10X_RTT_ERROR_REPORT_EVENTID:
6087 		ath10k_wmi_event_rtt_error_report(ar, skb);
6088 		break;
6089 	case WMI_10X_WOW_WAKEUP_HOST_EVENTID:
6090 		ath10k_wmi_event_wow_wakeup_host(ar, skb);
6091 		break;
6092 	case WMI_10X_DCS_INTERFERENCE_EVENTID:
6093 		ath10k_wmi_event_dcs_interference(ar, skb);
6094 		break;
6095 	case WMI_10X_PDEV_TPC_CONFIG_EVENTID:
6096 		ath10k_wmi_event_pdev_tpc_config(ar, skb);
6097 		break;
6098 	case WMI_10X_INST_RSSI_STATS_EVENTID:
6099 		ath10k_wmi_event_inst_rssi_stats(ar, skb);
6100 		break;
6101 	case WMI_10X_VDEV_STANDBY_REQ_EVENTID:
6102 		ath10k_wmi_event_vdev_standby_req(ar, skb);
6103 		break;
6104 	case WMI_10X_VDEV_RESUME_REQ_EVENTID:
6105 		ath10k_wmi_event_vdev_resume_req(ar, skb);
6106 		break;
6107 	case WMI_10X_SERVICE_READY_EVENTID:
6108 		ath10k_wmi_event_service_ready(ar, skb);
6109 		return;
6110 	case WMI_10X_READY_EVENTID:
6111 		ath10k_wmi_event_ready(ar, skb);
6112 		ath10k_wmi_queue_set_coverage_class_work(ar);
6113 		break;
6114 	case WMI_10X_PDEV_UTF_EVENTID:
6115 		/* ignore utf events */
6116 		break;
6117 	default:
6118 		ath10k_warn(ar, "Unknown eventid: %d\n", id);
6119 		break;
6120 	}
6121 
6122 out:
6123 	dev_kfree_skb(skb);
6124 }
6125 
6126 static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
6127 {
6128 	struct wmi_cmd_hdr *cmd_hdr;
6129 	enum wmi_10_2_event_id id;
6130 	bool consumed;
6131 
6132 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
6133 	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
6134 
6135 	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
6136 		goto out;
6137 
6138 	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
6139 
6140 	consumed = ath10k_tm_event_wmi(ar, id, skb);
6141 
6142 	/* The ready event must be handled normally also in UTF mode so that
6143 	 * we know the UTF firmware has booted; all other WMI events are
6144 	 * simply passed through to testmode.
6145 	 */
6146 	if (consumed && id != WMI_10_2_READY_EVENTID) {
6147 		ath10k_dbg(ar, ATH10K_DBG_WMI,
6148 			   "wmi testmode consumed 0x%x\n", id);
6149 		goto out;
6150 	}
6151 
6152 	switch (id) {
6153 	case WMI_10_2_MGMT_RX_EVENTID:
6154 		ath10k_wmi_event_mgmt_rx(ar, skb);
6155 		/* mgmt_rx() owns the skb now! */
6156 		return;
6157 	case WMI_10_2_SCAN_EVENTID:
6158 		ath10k_wmi_event_scan(ar, skb);
6159 		ath10k_wmi_queue_set_coverage_class_work(ar);
6160 		break;
6161 	case WMI_10_2_CHAN_INFO_EVENTID:
6162 		ath10k_wmi_event_chan_info(ar, skb);
6163 		break;
6164 	case WMI_10_2_ECHO_EVENTID:
6165 		ath10k_wmi_event_echo(ar, skb);
6166 		break;
6167 	case WMI_10_2_DEBUG_MESG_EVENTID:
6168 		ath10k_wmi_event_debug_mesg(ar, skb);
6169 		ath10k_wmi_queue_set_coverage_class_work(ar);
6170 		break;
6171 	case WMI_10_2_UPDATE_STATS_EVENTID:
6172 		ath10k_wmi_event_update_stats(ar, skb);
6173 		break;
6174 	case WMI_10_2_VDEV_START_RESP_EVENTID:
6175 		ath10k_wmi_event_vdev_start_resp(ar, skb);
6176 		ath10k_wmi_queue_set_coverage_class_work(ar);
6177 		break;
6178 	case WMI_10_2_VDEV_STOPPED_EVENTID:
6179 		ath10k_wmi_event_vdev_stopped(ar, skb);
6180 		ath10k_wmi_queue_set_coverage_class_work(ar);
6181 		break;
6182 	case WMI_10_2_PEER_STA_KICKOUT_EVENTID:
6183 		ath10k_wmi_event_peer_sta_kickout(ar, skb);
6184 		break;
6185 	case WMI_10_2_HOST_SWBA_EVENTID:
6186 		ath10k_wmi_event_host_swba(ar, skb);
6187 		break;
6188 	case WMI_10_2_TBTTOFFSET_UPDATE_EVENTID:
6189 		ath10k_wmi_event_tbttoffset_update(ar, skb);
6190 		break;
6191 	case WMI_10_2_PHYERR_EVENTID:
6192 		ath10k_wmi_event_phyerr(ar, skb);
6193 		break;
6194 	case WMI_10_2_ROAM_EVENTID:
6195 		ath10k_wmi_event_roam(ar, skb);
6196 		ath10k_wmi_queue_set_coverage_class_work(ar);
6197 		break;
6198 	case WMI_10_2_PROFILE_MATCH:
6199 		ath10k_wmi_event_profile_match(ar, skb);
6200 		break;
6201 	case WMI_10_2_DEBUG_PRINT_EVENTID:
6202 		ath10k_wmi_event_debug_print(ar, skb);
6203 		ath10k_wmi_queue_set_coverage_class_work(ar);
6204 		break;
6205 	case WMI_10_2_PDEV_QVIT_EVENTID:
6206 		ath10k_wmi_event_pdev_qvit(ar, skb);
6207 		break;
6208 	case WMI_10_2_WLAN_PROFILE_DATA_EVENTID:
6209 		ath10k_wmi_event_wlan_profile_data(ar, skb);
6210 		break;
6211 	case WMI_10_2_RTT_MEASUREMENT_REPORT_EVENTID:
6212 		ath10k_wmi_event_rtt_measurement_report(ar, skb);
6213 		break;
6214 	case WMI_10_2_TSF_MEASUREMENT_REPORT_EVENTID:
6215 		ath10k_wmi_event_tsf_measurement_report(ar, skb);
6216 		break;
6217 	case WMI_10_2_RTT_ERROR_REPORT_EVENTID:
6218 		ath10k_wmi_event_rtt_error_report(ar, skb);
6219 		break;
6220 	case WMI_10_2_WOW_WAKEUP_HOST_EVENTID:
6221 		ath10k_wmi_event_wow_wakeup_host(ar, skb);
6222 		break;
6223 	case WMI_10_2_DCS_INTERFERENCE_EVENTID:
6224 		ath10k_wmi_event_dcs_interference(ar, skb);
6225 		break;
6226 	case WMI_10_2_PDEV_TPC_CONFIG_EVENTID:
6227 		ath10k_wmi_event_pdev_tpc_config(ar, skb);
6228 		break;
6229 	case WMI_10_2_INST_RSSI_STATS_EVENTID:
6230 		ath10k_wmi_event_inst_rssi_stats(ar, skb);
6231 		break;
6232 	case WMI_10_2_VDEV_STANDBY_REQ_EVENTID:
6233 		ath10k_wmi_event_vdev_standby_req(ar, skb);
6234 		ath10k_wmi_queue_set_coverage_class_work(ar);
6235 		break;
6236 	case WMI_10_2_VDEV_RESUME_REQ_EVENTID:
6237 		ath10k_wmi_event_vdev_resume_req(ar, skb);
6238 		ath10k_wmi_queue_set_coverage_class_work(ar);
6239 		break;
6240 	case WMI_10_2_SERVICE_READY_EVENTID:
6241 		ath10k_wmi_event_service_ready(ar, skb);
6242 		return;
6243 	case WMI_10_2_READY_EVENTID:
6244 		ath10k_wmi_event_ready(ar, skb);
6245 		ath10k_wmi_queue_set_coverage_class_work(ar);
6246 		break;
6247 	case WMI_10_2_PDEV_TEMPERATURE_EVENTID:
6248 		ath10k_wmi_event_temperature(ar, skb);
6249 		break;
6250 	case WMI_10_2_PDEV_BSS_CHAN_INFO_EVENTID:
6251 		ath10k_wmi_event_pdev_bss_chan_info(ar, skb);
6252 		break;
6253 	case WMI_10_2_RTT_KEEPALIVE_EVENTID:
6254 	case WMI_10_2_GPIO_INPUT_EVENTID:
6255 	case WMI_10_2_PEER_RATECODE_LIST_EVENTID:
6256 	case WMI_10_2_GENERIC_BUFFER_EVENTID:
6257 	case WMI_10_2_MCAST_BUF_RELEASE_EVENTID:
6258 	case WMI_10_2_MCAST_LIST_AGEOUT_EVENTID:
6259 	case WMI_10_2_WDS_PEER_EVENTID:
6260 		ath10k_dbg(ar, ATH10K_DBG_WMI,
6261 			   "received event id %d not implemented\n", id);
6262 		break;
6263 	case WMI_10_2_PEER_STA_PS_STATECHG_EVENTID:
6264 		ath10k_wmi_event_peer_sta_ps_state_chg(ar, skb);
6265 		break;
6266 	default:
6267 		ath10k_warn(ar, "Unknown eventid: %d\n", id);
6268 		break;
6269 	}
6270 
6271 out:
6272 	dev_kfree_skb(skb);
6273 }
6274 
6275 static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
6276 {
6277 	struct wmi_cmd_hdr *cmd_hdr;
6278 	enum wmi_10_4_event_id id;
6279 	bool consumed;
6280 
6281 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
6282 	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
6283 
6284 	if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr)))
6285 		goto out;
6286 
6287 	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
6288 
6289 	consumed = ath10k_tm_event_wmi(ar, id, skb);
6290 
6291 	/* The ready event must be handled normally also in UTF mode so that
6292 	 * we know the UTF firmware has booted; all other WMI events are
6293 	 * simply passed through to testmode.
6294 	 */
6295 	if (consumed && id != WMI_10_4_READY_EVENTID) {
6296 		ath10k_dbg(ar, ATH10K_DBG_WMI,
6297 			   "wmi testmode consumed 0x%x\n", id);
6298 		goto out;
6299 	}
6300 
6301 	switch (id) {
6302 	case WMI_10_4_MGMT_RX_EVENTID:
6303 		ath10k_wmi_event_mgmt_rx(ar, skb);
6304 		/* mgmt_rx() owns the skb now! */
6305 		return;
6306 	case WMI_10_4_ECHO_EVENTID:
6307 		ath10k_wmi_event_echo(ar, skb);
6308 		break;
6309 	case WMI_10_4_DEBUG_MESG_EVENTID:
6310 		ath10k_wmi_event_debug_mesg(ar, skb);
6311 		ath10k_wmi_queue_set_coverage_class_work(ar);
6312 		break;
6313 	case WMI_10_4_SERVICE_READY_EVENTID:
6314 		ath10k_wmi_event_service_ready(ar, skb);
6315 		return;
6316 	case WMI_10_4_SCAN_EVENTID:
6317 		ath10k_wmi_event_scan(ar, skb);
6318 		ath10k_wmi_queue_set_coverage_class_work(ar);
6319 		break;
6320 	case WMI_10_4_CHAN_INFO_EVENTID:
6321 		ath10k_wmi_event_chan_info(ar, skb);
6322 		break;
6323 	case WMI_10_4_PHYERR_EVENTID:
6324 		ath10k_wmi_event_phyerr(ar, skb);
6325 		break;
6326 	case WMI_10_4_READY_EVENTID:
6327 		ath10k_wmi_event_ready(ar, skb);
6328 		ath10k_wmi_queue_set_coverage_class_work(ar);
6329 		break;
6330 	case WMI_10_4_PEER_STA_KICKOUT_EVENTID:
6331 		ath10k_wmi_event_peer_sta_kickout(ar, skb);
6332 		break;
6333 	case WMI_10_4_ROAM_EVENTID:
6334 		ath10k_wmi_event_roam(ar, skb);
6335 		ath10k_wmi_queue_set_coverage_class_work(ar);
6336 		break;
6337 	case WMI_10_4_HOST_SWBA_EVENTID:
6338 		ath10k_wmi_event_host_swba(ar, skb);
6339 		break;
6340 	case WMI_10_4_TBTTOFFSET_UPDATE_EVENTID:
6341 		ath10k_wmi_event_tbttoffset_update(ar, skb);
6342 		break;
6343 	case WMI_10_4_DEBUG_PRINT_EVENTID:
6344 		ath10k_wmi_event_debug_print(ar, skb);
6345 		ath10k_wmi_queue_set_coverage_class_work(ar);
6346 		break;
6347 	case WMI_10_4_VDEV_START_RESP_EVENTID:
6348 		ath10k_wmi_event_vdev_start_resp(ar, skb);
6349 		ath10k_wmi_queue_set_coverage_class_work(ar);
6350 		break;
6351 	case WMI_10_4_VDEV_STOPPED_EVENTID:
6352 		ath10k_wmi_event_vdev_stopped(ar, skb);
6353 		ath10k_wmi_queue_set_coverage_class_work(ar);
6354 		break;
6355 	case WMI_10_4_WOW_WAKEUP_HOST_EVENTID:
6356 	case WMI_10_4_PEER_RATECODE_LIST_EVENTID:
6357 	case WMI_10_4_WDS_PEER_EVENTID:
6358 	case WMI_10_4_DEBUG_FATAL_CONDITION_EVENTID:
6359 		ath10k_dbg(ar, ATH10K_DBG_WMI,
6360 			   "received event id %d not implemented\n", id);
6361 		break;
6362 	case WMI_10_4_UPDATE_STATS_EVENTID:
6363 		ath10k_wmi_event_update_stats(ar, skb);
6364 		break;
6365 	case WMI_10_4_PDEV_TEMPERATURE_EVENTID:
6366 		ath10k_wmi_event_temperature(ar, skb);
6367 		break;
6368 	case WMI_10_4_PDEV_BSS_CHAN_INFO_EVENTID:
6369 		ath10k_wmi_event_pdev_bss_chan_info(ar, skb);
6370 		break;
6371 	case WMI_10_4_PDEV_TPC_CONFIG_EVENTID:
6372 		ath10k_wmi_event_pdev_tpc_config(ar, skb);
6373 		break;
6374 	case WMI_10_4_TDLS_PEER_EVENTID:
6375 		ath10k_wmi_handle_tdls_peer_event(ar, skb);
6376 		break;
6377 	case WMI_10_4_PDEV_TPC_TABLE_EVENTID:
6378 		ath10k_wmi_event_tpc_final_table(ar, skb);
6379 		break;
6380 	case WMI_10_4_DFS_STATUS_CHECK_EVENTID:
6381 		ath10k_wmi_event_dfs_status_check(ar, skb);
6382 		break;
6383 	case WMI_10_4_PEER_STA_PS_STATECHG_EVENTID:
6384 		ath10k_wmi_event_peer_sta_ps_state_chg(ar, skb);
6385 		break;
6386 	default:
6387 		ath10k_warn(ar, "Unknown eventid: %d\n", id);
6388 		break;
6389 	}
6390 
6391 out:
6392 	dev_kfree_skb(skb);
6393 }
6394 
6395 static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
6396 {
6397 	int ret;
6398 
6399 	ret = ath10k_wmi_rx(ar, skb);
6400 	if (ret)
6401 		ath10k_warn(ar, "failed to process wmi rx: %d\n", ret);
6402 }
6403 
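/* Connects the WMI control service over HTC using the shared tx/rx/credit
 * callbacks and records the assigned endpoint id in ar->wmi.eid.
 */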
6404 int ath10k_wmi_connect(struct ath10k *ar)
6405 {
6406 	int status;
6407 	struct ath10k_htc_svc_conn_req conn_req;
6408 	struct ath10k_htc_svc_conn_resp conn_resp;
6409 
6410 	memset(&ar->wmi.svc_map, 0, sizeof(ar->wmi.svc_map));
6411 
6412 	memset(&conn_req, 0, sizeof(conn_req));
6413 	memset(&conn_resp, 0, sizeof(conn_resp));
6414 
6415 	/* these fields are the same for all service endpoints */
6416 	conn_req.ep_ops.ep_tx_complete = ath10k_wmi_htc_tx_complete;
6417 	conn_req.ep_ops.ep_rx_complete = ath10k_wmi_process_rx;
6418 	conn_req.ep_ops.ep_tx_credits = ath10k_wmi_op_ep_tx_credits;
6419 
6420 	/* connect to control service */
6421 	conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
6422 
6423 	status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
6424 	if (status) {
6425 		ath10k_warn(ar, "failed to connect to WMI CONTROL service status: %d\n",
6426 			    status);
6427 		return status;
6428 	}
6429 
6430 	ar->wmi.eid = conn_resp.eid;
6431 	return 0;
6432 }
6433 
6434 static struct sk_buff *
6435 ath10k_wmi_op_gen_pdev_set_base_macaddr(struct ath10k *ar,
6436 					const u8 macaddr[ETH_ALEN])
6437 {
6438 	struct wmi_pdev_set_base_macaddr_cmd *cmd;
6439 	struct sk_buff *skb;
6440 
6441 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
6442 	if (!skb)
6443 		return ERR_PTR(-ENOMEM);
6444 
6445 	cmd = (struct wmi_pdev_set_base_macaddr_cmd *)skb->data;
6446 	ether_addr_copy(cmd->mac_addr.addr, macaddr);
6447 
6448 	ath10k_dbg(ar, ATH10K_DBG_WMI,
6449 		   "wmi pdev basemac %pM\n", macaddr);
6450 	return skb;
6451 }
6452 
6453 static struct sk_buff *
6454 ath10k_wmi_op_gen_pdev_set_rd(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
6455 			      u16 ctl2g, u16 ctl5g,
6456 			      enum wmi_dfs_region dfs_reg)
6457 {
6458 	struct wmi_pdev_set_regdomain_cmd *cmd;
6459 	struct sk_buff *skb;
6460 
6461 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
6462 	if (!skb)
6463 		return ERR_PTR(-ENOMEM);
6464 
6465 	cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
6466 	cmd->reg_domain = __cpu_to_le32(rd);
6467 	cmd->reg_domain_2G = __cpu_to_le32(rd2g);
6468 	cmd->reg_domain_5G = __cpu_to_le32(rd5g);
6469 	cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
6470 	cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
6471 
6472 	ath10k_dbg(ar, ATH10K_DBG_WMI,
6473 		   "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n",
6474 		   rd, rd2g, rd5g, ctl2g, ctl5g);
6475 	return skb;
6476 }
6477 
6478 static struct sk_buff *
6479 ath10k_wmi_10x_op_gen_pdev_set_rd(struct ath10k *ar, u16 rd, u16 rd2g, u16
6480 				  rd5g, u16 ctl2g, u16 ctl5g,
6481 				  enum wmi_dfs_region dfs_reg)
6482 {
6483 	struct wmi_pdev_set_regdomain_cmd_10x *cmd;
6484 	struct sk_buff *skb;
6485 
6486 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
6487 	if (!skb)
6488 		return ERR_PTR(-ENOMEM);
6489 
6490 	cmd = (struct wmi_pdev_set_regdomain_cmd_10x *)skb->data;
6491 	cmd->reg_domain = __cpu_to_le32(rd);
6492 	cmd->reg_domain_2G = __cpu_to_le32(rd2g);
6493 	cmd->reg_domain_5G = __cpu_to_le32(rd5g);
6494 	cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
6495 	cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
6496 	cmd->dfs_domain = __cpu_to_le32(dfs_reg);
6497 
6498 	ath10k_dbg(ar, ATH10K_DBG_WMI,
6499 		   "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x dfs_region %x\n",
6500 		   rd, rd2g, rd5g, ctl2g, ctl5g, dfs_reg);
6501 	return skb;
6502 }
6503 
6504 static struct sk_buff *
6505 ath10k_wmi_op_gen_pdev_suspend(struct ath10k *ar, u32 suspend_opt)
6506 {
6507 	struct wmi_pdev_suspend_cmd *cmd;
6508 	struct sk_buff *skb;
6509 
6510 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
6511 	if (!skb)
6512 		return ERR_PTR(-ENOMEM);
6513 
6514 	cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
6515 	cmd->suspend_opt = __cpu_to_le32(suspend_opt);
6516 
6517 	return skb;
6518 }
6519 
6520 static struct sk_buff *
6521 ath10k_wmi_op_gen_pdev_resume(struct ath10k *ar)
6522 {
6523 	struct sk_buff *skb;
6524 
6525 	skb = ath10k_wmi_alloc_skb(ar, 0);
6526 	if (!skb)
6527 		return ERR_PTR(-ENOMEM);
6528 
6529 	return skb;
6530 }
6531 
6532 static struct sk_buff *
6533 ath10k_wmi_op_gen_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
6534 {
6535 	struct wmi_pdev_set_param_cmd *cmd;
6536 	struct sk_buff *skb;
6537 
6538 	if (id == WMI_PDEV_PARAM_UNSUPPORTED) {
6539 		ath10k_warn(ar, "pdev param %d not supported by firmware\n",
6540 			    id);
6541 		return ERR_PTR(-EOPNOTSUPP);
6542 	}
6543 
6544 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
6545 	if (!skb)
6546 		return ERR_PTR(-ENOMEM);
6547 
6548 	cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
6549 	cmd->param_id    = __cpu_to_le32(id);
6550 	cmd->param_value = __cpu_to_le32(value);
6551 
6552 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n",
6553 		   id, value);
6554 	return skb;
6555 }
6556 
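/* Copies the previously allocated host memory chunks (address, length and
 * request id) into the command's chunk list and sets the chunk count.
 */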
6557 void ath10k_wmi_put_host_mem_chunks(struct ath10k *ar,
6558 				    struct wmi_host_mem_chunks *chunks)
6559 {
6560 	struct host_memory_chunk *chunk;
6561 	int i;
6562 
6563 	chunks->count = __cpu_to_le32(ar->wmi.num_mem_chunks);
6564 
6565 	for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
6566 		chunk = &chunks->items[i];
6567 		chunk->ptr = __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
6568 		chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len);
6569 		chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
6570 
6571 		ath10k_dbg(ar, ATH10K_DBG_WMI,
6572 			   "wmi chunk %d len %d requested, addr 0x%llx\n",
6573 			   i,
6574 			   ar->wmi.mem_chunks[i].len,
6575 			   (unsigned long long)ar->wmi.mem_chunks[i].paddr);
6576 	}
6577 }
6578 
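/* Builds the init command for main firmware: fills the resource config
 * with the TARGET_* defaults and appends the host memory chunk list.
 */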
6579 static struct sk_buff *ath10k_wmi_op_gen_init(struct ath10k *ar)
6580 {
6581 	struct wmi_init_cmd *cmd;
6582 	struct sk_buff *buf;
6583 	struct wmi_resource_config config = {};
6584 	u32 val;
6585 
6586 	config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS);
6587 	config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS);
6588 	config.num_offload_peers = __cpu_to_le32(TARGET_NUM_OFFLOAD_PEERS);
6589 
6590 	config.num_offload_reorder_bufs =
6591 		__cpu_to_le32(TARGET_NUM_OFFLOAD_REORDER_BUFS);
6592 
6593 	config.num_peer_keys = __cpu_to_le32(TARGET_NUM_PEER_KEYS);
6594 	config.num_tids = __cpu_to_le32(TARGET_NUM_TIDS);
6595 	config.ast_skid_limit = __cpu_to_le32(TARGET_AST_SKID_LIMIT);
6596 	config.tx_chain_mask = __cpu_to_le32(TARGET_TX_CHAIN_MASK);
6597 	config.rx_chain_mask = __cpu_to_le32(TARGET_RX_CHAIN_MASK);
6598 	config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
6599 	config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
6600 	config.rx_timeout_pri_be = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
6601 	config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_RX_TIMEOUT_HI_PRI);
6602 	config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
6603 	config.scan_max_pending_reqs =
6604 		__cpu_to_le32(TARGET_SCAN_MAX_PENDING_REQS);
6605 
6606 	config.bmiss_offload_max_vdev =
6607 		__cpu_to_le32(TARGET_BMISS_OFFLOAD_MAX_VDEV);
6608 
6609 	config.roam_offload_max_vdev =
6610 		__cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_VDEV);
6611 
6612 	config.roam_offload_max_ap_profiles =
6613 		__cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES);
6614 
6615 	config.num_mcast_groups = __cpu_to_le32(TARGET_NUM_MCAST_GROUPS);
6616 	config.num_mcast_table_elems =
6617 		__cpu_to_le32(TARGET_NUM_MCAST_TABLE_ELEMS);
6618 
6619 	config.mcast2ucast_mode = __cpu_to_le32(TARGET_MCAST2UCAST_MODE);
6620 	config.tx_dbg_log_size = __cpu_to_le32(TARGET_TX_DBG_LOG_SIZE);
6621 	config.num_wds_entries = __cpu_to_le32(TARGET_NUM_WDS_ENTRIES);
6622 	config.dma_burst_size = __cpu_to_le32(TARGET_DMA_BURST_SIZE);
6623 	config.mac_aggr_delim = __cpu_to_le32(TARGET_MAC_AGGR_DELIM);
6624 
6625 	val = TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
6626 	config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
6627 
6628 	config.vow_config = __cpu_to_le32(TARGET_VOW_CONFIG);
6629 
6630 	config.gtk_offload_max_vdev =
6631 		__cpu_to_le32(TARGET_GTK_OFFLOAD_MAX_VDEV);
6632 
6633 	config.num_msdu_desc = __cpu_to_le32(TARGET_NUM_MSDU_DESC);
6634 	config.max_frag_entries = __cpu_to_le32(TARGET_MAX_FRAG_ENTRIES);
6635 
6636 	buf = ath10k_wmi_alloc_skb(ar, struct_size(cmd, mem_chunks.items,
6637 						   ar->wmi.num_mem_chunks));
6638 	if (!buf)
6639 		return ERR_PTR(-ENOMEM);
6640 
6641 	cmd = (struct wmi_init_cmd *)buf->data;
6642 
6643 	memcpy(&cmd->resource_config, &config, sizeof(config));
6644 	ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
6645 
6646 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init\n");
6647 	return buf;
6648 }
6649 
6650 static struct sk_buff *ath10k_wmi_10_1_op_gen_init(struct ath10k *ar)
6651 {
6652 	struct wmi_init_cmd_10x *cmd;
6653 	struct sk_buff *buf;
6654 	struct wmi_resource_config_10x config = {};
6655 	u32 val;
6656 
6657 	config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
6658 	config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
6659 	config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
6660 	config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
6661 	config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
6662 	config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
6663 	config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
6664 	config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
6665 	config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
6666 	config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
6667 	config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
6668 	config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
6669 	config.scan_max_pending_reqs =
6670 		__cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
6671 
6672 	config.bmiss_offload_max_vdev =
6673 		__cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV);
6674 
6675 	config.roam_offload_max_vdev =
6676 		__cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV);
6677 
6678 	config.roam_offload_max_ap_profiles =
6679 		__cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES);
6680 
6681 	config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS);
6682 	config.num_mcast_table_elems =
6683 		__cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS);
6684 
6685 	config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
6686 	config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
6687 	config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
6688 	config.dma_burst_size = __cpu_to_le32(TARGET_10X_DMA_BURST_SIZE);
6689 	config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);
6690 
6691 	val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
6692 	config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
6693 
6694 	config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG);
6695 
6696 	config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
6697 	config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);
6698 
6699 	buf = ath10k_wmi_alloc_skb(ar, struct_size(cmd, mem_chunks.items,
6700 						   ar->wmi.num_mem_chunks));
6701 	if (!buf)
6702 		return ERR_PTR(-ENOMEM);
6703 
6704 	cmd = (struct wmi_init_cmd_10x *)buf->data;
6705 
6706 	memcpy(&cmd->resource_config, &config, sizeof(config));
6707 	ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
6708 
6709 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10x\n");
6710 	return buf;
6711 }
6712 
6713 static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
6714 {
6715 	struct wmi_init_cmd_10_2 *cmd;
6716 	struct sk_buff *buf;
6717 	struct wmi_resource_config_10x config = {};
6718 	u32 val, features;
6719 
6720 	config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
6721 	config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
6722 
6723 	if (ath10k_peer_stats_enabled(ar)) {
6724 		config.num_peers = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_PEERS);
6725 		config.num_tids = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_TIDS);
6726 	} else {
6727 		config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
6728 		config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
6729 	}
6730 
6731 	config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
6732 	config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
6733 	config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
6734 	config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
6735 	config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
6736 	config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
6737 	config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
6738 	config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
6739 
6740 	config.scan_max_pending_reqs =
6741 		__cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
6742 
6743 	config.bmiss_offload_max_vdev =
6744 		__cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV);
6745 
6746 	config.roam_offload_max_vdev =
6747 		__cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV);
6748 
6749 	config.roam_offload_max_ap_profiles =
6750 		__cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES);
6751 
6752 	config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS);
6753 	config.num_mcast_table_elems =
6754 		__cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS);
6755 
6756 	config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
6757 	config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
6758 	config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
6759 	config.dma_burst_size = __cpu_to_le32(TARGET_10_2_DMA_BURST_SIZE);
6760 	config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);
6761 
6762 	val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
6763 	config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
6764 
6765 	config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG);
6766 
6767 	config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
6768 	config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);
6769 
6770 	buf = ath10k_wmi_alloc_skb(ar, struct_size(cmd, mem_chunks.items,
6771 						   ar->wmi.num_mem_chunks));
6772 	if (!buf)
6773 		return ERR_PTR(-ENOMEM);
6774 
6775 	cmd = (struct wmi_init_cmd_10_2 *)buf->data;
6776 
6777 	features = WMI_10_2_RX_BATCH_MODE;
6778 
6779 	if (test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags) &&
6780 	    test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map))
6781 		features |= WMI_10_2_COEX_GPIO;
6782 
6783 	if (ath10k_peer_stats_enabled(ar))
6784 		features |= WMI_10_2_PEER_STATS;
6785 
6786 	if (test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map))
6787 		features |= WMI_10_2_BSS_CHAN_INFO;
6788 
6789 	cmd->resource_config.feature_mask = __cpu_to_le32(features);
6790 
6791 	memcpy(&cmd->resource_config.common, &config, sizeof(config));
6792 	ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
6793 
6794 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.2\n");
6795 	return buf;
6796 }
6797 
6798 static struct sk_buff *ath10k_wmi_10_4_op_gen_init(struct ath10k *ar)
6799 {
6800 	struct wmi_init_cmd_10_4 *cmd;
6801 	struct sk_buff *buf;
6802 	struct wmi_resource_config_10_4 config = {};
6803 
6804 	config.num_vdevs = __cpu_to_le32(ar->max_num_vdevs);
6805 	config.num_peers = __cpu_to_le32(ar->max_num_peers);
6806 	config.num_active_peers = __cpu_to_le32(ar->num_active_peers);
6807 	config.num_tids = __cpu_to_le32(ar->num_tids);
6808 
6809 	config.num_offload_peers = __cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_PEERS);
6810 	config.num_offload_reorder_buffs =
6811 			__cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS);
6812 	config.num_peer_keys  = __cpu_to_le32(TARGET_10_4_NUM_PEER_KEYS);
6813 	config.ast_skid_limit = __cpu_to_le32(TARGET_10_4_AST_SKID_LIMIT);
6814 	config.tx_chain_mask  = __cpu_to_le32(ar->hw_params.tx_chain_mask);
6815 	config.rx_chain_mask  = __cpu_to_le32(ar->hw_params.rx_chain_mask);
6816 
6817 	config.rx_timeout_pri[0] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
6818 	config.rx_timeout_pri[1] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
6819 	config.rx_timeout_pri[2] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
6820 	config.rx_timeout_pri[3] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_HI_PRI);
6821 
6822 	config.rx_decap_mode	    = __cpu_to_le32(ar->wmi.rx_decap_mode);
6823 	config.scan_max_pending_req = __cpu_to_le32(TARGET_10_4_SCAN_MAX_REQS);
6824 	config.bmiss_offload_max_vdev =
6825 			__cpu_to_le32(TARGET_10_4_BMISS_OFFLOAD_MAX_VDEV);
6826 	config.roam_offload_max_vdev  =
6827 			__cpu_to_le32(TARGET_10_4_ROAM_OFFLOAD_MAX_VDEV);
6828 	config.roam_offload_max_ap_profiles =
6829 			__cpu_to_le32(TARGET_10_4_ROAM_OFFLOAD_MAX_PROFILES);
6830 	config.num_mcast_groups = __cpu_to_le32(TARGET_10_4_NUM_MCAST_GROUPS);
6831 	config.num_mcast_table_elems =
6832 			__cpu_to_le32(TARGET_10_4_NUM_MCAST_TABLE_ELEMS);
6833 
6834 	config.mcast2ucast_mode = __cpu_to_le32(TARGET_10_4_MCAST2UCAST_MODE);
6835 	config.tx_dbg_log_size  = __cpu_to_le32(TARGET_10_4_TX_DBG_LOG_SIZE);
6836 	config.num_wds_entries  = __cpu_to_le32(TARGET_10_4_NUM_WDS_ENTRIES);
6837 	config.dma_burst_size   = __cpu_to_le32(TARGET_10_4_DMA_BURST_SIZE);
6838 	config.mac_aggr_delim   = __cpu_to_le32(TARGET_10_4_MAC_AGGR_DELIM);
6839 
6840 	config.rx_skip_defrag_timeout_dup_detection_check =
6841 	  __cpu_to_le32(TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK);
6842 
6843 	config.vow_config = __cpu_to_le32(TARGET_10_4_VOW_CONFIG);
6844 	config.gtk_offload_max_vdev =
6845 			__cpu_to_le32(TARGET_10_4_GTK_OFFLOAD_MAX_VDEV);
6846 	config.num_msdu_desc = __cpu_to_le32(ar->htt.max_num_pending_tx);
6847 	config.max_frag_entries = __cpu_to_le32(TARGET_10_4_11AC_TX_MAX_FRAGS);
6848 	config.max_peer_ext_stats =
6849 			__cpu_to_le32(TARGET_10_4_MAX_PEER_EXT_STATS);
6850 	config.smart_ant_cap = __cpu_to_le32(TARGET_10_4_SMART_ANT_CAP);
6851 
6852 	config.bk_minfree = __cpu_to_le32(TARGET_10_4_BK_MIN_FREE);
6853 	config.be_minfree = __cpu_to_le32(TARGET_10_4_BE_MIN_FREE);
6854 	config.vi_minfree = __cpu_to_le32(TARGET_10_4_VI_MIN_FREE);
6855 	config.vo_minfree = __cpu_to_le32(TARGET_10_4_VO_MIN_FREE);
6856 
6857 	config.rx_batchmode = __cpu_to_le32(TARGET_10_4_RX_BATCH_MODE);
6858 	config.tt_support =
6859 			__cpu_to_le32(TARGET_10_4_THERMAL_THROTTLING_CONFIG);
6860 	config.atf_config = __cpu_to_le32(TARGET_10_4_ATF_CONFIG);
6861 	config.iphdr_pad_config = __cpu_to_le32(TARGET_10_4_IPHDR_PAD_CONFIG);
6862 	config.qwrap_config = __cpu_to_le32(TARGET_10_4_QWRAP_CONFIG);
6863 
6864 	buf = ath10k_wmi_alloc_skb(ar, struct_size(cmd, mem_chunks.items,
6865 						   ar->wmi.num_mem_chunks));
6866 	if (!buf)
6867 		return ERR_PTR(-ENOMEM);
6868 
6869 	cmd = (struct wmi_init_cmd_10_4 *)buf->data;
6870 	memcpy(&cmd->resource_config, &config, sizeof(config));
6871 	ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
6872 
6873 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.4\n");
6874 	return buf;
6875 }
6876 
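/* Rejects scan requests whose IE length, channel count, SSID count or
 * BSSID count exceed the supported limits.
 */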
6877 int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg)
6878 {
6879 	if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN)
6880 		return -EINVAL;
6881 	if (arg->n_channels > ARRAY_SIZE(arg->channels))
6882 		return -EINVAL;
6883 	if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID)
6884 		return -EINVAL;
6885 	if (arg->n_bssids > WLAN_SCAN_PARAMS_MAX_BSSID)
6886 		return -EINVAL;
6887 
6888 	return 0;
6889 }
6890 
6891 static size_t
6892 ath10k_wmi_start_scan_tlvs_len(const struct wmi_start_scan_arg *arg)
6893 {
6894 	int len = 0;
6895 
6896 	if (arg->ie_len) {
6897 		len += sizeof(struct wmi_ie_data);
6898 		len += roundup(arg->ie_len, 4);
6899 	}
6900 
6901 	if (arg->n_channels) {
6902 		len += sizeof(struct wmi_chan_list);
6903 		len += sizeof(__le32) * arg->n_channels;
6904 	}
6905 
6906 	if (arg->n_ssids) {
6907 		len += sizeof(struct wmi_ssid_list);
6908 		len += sizeof(struct wmi_ssid) * arg->n_ssids;
6909 	}
6910 
6911 	if (arg->n_bssids) {
6912 		len += sizeof(struct wmi_bssid_list);
6913 		len += sizeof(struct wmi_mac_addr) * arg->n_bssids;
6914 	}
6915 
6916 	return len;
6917 }
6918 
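/* Fills the scan command fields common to the WMI variants: the host
 * request prefixes are OR'ed into scan_id/scan_req_id and the common
 * parameters are converted to little endian.
 */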
6919 void ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn,
6920 				      const struct wmi_start_scan_arg *arg)
6921 {
6922 	u32 scan_id;
6923 	u32 scan_req_id;
6924 
6925 	scan_id  = WMI_HOST_SCAN_REQ_ID_PREFIX;
6926 	scan_id |= arg->scan_id;
6927 
6928 	scan_req_id  = WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
6929 	scan_req_id |= arg->scan_req_id;
6930 
6931 	cmn->scan_id            = __cpu_to_le32(scan_id);
6932 	cmn->scan_req_id        = __cpu_to_le32(scan_req_id);
6933 	cmn->vdev_id            = __cpu_to_le32(arg->vdev_id);
6934 	cmn->scan_priority      = __cpu_to_le32(arg->scan_priority);
6935 	cmn->notify_scan_events = __cpu_to_le32(arg->notify_scan_events);
6936 	cmn->dwell_time_active  = __cpu_to_le32(arg->dwell_time_active);
6937 	cmn->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive);
6938 	cmn->min_rest_time      = __cpu_to_le32(arg->min_rest_time);
6939 	cmn->max_rest_time      = __cpu_to_le32(arg->max_rest_time);
6940 	cmn->repeat_probe_time  = __cpu_to_le32(arg->repeat_probe_time);
6941 	cmn->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time);
6942 	cmn->idle_time          = __cpu_to_le32(arg->idle_time);
6943 	cmn->max_scan_time      = __cpu_to_le32(arg->max_scan_time);
6944 	cmn->probe_delay        = __cpu_to_le32(arg->probe_delay);
6945 	cmn->scan_ctrl_flags    = __cpu_to_le32(arg->scan_ctrl_flags);
6946 }
6947 
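/* Appends the optional channel list, SSID list, BSSID list and IE data
 * TLVs after the common scan fields; the buffer is expected to have been
 * sized with ath10k_wmi_start_scan_tlvs_len().
 */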
6948 static void
6949 ath10k_wmi_put_start_scan_tlvs(struct wmi_start_scan_tlvs *tlvs,
6950 			       const struct wmi_start_scan_arg *arg)
6951 {
6952 	struct wmi_ie_data *ie;
6953 	struct wmi_chan_list *channels;
6954 	struct wmi_ssid_list *ssids;
6955 	struct wmi_bssid_list *bssids;
6956 	void *ptr = tlvs->tlvs;
6957 	int i;
6958 
6959 	if (arg->n_channels) {
6960 		channels = ptr;
6961 		channels->tag = __cpu_to_le32(WMI_CHAN_LIST_TAG);
6962 		channels->num_chan = __cpu_to_le32(arg->n_channels);
6963 
6964 		for (i = 0; i < arg->n_channels; i++)
6965 			channels->channel_list[i].freq =
6966 				__cpu_to_le16(arg->channels[i]);
6967 
6968 		ptr += sizeof(*channels);
6969 		ptr += sizeof(__le32) * arg->n_channels;
6970 	}
6971 
6972 	if (arg->n_ssids) {
6973 		ssids = ptr;
6974 		ssids->tag = __cpu_to_le32(WMI_SSID_LIST_TAG);
6975 		ssids->num_ssids = __cpu_to_le32(arg->n_ssids);
6976 
6977 		for (i = 0; i < arg->n_ssids; i++) {
6978 			ssids->ssids[i].ssid_len =
6979 				__cpu_to_le32(arg->ssids[i].len);
6980 			memcpy(&ssids->ssids[i].ssid,
6981 			       arg->ssids[i].ssid,
6982 			       arg->ssids[i].len);
6983 		}
6984 
6985 		ptr += sizeof(*ssids);
6986 		ptr += sizeof(struct wmi_ssid) * arg->n_ssids;
6987 	}
6988 
6989 	if (arg->n_bssids) {
6990 		bssids = ptr;
6991 		bssids->tag = __cpu_to_le32(WMI_BSSID_LIST_TAG);
6992 		bssids->num_bssid = __cpu_to_le32(arg->n_bssids);
6993 
6994 		for (i = 0; i < arg->n_bssids; i++)
6995 			ether_addr_copy(bssids->bssid_list[i].addr,
6996 					arg->bssids[i].bssid);
6997 
6998 		ptr += sizeof(*bssids);
6999 		ptr += sizeof(struct wmi_mac_addr) * arg->n_bssids;
7000 	}
7001 
7002 	if (arg->ie_len) {
7003 		ie = ptr;
7004 		ie->tag = __cpu_to_le32(WMI_IE_TAG);
7005 		ie->ie_len = __cpu_to_le32(arg->ie_len);
7006 		memcpy(ie->ie_data, arg->ie, arg->ie_len);
7007 
7008 		ptr += sizeof(*ie);
7009 		ptr += roundup(arg->ie_len, 4);
7010 	}
7011 }
7012 
7013 static struct sk_buff *
7014 ath10k_wmi_op_gen_start_scan(struct ath10k *ar,
7015 			     const struct wmi_start_scan_arg *arg)
7016 {
7017 	struct wmi_start_scan_cmd *cmd;
7018 	struct sk_buff *skb;
7019 	size_t len;
7020 	int ret;
7021 
7022 	ret = ath10k_wmi_start_scan_verify(arg);
7023 	if (ret)
7024 		return ERR_PTR(ret);
7025 
7026 	len = sizeof(*cmd) + ath10k_wmi_start_scan_tlvs_len(arg);
7027 	skb = ath10k_wmi_alloc_skb(ar, len);
7028 	if (!skb)
7029 		return ERR_PTR(-ENOMEM);
7030 
7031 	cmd = (struct wmi_start_scan_cmd *)skb->data;
7032 
7033 	ath10k_wmi_put_start_scan_common(&cmd->common, arg);
7034 	ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
7035 
7036 	cmd->burst_duration_ms = __cpu_to_le32(0);
7037 
7038 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi start scan\n");
7039 	return skb;
7040 }
7041 
7042 static struct sk_buff *
7043 ath10k_wmi_10x_op_gen_start_scan(struct ath10k *ar,
7044 				 const struct wmi_start_scan_arg *arg)
7045 {
7046 	struct wmi_10x_start_scan_cmd *cmd;
7047 	struct sk_buff *skb;
7048 	size_t len;
7049 	int ret;
7050 
7051 	ret = ath10k_wmi_start_scan_verify(arg);
7052 	if (ret)
7053 		return ERR_PTR(ret);
7054 
7055 	len = sizeof(*cmd) + ath10k_wmi_start_scan_tlvs_len(arg);
7056 	skb = ath10k_wmi_alloc_skb(ar, len);
7057 	if (!skb)
7058 		return ERR_PTR(-ENOMEM);
7059 
7060 	cmd = (struct wmi_10x_start_scan_cmd *)skb->data;
7061 
7062 	ath10k_wmi_put_start_scan_common(&cmd->common, arg);
7063 	ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
7064 
7065 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi 10x start scan\n");
7066 	return skb;
7067 }
7068 
7069 void ath10k_wmi_start_scan_init(struct ath10k *ar,
7070 				struct wmi_start_scan_arg *arg)
7071 {
7072 	/* setup commonly used values */
7073 	arg->scan_req_id = 1;
7074 	arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
7075 	arg->dwell_time_active = 50;
7076 	arg->dwell_time_passive = 150;
7077 	arg->min_rest_time = 50;
7078 	arg->max_rest_time = 500;
7079 	arg->repeat_probe_time = 0;
7080 	arg->probe_spacing_time = 0;
7081 	arg->idle_time = 0;
7082 	arg->max_scan_time = 20000;
7083 	arg->probe_delay = 5;
7084 	arg->notify_scan_events = WMI_SCAN_EVENT_STARTED
7085 		| WMI_SCAN_EVENT_COMPLETED
7086 		| WMI_SCAN_EVENT_BSS_CHANNEL
7087 		| WMI_SCAN_EVENT_FOREIGN_CHANNEL
7088 		| WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT
7089 		| WMI_SCAN_EVENT_DEQUEUED;
7090 	arg->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT;
7091 	arg->n_bssids = 1;
7092 	arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF";
7093 }
7094 
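/*
 * Note that the scan and requestor ids sent to firmware are the caller
 * provided values with the host prefixes OR'ed in, which is why both
 * are limited to 12 bits here.
 */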
7095 static struct sk_buff *
7096 ath10k_wmi_op_gen_stop_scan(struct ath10k *ar,
7097 			    const struct wmi_stop_scan_arg *arg)
7098 {
7099 	struct wmi_stop_scan_cmd *cmd;
7100 	struct sk_buff *skb;
7101 	u32 scan_id;
7102 	u32 req_id;
7103 
7104 	if (arg->req_id > 0xFFF)
7105 		return ERR_PTR(-EINVAL);
7106 	if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
7107 		return ERR_PTR(-EINVAL);
7108 
7109 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7110 	if (!skb)
7111 		return ERR_PTR(-ENOMEM);
7112 
7113 	scan_id = arg->u.scan_id;
7114 	scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;
7115 
7116 	req_id = arg->req_id;
7117 	req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
7118 
7119 	cmd = (struct wmi_stop_scan_cmd *)skb->data;
7120 	cmd->req_type    = __cpu_to_le32(arg->req_type);
7121 	cmd->vdev_id     = __cpu_to_le32(arg->u.vdev_id);
7122 	cmd->scan_id     = __cpu_to_le32(scan_id);
7123 	cmd->scan_req_id = __cpu_to_le32(req_id);
7124 
7125 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7126 		   "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n",
7127 		   arg->req_id, arg->req_type, arg->u.scan_id);
7128 	return skb;
7129 }
7130 
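/*
 * vdev lifecycle command generators (create/delete/start/stop/up/down)
 * follow.  A minimal sketch of how a generator is typically consumed,
 * assuming the wmi-ops wrappers in wmi-ops.h:
 *
 *	skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, addr);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
 */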
7131 static struct sk_buff *
7132 ath10k_wmi_op_gen_vdev_create(struct ath10k *ar, u32 vdev_id,
7133 			      enum wmi_vdev_type type,
7134 			      enum wmi_vdev_subtype subtype,
7135 			      const u8 macaddr[ETH_ALEN])
7136 {
7137 	struct wmi_vdev_create_cmd *cmd;
7138 	struct sk_buff *skb;
7139 
7140 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7141 	if (!skb)
7142 		return ERR_PTR(-ENOMEM);
7143 
7144 	cmd = (struct wmi_vdev_create_cmd *)skb->data;
7145 	cmd->vdev_id      = __cpu_to_le32(vdev_id);
7146 	cmd->vdev_type    = __cpu_to_le32(type);
7147 	cmd->vdev_subtype = __cpu_to_le32(subtype);
7148 	ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);
7149 
7150 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7151 		   "WMI vdev create: id %d type %d subtype %d macaddr %pM\n",
7152 		   vdev_id, type, subtype, macaddr);
7153 	return skb;
7154 }
7155 
7156 static struct sk_buff *
7157 ath10k_wmi_op_gen_vdev_delete(struct ath10k *ar, u32 vdev_id)
7158 {
7159 	struct wmi_vdev_delete_cmd *cmd;
7160 	struct sk_buff *skb;
7161 
7162 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7163 	if (!skb)
7164 		return ERR_PTR(-ENOMEM);
7165 
7166 	cmd = (struct wmi_vdev_delete_cmd *)skb->data;
7167 	cmd->vdev_id = __cpu_to_le32(vdev_id);
7168 
7169 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7170 		   "WMI vdev delete id %d\n", vdev_id);
7171 	return skb;
7172 }
7173 
7174 static struct sk_buff *
7175 ath10k_wmi_op_gen_vdev_start(struct ath10k *ar,
7176 			     const struct wmi_vdev_start_request_arg *arg,
7177 			     bool restart)
7178 {
7179 	struct wmi_vdev_start_request_cmd *cmd;
7180 	struct sk_buff *skb;
7181 	const char *cmdname;
7182 	u32 flags = 0;
7183 
7184 	if (WARN_ON(arg->hidden_ssid && !arg->ssid))
7185 		return ERR_PTR(-EINVAL);
7186 	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
7187 		return ERR_PTR(-EINVAL);
7188 
7189 	if (restart)
7190 		cmdname = "restart";
7191 	else
7192 		cmdname = "start";
7193 
7194 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7195 	if (!skb)
7196 		return ERR_PTR(-ENOMEM);
7197 
7198 	if (arg->hidden_ssid)
7199 		flags |= WMI_VDEV_START_HIDDEN_SSID;
7200 	if (arg->pmf_enabled)
7201 		flags |= WMI_VDEV_START_PMF_ENABLED;
7202 
7203 	cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
7204 	cmd->vdev_id         = __cpu_to_le32(arg->vdev_id);
7205 	cmd->disable_hw_ack  = __cpu_to_le32(arg->disable_hw_ack);
7206 	cmd->beacon_interval = __cpu_to_le32(arg->bcn_intval);
7207 	cmd->dtim_period     = __cpu_to_le32(arg->dtim_period);
7208 	cmd->flags           = __cpu_to_le32(flags);
7209 	cmd->bcn_tx_rate     = __cpu_to_le32(arg->bcn_tx_rate);
7210 	cmd->bcn_tx_power    = __cpu_to_le32(arg->bcn_tx_power);
7211 
7212 	if (arg->ssid) {
7213 		cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len);
7214 		memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
7215 	}
7216 
7217 	ath10k_wmi_put_wmi_channel(ar, &cmd->chan, &arg->channel);
7218 
7219 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7220 		   "wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, ch_flags: 0x%0X, max_power: %d\n",
7221 		   cmdname, arg->vdev_id,
7222 		   flags, arg->channel.freq, arg->channel.mode,
7223 		   cmd->chan.flags, arg->channel.max_power);
7224 
7225 	return skb;
7226 }
7227 
7228 static struct sk_buff *
7229 ath10k_wmi_op_gen_vdev_stop(struct ath10k *ar, u32 vdev_id)
7230 {
7231 	struct wmi_vdev_stop_cmd *cmd;
7232 	struct sk_buff *skb;
7233 
7234 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7235 	if (!skb)
7236 		return ERR_PTR(-ENOMEM);
7237 
7238 	cmd = (struct wmi_vdev_stop_cmd *)skb->data;
7239 	cmd->vdev_id = __cpu_to_le32(vdev_id);
7240 
7241 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id);
7242 	return skb;
7243 }
7244 
7245 static struct sk_buff *
7246 ath10k_wmi_op_gen_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
7247 			  const u8 *bssid)
7248 {
7249 	struct wmi_vdev_up_cmd *cmd;
7250 	struct sk_buff *skb;
7251 
7252 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7253 	if (!skb)
7254 		return ERR_PTR(-ENOMEM);
7255 
7256 	cmd = (struct wmi_vdev_up_cmd *)skb->data;
7257 	cmd->vdev_id       = __cpu_to_le32(vdev_id);
7258 	cmd->vdev_assoc_id = __cpu_to_le32(aid);
7259 	ether_addr_copy(cmd->vdev_bssid.addr, bssid);
7260 
7261 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7262 		   "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
7263 		   vdev_id, aid, bssid);
7264 	return skb;
7265 }
7266 
7267 static struct sk_buff *
7268 ath10k_wmi_op_gen_vdev_down(struct ath10k *ar, u32 vdev_id)
7269 {
7270 	struct wmi_vdev_down_cmd *cmd;
7271 	struct sk_buff *skb;
7272 
7273 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7274 	if (!skb)
7275 		return ERR_PTR(-ENOMEM);
7276 
7277 	cmd = (struct wmi_vdev_down_cmd *)skb->data;
7278 	cmd->vdev_id = __cpu_to_le32(vdev_id);
7279 
7280 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7281 		   "wmi mgmt vdev down id 0x%x\n", vdev_id);
7282 	return skb;
7283 }
7284 
7285 static struct sk_buff *
7286 ath10k_wmi_op_gen_vdev_set_param(struct ath10k *ar, u32 vdev_id,
7287 				 u32 param_id, u32 param_value)
7288 {
7289 	struct wmi_vdev_set_param_cmd *cmd;
7290 	struct sk_buff *skb;
7291 
7292 	if (param_id == WMI_VDEV_PARAM_UNSUPPORTED) {
7293 		ath10k_dbg(ar, ATH10K_DBG_WMI,
7294 			   "vdev param %d not supported by firmware\n",
7295 			    param_id);
7296 		return ERR_PTR(-EOPNOTSUPP);
7297 	}
7298 
7299 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7300 	if (!skb)
7301 		return ERR_PTR(-ENOMEM);
7302 
7303 	cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
7304 	cmd->vdev_id     = __cpu_to_le32(vdev_id);
7305 	cmd->param_id    = __cpu_to_le32(param_id);
7306 	cmd->param_value = __cpu_to_le32(param_value);
7307 
7308 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7309 		   "wmi vdev id 0x%x set param %d value %d\n",
7310 		   vdev_id, param_id, param_value);
7311 	return skb;
7312 }
7313 
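/*
 * Reject inconsistent key install requests up front: WMI_CIPHER_NONE must
 * not carry key data and any real cipher must provide it.  The key
 * material is appended after the fixed command structure.
 */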
7314 static struct sk_buff *
7315 ath10k_wmi_op_gen_vdev_install_key(struct ath10k *ar,
7316 				   const struct wmi_vdev_install_key_arg *arg)
7317 {
7318 	struct wmi_vdev_install_key_cmd *cmd;
7319 	struct sk_buff *skb;
7320 
7321 	if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL)
7322 		return ERR_PTR(-EINVAL);
7323 	if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL)
7324 		return ERR_PTR(-EINVAL);
7325 
7326 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd) + arg->key_len);
7327 	if (!skb)
7328 		return ERR_PTR(-ENOMEM);
7329 
7330 	cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
7331 	cmd->vdev_id       = __cpu_to_le32(arg->vdev_id);
7332 	cmd->key_idx       = __cpu_to_le32(arg->key_idx);
7333 	cmd->key_flags     = __cpu_to_le32(arg->key_flags);
7334 	cmd->key_cipher    = __cpu_to_le32(arg->key_cipher);
7335 	cmd->key_len       = __cpu_to_le32(arg->key_len);
7336 	cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len);
7337 	cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len);
7338 
7339 	if (arg->macaddr)
7340 		ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
7341 	if (arg->key_data)
7342 		memcpy(cmd->key_data, arg->key_data, arg->key_len);
7343 
7344 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7345 		   "wmi vdev install key idx %d cipher %d len %d\n",
7346 		   arg->key_idx, arg->key_cipher, arg->key_len);
7347 	return skb;
7348 }
7349 
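/*
 * Spectral scan configuration and enable command generators.  All
 * parameters are passed through to firmware verbatim in little endian;
 * no validation is performed here.
 */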
7350 static struct sk_buff *
7351 ath10k_wmi_op_gen_vdev_spectral_conf(struct ath10k *ar,
7352 				     const struct wmi_vdev_spectral_conf_arg *arg)
7353 {
7354 	struct wmi_vdev_spectral_conf_cmd *cmd;
7355 	struct sk_buff *skb;
7356 
7357 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7358 	if (!skb)
7359 		return ERR_PTR(-ENOMEM);
7360 
7361 	cmd = (struct wmi_vdev_spectral_conf_cmd *)skb->data;
7362 	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
7363 	cmd->scan_count = __cpu_to_le32(arg->scan_count);
7364 	cmd->scan_period = __cpu_to_le32(arg->scan_period);
7365 	cmd->scan_priority = __cpu_to_le32(arg->scan_priority);
7366 	cmd->scan_fft_size = __cpu_to_le32(arg->scan_fft_size);
7367 	cmd->scan_gc_ena = __cpu_to_le32(arg->scan_gc_ena);
7368 	cmd->scan_restart_ena = __cpu_to_le32(arg->scan_restart_ena);
7369 	cmd->scan_noise_floor_ref = __cpu_to_le32(arg->scan_noise_floor_ref);
7370 	cmd->scan_init_delay = __cpu_to_le32(arg->scan_init_delay);
7371 	cmd->scan_nb_tone_thr = __cpu_to_le32(arg->scan_nb_tone_thr);
7372 	cmd->scan_str_bin_thr = __cpu_to_le32(arg->scan_str_bin_thr);
7373 	cmd->scan_wb_rpt_mode = __cpu_to_le32(arg->scan_wb_rpt_mode);
7374 	cmd->scan_rssi_rpt_mode = __cpu_to_le32(arg->scan_rssi_rpt_mode);
7375 	cmd->scan_rssi_thr = __cpu_to_le32(arg->scan_rssi_thr);
7376 	cmd->scan_pwr_format = __cpu_to_le32(arg->scan_pwr_format);
7377 	cmd->scan_rpt_mode = __cpu_to_le32(arg->scan_rpt_mode);
7378 	cmd->scan_bin_scale = __cpu_to_le32(arg->scan_bin_scale);
7379 	cmd->scan_dbm_adj = __cpu_to_le32(arg->scan_dbm_adj);
7380 	cmd->scan_chn_mask = __cpu_to_le32(arg->scan_chn_mask);
7381 
7382 	return skb;
7383 }
7384 
7385 static struct sk_buff *
7386 ath10k_wmi_op_gen_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id,
7387 				       u32 trigger, u32 enable)
7388 {
7389 	struct wmi_vdev_spectral_enable_cmd *cmd;
7390 	struct sk_buff *skb;
7391 
7392 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7393 	if (!skb)
7394 		return ERR_PTR(-ENOMEM);
7395 
7396 	cmd = (struct wmi_vdev_spectral_enable_cmd *)skb->data;
7397 	cmd->vdev_id = __cpu_to_le32(vdev_id);
7398 	cmd->trigger_cmd = __cpu_to_le32(trigger);
7399 	cmd->enable_cmd = __cpu_to_le32(enable);
7400 
7401 	return skb;
7402 }
7403 
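/* Peer management command generators (create/delete/flush/set param). */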
7404 static struct sk_buff *
7405 ath10k_wmi_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
7406 			      const u8 peer_addr[ETH_ALEN],
7407 			      enum wmi_peer_type peer_type)
7408 {
7409 	struct wmi_peer_create_cmd *cmd;
7410 	struct sk_buff *skb;
7411 
7412 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7413 	if (!skb)
7414 		return ERR_PTR(-ENOMEM);
7415 
7416 	cmd = (struct wmi_peer_create_cmd *)skb->data;
7417 	cmd->vdev_id = __cpu_to_le32(vdev_id);
7418 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
7419 	cmd->peer_type = __cpu_to_le32(peer_type);
7420 
7421 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7422 		   "wmi peer create vdev_id %d peer_addr %pM\n",
7423 		   vdev_id, peer_addr);
7424 	return skb;
7425 }
7426 
7427 static struct sk_buff *
7428 ath10k_wmi_op_gen_peer_delete(struct ath10k *ar, u32 vdev_id,
7429 			      const u8 peer_addr[ETH_ALEN])
7430 {
7431 	struct wmi_peer_delete_cmd *cmd;
7432 	struct sk_buff *skb;
7433 
7434 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7435 	if (!skb)
7436 		return ERR_PTR(-ENOMEM);
7437 
7438 	cmd = (struct wmi_peer_delete_cmd *)skb->data;
7439 	cmd->vdev_id = __cpu_to_le32(vdev_id);
7440 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
7441 
7442 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7443 		   "wmi peer delete vdev_id %d peer_addr %pM\n",
7444 		   vdev_id, peer_addr);
7445 	return skb;
7446 }
7447 
7448 static struct sk_buff *
7449 ath10k_wmi_op_gen_peer_flush(struct ath10k *ar, u32 vdev_id,
7450 			     const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
7451 {
7452 	struct wmi_peer_flush_tids_cmd *cmd;
7453 	struct sk_buff *skb;
7454 
7455 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7456 	if (!skb)
7457 		return ERR_PTR(-ENOMEM);
7458 
7459 	cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
7460 	cmd->vdev_id         = __cpu_to_le32(vdev_id);
7461 	cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
7462 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
7463 
7464 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7465 		   "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n",
7466 		   vdev_id, peer_addr, tid_bitmap);
7467 	return skb;
7468 }
7469 
7470 static struct sk_buff *
7471 ath10k_wmi_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id,
7472 				 const u8 *peer_addr,
7473 				 enum wmi_peer_param param_id,
7474 				 u32 param_value)
7475 {
7476 	struct wmi_peer_set_param_cmd *cmd;
7477 	struct sk_buff *skb;
7478 
7479 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7480 	if (!skb)
7481 		return ERR_PTR(-ENOMEM);
7482 
7483 	cmd = (struct wmi_peer_set_param_cmd *)skb->data;
7484 	cmd->vdev_id     = __cpu_to_le32(vdev_id);
7485 	cmd->param_id    = __cpu_to_le32(param_id);
7486 	cmd->param_value = __cpu_to_le32(param_value);
7487 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
7488 
7489 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7490 		   "wmi vdev %d peer 0x%pM set param %d value %d\n",
7491 		   vdev_id, peer_addr, param_id, param_value);
7492 	return skb;
7493 }
7494 
7495 static struct sk_buff *
7496 ath10k_wmi_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id,
7497 			     enum wmi_sta_ps_mode psmode)
7498 {
7499 	struct wmi_sta_powersave_mode_cmd *cmd;
7500 	struct sk_buff *skb;
7501 
7502 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7503 	if (!skb)
7504 		return ERR_PTR(-ENOMEM);
7505 
7506 	cmd = (struct wmi_sta_powersave_mode_cmd *)skb->data;
7507 	cmd->vdev_id     = __cpu_to_le32(vdev_id);
7508 	cmd->sta_ps_mode = __cpu_to_le32(psmode);
7509 
7510 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7511 		   "wmi set powersave id 0x%x mode %d\n",
7512 		   vdev_id, psmode);
7513 	return skb;
7514 }
7515 
7516 static struct sk_buff *
7517 ath10k_wmi_op_gen_set_sta_ps(struct ath10k *ar, u32 vdev_id,
7518 			     enum wmi_sta_powersave_param param_id,
7519 			     u32 value)
7520 {
7521 	struct wmi_sta_powersave_param_cmd *cmd;
7522 	struct sk_buff *skb;
7523 
7524 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7525 	if (!skb)
7526 		return ERR_PTR(-ENOMEM);
7527 
7528 	cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
7529 	cmd->vdev_id     = __cpu_to_le32(vdev_id);
7530 	cmd->param_id    = __cpu_to_le32(param_id);
7531 	cmd->param_value = __cpu_to_le32(value);
7532 
7533 	ath10k_dbg(ar, ATH10K_DBG_STA,
7534 		   "wmi sta ps param vdev_id 0x%x param %d value %d\n",
7535 		   vdev_id, param_id, value);
7536 	return skb;
7537 }
7538 
7539 static struct sk_buff *
7540 ath10k_wmi_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac,
7541 			    enum wmi_ap_ps_peer_param param_id, u32 value)
7542 {
7543 	struct wmi_ap_ps_peer_cmd *cmd;
7544 	struct sk_buff *skb;
7545 
7546 	if (!mac)
7547 		return ERR_PTR(-EINVAL);
7548 
7549 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7550 	if (!skb)
7551 		return ERR_PTR(-ENOMEM);
7552 
7553 	cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
7554 	cmd->vdev_id = __cpu_to_le32(vdev_id);
7555 	cmd->param_id = __cpu_to_le32(param_id);
7556 	cmd->param_value = __cpu_to_le32(value);
7557 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
7558 
7559 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7560 		   "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n",
7561 		   vdev_id, param_id, value, mac);
7562 	return skb;
7563 }
7564 
7565 static struct sk_buff *
7566 ath10k_wmi_op_gen_scan_chan_list(struct ath10k *ar,
7567 				 const struct wmi_scan_chan_list_arg *arg)
7568 {
7569 	struct wmi_scan_chan_list_cmd *cmd;
7570 	struct sk_buff *skb;
7571 	struct wmi_channel_arg *ch;
7572 	struct wmi_channel *ci;
7573 	int i;
7574 
7575 	skb = ath10k_wmi_alloc_skb(ar, struct_size(cmd, chan_info, arg->n_channels));
7576 	if (!skb)
7577 		return ERR_PTR(-EINVAL);
7578 
7579 	cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
7580 	cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);
7581 
7582 	for (i = 0; i < arg->n_channels; i++) {
7583 		ch = &arg->channels[i];
7584 		ci = &cmd->chan_info[i];
7585 
7586 		ath10k_wmi_put_wmi_channel(ar, ci, ch);
7587 	}
7588 
7589 	return skb;
7590 }
7591 
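/*
 * Peer assoc command helpers.  ath10k_wmi_peer_assoc_fill() populates the
 * fields common to all firmware branches; the _main/_10_1/_10_2/_10_4
 * variants below zero or append the branch specific fields on top of it.
 */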
7592 static void
7593 ath10k_wmi_peer_assoc_fill(struct ath10k *ar, void *buf,
7594 			   const struct wmi_peer_assoc_complete_arg *arg)
7595 {
7596 	struct wmi_common_peer_assoc_complete_cmd *cmd = buf;
7597 
7598 	cmd->vdev_id            = __cpu_to_le32(arg->vdev_id);
7599 	cmd->peer_new_assoc     = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
7600 	cmd->peer_associd       = __cpu_to_le32(arg->peer_aid);
7601 	cmd->peer_flags         = __cpu_to_le32(arg->peer_flags);
7602 	cmd->peer_caps          = __cpu_to_le32(arg->peer_caps);
7603 	cmd->peer_listen_intval = __cpu_to_le32(arg->peer_listen_intval);
7604 	cmd->peer_ht_caps       = __cpu_to_le32(arg->peer_ht_caps);
7605 	cmd->peer_max_mpdu      = __cpu_to_le32(arg->peer_max_mpdu);
7606 	cmd->peer_mpdu_density  = __cpu_to_le32(arg->peer_mpdu_density);
7607 	cmd->peer_rate_caps     = __cpu_to_le32(arg->peer_rate_caps);
7608 	cmd->peer_nss           = __cpu_to_le32(arg->peer_num_spatial_streams);
7609 	cmd->peer_vht_caps      = __cpu_to_le32(arg->peer_vht_caps);
7610 	cmd->peer_phymode       = __cpu_to_le32(arg->peer_phymode);
7611 
7612 	ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
7613 
7614 	cmd->peer_legacy_rates.num_rates =
7615 		__cpu_to_le32(arg->peer_legacy_rates.num_rates);
7616 	memcpy(cmd->peer_legacy_rates.rates, arg->peer_legacy_rates.rates,
7617 	       arg->peer_legacy_rates.num_rates);
7618 
7619 	cmd->peer_ht_rates.num_rates =
7620 		__cpu_to_le32(arg->peer_ht_rates.num_rates);
7621 	memcpy(cmd->peer_ht_rates.rates, arg->peer_ht_rates.rates,
7622 	       arg->peer_ht_rates.num_rates);
7623 
7624 	cmd->peer_vht_rates.rx_max_rate =
7625 		__cpu_to_le32(arg->peer_vht_rates.rx_max_rate);
7626 	cmd->peer_vht_rates.rx_mcs_set =
7627 		__cpu_to_le32(arg->peer_vht_rates.rx_mcs_set);
7628 	cmd->peer_vht_rates.tx_max_rate =
7629 		__cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
7630 	cmd->peer_vht_rates.tx_mcs_set =
7631 		__cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
7632 }
7633 
7634 static void
7635 ath10k_wmi_peer_assoc_fill_main(struct ath10k *ar, void *buf,
7636 				const struct wmi_peer_assoc_complete_arg *arg)
7637 {
7638 	struct wmi_main_peer_assoc_complete_cmd *cmd = buf;
7639 
7640 	ath10k_wmi_peer_assoc_fill(ar, buf, arg);
7641 	memset(cmd->peer_ht_info, 0, sizeof(cmd->peer_ht_info));
7642 }
7643 
7644 static void
7645 ath10k_wmi_peer_assoc_fill_10_1(struct ath10k *ar, void *buf,
7646 				const struct wmi_peer_assoc_complete_arg *arg)
7647 {
7648 	ath10k_wmi_peer_assoc_fill(ar, buf, arg);
7649 }
7650 
7651 static void
7652 ath10k_wmi_peer_assoc_fill_10_2(struct ath10k *ar, void *buf,
7653 				const struct wmi_peer_assoc_complete_arg *arg)
7654 {
7655 	struct wmi_10_2_peer_assoc_complete_cmd *cmd = buf;
7656 	int max_mcs, max_nss;
7657 	u32 info0;
7658 
7659 	/* TODO: Is using max values okay with firmware? */
7660 	max_mcs = 0xf;
7661 	max_nss = 0xf;
7662 
7663 	info0 = SM(max_mcs, WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX) |
7664 		SM(max_nss, WMI_PEER_ASSOC_INFO0_MAX_NSS);
7665 
7666 	ath10k_wmi_peer_assoc_fill(ar, buf, arg);
7667 	cmd->info0 = __cpu_to_le32(info0);
7668 }
7669 
7670 static void
7671 ath10k_wmi_peer_assoc_fill_10_4(struct ath10k *ar, void *buf,
7672 				const struct wmi_peer_assoc_complete_arg *arg)
7673 {
7674 	struct wmi_10_4_peer_assoc_complete_cmd *cmd = buf;
7675 
7676 	ath10k_wmi_peer_assoc_fill_10_2(ar, buf, arg);
7677 	cmd->peer_bw_rxnss_override =
7678 		__cpu_to_le32(arg->peer_bw_rxnss_override);
7679 }
7680 
7681 static int
7682 ath10k_wmi_peer_assoc_check_arg(const struct wmi_peer_assoc_complete_arg *arg)
7683 {
7684 	if (arg->peer_mpdu_density > 16)
7685 		return -EINVAL;
7686 	if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
7687 		return -EINVAL;
7688 	if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
7689 		return -EINVAL;
7690 
7691 	return 0;
7692 }
7693 
7694 static struct sk_buff *
7695 ath10k_wmi_op_gen_peer_assoc(struct ath10k *ar,
7696 			     const struct wmi_peer_assoc_complete_arg *arg)
7697 {
7698 	size_t len = sizeof(struct wmi_main_peer_assoc_complete_cmd);
7699 	struct sk_buff *skb;
7700 	int ret;
7701 
7702 	ret = ath10k_wmi_peer_assoc_check_arg(arg);
7703 	if (ret)
7704 		return ERR_PTR(ret);
7705 
7706 	skb = ath10k_wmi_alloc_skb(ar, len);
7707 	if (!skb)
7708 		return ERR_PTR(-ENOMEM);
7709 
7710 	ath10k_wmi_peer_assoc_fill_main(ar, skb->data, arg);
7711 
7712 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7713 		   "wmi peer assoc vdev %d addr %pM (%s)\n",
7714 		   arg->vdev_id, arg->addr,
7715 		   arg->peer_reassoc ? "reassociate" : "new");
7716 	return skb;
7717 }
7718 
7719 static struct sk_buff *
7720 ath10k_wmi_10_1_op_gen_peer_assoc(struct ath10k *ar,
7721 				  const struct wmi_peer_assoc_complete_arg *arg)
7722 {
7723 	size_t len = sizeof(struct wmi_10_1_peer_assoc_complete_cmd);
7724 	struct sk_buff *skb;
7725 	int ret;
7726 
7727 	ret = ath10k_wmi_peer_assoc_check_arg(arg);
7728 	if (ret)
7729 		return ERR_PTR(ret);
7730 
7731 	skb = ath10k_wmi_alloc_skb(ar, len);
7732 	if (!skb)
7733 		return ERR_PTR(-ENOMEM);
7734 
7735 	ath10k_wmi_peer_assoc_fill_10_1(ar, skb->data, arg);
7736 
7737 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7738 		   "wmi peer assoc vdev %d addr %pM (%s)\n",
7739 		   arg->vdev_id, arg->addr,
7740 		   arg->peer_reassoc ? "reassociate" : "new");
7741 	return skb;
7742 }
7743 
7744 static struct sk_buff *
7745 ath10k_wmi_10_2_op_gen_peer_assoc(struct ath10k *ar,
7746 				  const struct wmi_peer_assoc_complete_arg *arg)
7747 {
7748 	size_t len = sizeof(struct wmi_10_2_peer_assoc_complete_cmd);
7749 	struct sk_buff *skb;
7750 	int ret;
7751 
7752 	ret = ath10k_wmi_peer_assoc_check_arg(arg);
7753 	if (ret)
7754 		return ERR_PTR(ret);
7755 
7756 	skb = ath10k_wmi_alloc_skb(ar, len);
7757 	if (!skb)
7758 		return ERR_PTR(-ENOMEM);
7759 
7760 	ath10k_wmi_peer_assoc_fill_10_2(ar, skb->data, arg);
7761 
7762 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7763 		   "wmi peer assoc vdev %d addr %pM (%s)\n",
7764 		   arg->vdev_id, arg->addr,
7765 		   arg->peer_reassoc ? "reassociate" : "new");
7766 	return skb;
7767 }
7768 
7769 static struct sk_buff *
7770 ath10k_wmi_10_4_op_gen_peer_assoc(struct ath10k *ar,
7771 				  const struct wmi_peer_assoc_complete_arg *arg)
7772 {
7773 	size_t len = sizeof(struct wmi_10_4_peer_assoc_complete_cmd);
7774 	struct sk_buff *skb;
7775 	int ret;
7776 
7777 	ret = ath10k_wmi_peer_assoc_check_arg(arg);
7778 	if (ret)
7779 		return ERR_PTR(ret);
7780 
7781 	skb = ath10k_wmi_alloc_skb(ar, len);
7782 	if (!skb)
7783 		return ERR_PTR(-ENOMEM);
7784 
7785 	ath10k_wmi_peer_assoc_fill_10_4(ar, skb->data, arg);
7786 
7787 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7788 		   "wmi peer assoc vdev %d addr %pM (%s)\n",
7789 		   arg->vdev_id, arg->addr,
7790 		   arg->peer_reassoc ? "reassociate" : "new");
7791 	return skb;
7792 }
7793 
7794 static struct sk_buff *
7795 ath10k_wmi_10_2_op_gen_pdev_get_temperature(struct ath10k *ar)
7796 {
7797 	struct sk_buff *skb;
7798 
7799 	skb = ath10k_wmi_alloc_skb(ar, 0);
7800 	if (!skb)
7801 		return ERR_PTR(-ENOMEM);
7802 
7803 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev get temperature\n");
7804 	return skb;
7805 }
7806 
7807 static struct sk_buff *
7808 ath10k_wmi_10_2_op_gen_pdev_bss_chan_info(struct ath10k *ar,
7809 					  enum wmi_bss_survey_req_type type)
7810 {
7811 	struct wmi_pdev_chan_info_req_cmd *cmd;
7812 	struct sk_buff *skb;
7813 
7814 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7815 	if (!skb)
7816 		return ERR_PTR(-ENOMEM);
7817 
7818 	cmd = (struct wmi_pdev_chan_info_req_cmd *)skb->data;
7819 	cmd->type = __cpu_to_le32(type);
7820 
7821 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7822 		   "wmi pdev bss info request type %d\n", type);
7823 
7824 	return skb;
7825 }
7826 
7827 /* This function assumes the beacon is already DMA mapped */
7828 static struct sk_buff *
7829 ath10k_wmi_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id, const void *bcn,
7830 			     size_t bcn_len, u32 bcn_paddr, bool dtim_zero,
7831 			     bool deliver_cab)
7832 {
7833 	struct wmi_bcn_tx_ref_cmd *cmd;
7834 	struct sk_buff *skb;
7835 	struct ieee80211_hdr *hdr;
7836 	u16 fc;
7837 
7838 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7839 	if (!skb)
7840 		return ERR_PTR(-ENOMEM);
7841 
7842 	hdr = (struct ieee80211_hdr *)bcn;
7843 	fc = le16_to_cpu(hdr->frame_control);
7844 
7845 	cmd = (struct wmi_bcn_tx_ref_cmd *)skb->data;
7846 	cmd->vdev_id = __cpu_to_le32(vdev_id);
7847 	cmd->data_len = __cpu_to_le32(bcn_len);
7848 	cmd->data_ptr = __cpu_to_le32(bcn_paddr);
7849 	cmd->msdu_id = 0;
7850 	cmd->frame_control = __cpu_to_le32(fc);
7851 	cmd->flags = 0;
7852 	cmd->antenna_mask = __cpu_to_le32(WMI_BCN_TX_REF_DEF_ANTENNA);
7853 
7854 	if (dtim_zero)
7855 		cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);
7856 
7857 	if (deliver_cab)
7858 		cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);
7859 
7860 	return skb;
7861 }
7862 
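/* Convert one access category worth of WMM parameters to wire format. */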
7863 void ath10k_wmi_set_wmm_param(struct wmi_wmm_params *params,
7864 			      const struct wmi_wmm_params_arg *arg)
7865 {
7866 	params->cwmin  = __cpu_to_le32(arg->cwmin);
7867 	params->cwmax  = __cpu_to_le32(arg->cwmax);
7868 	params->aifs   = __cpu_to_le32(arg->aifs);
7869 	params->txop   = __cpu_to_le32(arg->txop);
7870 	params->acm    = __cpu_to_le32(arg->acm);
7871 	params->no_ack = __cpu_to_le32(arg->no_ack);
7872 }
7873 
7874 static struct sk_buff *
7875 ath10k_wmi_op_gen_pdev_set_wmm(struct ath10k *ar,
7876 			       const struct wmi_wmm_params_all_arg *arg)
7877 {
7878 	struct wmi_pdev_set_wmm_params *cmd;
7879 	struct sk_buff *skb;
7880 
7881 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7882 	if (!skb)
7883 		return ERR_PTR(-ENOMEM);
7884 
7885 	cmd = (struct wmi_pdev_set_wmm_params *)skb->data;
7886 	ath10k_wmi_set_wmm_param(&cmd->ac_be, &arg->ac_be);
7887 	ath10k_wmi_set_wmm_param(&cmd->ac_bk, &arg->ac_bk);
7888 	ath10k_wmi_set_wmm_param(&cmd->ac_vi, &arg->ac_vi);
7889 	ath10k_wmi_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);
7890 
7891 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set wmm params\n");
7892 	return skb;
7893 }
7894 
7895 static struct sk_buff *
7896 ath10k_wmi_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
7897 {
7898 	struct wmi_request_stats_cmd *cmd;
7899 	struct sk_buff *skb;
7900 
7901 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7902 	if (!skb)
7903 		return ERR_PTR(-ENOMEM);
7904 
7905 	cmd = (struct wmi_request_stats_cmd *)skb->data;
7906 	cmd->stats_id = __cpu_to_le32(stats_mask);
7907 
7908 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi request stats 0x%08x\n",
7909 		   stats_mask);
7910 	return skb;
7911 }
7912 
7913 static struct sk_buff *
7914 ath10k_wmi_op_gen_force_fw_hang(struct ath10k *ar,
7915 				enum wmi_force_fw_hang_type type, u32 delay_ms)
7916 {
7917 	struct wmi_force_fw_hang_cmd *cmd;
7918 	struct sk_buff *skb;
7919 
7920 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7921 	if (!skb)
7922 		return ERR_PTR(-ENOMEM);
7923 
7924 	cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
7925 	cmd->type = __cpu_to_le32(type);
7926 	cmd->delay_ms = __cpu_to_le32(delay_ms);
7927 
7928 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n",
7929 		   type, delay_ms);
7930 	return skb;
7931 }
7932 
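/*
 * Firmware dbglog configuration.  A zero module_enable restores the
 * default of all modules at WARN level; otherwise the requested log
 * level is applied to the given module bitmap.
 */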
7933 static struct sk_buff *
7934 ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
7935 			     u32 log_level)
7936 {
7937 	struct wmi_dbglog_cfg_cmd *cmd;
7938 	struct sk_buff *skb;
7939 	u32 cfg;
7940 
7941 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7942 	if (!skb)
7943 		return ERR_PTR(-ENOMEM);
7944 
7945 	cmd = (struct wmi_dbglog_cfg_cmd *)skb->data;
7946 
7947 	if (module_enable) {
7948 		cfg = SM(log_level,
7949 			 ATH10K_DBGLOG_CFG_LOG_LVL);
7950 	} else {
7951 		/* set back defaults, all modules with WARN level */
7952 		cfg = SM(ATH10K_DBGLOG_LEVEL_WARN,
7953 			 ATH10K_DBGLOG_CFG_LOG_LVL);
7954 		module_enable = ~0;
7955 	}
7956 
7957 	cmd->module_enable = __cpu_to_le32(module_enable);
7958 	cmd->module_valid = __cpu_to_le32(~0);
7959 	cmd->config_enable = __cpu_to_le32(cfg);
7960 	cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK);
7961 
7962 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7963 		   "wmi dbglog cfg modules %08x %08x config %08x %08x\n",
7964 		   __le32_to_cpu(cmd->module_enable),
7965 		   __le32_to_cpu(cmd->module_valid),
7966 		   __le32_to_cpu(cmd->config_enable),
7967 		   __le32_to_cpu(cmd->config_valid));
7968 	return skb;
7969 }
7970 
7971 static struct sk_buff *
7972 ath10k_wmi_10_4_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
7973 				  u32 log_level)
7974 {
7975 	struct wmi_10_4_dbglog_cfg_cmd *cmd;
7976 	struct sk_buff *skb;
7977 	u32 cfg;
7978 
7979 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7980 	if (!skb)
7981 		return ERR_PTR(-ENOMEM);
7982 
7983 	cmd = (struct wmi_10_4_dbglog_cfg_cmd *)skb->data;
7984 
7985 	if (module_enable) {
7986 		cfg = SM(log_level,
7987 			 ATH10K_DBGLOG_CFG_LOG_LVL);
7988 	} else {
7989 		/* set back defaults, all modules with WARN level */
7990 		cfg = SM(ATH10K_DBGLOG_LEVEL_WARN,
7991 			 ATH10K_DBGLOG_CFG_LOG_LVL);
7992 		module_enable = ~0;
7993 	}
7994 
7995 	cmd->module_enable = __cpu_to_le64(module_enable);
7996 	cmd->module_valid = __cpu_to_le64(~0);
7997 	cmd->config_enable = __cpu_to_le32(cfg);
7998 	cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK);
7999 
8000 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8001 		   "wmi dbglog cfg modules 0x%016llx 0x%016llx config %08x %08x\n",
8002 		   __le64_to_cpu(cmd->module_enable),
8003 		   __le64_to_cpu(cmd->module_valid),
8004 		   __le32_to_cpu(cmd->config_enable),
8005 		   __le32_to_cpu(cmd->config_valid));
8006 	return skb;
8007 }
8008 
8009 static struct sk_buff *
8010 ath10k_wmi_op_gen_pktlog_enable(struct ath10k *ar, u32 ev_bitmap)
8011 {
8012 	struct wmi_pdev_pktlog_enable_cmd *cmd;
8013 	struct sk_buff *skb;
8014 
8015 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8016 	if (!skb)
8017 		return ERR_PTR(-ENOMEM);
8018 
8019 	ev_bitmap &= ATH10K_PKTLOG_ANY;
8020 
8021 	cmd = (struct wmi_pdev_pktlog_enable_cmd *)skb->data;
8022 	cmd->ev_bitmap = __cpu_to_le32(ev_bitmap);
8023 
8024 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi enable pktlog filter 0x%08x\n",
8025 		   ev_bitmap);
8026 	return skb;
8027 }
8028 
8029 static struct sk_buff *
8030 ath10k_wmi_op_gen_pktlog_disable(struct ath10k *ar)
8031 {
8032 	struct sk_buff *skb;
8033 
8034 	skb = ath10k_wmi_alloc_skb(ar, 0);
8035 	if (!skb)
8036 		return ERR_PTR(-ENOMEM);
8037 
8038 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi disable pktlog\n");
8039 	return skb;
8040 }
8041 
8042 static struct sk_buff *
8043 ath10k_wmi_op_gen_pdev_set_quiet_mode(struct ath10k *ar, u32 period,
8044 				      u32 duration, u32 next_offset,
8045 				      u32 enabled)
8046 {
8047 	struct wmi_pdev_set_quiet_cmd *cmd;
8048 	struct sk_buff *skb;
8049 
8050 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8051 	if (!skb)
8052 		return ERR_PTR(-ENOMEM);
8053 
8054 	cmd = (struct wmi_pdev_set_quiet_cmd *)skb->data;
8055 	cmd->period = __cpu_to_le32(period);
8056 	cmd->duration = __cpu_to_le32(duration);
8057 	cmd->next_start = __cpu_to_le32(next_offset);
8058 	cmd->enabled = __cpu_to_le32(enabled);
8059 
8060 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8061 		   "wmi quiet param: period %u duration %u enabled %d\n",
8062 		   period, duration, enabled);
8063 	return skb;
8064 }
8065 
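/* Block ack session control command generators (addba/delba) for a peer. */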
8066 static struct sk_buff *
8067 ath10k_wmi_op_gen_addba_clear_resp(struct ath10k *ar, u32 vdev_id,
8068 				   const u8 *mac)
8069 {
8070 	struct wmi_addba_clear_resp_cmd *cmd;
8071 	struct sk_buff *skb;
8072 
8073 	if (!mac)
8074 		return ERR_PTR(-EINVAL);
8075 
8076 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8077 	if (!skb)
8078 		return ERR_PTR(-ENOMEM);
8079 
8080 	cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
8081 	cmd->vdev_id = __cpu_to_le32(vdev_id);
8082 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
8083 
8084 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8085 		   "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n",
8086 		   vdev_id, mac);
8087 	return skb;
8088 }
8089 
8090 static struct sk_buff *
8091 ath10k_wmi_op_gen_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
8092 			     u32 tid, u32 buf_size)
8093 {
8094 	struct wmi_addba_send_cmd *cmd;
8095 	struct sk_buff *skb;
8096 
8097 	if (!mac)
8098 		return ERR_PTR(-EINVAL);
8099 
8100 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8101 	if (!skb)
8102 		return ERR_PTR(-ENOMEM);
8103 
8104 	cmd = (struct wmi_addba_send_cmd *)skb->data;
8105 	cmd->vdev_id = __cpu_to_le32(vdev_id);
8106 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
8107 	cmd->tid = __cpu_to_le32(tid);
8108 	cmd->buffersize = __cpu_to_le32(buf_size);
8109 
8110 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8111 		   "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
8112 		   vdev_id, mac, tid, buf_size);
8113 	return skb;
8114 }
8115 
8116 static struct sk_buff *
8117 ath10k_wmi_op_gen_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
8118 				 u32 tid, u32 status)
8119 {
8120 	struct wmi_addba_setresponse_cmd *cmd;
8121 	struct sk_buff *skb;
8122 
8123 	if (!mac)
8124 		return ERR_PTR(-EINVAL);
8125 
8126 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8127 	if (!skb)
8128 		return ERR_PTR(-ENOMEM);
8129 
8130 	cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
8131 	cmd->vdev_id = __cpu_to_le32(vdev_id);
8132 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
8133 	cmd->tid = __cpu_to_le32(tid);
8134 	cmd->statuscode = __cpu_to_le32(status);
8135 
8136 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8137 		   "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
8138 		   vdev_id, mac, tid, status);
8139 	return skb;
8140 }
8141 
8142 static struct sk_buff *
8143 ath10k_wmi_op_gen_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
8144 			     u32 tid, u32 initiator, u32 reason)
8145 {
8146 	struct wmi_delba_send_cmd *cmd;
8147 	struct sk_buff *skb;
8148 
8149 	if (!mac)
8150 		return ERR_PTR(-EINVAL);
8151 
8152 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8153 	if (!skb)
8154 		return ERR_PTR(-ENOMEM);
8155 
8156 	cmd = (struct wmi_delba_send_cmd *)skb->data;
8157 	cmd->vdev_id = __cpu_to_le32(vdev_id);
8158 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
8159 	cmd->tid = __cpu_to_le32(tid);
8160 	cmd->initiator = __cpu_to_le32(initiator);
8161 	cmd->reasoncode = __cpu_to_le32(reason);
8162 
8163 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8164 		   "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
8165 		   vdev_id, mac, tid, initiator, reason);
8166 	return skb;
8167 }
8168 
8169 static struct sk_buff *
8170 ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config(struct ath10k *ar, u32 param)
8171 {
8172 	struct wmi_pdev_get_tpc_config_cmd *cmd;
8173 	struct sk_buff *skb;
8174 
8175 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8176 	if (!skb)
8177 		return ERR_PTR(-ENOMEM);
8178 
8179 	cmd = (struct wmi_pdev_get_tpc_config_cmd *)skb->data;
8180 	cmd->param = __cpu_to_le32(param);
8181 
8182 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8183 		   "wmi pdev get tpc config param %d\n", param);
8184 	return skb;
8185 }
8186 
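/*
 * The *_stats_fill() helpers below format firmware statistics into a
 * caller provided text buffer of ATH10K_FW_STATS_BUF_SIZE bytes using
 * scnprintf(), so overlong output is truncated rather than overflowed.
 */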
8187 static void
8188 ath10k_wmi_fw_pdev_base_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
8189 				   char *buf, u32 *length)
8190 {
8191 	u32 len = *length;
8192 	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8193 
8194 	len += scnprintf(buf + len, buf_len - len, "\n");
8195 	len += scnprintf(buf + len, buf_len - len, "%30s\n",
8196 			"ath10k PDEV stats");
8197 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8198 			"=================");
8199 
8200 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8201 			"Channel noise floor", pdev->ch_noise_floor);
8202 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8203 			"Channel TX power", pdev->chan_tx_power);
8204 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8205 			"TX frame count", pdev->tx_frame_count);
8206 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8207 			"RX frame count", pdev->rx_frame_count);
8208 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8209 			"RX clear count", pdev->rx_clear_count);
8210 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8211 			"Cycle count", pdev->cycle_count);
8212 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8213 			"PHY error count", pdev->phy_err_count);
8214 
8215 	*length = len;
8216 }
8217 
8218 static void
8219 ath10k_wmi_fw_pdev_extra_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
8220 				    char *buf, u32 *length)
8221 {
8222 	u32 len = *length;
8223 	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8224 
8225 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8226 			"RTS bad count", pdev->rts_bad);
8227 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8228 			"RTS good count", pdev->rts_good);
8229 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8230 			"FCS bad count", pdev->fcs_bad);
8231 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8232 			"No beacon count", pdev->no_beacons);
8233 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8234 			"MIB int count", pdev->mib_int_count);
8235 
8236 	len += scnprintf(buf + len, buf_len - len, "\n");
8237 	*length = len;
8238 }
8239 
8240 static void
8241 ath10k_wmi_fw_pdev_tx_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
8242 				 char *buf, u32 *length)
8243 {
8244 	u32 len = *length;
8245 	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8246 
8247 	len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
8248 			 "ath10k PDEV TX stats");
8249 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8250 				 "=================");
8251 
8252 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8253 			 "HTT cookies queued", pdev->comp_queued);
8254 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8255 			 "HTT cookies disp.", pdev->comp_delivered);
8256 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8257 			 "MSDU queued", pdev->msdu_enqued);
8258 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8259 			 "MPDU queued", pdev->mpdu_enqued);
8260 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8261 			 "MSDUs dropped", pdev->wmm_drop);
8262 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8263 			 "Local enqued", pdev->local_enqued);
8264 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8265 			 "Local freed", pdev->local_freed);
8266 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8267 			 "HW queued", pdev->hw_queued);
8268 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8269 			 "PPDUs reaped", pdev->hw_reaped);
8270 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8271 			 "Num underruns", pdev->underrun);
8272 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8273 			 "PPDUs cleaned", pdev->tx_abort);
8274 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8275 			 "MPDUs requeued", pdev->mpdus_requeued);
8276 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8277 			 "Excessive retries", pdev->tx_ko);
8278 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8279 			 "HW rate", pdev->data_rc);
8280 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8281 			 "Sched self triggers", pdev->self_triggers);
8282 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8283 			 "Dropped due to SW retries",
8284 			 pdev->sw_retry_failure);
8285 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8286 			 "Illegal rate phy errors",
8287 			 pdev->illgl_rate_phy_err);
8288 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8289 			 "Pdev continuous xretry", pdev->pdev_cont_xretry);
8290 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8291 			 "TX timeout", pdev->pdev_tx_timeout);
8292 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8293 			 "PDEV resets", pdev->pdev_resets);
8294 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8295 			 "PHY underrun", pdev->phy_underrun);
8296 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8297 			 "MPDU is more than txop limit", pdev->txop_ovf);
8298 	*length = len;
8299 }
8300 
8301 static void
8302 ath10k_wmi_fw_pdev_rx_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
8303 				 char *buf, u32 *length)
8304 {
8305 	u32 len = *length;
8306 	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8307 
8308 	len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
8309 			 "ath10k PDEV RX stats");
8310 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8311 				 "=================");
8312 
8313 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8314 			 "Mid PPDU route change",
8315 			 pdev->mid_ppdu_route_change);
8316 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8317 			 "Tot. number of statuses", pdev->status_rcvd);
8318 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8319 			 "Extra frags on rings 0", pdev->r0_frags);
8320 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8321 			 "Extra frags on rings 1", pdev->r1_frags);
8322 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8323 			 "Extra frags on rings 2", pdev->r2_frags);
8324 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8325 			 "Extra frags on rings 3", pdev->r3_frags);
8326 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8327 			 "MSDUs delivered to HTT", pdev->htt_msdus);
8328 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8329 			 "MPDUs delivered to HTT", pdev->htt_mpdus);
8330 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8331 			 "MSDUs delivered to stack", pdev->loc_msdus);
8332 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8333 			 "MPDUs delivered to stack", pdev->loc_mpdus);
8334 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8335 			 "Oversized AMSDUs", pdev->oversize_amsdu);
8336 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8337 			 "PHY errors", pdev->phy_errs);
8338 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8339 			 "PHY errors drops", pdev->phy_err_drop);
8340 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8341 			 "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
8342 	*length = len;
8343 }
8344 
8345 static void
8346 ath10k_wmi_fw_vdev_stats_fill(const struct ath10k_fw_stats_vdev *vdev,
8347 			      char *buf, u32 *length)
8348 {
8349 	u32 len = *length;
8350 	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8351 	int i;
8352 
8353 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8354 			"vdev id", vdev->vdev_id);
8355 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8356 			"beacon snr", vdev->beacon_snr);
8357 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8358 			"data snr", vdev->data_snr);
8359 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8360 			"num rx frames", vdev->num_rx_frames);
8361 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8362 			"num rts fail", vdev->num_rts_fail);
8363 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8364 			"num rts success", vdev->num_rts_success);
8365 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8366 			"num rx err", vdev->num_rx_err);
8367 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8368 			"num rx discard", vdev->num_rx_discard);
8369 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8370 			"num tx not acked", vdev->num_tx_not_acked);
8371 
8372 	for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames); i++)
8373 		len += scnprintf(buf + len, buf_len - len,
8374 				"%25s [%02d] %u\n",
8375 				"num tx frames", i,
8376 				vdev->num_tx_frames[i]);
8377 
8378 	for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_retries); i++)
8379 		len += scnprintf(buf + len, buf_len - len,
8380 				"%25s [%02d] %u\n",
8381 				"num tx frames retries", i,
8382 				vdev->num_tx_frames_retries[i]);
8383 
8384 	for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_failures); i++)
8385 		len += scnprintf(buf + len, buf_len - len,
8386 				"%25s [%02d] %u\n",
8387 				"num tx frames failures", i,
8388 				vdev->num_tx_frames_failures[i]);
8389 
8390 	for (i = 0 ; i < ARRAY_SIZE(vdev->tx_rate_history); i++)
8391 		len += scnprintf(buf + len, buf_len - len,
8392 				"%25s [%02d] 0x%08x\n",
8393 				"tx rate history", i,
8394 				vdev->tx_rate_history[i]);
8395 
8396 	for (i = 0 ; i < ARRAY_SIZE(vdev->beacon_rssi_history); i++)
8397 		len += scnprintf(buf + len, buf_len - len,
8398 				"%25s [%02d] %u\n",
8399 				"beacon rssi history", i,
8400 				vdev->beacon_rssi_history[i]);
8401 
8402 	len += scnprintf(buf + len, buf_len - len, "\n");
8403 	*length = len;
8404 }
8405 
8406 static void
8407 ath10k_wmi_fw_peer_stats_fill(const struct ath10k_fw_stats_peer *peer,
8408 			      char *buf, u32 *length, bool extended_peer)
8409 {
8410 	u32 len = *length;
8411 	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8412 
8413 	len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
8414 			"Peer MAC address", peer->peer_macaddr);
8415 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8416 			"Peer RSSI", peer->peer_rssi);
8417 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8418 			"Peer TX rate", peer->peer_tx_rate);
8419 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8420 			"Peer RX rate", peer->peer_rx_rate);
8421 	if (!extended_peer)
8422 		len += scnprintf(buf + len, buf_len - len, "%30s %llu\n",
8423 				"Peer RX duration", peer->rx_duration);
8424 
8425 	len += scnprintf(buf + len, buf_len - len, "\n");
8426 	*length = len;
8427 }
8428 
8429 static void
8430 ath10k_wmi_fw_extd_peer_stats_fill(const struct ath10k_fw_extd_stats_peer *peer,
8431 				   char *buf, u32 *length)
8432 {
8433 	u32 len = *length;
8434 	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8435 
8436 	len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
8437 			"Peer MAC address", peer->peer_macaddr);
8438 	len += scnprintf(buf + len, buf_len - len, "%30s %llu\n",
8439 			"Peer RX duration", peer->rx_duration);
8440 }
8441 
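/*
 * Render the complete firmware stats report (pdev, vdev and peer
 * sections) while holding ar->data_lock so the lists cannot change
 * while they are being walked.
 */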
8442 void ath10k_wmi_main_op_fw_stats_fill(struct ath10k *ar,
8443 				      struct ath10k_fw_stats *fw_stats,
8444 				      char *buf)
8445 {
8446 	u32 len = 0;
8447 	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8448 	const struct ath10k_fw_stats_pdev *pdev;
8449 	const struct ath10k_fw_stats_vdev *vdev;
8450 	const struct ath10k_fw_stats_peer *peer;
8451 	size_t num_peers;
8452 	size_t num_vdevs;
8453 
8454 	spin_lock_bh(&ar->data_lock);
8455 
8456 	pdev = list_first_entry_or_null(&fw_stats->pdevs,
8457 					struct ath10k_fw_stats_pdev, list);
8458 	if (!pdev) {
8459 		ath10k_warn(ar, "failed to get pdev stats\n");
8460 		goto unlock;
8461 	}
8462 
8463 	num_peers = list_count_nodes(&fw_stats->peers);
8464 	num_vdevs = list_count_nodes(&fw_stats->vdevs);
8465 
8466 	ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
8467 	ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
8468 	ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
8469 
8470 	len += scnprintf(buf + len, buf_len - len, "\n");
8471 	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
8472 			 "ath10k VDEV stats", num_vdevs);
8473 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8474 				 "=================");
8475 
8476 	list_for_each_entry(vdev, &fw_stats->vdevs, list) {
8477 		ath10k_wmi_fw_vdev_stats_fill(vdev, buf, &len);
8478 	}
8479 
8480 	len += scnprintf(buf + len, buf_len - len, "\n");
8481 	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
8482 			 "ath10k PEER stats", num_peers);
8483 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8484 				 "=================");
8485 
8486 	list_for_each_entry(peer, &fw_stats->peers, list) {
8487 		ath10k_wmi_fw_peer_stats_fill(peer, buf, &len,
8488 					      fw_stats->extended);
8489 	}
8490 
8491 unlock:
8492 	spin_unlock_bh(&ar->data_lock);
8493 
8494 	if (len >= buf_len)
8495 		buf[len - 1] = 0;
8496 	else
8497 		buf[len] = 0;
8498 }
8499 
8500 void ath10k_wmi_10x_op_fw_stats_fill(struct ath10k *ar,
8501 				     struct ath10k_fw_stats *fw_stats,
8502 				     char *buf)
8503 {
8504 	unsigned int len = 0;
8505 	unsigned int buf_len = ATH10K_FW_STATS_BUF_SIZE;
8506 	const struct ath10k_fw_stats_pdev *pdev;
8507 	const struct ath10k_fw_stats_vdev *vdev;
8508 	const struct ath10k_fw_stats_peer *peer;
8509 	size_t num_peers;
8510 	size_t num_vdevs;
8511 
8512 	spin_lock_bh(&ar->data_lock);
8513 
8514 	pdev = list_first_entry_or_null(&fw_stats->pdevs,
8515 					struct ath10k_fw_stats_pdev, list);
8516 	if (!pdev) {
8517 		ath10k_warn(ar, "failed to get pdev stats\n");
8518 		goto unlock;
8519 	}
8520 
8521 	num_peers = list_count_nodes(&fw_stats->peers);
8522 	num_vdevs = list_count_nodes(&fw_stats->vdevs);
8523 
8524 	ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
8525 	ath10k_wmi_fw_pdev_extra_stats_fill(pdev, buf, &len);
8526 	ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
8527 	ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
8528 
8529 	len += scnprintf(buf + len, buf_len - len, "\n");
8530 	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
8531 			 "ath10k VDEV stats", num_vdevs);
8532 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8533 				 "=================");
8534 
8535 	list_for_each_entry(vdev, &fw_stats->vdevs, list) {
8536 		ath10k_wmi_fw_vdev_stats_fill(vdev, buf, &len);
8537 	}
8538 
8539 	len += scnprintf(buf + len, buf_len - len, "\n");
8540 	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
8541 			 "ath10k PEER stats", num_peers);
8542 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8543 				 "=================");
8544 
8545 	list_for_each_entry(peer, &fw_stats->peers, list) {
8546 		ath10k_wmi_fw_peer_stats_fill(peer, buf, &len,
8547 					      fw_stats->extended);
8548 	}
8549 
8550 unlock:
8551 	spin_unlock_bh(&ar->data_lock);
8552 
8553 	if (len >= buf_len)
8554 		buf[len - 1] = 0;
8555 	else
8556 		buf[len] = 0;
8557 }
8558 
8559 static struct sk_buff *
8560 ath10k_wmi_op_gen_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
8561 					   u32 detect_level, u32 detect_margin)
8562 {
8563 	struct wmi_pdev_set_adaptive_cca_params *cmd;
8564 	struct sk_buff *skb;
8565 
8566 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8567 	if (!skb)
8568 		return ERR_PTR(-ENOMEM);
8569 
8570 	cmd = (struct wmi_pdev_set_adaptive_cca_params *)skb->data;
8571 	cmd->enable = __cpu_to_le32(enable);
8572 	cmd->cca_detect_level = __cpu_to_le32(detect_level);
8573 	cmd->cca_detect_margin = __cpu_to_le32(detect_margin);
8574 
8575 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8576 		   "wmi pdev set adaptive cca params enable:%d detection level:%d detection margin:%d\n",
8577 		   enable, detect_level, detect_margin);
8578 	return skb;
8579 }
8580 
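/* Print one extended (10.4) vdev stats entry: PPDU/MPDU scheduler counters
 * plus the FTM counters, which are emitted only when their
 * WMI_VDEV_STATS_FTM_COUNT_VALID bit is set.
 */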
8581 static void
8582 ath10k_wmi_fw_vdev_stats_extd_fill(const struct ath10k_fw_stats_vdev_extd *vdev,
8583 				   char *buf, u32 *length)
8584 {
8585 	u32 len = *length;
8586 	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8587 	u32 val;
8588 
8589 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8590 			 "vdev id", vdev->vdev_id);
8591 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8592 			 "ppdu aggr count", vdev->ppdu_aggr_cnt);
8593 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8594 			 "ppdu noack", vdev->ppdu_noack);
8595 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8596 			 "mpdu queued", vdev->mpdu_queued);
8597 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8598 			 "ppdu nonaggr count", vdev->ppdu_nonaggr_cnt);
8599 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8600 			 "mpdu sw requeued", vdev->mpdu_sw_requeued);
8601 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8602 			 "mpdu success retry", vdev->mpdu_suc_retry);
8603 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8604 			 "mpdu success multitry", vdev->mpdu_suc_multitry);
8605 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8606 			 "mpdu fail retry", vdev->mpdu_fail_retry);
8607 	val = vdev->tx_ftm_suc;
8608 	if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
8609 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8610 				 "tx ftm success",
8611 				 MS(val, WMI_VDEV_STATS_FTM_COUNT));
8612 	val = vdev->tx_ftm_suc_retry;
8613 	if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
8614 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8615 				 "tx ftm success retry",
8616 				 MS(val, WMI_VDEV_STATS_FTM_COUNT));
8617 	val = vdev->tx_ftm_fail;
8618 	if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
8619 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8620 				 "tx ftm fail",
8621 				 MS(val, WMI_VDEV_STATS_FTM_COUNT));
8622 	val = vdev->rx_ftmr_cnt;
8623 	if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
8624 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8625 				 "rx ftm request count",
8626 				 MS(val, WMI_VDEV_STATS_FTM_COUNT));
8627 	val = vdev->rx_ftmr_dup_cnt;
8628 	if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
8629 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8630 				 "rx ftm request dup count",
8631 				 MS(val, WMI_VDEV_STATS_FTM_COUNT));
8632 	val = vdev->rx_iftmr_cnt;
8633 	if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
8634 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8635 				 "rx initial ftm req count",
8636 				 MS(val, WMI_VDEV_STATS_FTM_COUNT));
8637 	val = vdev->rx_iftmr_dup_cnt;
8638 	if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
8639 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8640 				 "rx initial ftm req dup cnt",
8641 				 MS(val, WMI_VDEV_STATS_FTM_COUNT));
8642 	len += scnprintf(buf + len, buf_len - len, "\n");
8643 
8644 	*length = len;
8645 }
8646 
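/* 10.4 variant of the firmware stats dump: adds the pdev sequence/MPDU
 * scheduler counters and rx overflow errors, prints vdevs in the extended
 * format and appends extended peer stats when fw_stats->extended is set.
 */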
8647 void ath10k_wmi_10_4_op_fw_stats_fill(struct ath10k *ar,
8648 				      struct ath10k_fw_stats *fw_stats,
8649 				      char *buf)
8650 {
8651 	u32 len = 0;
8652 	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8653 	const struct ath10k_fw_stats_pdev *pdev;
8654 	const struct ath10k_fw_stats_vdev_extd *vdev;
8655 	const struct ath10k_fw_stats_peer *peer;
8656 	const struct ath10k_fw_extd_stats_peer *extd_peer;
8657 	size_t num_peers;
8658 	size_t num_vdevs;
8659 
8660 	spin_lock_bh(&ar->data_lock);
8661 
8662 	pdev = list_first_entry_or_null(&fw_stats->pdevs,
8663 					struct ath10k_fw_stats_pdev, list);
8664 	if (!pdev) {
8665 		ath10k_warn(ar, "failed to get pdev stats\n");
8666 		goto unlock;
8667 	}
8668 
8669 	num_peers = list_count_nodes(&fw_stats->peers);
8670 	num_vdevs = list_count_nodes(&fw_stats->vdevs);
8671 
8672 	ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
8673 	ath10k_wmi_fw_pdev_extra_stats_fill(pdev, buf, &len);
8674 	ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
8675 
8676 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8677 			"HW paused", pdev->hw_paused);
8678 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8679 			"Seqs posted", pdev->seq_posted);
8680 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8681 			"Seqs failed queueing", pdev->seq_failed_queueing);
8682 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8683 			"Seqs completed", pdev->seq_completed);
8684 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8685 			"Seqs restarted", pdev->seq_restarted);
8686 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8687 			"MU Seqs posted", pdev->mu_seq_posted);
8688 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8689 			"MPDUs SW flushed", pdev->mpdus_sw_flush);
8690 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8691 			"MPDUs HW filtered", pdev->mpdus_hw_filter);
8692 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8693 			"MPDUs truncated", pdev->mpdus_truncated);
8694 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8695 			"MPDUs receive no ACK", pdev->mpdus_ack_failed);
8696 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8697 			"MPDUs expired", pdev->mpdus_expired);
8698 
8699 	ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
8700 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8701 			"Num Rx Overflow errors", pdev->rx_ovfl_errs);
8702 
8703 	len += scnprintf(buf + len, buf_len - len, "\n");
8704 	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
8705 			"ath10k VDEV stats", num_vdevs);
8706 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8707 				"=================");
8708 	list_for_each_entry(vdev, &fw_stats->vdevs, list) {
8709 		ath10k_wmi_fw_vdev_stats_extd_fill(vdev, buf, &len);
8710 	}
8711 
8712 	len += scnprintf(buf + len, buf_len - len, "\n");
8713 	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
8714 			"ath10k PEER stats", num_peers);
8715 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8716 				"=================");
8717 
8718 	list_for_each_entry(peer, &fw_stats->peers, list) {
8719 		ath10k_wmi_fw_peer_stats_fill(peer, buf, &len,
8720 					      fw_stats->extended);
8721 	}
8722 
8723 	if (fw_stats->extended) {
8724 		list_for_each_entry(extd_peer, &fw_stats->peers_extd, list) {
8725 			ath10k_wmi_fw_extd_peer_stats_fill(extd_peer, buf,
8726 							   &len);
8727 		}
8728 	}
8729 
8730 unlock:
8731 	spin_unlock_bh(&ar->data_lock);
8732 
8733 	if (len >= buf_len)
8734 		buf[len - 1] = 0;
8735 	else
8736 		buf[len] = 0;
8737 }
8738 
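/* Map the generic wmi_vdev_subtype onto the value used by main (legacy)
 * firmware; mesh subtypes are not supported there.
 */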
8739 int ath10k_wmi_op_get_vdev_subtype(struct ath10k *ar,
8740 				   enum wmi_vdev_subtype subtype)
8741 {
8742 	switch (subtype) {
8743 	case WMI_VDEV_SUBTYPE_NONE:
8744 		return WMI_VDEV_SUBTYPE_LEGACY_NONE;
8745 	case WMI_VDEV_SUBTYPE_P2P_DEVICE:
8746 		return WMI_VDEV_SUBTYPE_LEGACY_P2P_DEV;
8747 	case WMI_VDEV_SUBTYPE_P2P_CLIENT:
8748 		return WMI_VDEV_SUBTYPE_LEGACY_P2P_CLI;
8749 	case WMI_VDEV_SUBTYPE_P2P_GO:
8750 		return WMI_VDEV_SUBTYPE_LEGACY_P2P_GO;
8751 	case WMI_VDEV_SUBTYPE_PROXY_STA:
8752 		return WMI_VDEV_SUBTYPE_LEGACY_PROXY_STA;
8753 	case WMI_VDEV_SUBTYPE_MESH_11S:
8754 	case WMI_VDEV_SUBTYPE_MESH_NON_11S:
8755 		return -ENOTSUPP;
8756 	}
8757 	return -ENOTSUPP;
8758 }
8759 
8760 static int ath10k_wmi_10_2_4_op_get_vdev_subtype(struct ath10k *ar,
8761 						 enum wmi_vdev_subtype subtype)
8762 {
8763 	switch (subtype) {
8764 	case WMI_VDEV_SUBTYPE_NONE:
8765 		return WMI_VDEV_SUBTYPE_10_2_4_NONE;
8766 	case WMI_VDEV_SUBTYPE_P2P_DEVICE:
8767 		return WMI_VDEV_SUBTYPE_10_2_4_P2P_DEV;
8768 	case WMI_VDEV_SUBTYPE_P2P_CLIENT:
8769 		return WMI_VDEV_SUBTYPE_10_2_4_P2P_CLI;
8770 	case WMI_VDEV_SUBTYPE_P2P_GO:
8771 		return WMI_VDEV_SUBTYPE_10_2_4_P2P_GO;
8772 	case WMI_VDEV_SUBTYPE_PROXY_STA:
8773 		return WMI_VDEV_SUBTYPE_10_2_4_PROXY_STA;
8774 	case WMI_VDEV_SUBTYPE_MESH_11S:
8775 		return WMI_VDEV_SUBTYPE_10_2_4_MESH_11S;
8776 	case WMI_VDEV_SUBTYPE_MESH_NON_11S:
8777 		return -ENOTSUPP;
8778 	}
8779 	return -ENOTSUPP;
8780 }
8781 
8782 static int ath10k_wmi_10_4_op_get_vdev_subtype(struct ath10k *ar,
8783 					       enum wmi_vdev_subtype subtype)
8784 {
8785 	switch (subtype) {
8786 	case WMI_VDEV_SUBTYPE_NONE:
8787 		return WMI_VDEV_SUBTYPE_10_4_NONE;
8788 	case WMI_VDEV_SUBTYPE_P2P_DEVICE:
8789 		return WMI_VDEV_SUBTYPE_10_4_P2P_DEV;
8790 	case WMI_VDEV_SUBTYPE_P2P_CLIENT:
8791 		return WMI_VDEV_SUBTYPE_10_4_P2P_CLI;
8792 	case WMI_VDEV_SUBTYPE_P2P_GO:
8793 		return WMI_VDEV_SUBTYPE_10_4_P2P_GO;
8794 	case WMI_VDEV_SUBTYPE_PROXY_STA:
8795 		return WMI_VDEV_SUBTYPE_10_4_PROXY_STA;
8796 	case WMI_VDEV_SUBTYPE_MESH_11S:
8797 		return WMI_VDEV_SUBTYPE_10_4_MESH_11S;
8798 	case WMI_VDEV_SUBTYPE_MESH_NON_11S:
8799 		return WMI_VDEV_SUBTYPE_10_4_MESH_NON_11S;
8800 	}
8801 	return -ENOTSUPP;
8802 }
8803 
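/* Build the 10.4 extended resource config command: host platform type,
 * firmware feature bitmap, coex GPIO configuration and the TDLS vdev/peer
 * limits (sleep-STA support depends on WMI_SERVICE_TDLS_UAPSD_SLEEP_STA).
 */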
8804 static struct sk_buff *
8805 ath10k_wmi_10_4_ext_resource_config(struct ath10k *ar,
8806 				    enum wmi_host_platform_type type,
8807 				    u32 fw_feature_bitmap)
8808 {
8809 	struct wmi_ext_resource_config_10_4_cmd *cmd;
8810 	struct sk_buff *skb;
8811 	u32 num_tdls_sleep_sta = 0;
8812 
8813 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8814 	if (!skb)
8815 		return ERR_PTR(-ENOMEM);
8816 
8817 	if (test_bit(WMI_SERVICE_TDLS_UAPSD_SLEEP_STA, ar->wmi.svc_map))
8818 		num_tdls_sleep_sta = TARGET_10_4_NUM_TDLS_SLEEP_STA;
8819 
8820 	cmd = (struct wmi_ext_resource_config_10_4_cmd *)skb->data;
8821 	cmd->host_platform_config = __cpu_to_le32(type);
8822 	cmd->fw_feature_bitmap = __cpu_to_le32(fw_feature_bitmap);
8823 	cmd->wlan_gpio_priority = __cpu_to_le32(ar->coex_gpio_pin);
8824 	cmd->coex_version = __cpu_to_le32(WMI_NO_COEX_VERSION_SUPPORT);
8825 	cmd->coex_gpio_pin1 = __cpu_to_le32(-1);
8826 	cmd->coex_gpio_pin2 = __cpu_to_le32(-1);
8827 	cmd->coex_gpio_pin3 = __cpu_to_le32(-1);
8828 	cmd->num_tdls_vdevs = __cpu_to_le32(TARGET_10_4_NUM_TDLS_VDEVS);
8829 	cmd->num_tdls_conn_table_entries = __cpu_to_le32(20);
8830 	cmd->max_tdls_concurrent_sleep_sta = __cpu_to_le32(num_tdls_sleep_sta);
8831 	cmd->max_tdls_concurrent_buffer_sta =
8832 			__cpu_to_le32(TARGET_10_4_NUM_TDLS_BUFFER_STA);
8833 
8834 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8835 		   "wmi ext resource config host type %d firmware feature bitmap %08x\n",
8836 		   type, fw_feature_bitmap);
8837 	return skb;
8838 }
8839 
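/* Build the 10.4 TDLS state command for a vdev.  Firmware advertising
 * explicit-mode-only support is downgraded from active to passive TDLS,
 * buffer-STA support is flagged when the corresponding WMI service is
 * present, and the remaining fields use fixed driver defaults.
 */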
8840 static struct sk_buff *
8841 ath10k_wmi_10_4_gen_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
8842 					 enum wmi_tdls_state state)
8843 {
8844 	struct wmi_10_4_tdls_set_state_cmd *cmd;
8845 	struct sk_buff *skb;
8846 	u32 options = 0;
8847 
8848 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8849 	if (!skb)
8850 		return ERR_PTR(-ENOMEM);
8851 
8852 	if (test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, ar->wmi.svc_map) &&
8853 	    state == WMI_TDLS_ENABLE_ACTIVE)
8854 		state = WMI_TDLS_ENABLE_PASSIVE;
8855 
8856 	if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, ar->wmi.svc_map))
8857 		options |= WMI_TDLS_BUFFER_STA_EN;
8858 
8859 	cmd = (struct wmi_10_4_tdls_set_state_cmd *)skb->data;
8860 	cmd->vdev_id = __cpu_to_le32(vdev_id);
8861 	cmd->state = __cpu_to_le32(state);
8862 	cmd->notification_interval_ms = __cpu_to_le32(5000);
8863 	cmd->tx_discovery_threshold = __cpu_to_le32(100);
8864 	cmd->tx_teardown_threshold = __cpu_to_le32(5);
8865 	cmd->rssi_teardown_threshold = __cpu_to_le32(-75);
8866 	cmd->rssi_delta = __cpu_to_le32(-20);
8867 	cmd->tdls_options = __cpu_to_le32(options);
8868 	cmd->tdls_peer_traffic_ind_window = __cpu_to_le32(2);
8869 	cmd->tdls_peer_traffic_response_timeout_ms = __cpu_to_le32(5000);
8870 	cmd->tdls_puapsd_mask = __cpu_to_le32(0xf);
8871 	cmd->tdls_puapsd_inactivity_time_ms = __cpu_to_le32(0);
8872 	cmd->tdls_puapsd_rx_frame_threshold = __cpu_to_le32(10);
8873 	cmd->teardown_notification_ms = __cpu_to_le32(10);
8874 	cmd->tdls_peer_kickout_threshold = __cpu_to_le32(96);
8875 
8876 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi update fw tdls state %d for vdev %i\n",
8877 		   state, vdev_id);
8878 	return skb;
8879 }
8880 
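/* Convert the U-APSD AC flags (IEEE80211_WMM_IE_STA_QOSINFO_AC_*) and the
 * service period into the WMI TDLS peer QoS bitmap.
 */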
8881 static u32 ath10k_wmi_prepare_peer_qos(u8 uapsd_queues, u8 sp)
8882 {
8883 	u32 peer_qos = 0;
8884 
8885 	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
8886 		peer_qos |= WMI_TDLS_PEER_QOS_AC_VO;
8887 	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
8888 		peer_qos |= WMI_TDLS_PEER_QOS_AC_VI;
8889 	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
8890 		peer_qos |= WMI_TDLS_PEER_QOS_AC_BK;
8891 	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
8892 		peer_qos |= WMI_TDLS_PEER_QOS_AC_BE;
8893 
8894 	peer_qos |= SM(sp, WMI_TDLS_PEER_SP);
8895 
8896 	return peer_qos;
8897 }
8898 
8899 static struct sk_buff *
8900 ath10k_wmi_10_4_op_gen_pdev_get_tpc_table_cmdid(struct ath10k *ar, u32 param)
8901 {
8902 	struct wmi_pdev_get_tpc_table_cmd *cmd;
8903 	struct sk_buff *skb;
8904 
8905 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8906 	if (!skb)
8907 		return ERR_PTR(-ENOMEM);
8908 
8909 	cmd = (struct wmi_pdev_get_tpc_table_cmd *)skb->data;
8910 	cmd->param = __cpu_to_le32(param);
8911 
8912 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8913 		   "wmi pdev get tpc table param:%d\n", param);
8914 	return skb;
8915 }
8916 
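/* Build a variable-length 10.4 TDLS peer update command: peer state and
 * capabilities plus one wmi_channel entry per peer channel (the command
 * struct already reserves room for the first channel).
 */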
8917 static struct sk_buff *
8918 ath10k_wmi_10_4_gen_tdls_peer_update(struct ath10k *ar,
8919 				     const struct wmi_tdls_peer_update_cmd_arg *arg,
8920 				     const struct wmi_tdls_peer_capab_arg *cap,
8921 				     const struct wmi_channel_arg *chan_arg)
8922 {
8923 	struct wmi_10_4_tdls_peer_update_cmd *cmd;
8924 	struct wmi_tdls_peer_capabilities *peer_cap;
8925 	struct wmi_channel *chan;
8926 	struct sk_buff *skb;
8927 	u32 peer_qos;
8928 	int len, chan_len;
8929 	int i;
8930 
8931 	/* tdls peer update cmd has a placeholder for one channel */
8932 	chan_len = cap->peer_chan_len ? (cap->peer_chan_len - 1) : 0;
8933 
8934 	len = sizeof(*cmd) + chan_len * sizeof(*chan);
8935 
8936 	skb = ath10k_wmi_alloc_skb(ar, len);
8937 	if (!skb)
8938 		return ERR_PTR(-ENOMEM);
8939 
8940 	memset(skb->data, 0, sizeof(*cmd));
8941 
8942 	cmd = (struct wmi_10_4_tdls_peer_update_cmd *)skb->data;
8943 	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
8944 	ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
8945 	cmd->peer_state = __cpu_to_le32(arg->peer_state);
8946 
8947 	peer_qos = ath10k_wmi_prepare_peer_qos(cap->peer_uapsd_queues,
8948 					       cap->peer_max_sp);
8949 
8950 	peer_cap = &cmd->peer_capab;
8951 	peer_cap->peer_qos = __cpu_to_le32(peer_qos);
8952 	peer_cap->buff_sta_support = __cpu_to_le32(cap->buff_sta_support);
8953 	peer_cap->off_chan_support = __cpu_to_le32(cap->off_chan_support);
8954 	peer_cap->peer_curr_operclass = __cpu_to_le32(cap->peer_curr_operclass);
8955 	peer_cap->self_curr_operclass = __cpu_to_le32(cap->self_curr_operclass);
8956 	peer_cap->peer_chan_len = __cpu_to_le32(cap->peer_chan_len);
8957 	peer_cap->peer_operclass_len = __cpu_to_le32(cap->peer_operclass_len);
8958 
8959 	for (i = 0; i < WMI_TDLS_MAX_SUPP_OPER_CLASSES; i++)
8960 		peer_cap->peer_operclass[i] = cap->peer_operclass[i];
8961 
8962 	peer_cap->is_peer_responder = __cpu_to_le32(cap->is_peer_responder);
8963 	peer_cap->pref_offchan_num = __cpu_to_le32(cap->pref_offchan_num);
8964 	peer_cap->pref_offchan_bw = __cpu_to_le32(cap->pref_offchan_bw);
8965 
8966 	for (i = 0; i < cap->peer_chan_len; i++) {
8967 		chan = (struct wmi_channel *)&peer_cap->peer_chan_list[i];
8968 		ath10k_wmi_put_wmi_channel(ar, chan, &chan_arg[i]);
8969 	}
8970 
8971 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8972 		   "wmi tdls peer update vdev %i state %d n_chans %u\n",
8973 		   arg->vdev_id, arg->peer_state, cap->peer_chan_len);
8974 	return skb;
8975 }
8976 
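/* Build a radar-found command reporting the detected pulse parameters
 * (PRI, pulse width and sidx ranges) back to the 10.4 firmware.
 */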
8977 static struct sk_buff *
8978 ath10k_wmi_10_4_gen_radar_found(struct ath10k *ar,
8979 				const struct ath10k_radar_found_info *arg)
8980 {
8981 	struct wmi_radar_found_info *cmd;
8982 	struct sk_buff *skb;
8983 
8984 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8985 	if (!skb)
8986 		return ERR_PTR(-ENOMEM);
8987 
8988 	cmd = (struct wmi_radar_found_info *)skb->data;
8989 	cmd->pri_min   = __cpu_to_le32(arg->pri_min);
8990 	cmd->pri_max   = __cpu_to_le32(arg->pri_max);
8991 	cmd->width_min = __cpu_to_le32(arg->width_min);
8992 	cmd->width_max = __cpu_to_le32(arg->width_max);
8993 	cmd->sidx_min  = __cpu_to_le32(arg->sidx_min);
8994 	cmd->sidx_max  = __cpu_to_le32(arg->sidx_max);
8995 
8996 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8997 		   "wmi radar found pri_min %d pri_max %d width_min %d width_max %d sidx_min %d sidx_max %d\n",
8998 		   arg->pri_min, arg->pri_max, arg->width_min,
8999 		   arg->width_max, arg->sidx_min, arg->sidx_max);
9000 	return skb;
9001 }
9002 
9003 static struct sk_buff *
9004 ath10k_wmi_10_4_gen_per_peer_per_tid_cfg(struct ath10k *ar,
9005 					 const struct wmi_per_peer_per_tid_cfg_arg *arg)
9006 {
9007 	struct wmi_peer_per_tid_cfg_cmd *cmd;
9008 	struct sk_buff *skb;
9009 
9010 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
9011 	if (!skb)
9012 		return ERR_PTR(-ENOMEM);
9013 
9014 	memset(skb->data, 0, sizeof(*cmd));
9015 
9016 	cmd = (struct wmi_peer_per_tid_cfg_cmd *)skb->data;
9017 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
9018 	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_macaddr.addr);
9019 	cmd->tid = cpu_to_le32(arg->tid);
9020 	cmd->ack_policy = cpu_to_le32(arg->ack_policy);
9021 	cmd->aggr_control = cpu_to_le32(arg->aggr_control);
9022 	cmd->rate_control = cpu_to_le32(arg->rate_ctrl);
9023 	cmd->retry_count = cpu_to_le32(arg->retry_count);
9024 	cmd->rcode_flags = cpu_to_le32(arg->rcode_flags);
9025 	cmd->ext_tid_cfg_bitmap = cpu_to_le32(arg->ext_tid_cfg_bitmap);
9026 	cmd->rtscts_ctrl = cpu_to_le32(arg->rtscts_ctrl);
9027 
9028 	ath10k_dbg(ar, ATH10K_DBG_WMI,
9029 		   "wmi noack tid %d vdev id %d ack_policy %d aggr %u rate_ctrl %u rcflag %u retry_count %d rtscts %d ext_tid_cfg_bitmap %d mac_addr %pM\n",
9030 		   arg->tid, arg->vdev_id, arg->ack_policy, arg->aggr_control,
9031 		   arg->rate_ctrl, arg->rcode_flags, arg->retry_count,
9032 		   arg->rtscts_ctrl, arg->ext_tid_cfg_bitmap, arg->peer_macaddr.addr);
9033 	return skb;
9034 }
9035 
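/* Build a WMI echo command; firmware reflects the value back, which
 * ath10k_wmi_barrier() relies on via ATH10K_WMI_BARRIER_ECHO_ID.
 */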
9036 static struct sk_buff *
9037 ath10k_wmi_op_gen_echo(struct ath10k *ar, u32 value)
9038 {
9039 	struct wmi_echo_cmd *cmd;
9040 	struct sk_buff *skb;
9041 
9042 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
9043 	if (!skb)
9044 		return ERR_PTR(-ENOMEM);
9045 
9046 	cmd = (struct wmi_echo_cmd *)skb->data;
9047 	cmd->value = cpu_to_le32(value);
9048 
9049 	ath10k_dbg(ar, ATH10K_DBG_WMI,
9050 		   "wmi echo value 0x%08x\n", value);
9051 	return skb;
9052 }
9053 
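/* Flush the WMI pipe: submit an echo tagged ATH10K_WMI_BARRIER_ECHO_ID and
 * wait up to ATH10K_WMI_BARRIER_TIMEOUT_HZ for ar->wmi.barrier to complete.
 * Returns 0 on success, the echo submission error, or -ETIMEDOUT.
 */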
9054 int
9055 ath10k_wmi_barrier(struct ath10k *ar)
9056 {
9057 	int ret;
9058 	int time_left;
9059 
9060 	spin_lock_bh(&ar->data_lock);
9061 	reinit_completion(&ar->wmi.barrier);
9062 	spin_unlock_bh(&ar->data_lock);
9063 
9064 	ret = ath10k_wmi_echo(ar, ATH10K_WMI_BARRIER_ECHO_ID);
9065 	if (ret) {
9066 		ath10k_warn(ar, "failed to submit wmi echo: %d\n", ret);
9067 		return ret;
9068 	}
9069 
9070 	time_left = wait_for_completion_timeout(&ar->wmi.barrier,
9071 						ATH10K_WMI_BARRIER_TIMEOUT_HZ);
9072 	if (!time_left)
9073 		return -ETIMEDOUT;
9074 
9075 	return 0;
9076 }
9077 
9078 static struct sk_buff *
9079 ath10k_wmi_10_2_4_op_gen_bb_timing(struct ath10k *ar,
9080 				   const struct wmi_bb_timing_cfg_arg *arg)
9081 {
9082 	struct wmi_pdev_bb_timing_cfg_cmd *cmd;
9083 	struct sk_buff *skb;
9084 
9085 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
9086 	if (!skb)
9087 		return ERR_PTR(-ENOMEM);
9088 
9089 	cmd = (struct wmi_pdev_bb_timing_cfg_cmd *)skb->data;
9090 	cmd->bb_tx_timing = __cpu_to_le32(arg->bb_tx_timing);
9091 	cmd->bb_xpa_timing = __cpu_to_le32(arg->bb_xpa_timing);
9092 
9093 	ath10k_dbg(ar, ATH10K_DBG_WMI,
9094 		   "wmi pdev bb_tx_timing 0x%x bb_xpa_timing 0x%x\n",
9095 		   arg->bb_tx_timing, arg->bb_xpa_timing);
9096 	return skb;
9097 }
9098 
9099 static const struct wmi_ops wmi_ops = {
9100 	.rx = ath10k_wmi_op_rx,
9101 	.map_svc = wmi_main_svc_map,
9102 
9103 	.pull_scan = ath10k_wmi_op_pull_scan_ev,
9104 	.pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
9105 	.pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
9106 	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
9107 	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
9108 	.pull_swba = ath10k_wmi_op_pull_swba_ev,
9109 	.pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
9110 	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
9111 	.pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
9112 	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
9113 	.pull_fw_stats = ath10k_wmi_main_op_pull_fw_stats,
9114 	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
9115 	.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
9116 
9117 	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
9118 	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
9119 	.gen_pdev_set_rd = ath10k_wmi_op_gen_pdev_set_rd,
9120 	.gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
9121 	.gen_init = ath10k_wmi_op_gen_init,
9122 	.gen_start_scan = ath10k_wmi_op_gen_start_scan,
9123 	.gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
9124 	.gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
9125 	.gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
9126 	.gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
9127 	.gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
9128 	.gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
9129 	.gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
9130 	.gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
9131 	.gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
9132 	.gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
9133 	.gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
9134 	/* .gen_vdev_wmm_conf not implemented */
9135 	.gen_peer_create = ath10k_wmi_op_gen_peer_create,
9136 	.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
9137 	.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
9138 	.gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
9139 	.gen_peer_assoc = ath10k_wmi_op_gen_peer_assoc,
9140 	.gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
9141 	.gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
9142 	.gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
9143 	.gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
9144 	.gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
9145 	.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
9146 	.gen_request_stats = ath10k_wmi_op_gen_request_stats,
9147 	.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
9148 	.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
9149 	.gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
9150 	.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
9151 	.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
9152 	.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
9153 	/* .gen_pdev_get_temperature not implemented */
9154 	.gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
9155 	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
9156 	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
9157 	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
9158 	.fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
9159 	.get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
9160 	.gen_echo = ath10k_wmi_op_gen_echo,
9161 	/* .gen_bcn_tmpl not implemented */
9162 	/* .gen_prb_tmpl not implemented */
9163 	/* .gen_p2p_go_bcn_ie not implemented */
9164 	/* .gen_adaptive_qcs not implemented */
9165 	/* .gen_pdev_enable_adaptive_cca not implemented */
9166 };
9167 
9168 static const struct wmi_ops wmi_10_1_ops = {
9169 	.rx = ath10k_wmi_10_1_op_rx,
9170 	.map_svc = wmi_10x_svc_map,
9171 	.pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
9172 	.pull_fw_stats = ath10k_wmi_10x_op_pull_fw_stats,
9173 	.gen_init = ath10k_wmi_10_1_op_gen_init,
9174 	.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
9175 	.gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
9176 	.gen_peer_assoc = ath10k_wmi_10_1_op_gen_peer_assoc,
9177 	/* .gen_pdev_get_temperature not implemented */
9178 
9179 	/* shared with main branch */
9180 	.pull_scan = ath10k_wmi_op_pull_scan_ev,
9181 	.pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
9182 	.pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
9183 	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
9184 	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
9185 	.pull_swba = ath10k_wmi_op_pull_swba_ev,
9186 	.pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
9187 	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
9188 	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
9189 	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
9190 	.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
9191 
9192 	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
9193 	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
9194 	.gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
9195 	.gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
9196 	.gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
9197 	.gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
9198 	.gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
9199 	.gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
9200 	.gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
9201 	.gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
9202 	.gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
9203 	.gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
9204 	.gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
9205 	.gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
9206 	/* .gen_vdev_wmm_conf not implemented */
9207 	.gen_peer_create = ath10k_wmi_op_gen_peer_create,
9208 	.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
9209 	.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
9210 	.gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
9211 	.gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
9212 	.gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
9213 	.gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
9214 	.gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
9215 	.gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
9216 	.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
9217 	.gen_request_stats = ath10k_wmi_op_gen_request_stats,
9218 	.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
9219 	.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
9220 	.gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
9221 	.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
9222 	.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
9223 	.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
9224 	.gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
9225 	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
9226 	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
9227 	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
9228 	.fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
9229 	.get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
9230 	.gen_echo = ath10k_wmi_op_gen_echo,
9231 	/* .gen_bcn_tmpl not implemented */
9232 	/* .gen_prb_tmpl not implemented */
9233 	/* .gen_p2p_go_bcn_ie not implemented */
9234 	/* .gen_adaptive_qcs not implemented */
9235 	/* .gen_pdev_enable_adaptive_cca not implemented */
9236 };
9237 
9238 static const struct wmi_ops wmi_10_2_ops = {
9239 	.rx = ath10k_wmi_10_2_op_rx,
9240 	.pull_fw_stats = ath10k_wmi_10_2_op_pull_fw_stats,
9241 	.gen_init = ath10k_wmi_10_2_op_gen_init,
9242 	.gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
9243 	/* .gen_pdev_get_temperature not implemented */
9244 
9245 	/* shared with 10.1 */
9246 	.map_svc = wmi_10x_svc_map,
9247 	.pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
9248 	.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
9249 	.gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
9250 	.gen_echo = ath10k_wmi_op_gen_echo,
9251 
9252 	.pull_scan = ath10k_wmi_op_pull_scan_ev,
9253 	.pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
9254 	.pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
9255 	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
9256 	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
9257 	.pull_swba = ath10k_wmi_op_pull_swba_ev,
9258 	.pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
9259 	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
9260 	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
9261 	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
9262 	.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
9263 
9264 	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
9265 	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
9266 	.gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
9267 	.gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
9268 	.gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
9269 	.gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
9270 	.gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
9271 	.gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
9272 	.gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
9273 	.gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
9274 	.gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
9275 	.gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
9276 	.gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
9277 	.gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
9278 	/* .gen_vdev_wmm_conf not implemented */
9279 	.gen_peer_create = ath10k_wmi_op_gen_peer_create,
9280 	.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
9281 	.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
9282 	.gen_pdev_set_base_macaddr = ath10k_wmi_op_gen_pdev_set_base_macaddr,
9283 	.gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
9284 	.gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
9285 	.gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
9286 	.gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
9287 	.gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
9288 	.gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
9289 	.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
9290 	.gen_request_stats = ath10k_wmi_op_gen_request_stats,
9291 	.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
9292 	.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
9293 	.gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
9294 	.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
9295 	.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
9296 	.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
9297 	.gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
9298 	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
9299 	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
9300 	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
9301 	.fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
9302 	.get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
9303 	/* .gen_pdev_enable_adaptive_cca not implemented */
9304 };
9305 
9306 static const struct wmi_ops wmi_10_2_4_ops = {
9307 	.rx = ath10k_wmi_10_2_op_rx,
9308 	.pull_fw_stats = ath10k_wmi_10_2_4_op_pull_fw_stats,
9309 	.gen_init = ath10k_wmi_10_2_op_gen_init,
9310 	.gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
9311 	.gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature,
9312 	.gen_pdev_bss_chan_info_req = ath10k_wmi_10_2_op_gen_pdev_bss_chan_info,
9313 
9314 	/* shared with 10.1 */
9315 	.map_svc = wmi_10x_svc_map,
9316 	.pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
9317 	.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
9318 	.gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
9319 	.gen_echo = ath10k_wmi_op_gen_echo,
9320 
9321 	.pull_scan = ath10k_wmi_op_pull_scan_ev,
9322 	.pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
9323 	.pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
9324 	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
9325 	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
9326 	.pull_swba = ath10k_wmi_10_2_4_op_pull_swba_ev,
9327 	.pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
9328 	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
9329 	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
9330 	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
9331 	.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
9332 
9333 	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
9334 	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
9335 	.gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
9336 	.gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
9337 	.gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
9338 	.gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
9339 	.gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
9340 	.gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
9341 	.gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
9342 	.gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
9343 	.gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
9344 	.gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
9345 	.gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
9346 	.gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
9347 	.gen_peer_create = ath10k_wmi_op_gen_peer_create,
9348 	.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
9349 	.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
9350 	.gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
9351 	.gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
9352 	.gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
9353 	.gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
9354 	.gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
9355 	.gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
9356 	.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
9357 	.gen_request_stats = ath10k_wmi_op_gen_request_stats,
9358 	.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
9359 	.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
9360 	.gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
9361 	.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
9362 	.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
9363 	.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
9364 	.gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
9365 	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
9366 	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
9367 	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
9368 	.gen_pdev_get_tpc_config = ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config,
9369 	.fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
9370 	.gen_pdev_enable_adaptive_cca =
9371 		ath10k_wmi_op_gen_pdev_enable_adaptive_cca,
9372 	.get_vdev_subtype = ath10k_wmi_10_2_4_op_get_vdev_subtype,
9373 	.gen_bb_timing = ath10k_wmi_10_2_4_op_gen_bb_timing,
9374 	/* .gen_bcn_tmpl not implemented */
9375 	/* .gen_prb_tmpl not implemented */
9376 	/* .gen_p2p_go_bcn_ie not implemented */
9377 	/* .gen_adaptive_qcs not implemented */
9378 };
9379 
9380 static const struct wmi_ops wmi_10_4_ops = {
9381 	.rx = ath10k_wmi_10_4_op_rx,
9382 	.map_svc = wmi_10_4_svc_map,
9383 
9384 	.pull_fw_stats = ath10k_wmi_10_4_op_pull_fw_stats,
9385 	.pull_scan = ath10k_wmi_op_pull_scan_ev,
9386 	.pull_mgmt_rx = ath10k_wmi_10_4_op_pull_mgmt_rx_ev,
9387 	.pull_ch_info = ath10k_wmi_10_4_op_pull_ch_info_ev,
9388 	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
9389 	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
9390 	.pull_swba = ath10k_wmi_10_4_op_pull_swba_ev,
9391 	.pull_phyerr_hdr = ath10k_wmi_10_4_op_pull_phyerr_ev_hdr,
9392 	.pull_phyerr = ath10k_wmi_10_4_op_pull_phyerr_ev,
9393 	.pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
9394 	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
9395 	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
9396 	.pull_dfs_status_ev = ath10k_wmi_10_4_op_pull_dfs_status_ev,
9397 	.get_txbf_conf_scheme = ath10k_wmi_10_4_txbf_conf_scheme,
9398 
9399 	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
9400 	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
9401 	.gen_pdev_set_base_macaddr = ath10k_wmi_op_gen_pdev_set_base_macaddr,
9402 	.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
9403 	.gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
9404 	.gen_init = ath10k_wmi_10_4_op_gen_init,
9405 	.gen_start_scan = ath10k_wmi_op_gen_start_scan,
9406 	.gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
9407 	.gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
9408 	.gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
9409 	.gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
9410 	.gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
9411 	.gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
9412 	.gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
9413 	.gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
9414 	.gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
9415 	.gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
9416 	.gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
9417 	.gen_peer_create = ath10k_wmi_op_gen_peer_create,
9418 	.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
9419 	.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
9420 	.gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
9421 	.gen_peer_assoc = ath10k_wmi_10_4_op_gen_peer_assoc,
9422 	.gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
9423 	.gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
9424 	.gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
9425 	.gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
9426 	.gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
9427 	.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
9428 	.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
9429 	.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
9430 	.gen_dbglog_cfg = ath10k_wmi_10_4_op_gen_dbglog_cfg,
9431 	.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
9432 	.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
9433 	.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
9434 	.gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
9435 	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
9436 	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
9437 	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
9438 	.fw_stats_fill = ath10k_wmi_10_4_op_fw_stats_fill,
9439 	.ext_resource_config = ath10k_wmi_10_4_ext_resource_config,
9440 	.gen_update_fw_tdls_state = ath10k_wmi_10_4_gen_update_fw_tdls_state,
9441 	.gen_tdls_peer_update = ath10k_wmi_10_4_gen_tdls_peer_update,
9442 	.gen_pdev_get_tpc_table_cmdid =
9443 			ath10k_wmi_10_4_op_gen_pdev_get_tpc_table_cmdid,
9444 	.gen_radar_found = ath10k_wmi_10_4_gen_radar_found,
9445 	.gen_per_peer_per_tid_cfg = ath10k_wmi_10_4_gen_per_peer_per_tid_cfg,
9446 
9447 	/* shared with 10.2 */
9448 	.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
9449 	.gen_request_stats = ath10k_wmi_op_gen_request_stats,
9450 	.gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature,
9451 	.get_vdev_subtype = ath10k_wmi_10_4_op_get_vdev_subtype,
9452 	.gen_pdev_bss_chan_info_req = ath10k_wmi_10_2_op_gen_pdev_bss_chan_info,
9453 	.gen_echo = ath10k_wmi_op_gen_echo,
9454 	.gen_pdev_get_tpc_config = ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config,
9455 };
9456 
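/* Bind the WMI ops, command/parameter maps and cipher table that match the
 * running firmware's WMI op version, then initialise the WMI completions,
 * work items and (for tx-by-ref capable firmware) the pending mgmt tx IDR.
 * Returns -EINVAL for an unset or unknown op version.
 */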
9457 int ath10k_wmi_attach(struct ath10k *ar)
9458 {
9459 	switch (ar->running_fw->fw_file.wmi_op_version) {
9460 	case ATH10K_FW_WMI_OP_VERSION_10_4:
9461 		ar->wmi.ops = &wmi_10_4_ops;
9462 		ar->wmi.cmd = &wmi_10_4_cmd_map;
9463 		ar->wmi.vdev_param = &wmi_10_4_vdev_param_map;
9464 		ar->wmi.pdev_param = &wmi_10_4_pdev_param_map;
9465 		ar->wmi.peer_param = &wmi_peer_param_map;
9466 		ar->wmi.peer_flags = &wmi_10_2_peer_flags_map;
9467 		ar->wmi_key_cipher = wmi_key_cipher_suites;
9468 		break;
9469 	case ATH10K_FW_WMI_OP_VERSION_10_2_4:
9470 		ar->wmi.cmd = &wmi_10_2_4_cmd_map;
9471 		ar->wmi.ops = &wmi_10_2_4_ops;
9472 		ar->wmi.vdev_param = &wmi_10_2_4_vdev_param_map;
9473 		ar->wmi.pdev_param = &wmi_10_2_4_pdev_param_map;
9474 		ar->wmi.peer_param = &wmi_peer_param_map;
9475 		ar->wmi.peer_flags = &wmi_10_2_peer_flags_map;
9476 		ar->wmi_key_cipher = wmi_key_cipher_suites;
9477 		break;
9478 	case ATH10K_FW_WMI_OP_VERSION_10_2:
9479 		ar->wmi.cmd = &wmi_10_2_cmd_map;
9480 		ar->wmi.ops = &wmi_10_2_ops;
9481 		ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
9482 		ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
9483 		ar->wmi.peer_param = &wmi_peer_param_map;
9484 		ar->wmi.peer_flags = &wmi_10_2_peer_flags_map;
9485 		ar->wmi_key_cipher = wmi_key_cipher_suites;
9486 		break;
9487 	case ATH10K_FW_WMI_OP_VERSION_10_1:
9488 		ar->wmi.cmd = &wmi_10x_cmd_map;
9489 		ar->wmi.ops = &wmi_10_1_ops;
9490 		ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
9491 		ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
9492 		ar->wmi.peer_param = &wmi_peer_param_map;
9493 		ar->wmi.peer_flags = &wmi_10x_peer_flags_map;
9494 		ar->wmi_key_cipher = wmi_key_cipher_suites;
9495 		break;
9496 	case ATH10K_FW_WMI_OP_VERSION_MAIN:
9497 		ar->wmi.cmd = &wmi_cmd_map;
9498 		ar->wmi.ops = &wmi_ops;
9499 		ar->wmi.vdev_param = &wmi_vdev_param_map;
9500 		ar->wmi.pdev_param = &wmi_pdev_param_map;
9501 		ar->wmi.peer_param = &wmi_peer_param_map;
9502 		ar->wmi.peer_flags = &wmi_peer_flags_map;
9503 		ar->wmi_key_cipher = wmi_key_cipher_suites;
9504 		break;
9505 	case ATH10K_FW_WMI_OP_VERSION_TLV:
9506 		ath10k_wmi_tlv_attach(ar);
9507 		ar->wmi_key_cipher = wmi_tlv_key_cipher_suites;
9508 		break;
9509 	case ATH10K_FW_WMI_OP_VERSION_UNSET:
9510 	case ATH10K_FW_WMI_OP_VERSION_MAX:
9511 		ath10k_err(ar, "unsupported WMI op version: %d\n",
9512 			   ar->running_fw->fw_file.wmi_op_version);
9513 		return -EINVAL;
9514 	}
9515 
9516 	init_completion(&ar->wmi.service_ready);
9517 	init_completion(&ar->wmi.unified_ready);
9518 	init_completion(&ar->wmi.barrier);
9519 	init_completion(&ar->wmi.radar_confirm);
9520 
9521 	INIT_WORK(&ar->svc_rdy_work, ath10k_wmi_event_service_ready_work);
9522 	INIT_WORK(&ar->radar_confirmation_work,
9523 		  ath10k_radar_confirmation_work);
9524 
9525 	if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF,
9526 		     ar->running_fw->fw_file.fw_features)) {
9527 		idr_init(&ar->wmi.mgmt_pending_tx);
9528 	}
9529 
9530 	return 0;
9531 }
9532 
9533 void ath10k_wmi_free_host_mem(struct ath10k *ar)
9534 {
9535 	int i;
9536 
9537 	/* free the host memory chunks requested by firmware */
9538 	for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
9539 		dma_free_coherent(ar->dev,
9540 				  ar->wmi.mem_chunks[i].len,
9541 				  ar->wmi.mem_chunks[i].vaddr,
9542 				  ar->wmi.mem_chunks[i].paddr);
9543 	}
9544 
9545 	ar->wmi.num_mem_chunks = 0;
9546 }
9547 
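/* IDR iterator used at teardown: unmap and free a management frame whose
 * tx completion never arrived from firmware.
 */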
9548 static int ath10k_wmi_mgmt_tx_clean_up_pending(int msdu_id, void *ptr,
9549 					       void *ctx)
9550 {
9551 	struct ath10k_mgmt_tx_pkt_addr *pkt_addr = ptr;
9552 	struct ath10k *ar = ctx;
9553 	struct sk_buff *msdu;
9554 
9555 	ath10k_dbg(ar, ATH10K_DBG_WMI,
9556 		   "force cleanup mgmt msdu_id %u\n", msdu_id);
9557 
9558 	msdu = pkt_addr->vaddr;
9559 	dma_unmap_single(ar->dev, pkt_addr->paddr,
9560 			 msdu->len, DMA_TO_DEVICE);
9561 	ieee80211_free_txskb(ar->hw, msdu);
9562 
9563 	return 0;
9564 }
9565 
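/* Undo ath10k_wmi_attach(): release any pending tx-by-ref management
 * frames and their IDR, cancel the service-ready worker and free a
 * leftover service-ready skb, if any.
 */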
9566 void ath10k_wmi_detach(struct ath10k *ar)
9567 {
9568 	if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF,
9569 		     ar->running_fw->fw_file.fw_features)) {
9570 		spin_lock_bh(&ar->data_lock);
9571 		idr_for_each(&ar->wmi.mgmt_pending_tx,
9572 			     ath10k_wmi_mgmt_tx_clean_up_pending, ar);
9573 		idr_destroy(&ar->wmi.mgmt_pending_tx);
9574 		spin_unlock_bh(&ar->data_lock);
9575 	}
9576 
9577 	cancel_work_sync(&ar->svc_rdy_work);
9578 	dev_kfree_skb(ar->svc_rdy_skb);
9579 }
9580