/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/ctype.h>

#include "core.h"
#include "htc.h"
#include "debug.h"
#include "wmi.h"
#include "mac.h"

/* MAIN WMI cmd track */
static struct wmi_cmd_map wmi_cmd_map = {
	.init_cmdid = WMI_INIT_CMDID,
	.start_scan_cmdid = WMI_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_SCAN_SCH_PRIO_TBL_CMDID,
	.pdev_set_regdomain_cmdid = WMI_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_dscp_tid_map_cmdid = WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_PDEV_SEND_BCN_CMDID,
	.bcn_tmpl_cmdid = WMI_BCN_TMPL_CMDID,
	.bcn_filter_rx_cmdid = WMI_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_MGMT_TX_CMDID,
	.prb_tmpl_cmdid = WMI_PRB_TMPL_CMDID,
	.addba_clear_resp_cmdid = WMI_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold = WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_OFL_SCAN_ADD_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
	.ap_ps_peer_param_cmdid = WMI_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
	.peer_rate_retry_sched_cmdid = WMI_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
				WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
				WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
				WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
				WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
				WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid = WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid =
				WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
	.vdev_spectral_scan_enable_cmdid = WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_REQUEST_STATS_CMDID,
	.set_arp_ns_offload_cmdid = WMI_SET_ARP_NS_OFFLOAD_CMDID,
	.network_list_offload_config_cmdid =
				WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
	.gtk_offload_cmdid = WMI_GTK_OFFLOAD_CMDID,
	.csa_offload_enable_cmdid = WMI_CSA_OFFLOAD_ENABLE_CMDID,
	.csa_offload_chanswitch_cmdid = WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
	.chatter_set_mode_cmdid = WMI_CHATTER_SET_MODE_CMDID,
	.peer_tid_addba_cmdid = WMI_PEER_TID_ADDBA_CMDID,
	.peer_tid_delba_cmdid = WMI_PEER_TID_DELBA_CMDID,
	.sta_dtim_ps_method_cmdid = WMI_STA_DTIM_PS_METHOD_CMDID,
	.sta_uapsd_auto_trig_cmdid = WMI_STA_UAPSD_AUTO_TRIG_CMDID,
	.sta_keepalive_cmd = WMI_STA_KEEPALIVE_CMD,
	.echo_cmdid = WMI_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_PDEV_FTM_INTG_CMDID,
	.vdev_set_keepalive_cmdid = WMI_VDEV_SET_KEEPALIVE_CMDID,
	.vdev_get_keepalive_cmdid = WMI_VDEV_GET_KEEPALIVE_CMDID,
	.force_fw_hang_cmdid = WMI_FORCE_FW_HANG_CMDID,
	.gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID,
};

/* 10.X WMI cmd track */
static struct wmi_cmd_map wmi_10x_cmd_map = {
	.init_cmdid = WMI_10X_INIT_CMDID,
	.start_scan_cmdid = WMI_10X_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_10X_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_10X_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_regdomain_cmdid = WMI_10X_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_10X_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_10X_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_10X_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_10X_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_10X_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_10X_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_10X_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_dscp_tid_map_cmdid = WMI_10X_PDEV_SET_DSCP_TID_MAP_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_10X_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_10X_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_10X_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_10X_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_10X_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_10X_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_10X_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_10X_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_10X_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_10X_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_10X_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_10X_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_10X_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_10X_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_10X_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_10X_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_10X_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_10X_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_10X_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_10X_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_10X_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_10X_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_10X_PDEV_SEND_BCN_CMDID,
	.bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.bcn_filter_rx_cmdid = WMI_10X_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_10X_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_10X_MGMT_TX_CMDID,
	.prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.addba_clear_resp_cmdid = WMI_10X_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_10X_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_10X_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_10X_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_10X_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_10X_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_10X_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_10X_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_10X_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_10X_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_10X_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_10X_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_10X_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_10X_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold =
				WMI_10X_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_10X_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_10X_OFL_SCAN_ADD_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_10X_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_10X_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_10X_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_10X_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_10X_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_10X_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
	.ap_ps_peer_param_cmdid = WMI_10X_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_rate_retry_sched_cmdid = WMI_10X_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_10X_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
				WMI_10X_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
				WMI_10X_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
				WMI_10X_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
				WMI_10X_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_10X_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_10X_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_10X_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_10X_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_10X_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_10X_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
				WMI_10X_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_10X_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid =
				WMI_10X_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_10X_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_10X_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid =
				WMI_10X_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
	.vdev_spectral_scan_enable_cmdid =
				WMI_10X_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_10X_REQUEST_STATS_CMDID,
	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
	.gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
	.echo_cmdid = WMI_10X_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_10X_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_10X_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_10X_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
	.gpio_config_cmdid = WMI_10X_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_10X_GPIO_OUTPUT_CMDID,
};

/* MAIN WMI VDEV param map */
static struct wmi_vdev_param_map wmi_vdev_param_map = {
	.rts_threshold = WMI_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_VDEV_PARAM_SLOT_TIME,
	.preamble = WMI_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
				WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_VDEV_PARAM_WDS,
	.atim_window = WMI_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_VDEV_PARAM_BMISS_COUNT_MAX,
	.bmiss_first_bcnt = WMI_VDEV_PARAM_BMISS_FIRST_BCNT,
	.bmiss_final_bcnt = WMI_VDEV_PARAM_BMISS_FINAL_BCNT,
	.feature_wmm = WMI_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection = WMI_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_VDEV_PARAM_SGI,
	.ldpc = WMI_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
			WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
			WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
			WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_VDEV_PARAM_AP_ENABLE_NAWDS,
	.mcast2ucast_set = WMI_VDEV_PARAM_UNSUPPORTED,
	.enable_rtscts = WMI_VDEV_PARAM_ENABLE_RTSCTS,
	.txbf = WMI_VDEV_PARAM_TXBF,
	.packet_powersave = WMI_VDEV_PARAM_PACKET_POWERSAVE,
	.drop_unencry = WMI_VDEV_PARAM_DROP_UNENCRY,
	.tx_encap_type = WMI_VDEV_PARAM_TX_ENCAP_TYPE,
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
				WMI_VDEV_PARAM_UNSUPPORTED,
};

/* 10.X WMI VDEV param map */
static struct wmi_vdev_param_map wmi_10x_vdev_param_map = {
	.rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME,
	.preamble = WMI_10X_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
				WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_10X_VDEV_PARAM_WDS,
	.atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
	.bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
	.bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
	.feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_10X_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_10X_VDEV_PARAM_SGI,
	.ldpc = WMI_10X_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_10X_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
	.mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
	.enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
	.txbf = WMI_VDEV_PARAM_UNSUPPORTED,
	.packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED,
	.drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED,
	.tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
		WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
};

static struct wmi_pdev_param_map wmi_pdev_param_map = {
	.tx_chain_mask = WMI_PDEV_PARAM_TX_CHAIN_MASK,
	.rx_chain_mask = WMI_PDEV_PARAM_RX_CHAIN_MASK,
	.txpower_limit2g = WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
	.txpower_limit5g = WMI_PDEV_PARAM_TXPOWER_LIMIT5G,
	.txpower_scale = WMI_PDEV_PARAM_TXPOWER_SCALE,
	.beacon_gen_mode = WMI_PDEV_PARAM_BEACON_GEN_MODE,
	.beacon_tx_mode = WMI_PDEV_PARAM_BEACON_TX_MODE,
	.resmgr_offchan_mode = WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
	.protection_mode = WMI_PDEV_PARAM_PROTECTION_MODE,
	.dynamic_bw = WMI_PDEV_PARAM_DYNAMIC_BW,
	.non_agg_sw_retry_th = WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
	.agg_sw_retry_th = WMI_PDEV_PARAM_AGG_SW_RETRY_TH,
	.sta_kickout_th = WMI_PDEV_PARAM_STA_KICKOUT_TH,
	.ac_aggrsize_scaling = WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING,
	.ltr_enable = WMI_PDEV_PARAM_LTR_ENABLE,
	.ltr_ac_latency_be = WMI_PDEV_PARAM_LTR_AC_LATENCY_BE,
	.ltr_ac_latency_bk = WMI_PDEV_PARAM_LTR_AC_LATENCY_BK,
	.ltr_ac_latency_vi = WMI_PDEV_PARAM_LTR_AC_LATENCY_VI,
	.ltr_ac_latency_vo = WMI_PDEV_PARAM_LTR_AC_LATENCY_VO,
	.ltr_ac_latency_timeout = WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
	.ltr_sleep_override = WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
	.ltr_rx_override = WMI_PDEV_PARAM_LTR_RX_OVERRIDE,
	.ltr_tx_activity_timeout = WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
	.l1ss_enable = WMI_PDEV_PARAM_L1SS_ENABLE,
	.dsleep_enable = WMI_PDEV_PARAM_DSLEEP_ENABLE,
	.pcielp_txbuf_flush = WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
	.pcielp_txbuf_watermark = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
	.pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
	.pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
	.pdev_stats_update_period = WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
	.vdev_stats_update_period = WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
	.peer_stats_update_period = WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
	.bcnflt_stats_update_period = WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
	.pmf_qos = WMI_PDEV_PARAM_PMF_QOS,
	.arp_ac_override = WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
	.dcs = WMI_PDEV_PARAM_DCS,
	.ani_enable = WMI_PDEV_PARAM_ANI_ENABLE,
	.ani_poll_period = WMI_PDEV_PARAM_ANI_POLL_PERIOD,
	.ani_listen_period = WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
	.ani_ofdm_level = WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
	.ani_cck_level = WMI_PDEV_PARAM_ANI_CCK_LEVEL,
	.dyntxchain = WMI_PDEV_PARAM_DYNTXCHAIN,
	.proxy_sta = WMI_PDEV_PARAM_PROXY_STA,
	.idle_ps_config = WMI_PDEV_PARAM_IDLE_PS_CONFIG,
	.power_gating_sleep = WMI_PDEV_PARAM_POWER_GATING_SLEEP,
	.fast_channel_reset = WMI_PDEV_PARAM_UNSUPPORTED,
	.burst_dur = WMI_PDEV_PARAM_UNSUPPORTED,
	.burst_enable = WMI_PDEV_PARAM_UNSUPPORTED,
};

static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
	.tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
	.rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
	.txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
	.txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
	.txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
	.beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
	.beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
	.resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
	.protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE,
	.dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW,
	.non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
	.agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
	.sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
	.ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
	.ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE,
	.ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
	.ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
	.ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
	.ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
	.ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
	.ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
	.ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
	.ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
	.l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE,
	.dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
	.pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED,
	.pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED,
	.pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED,
	.pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED,
	.pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
	.vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
	.peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
	.bcnflt_stats_update_period =
				WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
	.pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
	.arp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
	.dcs = WMI_10X_PDEV_PARAM_DCS,
	.ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
	.ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
	.ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
	.ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
	.ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
	.dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN,
	.proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED,
	.idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED,
	.power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED,
	.fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
	.burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
	.burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
};
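
/* None of the maps above are used directly by the helpers that follow;
 * command and parameter ids are always looked up through the pointers kept
 * in ar->wmi (e.g. ar->wmi.cmd->mgmt_tx_cmdid further down).  As a rough
 * sketch of how the per-firmware selection is presumably wired up during
 * WMI attach (the exact code lives outside this excerpt, and the
 * ATH10K_FW_FEATURE_WMI_10X flag name is an assumption here):
 *
 *	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
 *		ar->wmi.cmd = &wmi_10x_cmd_map;
 *		ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
 *		ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
 *	} else {
 *		ar->wmi.cmd = &wmi_cmd_map;
 *		ar->wmi.vdev_param = &wmi_vdev_param_map;
 *		ar->wmi.pdev_param = &wmi_pdev_param_map;
 *	}
 */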

int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
{
	int ret;
	ret = wait_for_completion_timeout(&ar->wmi.service_ready,
					  WMI_SERVICE_READY_TIMEOUT_HZ);
	return ret;
}

int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
{
	int ret;
	ret = wait_for_completion_timeout(&ar->wmi.unified_ready,
					  WMI_UNIFIED_READY_TIMEOUT_HZ);
	return ret;
}

static struct sk_buff *ath10k_wmi_alloc_skb(u32 len)
{
	struct sk_buff *skb;
	u32 round_len = roundup(len, 4);

	skb = ath10k_htc_alloc_skb(WMI_SKB_HEADROOM + round_len);
	if (!skb)
		return NULL;

	skb_reserve(skb, WMI_SKB_HEADROOM);
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn("Unaligned WMI skb\n");

	skb_put(skb, round_len);
	memset(skb->data, 0, round_len);

	return skb;
}

static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb(skb);
}

static int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
				      u32 cmd_id)
{
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
	struct wmi_cmd_hdr *cmd_hdr;
	int ret;
	u32 cmd = 0;

	if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
		return -ENOMEM;

	cmd |= SM(cmd_id, WMI_CMD_HDR_CMD_ID);

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	cmd_hdr->cmd_id = __cpu_to_le32(cmd);

	memset(skb_cb, 0, sizeof(*skb_cb));
	ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
	trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len, ret);

	if (ret)
		goto err_pull;

	return 0;

err_pull:
	skb_pull(skb, sizeof(struct wmi_cmd_hdr));
	return ret;
}

static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif)
{
	int ret;

	lockdep_assert_held(&arvif->ar->data_lock);

	if (arvif->beacon == NULL)
		return;

	if (arvif->beacon_sent)
		return;

	ret = ath10k_wmi_beacon_send_ref_nowait(arvif);
	if (ret)
		return;

	/* We need to retain the arvif->beacon reference for DMA unmapping and
	 * freeing the skbuff later. */
	arvif->beacon_sent = true;
}

static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
				       struct ieee80211_vif *vif)
{
	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);

	ath10k_wmi_tx_beacon_nowait(arvif);
}

static void ath10k_wmi_tx_beacons_nowait(struct ath10k *ar)
{
	spin_lock_bh(&ar->data_lock);
	ieee80211_iterate_active_interfaces_atomic(ar->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   ath10k_wmi_tx_beacons_iter,
						   NULL);
	spin_unlock_bh(&ar->data_lock);
}

static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar)
{
	/* try to send pending beacons first. they take priority */
	ath10k_wmi_tx_beacons_nowait(ar);

	wake_up(&ar->wmi.tx_credits_wq);
}

static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
			       u32 cmd_id)
{
	int ret = -EOPNOTSUPP;

	might_sleep();

	if (cmd_id == WMI_CMD_UNSUPPORTED) {
		ath10k_warn("wmi command %d is not supported by firmware\n",
			    cmd_id);
		return ret;
	}

	wait_event_timeout(ar->wmi.tx_credits_wq, ({
		/* try to send pending beacons first. they take priority */
		ath10k_wmi_tx_beacons_nowait(ar);

		ret = ath10k_wmi_cmd_send_nowait(ar, skb, cmd_id);
		(ret != -EAGAIN);
	}), 3*HZ);

	if (ret)
		dev_kfree_skb_any(skb);

	return ret;
}
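
/* The usual pattern for issuing a WMI command on top of the two helpers
 * above is: allocate a zeroed, 4-byte padded buffer with
 * ath10k_wmi_alloc_skb(), fill the command struct in little endian, then
 * pass it to ath10k_wmi_cmd_send() together with the per-firmware command
 * id taken from ar->wmi.cmd.  ath10k_wmi_mgmt_tx() below is a real user; a
 * minimal hypothetical command (wmi_foo_cmd, foo_cmdid and val are
 * illustrative names only, not part of this driver) would look like:
 *
 *	struct wmi_foo_cmd *cmd;
 *	struct sk_buff *skb;
 *
 *	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
 *	if (!skb)
 *		return -ENOMEM;
 *
 *	cmd = (struct wmi_foo_cmd *)skb->data;
 *	cmd->value = __cpu_to_le32(val);
 *
 *	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->foo_cmdid);
 */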

int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb)
{
	int ret = 0;
	struct wmi_mgmt_tx_cmd *cmd;
	struct ieee80211_hdr *hdr;
	struct sk_buff *wmi_skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	int len;
	u16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = le16_to_cpu(hdr->frame_control);

	if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control)))
		return -EINVAL;

	len = sizeof(cmd->hdr) + skb->len;
	len = round_up(len, 4);

	wmi_skb = ath10k_wmi_alloc_skb(len);
	if (!wmi_skb)
		return -ENOMEM;

	cmd = (struct wmi_mgmt_tx_cmd *)wmi_skb->data;

	cmd->hdr.vdev_id = __cpu_to_le32(ATH10K_SKB_CB(skb)->vdev_id);
	cmd->hdr.tx_rate = 0;
	cmd->hdr.tx_power = 0;
	cmd->hdr.buf_len = __cpu_to_le32((u32)(skb->len));

	memcpy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr), ETH_ALEN);
	memcpy(cmd->buf, skb->data, skb->len);

	ath10k_dbg(ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n",
		   wmi_skb, wmi_skb->len, fc & IEEE80211_FCTL_FTYPE,
		   fc & IEEE80211_FCTL_STYPE);

	/* Send the management frame buffer to the target */
	ret = ath10k_wmi_cmd_send(ar, wmi_skb, ar->wmi.cmd->mgmt_tx_cmdid);
	if (ret)
		return ret;

	/* TODO: report tx status to mac80211 - temporary just ACK */
	info->flags |= IEEE80211_TX_STAT_ACK;
	ieee80211_tx_status_irqsafe(ar->hw, skb);

	return ret;
}

static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_scan_event *event = (struct wmi_scan_event *)skb->data;
	enum wmi_scan_event_type event_type;
	enum wmi_scan_completion_reason reason;
	u32 freq;
	u32 req_id;
	u32 scan_id;
	u32 vdev_id;

	event_type = __le32_to_cpu(event->event_type);
	reason = __le32_to_cpu(event->reason);
	freq = __le32_to_cpu(event->channel_freq);
	req_id = __le32_to_cpu(event->scan_req_id);
	scan_id = __le32_to_cpu(event->scan_id);
	vdev_id = __le32_to_cpu(event->vdev_id);

	ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENTID\n");
	ath10k_dbg(ATH10K_DBG_WMI,
		   "scan event type %d reason %d freq %d req_id %d "
		   "scan_id %d vdev_id %d\n",
		   event_type, reason, freq, req_id, scan_id, vdev_id);

	spin_lock_bh(&ar->data_lock);

	switch (event_type) {
	case WMI_SCAN_EVENT_STARTED:
		ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_STARTED\n");
		if (ar->scan.in_progress && ar->scan.is_roc)
			ieee80211_ready_on_channel(ar->hw);

		complete(&ar->scan.started);
		break;
	case WMI_SCAN_EVENT_COMPLETED:
		ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_COMPLETED\n");
		switch (reason) {
		case WMI_SCAN_REASON_COMPLETED:
			ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_COMPLETED\n");
			break;
		case WMI_SCAN_REASON_CANCELLED:
			ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_CANCELED\n");
			break;
		case WMI_SCAN_REASON_PREEMPTED:
			ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_PREEMPTED\n");
			break;
		case WMI_SCAN_REASON_TIMEDOUT:
			ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_TIMEDOUT\n");
			break;
		default:
			break;
		}

		ar->scan_channel = NULL;
		if (!ar->scan.in_progress) {
			ath10k_warn("no scan requested, ignoring\n");
			break;
		}

		if (ar->scan.is_roc) {
			ath10k_offchan_tx_purge(ar);

			if (!ar->scan.aborting)
				ieee80211_remain_on_channel_expired(ar->hw);
		} else {
			ieee80211_scan_completed(ar->hw, ar->scan.aborting);
		}

		del_timer(&ar->scan.timeout);
		complete_all(&ar->scan.completed);
		ar->scan.in_progress = false;
		break;
	case WMI_SCAN_EVENT_BSS_CHANNEL:
		ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_BSS_CHANNEL\n");
		ar->scan_channel = NULL;
		break;
	case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
		ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_FOREIGN_CHANNEL\n");
		ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
		if (ar->scan.in_progress && ar->scan.is_roc &&
		    ar->scan.roc_freq == freq) {
			complete(&ar->scan.on_channel);
		}
		break;
	case WMI_SCAN_EVENT_DEQUEUED:
		ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_DEQUEUED\n");
		break;
	case WMI_SCAN_EVENT_PREEMPTED:
		ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENT_PREEMPTED\n");
		break;
	case WMI_SCAN_EVENT_START_FAILED:
		ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENT_START_FAILED\n");
		break;
	default:
		break;
	}

	spin_unlock_bh(&ar->data_lock);
	return 0;
}

static inline enum ieee80211_band phy_mode_to_band(u32 phy_mode)
{
	enum ieee80211_band band;

	switch (phy_mode) {
	case MODE_11A:
	case MODE_11NA_HT20:
	case MODE_11NA_HT40:
	case MODE_11AC_VHT20:
	case MODE_11AC_VHT40:
	case MODE_11AC_VHT80:
		band = IEEE80211_BAND_5GHZ;
		break;
	case MODE_11G:
	case MODE_11B:
	case MODE_11GONLY:
	case MODE_11NG_HT20:
	case MODE_11NG_HT40:
	case MODE_11AC_VHT20_2G:
	case MODE_11AC_VHT40_2G:
	case MODE_11AC_VHT80_2G:
	default:
		band = IEEE80211_BAND_2GHZ;
	}

	return band;
}

static inline u8 get_rate_idx(u32 rate, enum ieee80211_band band)
{
	u8 rate_idx = 0;

	/* rate in Kbps */
	switch (rate) {
	case 1000:
		rate_idx = 0;
		break;
	case 2000:
		rate_idx = 1;
		break;
	case 5500:
		rate_idx = 2;
		break;
	case 11000:
		rate_idx = 3;
		break;
	case 6000:
		rate_idx = 4;
		break;
	case 9000:
		rate_idx = 5;
		break;
	case 12000:
		rate_idx = 6;
		break;
	case 18000:
		rate_idx = 7;
		break;
	case 24000:
		rate_idx = 8;
		break;
	case 36000:
		rate_idx = 9;
		break;
	case 48000:
		rate_idx = 10;
		break;
	case 54000:
		rate_idx = 11;
		break;
	default:
		break;
	}

	if (band == IEEE80211_BAND_5GHZ) {
		if (rate_idx > 3)
			/* Omit CCK rates */
			rate_idx -= 4;
		else
			rate_idx = 0;
	}

	return rate_idx;
}

static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_mgmt_rx_event_v1 *ev_v1;
	struct wmi_mgmt_rx_event_v2 *ev_v2;
	struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_channel *ch;
	struct ieee80211_hdr *hdr;
	u32 rx_status;
	u32 channel;
	u32 phy_mode;
	u32 snr;
	u32 rate;
	u32 buf_len;
	u16 fc;
	int pull_len;

	if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features)) {
		ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
		ev_hdr = &ev_v2->hdr.v1;
		pull_len = sizeof(*ev_v2);
	} else {
		ev_v1 = (struct wmi_mgmt_rx_event_v1 *)skb->data;
		ev_hdr = &ev_v1->hdr;
		pull_len = sizeof(*ev_v1);
	}

	channel = __le32_to_cpu(ev_hdr->channel);
	buf_len = __le32_to_cpu(ev_hdr->buf_len);
	rx_status = __le32_to_cpu(ev_hdr->status);
	snr = __le32_to_cpu(ev_hdr->snr);
	phy_mode = __le32_to_cpu(ev_hdr->phy_mode);
	rate = __le32_to_cpu(ev_hdr->rate);

	memset(status, 0, sizeof(*status));

	ath10k_dbg(ATH10K_DBG_MGMT,
		   "event mgmt rx status %08x\n", rx_status);

	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
		dev_kfree_skb(skb);
		return 0;
	}

	if (rx_status & WMI_RX_STATUS_ERR_DECRYPT) {
		dev_kfree_skb(skb);
		return 0;
	}

	if (rx_status & WMI_RX_STATUS_ERR_KEY_CACHE_MISS) {
		dev_kfree_skb(skb);
		return 0;
	}

	if (rx_status & WMI_RX_STATUS_ERR_CRC)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;
	if (rx_status & WMI_RX_STATUS_ERR_MIC)
		status->flag |= RX_FLAG_MMIC_ERROR;

	/* HW can Rx CCK rates on 5GHz. In that case phy_mode is set to
	 * MODE_11B. This means phy_mode is not a reliable source for the band
	 * of mgmt rx. */

	ch = ar->scan_channel;
	if (!ch)
		ch = ar->rx_channel;

	if (ch) {
		status->band = ch->band;

		if (phy_mode == MODE_11B &&
		    status->band == IEEE80211_BAND_5GHZ)
			ath10k_dbg(ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
	} else {
		ath10k_warn("using (unreliable) phy_mode to extract band for mgmt rx\n");
		status->band = phy_mode_to_band(phy_mode);
	}

	status->freq = ieee80211_channel_to_frequency(channel, status->band);
	status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
	status->rate_idx = get_rate_idx(rate, status->band);

	skb_pull(skb, pull_len);

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = le16_to_cpu(hdr->frame_control);

	/* FW delivers WEP Shared Auth frame with Protected Bit set and
	 * encrypted payload. However in case of PMF it delivers decrypted
	 * frames with Protected Bit set. */
	if (ieee80211_has_protected(hdr->frame_control) &&
	    !ieee80211_is_auth(hdr->frame_control)) {
		status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED |
				RX_FLAG_MMIC_STRIPPED;
		hdr->frame_control = __cpu_to_le16(fc &
					~IEEE80211_FCTL_PROTECTED);
	}

	ath10k_dbg(ATH10K_DBG_MGMT,
		   "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
		   skb, skb->len,
		   fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);

	ath10k_dbg(ATH10K_DBG_MGMT,
		   "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
		   status->freq, status->band, status->signal,
		   status->rate_idx);

	/*
	 * packets from HTC come aligned to 4byte boundaries
	 * because they can originally come in along with a trailer
	 */
	skb_trim(skb, buf_len);

	ieee80211_rx(ar->hw, skb);
	return 0;
}
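
/* For reference, the signal reported above is the firmware SNR offset by
 * ATH10K_DEFAULT_NOISE_FLOOR; e.g. with a -95 dBm noise floor an SNR of
 * 40 dB ends up as roughly -55 dBm in mac80211.  The -95 dBm figure is an
 * assumption about that constant's value, not taken from this excerpt. */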

static int freq_to_idx(struct ath10k *ar, int freq)
{
	struct ieee80211_supported_band *sband;
	int band, ch, idx = 0;

	for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
		sband = ar->hw->wiphy->bands[band];
		if (!sband)
			continue;

		for (ch = 0; ch < sband->n_channels; ch++, idx++)
			if (sband->channels[ch].center_freq == freq)
				goto exit;
	}

exit:
	return idx;
}

static void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_chan_info_event *ev;
	struct survey_info *survey;
	u32 err_code, freq, cmd_flags, noise_floor, rx_clear_count, cycle_count;
	int idx;

	ev = (struct wmi_chan_info_event *)skb->data;

	err_code = __le32_to_cpu(ev->err_code);
	freq = __le32_to_cpu(ev->freq);
	cmd_flags = __le32_to_cpu(ev->cmd_flags);
	noise_floor = __le32_to_cpu(ev->noise_floor);
	rx_clear_count = __le32_to_cpu(ev->rx_clear_count);
	cycle_count = __le32_to_cpu(ev->cycle_count);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n",
		   err_code, freq, cmd_flags, noise_floor, rx_clear_count,
		   cycle_count);

	spin_lock_bh(&ar->data_lock);

	if (!ar->scan.in_progress) {
		ath10k_warn("chan info event without a scan request?\n");
		goto exit;
	}

	idx = freq_to_idx(ar, freq);
	if (idx >= ARRAY_SIZE(ar->survey)) {
		ath10k_warn("chan info: invalid frequency %d (idx %d out of bounds)\n",
			    freq, idx);
		goto exit;
	}

	if (cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) {
		/* During scanning chan info is reported twice for each
		 * visited channel. The reported cycle count is global
		 * and per-channel cycle count must be calculated */

		cycle_count -= ar->survey_last_cycle_count;
		rx_clear_count -= ar->survey_last_rx_clear_count;

		survey = &ar->survey[idx];
		survey->channel_time = WMI_CHAN_INFO_MSEC(cycle_count);
		survey->channel_time_rx = WMI_CHAN_INFO_MSEC(rx_clear_count);
		survey->noise = noise_floor;
		survey->filled = SURVEY_INFO_CHANNEL_TIME |
				 SURVEY_INFO_CHANNEL_TIME_RX |
				 SURVEY_INFO_NOISE_DBM;
	}

	ar->survey_last_rx_clear_count = rx_clear_count;
	ar->survey_last_cycle_count = cycle_count;

exit:
	spin_unlock_bh(&ar->data_lock);
}

static void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n");
}

static int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "wmi event debug mesg len %d\n",
		   skb->len);

	trace_ath10k_wmi_dbglog(skb->data, skb->len);

	return 0;
}

static void ath10k_wmi_event_update_stats(struct ath10k *ar,
					  struct sk_buff *skb)
{
	struct wmi_stats_event *ev = (struct wmi_stats_event *)skb->data;

	ath10k_dbg(ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n");

	ath10k_debug_read_target_stats(ar, ev);
}

static void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar,
					     struct sk_buff *skb)
{
	struct wmi_vdev_start_response_event *ev;

	ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n");

	ev = (struct wmi_vdev_start_response_event *)skb->data;

	if (WARN_ON(__le32_to_cpu(ev->status)))
		return;

	complete(&ar->vdev_setup_done);
}

static void ath10k_wmi_event_vdev_stopped(struct ath10k *ar,
					  struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n");
	complete(&ar->vdev_setup_done);
}

static void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar,
					      struct sk_buff *skb)
{
	struct wmi_peer_sta_kickout_event *ev;
	struct ieee80211_sta *sta;

	ev = (struct wmi_peer_sta_kickout_event *)skb->data;

	ath10k_dbg(ATH10K_DBG_WMI, "wmi event peer sta kickout %pM\n",
		   ev->peer_macaddr.addr);

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(ar->hw, ev->peer_macaddr.addr, NULL);
	if (!sta) {
		ath10k_warn("Spurious quick kickout for STA %pM\n",
			    ev->peer_macaddr.addr);
		goto exit;
	}

	ieee80211_report_low_ack(sta, 10);

exit:
	rcu_read_unlock();
}

/*
 * FIXME
 *
 * We don't report to mac80211 sleep state of connected
 * stations. Due to this mac80211 can't fill in TIM IE
 * correctly.
 *
 * I know of no way of getting nullfunc frames that contain
 * sleep transition from connected stations - these do not
 * seem to be sent from the target to the host. There also
 * doesn't seem to be a dedicated event for that. So the
 * only way left to do this would be to read tim_bitmap
 * during SWBA.
 *
 * We could probably try using tim_bitmap from SWBA to tell
 * mac80211 which stations are asleep and which are not. The
 * problem here is calling mac80211 functions so many times
 * could take too long and make us miss the time to submit
 * the beacon to the target.
 *
 * So as a workaround we try to extend the TIM IE if there
 * is unicast buffered for stations with aid > 7 and fill it
 * in ourselves.
 */
static void ath10k_wmi_update_tim(struct ath10k *ar,
				  struct ath10k_vif *arvif,
				  struct sk_buff *bcn,
				  struct wmi_bcn_info *bcn_info)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data;
	struct ieee80211_tim_ie *tim;
	u8 *ies, *ie;
	u8 ie_len, pvm_len;

	/* if next SWBA has no tim_changed the tim_bitmap is garbage.
	 * we must copy the bitmap upon change and reuse it later */
	if (__le32_to_cpu(bcn_info->tim_info.tim_changed)) {
		int i;

		BUILD_BUG_ON(sizeof(arvif->u.ap.tim_bitmap) !=
			     sizeof(bcn_info->tim_info.tim_bitmap));

		for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++) {
			__le32 t = bcn_info->tim_info.tim_bitmap[i / 4];
			u32 v = __le32_to_cpu(t);
			arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF;
		}

		/* FW reports either length 0 or 16
		 * so we calculate this on our own */
		arvif->u.ap.tim_len = 0;
		for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++)
			if (arvif->u.ap.tim_bitmap[i])
				arvif->u.ap.tim_len = i;

		arvif->u.ap.tim_len++;
	}

	ies = bcn->data;
	ies += ieee80211_hdrlen(hdr->frame_control);
	ies += 12; /* fixed parameters */

	ie = (u8 *)cfg80211_find_ie(WLAN_EID_TIM, ies,
				    (u8 *)skb_tail_pointer(bcn) - ies);
	if (!ie) {
		if (arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
			ath10k_warn("no tim ie found;\n");
		return;
	}

	tim = (void *)ie + 2;
	ie_len = ie[1];
	pvm_len = ie_len - 3; /* exclude dtim count, dtim period, bmap ctl */

	if (pvm_len < arvif->u.ap.tim_len) {
		int expand_size = sizeof(arvif->u.ap.tim_bitmap) - pvm_len;
		int move_size = skb_tail_pointer(bcn) - (ie + 2 + ie_len);
		void *next_ie = ie + 2 + ie_len;

		if (skb_put(bcn, expand_size)) {
			memmove(next_ie + expand_size, next_ie, move_size);

			ie[1] += expand_size;
			ie_len += expand_size;
			pvm_len += expand_size;
		} else {
			ath10k_warn("tim expansion failed\n");
		}
	}

	if (pvm_len > sizeof(arvif->u.ap.tim_bitmap)) {
		ath10k_warn("tim pvm length is too great (%d)\n", pvm_len);
		return;
	}

	tim->bitmap_ctrl = !!__le32_to_cpu(bcn_info->tim_info.tim_mcast);
	memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len);

	if (tim->dtim_count == 0) {
		ATH10K_SKB_CB(bcn)->bcn.dtim_zero = true;

		if (__le32_to_cpu(bcn_info->tim_info.tim_mcast) == 1)
			ATH10K_SKB_CB(bcn)->bcn.deliver_cab = true;
	}

	ath10k_dbg(ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n",
		   tim->dtim_count, tim->dtim_period,
		   tim->bitmap_ctrl, pvm_len);
}
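
/* Worked example of the expansion above: tim_bitmap[] holds one bit per
 * association id, with octet n covering AIDs 8*n..8*n+7.  If the only
 * station with buffered unicast traffic has AID 17, octet 2 is the last
 * non-zero octet and tim_len becomes 3; when the mac80211 template carries
 * a shorter partial virtual bitmap (pvm_len < 3), the TIM element is grown
 * in place so the copied bitmap covers that AID. */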

static void ath10k_p2p_fill_noa_ie(u8 *data, u32 len,
				   struct wmi_p2p_noa_info *noa)
{
	struct ieee80211_p2p_noa_attr *noa_attr;
	u8 ctwindow_oppps = noa->ctwindow_oppps;
	u8 ctwindow = ctwindow_oppps >> WMI_P2P_OPPPS_CTWINDOW_OFFSET;
	bool oppps = !!(ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT);
	__le16 *noa_attr_len;
	u16 attr_len;
	u8 noa_descriptors = noa->num_descriptors;
	int i;

	/* P2P IE */
	data[0] = WLAN_EID_VENDOR_SPECIFIC;
	data[1] = len - 2;
	data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
	data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
	data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
	data[5] = WLAN_OUI_TYPE_WFA_P2P;

	/* NOA ATTR */
	data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE;
	noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */
	noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9];

	noa_attr->index = noa->index;
	noa_attr->oppps_ctwindow = ctwindow;
	if (oppps)
		noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT;

	for (i = 0; i < noa_descriptors; i++) {
		noa_attr->desc[i].count =
			__le32_to_cpu(noa->descriptors[i].type_count);
		noa_attr->desc[i].duration = noa->descriptors[i].duration;
		noa_attr->desc[i].interval = noa->descriptors[i].interval;
		noa_attr->desc[i].start_time = noa->descriptors[i].start_time;
	}

	attr_len = 2; /* index + oppps_ctwindow */
	attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
	*noa_attr_len = __cpu_to_le16(attr_len);
}

static u32 ath10k_p2p_calc_noa_ie_len(struct wmi_p2p_noa_info *noa)
{
	u32 len = 0;
	u8 noa_descriptors = noa->num_descriptors;
	u8 opp_ps_info = noa->ctwindow_oppps;
	bool opps_enabled = !!(opp_ps_info & WMI_P2P_OPPPS_ENABLE_BIT);

	if (!noa_descriptors && !opps_enabled)
		return len;

	len += 1 + 1 + 4; /* EID + len + OUI */
	len += 1 + 2; /* noa attr + attr len */
	len += 1 + 1; /* index + oppps_ctwindow */
	len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);

	return len;
}

static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif,
				  struct sk_buff *bcn,
				  struct wmi_bcn_info *bcn_info)
{
	struct wmi_p2p_noa_info *noa = &bcn_info->p2p_noa_info;
	u8 *new_data, *old_data = arvif->u.ap.noa_data;
	u32 new_len;

	if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
		return;

	ath10k_dbg(ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed);
	if (noa->changed & WMI_P2P_NOA_CHANGED_BIT) {
		new_len = ath10k_p2p_calc_noa_ie_len(noa);
		if (!new_len)
			goto cleanup;

		new_data = kmalloc(new_len, GFP_ATOMIC);
		if (!new_data)
			goto cleanup;

		ath10k_p2p_fill_noa_ie(new_data, new_len, noa);

		spin_lock_bh(&ar->data_lock);
		arvif->u.ap.noa_data = new_data;
		arvif->u.ap.noa_len = new_len;
		spin_unlock_bh(&ar->data_lock);
		kfree(old_data);
	}

	if (arvif->u.ap.noa_data)
		if (!pskb_expand_head(bcn, 0, arvif->u.ap.noa_len, GFP_ATOMIC))
			memcpy(skb_put(bcn, arvif->u.ap.noa_len),
			       arvif->u.ap.noa_data,
			       arvif->u.ap.noa_len);
	return;

cleanup:
	spin_lock_bh(&ar->data_lock);
	arvif->u.ap.noa_data = NULL;
	arvif->u.ap.noa_len = 0;
	spin_unlock_bh(&ar->data_lock);
	kfree(old_data);
}

static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_host_swba_event *ev;
	u32 map;
	int i = -1;
	struct wmi_bcn_info *bcn_info;
	struct ath10k_vif *arvif;
	struct sk_buff *bcn;
	int ret, vdev_id = 0;

	ath10k_dbg(ATH10K_DBG_MGMT, "WMI_HOST_SWBA_EVENTID\n");

	ev = (struct wmi_host_swba_event *)skb->data;
	map = __le32_to_cpu(ev->vdev_map);

	ath10k_dbg(ATH10K_DBG_MGMT, "host swba:\n"
		   "-vdev map 0x%x\n",
		   ev->vdev_map);

	for (; map; map >>= 1, vdev_id++) {
		if (!(map & 0x1))
			continue;

		i++;

		if (i >= WMI_MAX_AP_VDEV) {
			ath10k_warn("swba has corrupted vdev map\n");
			break;
		}

		bcn_info = &ev->bcn_info[i];

		ath10k_dbg(ATH10K_DBG_MGMT,
			   "-bcn_info[%d]:\n"
			   "--tim_len %d\n"
			   "--tim_mcast %d\n"
			   "--tim_changed %d\n"
			   "--tim_num_ps_pending %d\n"
			   "--tim_bitmap 0x%08x%08x%08x%08x\n",
			   i,
			   __le32_to_cpu(bcn_info->tim_info.tim_len),
			   __le32_to_cpu(bcn_info->tim_info.tim_mcast),
			   __le32_to_cpu(bcn_info->tim_info.tim_changed),
			   __le32_to_cpu(bcn_info->tim_info.tim_num_ps_pending),
			   __le32_to_cpu(bcn_info->tim_info.tim_bitmap[3]),
			   __le32_to_cpu(bcn_info->tim_info.tim_bitmap[2]),
			   __le32_to_cpu(bcn_info->tim_info.tim_bitmap[1]),
			   __le32_to_cpu(bcn_info->tim_info.tim_bitmap[0]));

		arvif = ath10k_get_arvif(ar, vdev_id);
		if (arvif == NULL) {
			ath10k_warn("no vif for vdev_id %d found\n", vdev_id);
			continue;
		}

		/* There are no completions for beacons so wait for next SWBA
		 * before telling mac80211 to decrement CSA counter
		 *
		 * Once CSA counter is completed stop sending beacons until
		 * actual channel switch is done */
		if (arvif->vif->csa_active &&
		    ieee80211_csa_is_complete(arvif->vif)) {
			ieee80211_csa_finish(arvif->vif);
			continue;
		}

		bcn = ieee80211_beacon_get(ar->hw, arvif->vif);
		if (!bcn) {
			ath10k_warn("could not get mac80211 beacon\n");
			continue;
		}

		ath10k_tx_h_seq_no(bcn);
		ath10k_wmi_update_tim(ar, arvif, bcn, bcn_info);
		ath10k_wmi_update_noa(ar, arvif, bcn, bcn_info);

		spin_lock_bh(&ar->data_lock);

		if (arvif->beacon) {
			if (!arvif->beacon_sent)
				ath10k_warn("SWBA overrun on vdev %d\n",
					    arvif->vdev_id);

			dma_unmap_single(arvif->ar->dev,
					 ATH10K_SKB_CB(arvif->beacon)->paddr,
					 arvif->beacon->len, DMA_TO_DEVICE);
			dev_kfree_skb_any(arvif->beacon);
		}

		ATH10K_SKB_CB(bcn)->paddr = dma_map_single(arvif->ar->dev,
							   bcn->data, bcn->len,
							   DMA_TO_DEVICE);
		ret = dma_mapping_error(arvif->ar->dev,
					ATH10K_SKB_CB(bcn)->paddr);
		if (ret) {
			ath10k_warn("failed to map beacon: %d\n", ret);
			goto skip;
		}

		arvif->beacon = bcn;
		arvif->beacon_sent = false;

		ath10k_wmi_tx_beacon_nowait(arvif);
skip:
		spin_unlock_bh(&ar->data_lock);
	}
}
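
/* Beacon buffer ownership in the loop above: the freshly built beacon is
 * DMA-mapped and parked in arvif->beacon with beacon_sent cleared, then
 * handed to the firmware by ath10k_wmi_tx_beacon_nowait() either here or
 * later from the tx-credits path (ath10k_wmi_op_ep_tx_credits).  The
 * previous beacon is only unmapped and freed on the next SWBA, which is
 * why an unsent one triggers the "SWBA overrun" warning. */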

static void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar,
					       struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n");
}

static void ath10k_dfs_radar_report(struct ath10k *ar,
				    struct wmi_single_phyerr_rx_event *event,
				    struct phyerr_radar_report *rr,
				    u64 tsf)
{
	u32 reg0, reg1, tsf32l;
	struct pulse_event pe;
	u64 tsf64;
	u8 rssi, width;

	reg0 = __le32_to_cpu(rr->reg0);
	reg1 = __le32_to_cpu(rr->reg1);

	ath10k_dbg(ATH10K_DBG_REGULATORY,
		   "wmi phyerr radar report chirp %d max_width %d agc_total_gain %d pulse_delta_diff %d\n",
		   MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP),
		   MS(reg0, RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH),
		   MS(reg0, RADAR_REPORT_REG0_AGC_TOTAL_GAIN),
		   MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_DIFF));
	ath10k_dbg(ATH10K_DBG_REGULATORY,
		   "wmi phyerr radar report pulse_delta_peak %d pulse_sidx %d fft_valid %d agc_mb_gain %d subchan_mask %d\n",
		   MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_PEAK),
		   MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX),
		   MS(reg1, RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID),
		   MS(reg1, RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN),
		   MS(reg1, RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK));
	ath10k_dbg(ATH10K_DBG_REGULATORY,
		   "wmi phyerr radar report pulse_tsf_offset 0x%X pulse_dur: %d\n",
		   MS(reg1, RADAR_REPORT_REG1_PULSE_TSF_OFFSET),
		   MS(reg1, RADAR_REPORT_REG1_PULSE_DUR));

	if (!ar->dfs_detector)
		return;

	/* report event to DFS pattern detector */
	tsf32l = __le32_to_cpu(event->hdr.tsf_timestamp);
	tsf64 = tsf & (~0xFFFFFFFFULL);
	tsf64 |= tsf32l;

	width = MS(reg1, RADAR_REPORT_REG1_PULSE_DUR);
	rssi = event->hdr.rssi_combined;

	/* hardware stores this as an 8 bit signed value,
	 * set it to zero if it is negative
	 */
	if (rssi & 0x80)
		rssi = 0;

	pe.ts = tsf64;
	pe.freq = ar->hw->conf.chandef.chan->center_freq;
	pe.width = width;
	pe.rssi = rssi;

	ath10k_dbg(ATH10K_DBG_REGULATORY,
		   "dfs add pulse freq: %d, width: %d, rssi %d, tsf: %llX\n",
		   pe.freq, pe.width, pe.rssi, pe.ts);

	ATH10K_DFS_STAT_INC(ar, pulses_detected);

	if (!ar->dfs_detector->add_pulse(ar->dfs_detector, &pe)) {
		ath10k_dbg(ATH10K_DBG_REGULATORY,
			   "dfs no pulse pattern detected, yet\n");
		return;
	}

	ath10k_dbg(ATH10K_DBG_REGULATORY, "dfs radar detected\n");
	ATH10K_DFS_STAT_INC(ar, radar_detected);

	/* Control radar events reporting in debugfs file
	   dfs_block_radar_events */
	if (ar->dfs_block_radar_events) {
		ath10k_info("DFS Radar detected, but ignored as requested\n");
		return;
	}

	ieee80211_radar_detected(ar->hw);
}

static int ath10k_dfs_fft_report(struct ath10k *ar,
				 struct wmi_single_phyerr_rx_event *event,
				 struct phyerr_fft_report *fftr,
				 u64 tsf)
{
	u32 reg0, reg1;
	u8 rssi, peak_mag;

	reg0 = __le32_to_cpu(fftr->reg0);
	reg1 = __le32_to_cpu(fftr->reg1);
	rssi = event->hdr.rssi_combined;

	ath10k_dbg(ATH10K_DBG_REGULATORY,
		   "wmi phyerr fft report total_gain_db %d base_pwr_db %d fft_chn_idx %d peak_sidx %d\n",
		   MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB),
		   MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB),
		   MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX),
		   MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX));
	ath10k_dbg(ATH10K_DBG_REGULATORY,
		   "wmi phyerr fft report rel_pwr_db %d avgpwr_db %d peak_mag %d num_store_bin %d\n",
		   MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB),
		   MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB),
		   MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG),
		   MS(reg1, SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB));

	peak_mag = MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG);

	/* false event detection */
	if (rssi == DFS_RSSI_POSSIBLY_FALSE &&
	    peak_mag < 2 * DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE) {
		ath10k_dbg(ATH10K_DBG_REGULATORY, "dfs false pulse detected\n");
		ATH10K_DFS_STAT_INC(ar, pulses_discarded);
		return -EINVAL;
	}

	return 0;
}

static void ath10k_wmi_event_dfs(struct ath10k *ar,
				 struct wmi_single_phyerr_rx_event *event,
				 u64 tsf)
{
	int buf_len, tlv_len, res, i = 0;
	struct phyerr_tlv *tlv;
	struct phyerr_radar_report *rr;
	struct phyerr_fft_report *fftr;
	u8 *tlv_buf;

	buf_len = __le32_to_cpu(event->hdr.buf_len);
	ath10k_dbg(ATH10K_DBG_REGULATORY,
		   "wmi event dfs err_code %d rssi %d tsfl 0x%X tsf64 0x%llX len %d\n",
		   event->hdr.phy_err_code, event->hdr.rssi_combined,
		   __le32_to_cpu(event->hdr.tsf_timestamp), tsf, buf_len);

	/* Skip event if DFS disabled */
(!config_enabled(CONFIG_ATH10K_DFS_CERTIFIED))
		return;

	ATH10K_DFS_STAT_INC(ar, pulses_total);

	while (i < buf_len) {
		if (i + sizeof(*tlv) > buf_len) {
			ath10k_warn("too short buf for tlv header (%d)\n", i);
			return;
		}

		tlv = (struct phyerr_tlv *)&event->bufp[i];
		tlv_len = __le16_to_cpu(tlv->len);
		tlv_buf = &event->bufp[i + sizeof(*tlv)];
		ath10k_dbg(ATH10K_DBG_REGULATORY,
			   "wmi event dfs tlv_len %d tlv_tag 0x%02X tlv_sig 0x%02X\n",
			   tlv_len, tlv->tag, tlv->sig);

		switch (tlv->tag) {
		case PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY:
			if (i + sizeof(*tlv) + sizeof(*rr) > buf_len) {
				ath10k_warn("too short radar pulse summary (%d)\n",
					    i);
				return;
			}

			rr = (struct phyerr_radar_report *)tlv_buf;
			ath10k_dfs_radar_report(ar, event, rr, tsf);
			break;
		case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
			if (i + sizeof(*tlv) + sizeof(*fftr) > buf_len) {
				ath10k_warn("too short fft report (%d)\n", i);
				return;
			}

			fftr = (struct phyerr_fft_report *)tlv_buf;
			res = ath10k_dfs_fft_report(ar, event, fftr, tsf);
			if (res)
				return;
			break;
		}

		i += sizeof(*tlv) + tlv_len;
	}
}

static void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
					   struct wmi_single_phyerr_rx_event *event,
					   u64 tsf)
{
	ath10k_dbg(ATH10K_DBG_WMI, "wmi event spectral scan\n");
}

static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_comb_phyerr_rx_event *comb_event;
	struct wmi_single_phyerr_rx_event *event;
	u32 count, i, buf_len, phy_err_code;
	u64 tsf;
	int left_len = skb->len;

	ATH10K_DFS_STAT_INC(ar, phy_errors);

	/* Check if combined event available */
	if (left_len < sizeof(*comb_event)) {
		ath10k_warn("wmi phyerr combined event wrong len\n");
		return;
	}

	left_len -= sizeof(*comb_event);

	/* Check number of included events */
	comb_event = (struct wmi_comb_phyerr_rx_event *)skb->data;
	count = __le32_to_cpu(comb_event->hdr.num_phyerr_events);

	tsf = __le32_to_cpu(comb_event->hdr.tsf_u32);
	tsf <<= 32;
	tsf |= __le32_to_cpu(comb_event->hdr.tsf_l32);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi event phyerr count %d tsf64 0x%llX\n",
		   count, tsf);

	event = (struct wmi_single_phyerr_rx_event *)comb_event->bufp;
	for (i = 0; i < count; i++) {
		/* Check if we can read event header */
		if (left_len < sizeof(*event)) {
			ath10k_warn("single event (%d) wrong head len\n", i);
			return;
		}

		left_len -= sizeof(*event);

		buf_len = __le32_to_cpu(event->hdr.buf_len);
		phy_err_code = event->hdr.phy_err_code;

		if (left_len < buf_len) {
			ath10k_warn("single event (%d) wrong buf len\n", i);
			return;
		}

		left_len -= buf_len;

		switch (phy_err_code) {
		case PHY_ERROR_RADAR:
			ath10k_wmi_event_dfs(ar, event, tsf);
			break;
		case PHY_ERROR_SPECTRAL_SCAN:
			ath10k_wmi_event_spectral_scan(ar, event, tsf);
			break;
		case PHY_ERROR_FALSE_RADAR_EXT:
			ath10k_wmi_event_dfs(ar, event, tsf);
			ath10k_wmi_event_spectral_scan(ar, event, tsf);
			break;
		default:
			break;
		}

		/* each record is a header followed by buf_len payload bytes,
		 * so advance the pointer byte-wise rather than in units of
		 * sizeof(*event) */
		event = (void *)event + sizeof(*event) + buf_len;
	}
}

static void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_ROAM_EVENTID\n");
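	/* Roam events are only traced here for now; nothing is propagated to
	 * mac80211 or to the rest of the driver from this handler. */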
1727 } 1728 1729 static void ath10k_wmi_event_profile_match(struct ath10k *ar, 1730 struct sk_buff *skb) 1731 { 1732 ath10k_dbg(ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n"); 1733 } 1734 1735 static void ath10k_wmi_event_debug_print(struct ath10k *ar, 1736 struct sk_buff *skb) 1737 { 1738 char buf[101], c; 1739 int i; 1740 1741 for (i = 0; i < sizeof(buf) - 1; i++) { 1742 if (i >= skb->len) 1743 break; 1744 1745 c = skb->data[i]; 1746 1747 if (c == '\0') 1748 break; 1749 1750 if (isascii(c) && isprint(c)) 1751 buf[i] = c; 1752 else 1753 buf[i] = '.'; 1754 } 1755 1756 if (i == sizeof(buf) - 1) 1757 ath10k_warn("wmi debug print truncated: %d\n", skb->len); 1758 1759 /* for some reason the debug prints end with \n, remove that */ 1760 if (skb->data[i - 1] == '\n') 1761 i--; 1762 1763 /* the last byte is always reserved for the null character */ 1764 buf[i] = '\0'; 1765 1766 ath10k_dbg(ATH10K_DBG_WMI, "wmi event debug print '%s'\n", buf); 1767 } 1768 1769 static void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb) 1770 { 1771 ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n"); 1772 } 1773 1774 static void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar, 1775 struct sk_buff *skb) 1776 { 1777 ath10k_dbg(ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n"); 1778 } 1779 1780 static void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar, 1781 struct sk_buff *skb) 1782 { 1783 ath10k_dbg(ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n"); 1784 } 1785 1786 static void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar, 1787 struct sk_buff *skb) 1788 { 1789 ath10k_dbg(ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n"); 1790 } 1791 1792 static void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, 1793 struct sk_buff *skb) 1794 { 1795 ath10k_dbg(ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n"); 1796 } 1797 1798 static void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, 1799 struct sk_buff *skb) 1800 { 1801 ath10k_dbg(ATH10K_DBG_WMI, "WMI_WOW_WAKEUP_HOST_EVENTID\n"); 1802 } 1803 1804 static void ath10k_wmi_event_dcs_interference(struct ath10k *ar, 1805 struct sk_buff *skb) 1806 { 1807 ath10k_dbg(ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n"); 1808 } 1809 1810 static void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, 1811 struct sk_buff *skb) 1812 { 1813 ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_TPC_CONFIG_EVENTID\n"); 1814 } 1815 1816 static void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, 1817 struct sk_buff *skb) 1818 { 1819 ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n"); 1820 } 1821 1822 static void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar, 1823 struct sk_buff *skb) 1824 { 1825 ath10k_dbg(ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n"); 1826 } 1827 1828 static void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar, 1829 struct sk_buff *skb) 1830 { 1831 ath10k_dbg(ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n"); 1832 } 1833 1834 static void ath10k_wmi_event_delba_complete(struct ath10k *ar, 1835 struct sk_buff *skb) 1836 { 1837 ath10k_dbg(ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n"); 1838 } 1839 1840 static void ath10k_wmi_event_addba_complete(struct ath10k *ar, 1841 struct sk_buff *skb) 1842 { 1843 ath10k_dbg(ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n"); 1844 } 1845 1846 static void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar, 1847 struct sk_buff *skb) 1848 { 1849 ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n"); 1850 } 1851 1852 static void 
ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar, 1853 struct sk_buff *skb) 1854 { 1855 ath10k_dbg(ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n"); 1856 } 1857 1858 static void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, 1859 struct sk_buff *skb) 1860 { 1861 ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n"); 1862 } 1863 1864 static void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, 1865 struct sk_buff *skb) 1866 { 1867 ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n"); 1868 } 1869 1870 static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id, 1871 u32 num_units, u32 unit_len) 1872 { 1873 dma_addr_t paddr; 1874 u32 pool_size; 1875 int idx = ar->wmi.num_mem_chunks; 1876 1877 pool_size = num_units * round_up(unit_len, 4); 1878 1879 if (!pool_size) 1880 return -EINVAL; 1881 1882 ar->wmi.mem_chunks[idx].vaddr = dma_alloc_coherent(ar->dev, 1883 pool_size, 1884 &paddr, 1885 GFP_ATOMIC); 1886 if (!ar->wmi.mem_chunks[idx].vaddr) { 1887 ath10k_warn("failed to allocate memory chunk\n"); 1888 return -ENOMEM; 1889 } 1890 1891 memset(ar->wmi.mem_chunks[idx].vaddr, 0, pool_size); 1892 1893 ar->wmi.mem_chunks[idx].paddr = paddr; 1894 ar->wmi.mem_chunks[idx].len = pool_size; 1895 ar->wmi.mem_chunks[idx].req_id = req_id; 1896 ar->wmi.num_mem_chunks++; 1897 1898 return 0; 1899 } 1900 1901 static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar, 1902 struct sk_buff *skb) 1903 { 1904 struct wmi_service_ready_event *ev = (void *)skb->data; 1905 1906 if (skb->len < sizeof(*ev)) { 1907 ath10k_warn("Service ready event was %d B but expected %zu B. Wrong firmware version?\n", 1908 skb->len, sizeof(*ev)); 1909 return; 1910 } 1911 1912 ar->hw_min_tx_power = __le32_to_cpu(ev->hw_min_tx_power); 1913 ar->hw_max_tx_power = __le32_to_cpu(ev->hw_max_tx_power); 1914 ar->ht_cap_info = __le32_to_cpu(ev->ht_cap_info); 1915 ar->vht_cap_info = __le32_to_cpu(ev->vht_cap_info); 1916 ar->fw_version_major = 1917 (__le32_to_cpu(ev->sw_version) & 0xff000000) >> 24; 1918 ar->fw_version_minor = (__le32_to_cpu(ev->sw_version) & 0x00ffffff); 1919 ar->fw_version_release = 1920 (__le32_to_cpu(ev->sw_version_1) & 0xffff0000) >> 16; 1921 ar->fw_version_build = (__le32_to_cpu(ev->sw_version_1) & 0x0000ffff); 1922 ar->phy_capability = __le32_to_cpu(ev->phy_capability); 1923 ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains); 1924 1925 /* only manually set fw features when not using FW IE format */ 1926 if (ar->fw_api == 1 && ar->fw_version_build > 636) 1927 set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features); 1928 1929 if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) { 1930 ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n", 1931 ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM); 1932 ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM; 1933 } 1934 1935 ar->ath_common.regulatory.current_rd = 1936 __le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd); 1937 1938 ath10k_debug_read_service_map(ar, ev->wmi_service_bitmap, 1939 sizeof(ev->wmi_service_bitmap)); 1940 1941 if (strlen(ar->hw->wiphy->fw_version) == 0) { 1942 snprintf(ar->hw->wiphy->fw_version, 1943 sizeof(ar->hw->wiphy->fw_version), 1944 "%u.%u.%u.%u", 1945 ar->fw_version_major, 1946 ar->fw_version_minor, 1947 ar->fw_version_release, 1948 ar->fw_version_build); 1949 } 1950 1951 /* FIXME: it probably should be better to support this */ 1952 if (__le32_to_cpu(ev->num_mem_reqs) > 0) { 1953 ath10k_warn("target requested %d memory chunks; ignoring\n", 1954 __le32_to_cpu(ev->num_mem_reqs)); 1955 } 1956 1957 
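	/* The firmware version advertised above is unpacked from two 32-bit
	 * words: sw_version holds major (top 8 bits) and minor (low 24 bits),
	 * sw_version_1 holds release (top 16 bits) and build (low 16 bits).
	 * For example, a sw_version of 0x0a000280 decodes to major 10 and
	 * minor 640 (the value is illustrative, not a real firmware build). */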
ath10k_dbg(ATH10K_DBG_WMI, 1958 "wmi event service ready sw_ver 0x%08x sw_ver1 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n", 1959 __le32_to_cpu(ev->sw_version), 1960 __le32_to_cpu(ev->sw_version_1), 1961 __le32_to_cpu(ev->abi_version), 1962 __le32_to_cpu(ev->phy_capability), 1963 __le32_to_cpu(ev->ht_cap_info), 1964 __le32_to_cpu(ev->vht_cap_info), 1965 __le32_to_cpu(ev->vht_supp_mcs), 1966 __le32_to_cpu(ev->sys_cap_info), 1967 __le32_to_cpu(ev->num_mem_reqs), 1968 __le32_to_cpu(ev->num_rf_chains)); 1969 1970 complete(&ar->wmi.service_ready); 1971 } 1972 1973 static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar, 1974 struct sk_buff *skb) 1975 { 1976 u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i; 1977 int ret; 1978 struct wmi_service_ready_event_10x *ev = (void *)skb->data; 1979 1980 if (skb->len < sizeof(*ev)) { 1981 ath10k_warn("Service ready event was %d B but expected %zu B. Wrong firmware version?\n", 1982 skb->len, sizeof(*ev)); 1983 return; 1984 } 1985 1986 ar->hw_min_tx_power = __le32_to_cpu(ev->hw_min_tx_power); 1987 ar->hw_max_tx_power = __le32_to_cpu(ev->hw_max_tx_power); 1988 ar->ht_cap_info = __le32_to_cpu(ev->ht_cap_info); 1989 ar->vht_cap_info = __le32_to_cpu(ev->vht_cap_info); 1990 ar->fw_version_major = 1991 (__le32_to_cpu(ev->sw_version) & 0xff000000) >> 24; 1992 ar->fw_version_minor = (__le32_to_cpu(ev->sw_version) & 0x00ffffff); 1993 ar->phy_capability = __le32_to_cpu(ev->phy_capability); 1994 ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains); 1995 1996 if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) { 1997 ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n", 1998 ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM); 1999 ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM; 2000 } 2001 2002 ar->ath_common.regulatory.current_rd = 2003 __le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd); 2004 2005 ath10k_debug_read_service_map(ar, ev->wmi_service_bitmap, 2006 sizeof(ev->wmi_service_bitmap)); 2007 2008 if (strlen(ar->hw->wiphy->fw_version) == 0) { 2009 snprintf(ar->hw->wiphy->fw_version, 2010 sizeof(ar->hw->wiphy->fw_version), 2011 "%u.%u", 2012 ar->fw_version_major, 2013 ar->fw_version_minor); 2014 } 2015 2016 num_mem_reqs = __le32_to_cpu(ev->num_mem_reqs); 2017 2018 if (num_mem_reqs > ATH10K_MAX_MEM_REQS) { 2019 ath10k_warn("requested memory chunks number (%d) exceeds the limit\n", 2020 num_mem_reqs); 2021 return; 2022 } 2023 2024 if (!num_mem_reqs) 2025 goto exit; 2026 2027 ath10k_dbg(ATH10K_DBG_WMI, "firmware has requested %d memory chunks\n", 2028 num_mem_reqs); 2029 2030 for (i = 0; i < num_mem_reqs; ++i) { 2031 req_id = __le32_to_cpu(ev->mem_reqs[i].req_id); 2032 num_units = __le32_to_cpu(ev->mem_reqs[i].num_units); 2033 unit_size = __le32_to_cpu(ev->mem_reqs[i].unit_size); 2034 num_unit_info = __le32_to_cpu(ev->mem_reqs[i].num_unit_info); 2035 2036 if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) 2037 /* number of units to allocate is number of 2038 * peers, 1 extra for self peer on target */ 2039 /* this needs to be tied, host and target 2040 * can get out of sync */ 2041 num_units = TARGET_10X_NUM_PEERS + 1; 2042 else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) 2043 num_units = TARGET_10X_NUM_VDEVS + 1; 2044 2045 ath10k_dbg(ATH10K_DBG_WMI, 2046 "wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n", 2047 req_id, 2048 __le32_to_cpu(ev->mem_reqs[i].num_units), 2049 num_unit_info, 2050 unit_size, 2051 
num_units); 2052 2053 ret = ath10k_wmi_alloc_host_mem(ar, req_id, num_units, 2054 unit_size); 2055 if (ret) 2056 return; 2057 } 2058 2059 exit: 2060 ath10k_dbg(ATH10K_DBG_WMI, 2061 "wmi event service ready sw_ver 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n", 2062 __le32_to_cpu(ev->sw_version), 2063 __le32_to_cpu(ev->abi_version), 2064 __le32_to_cpu(ev->phy_capability), 2065 __le32_to_cpu(ev->ht_cap_info), 2066 __le32_to_cpu(ev->vht_cap_info), 2067 __le32_to_cpu(ev->vht_supp_mcs), 2068 __le32_to_cpu(ev->sys_cap_info), 2069 __le32_to_cpu(ev->num_mem_reqs), 2070 __le32_to_cpu(ev->num_rf_chains)); 2071 2072 complete(&ar->wmi.service_ready); 2073 } 2074 2075 static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb) 2076 { 2077 struct wmi_ready_event *ev = (struct wmi_ready_event *)skb->data; 2078 2079 if (WARN_ON(skb->len < sizeof(*ev))) 2080 return -EINVAL; 2081 2082 memcpy(ar->mac_addr, ev->mac_addr.addr, ETH_ALEN); 2083 2084 ath10k_dbg(ATH10K_DBG_WMI, 2085 "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d skb->len %i ev-sz %zu\n", 2086 __le32_to_cpu(ev->sw_version), 2087 __le32_to_cpu(ev->abi_version), 2088 ev->mac_addr.addr, 2089 __le32_to_cpu(ev->status), skb->len, sizeof(*ev)); 2090 2091 complete(&ar->wmi.unified_ready); 2092 return 0; 2093 } 2094 2095 static void ath10k_wmi_main_process_rx(struct ath10k *ar, struct sk_buff *skb) 2096 { 2097 struct wmi_cmd_hdr *cmd_hdr; 2098 enum wmi_event_id id; 2099 u16 len; 2100 2101 cmd_hdr = (struct wmi_cmd_hdr *)skb->data; 2102 id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID); 2103 2104 if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL) 2105 return; 2106 2107 len = skb->len; 2108 2109 trace_ath10k_wmi_event(id, skb->data, skb->len); 2110 2111 switch (id) { 2112 case WMI_MGMT_RX_EVENTID: 2113 ath10k_wmi_event_mgmt_rx(ar, skb); 2114 /* mgmt_rx() owns the skb now! 
*/ 2115 return; 2116 case WMI_SCAN_EVENTID: 2117 ath10k_wmi_event_scan(ar, skb); 2118 break; 2119 case WMI_CHAN_INFO_EVENTID: 2120 ath10k_wmi_event_chan_info(ar, skb); 2121 break; 2122 case WMI_ECHO_EVENTID: 2123 ath10k_wmi_event_echo(ar, skb); 2124 break; 2125 case WMI_DEBUG_MESG_EVENTID: 2126 ath10k_wmi_event_debug_mesg(ar, skb); 2127 break; 2128 case WMI_UPDATE_STATS_EVENTID: 2129 ath10k_wmi_event_update_stats(ar, skb); 2130 break; 2131 case WMI_VDEV_START_RESP_EVENTID: 2132 ath10k_wmi_event_vdev_start_resp(ar, skb); 2133 break; 2134 case WMI_VDEV_STOPPED_EVENTID: 2135 ath10k_wmi_event_vdev_stopped(ar, skb); 2136 break; 2137 case WMI_PEER_STA_KICKOUT_EVENTID: 2138 ath10k_wmi_event_peer_sta_kickout(ar, skb); 2139 break; 2140 case WMI_HOST_SWBA_EVENTID: 2141 ath10k_wmi_event_host_swba(ar, skb); 2142 break; 2143 case WMI_TBTTOFFSET_UPDATE_EVENTID: 2144 ath10k_wmi_event_tbttoffset_update(ar, skb); 2145 break; 2146 case WMI_PHYERR_EVENTID: 2147 ath10k_wmi_event_phyerr(ar, skb); 2148 break; 2149 case WMI_ROAM_EVENTID: 2150 ath10k_wmi_event_roam(ar, skb); 2151 break; 2152 case WMI_PROFILE_MATCH: 2153 ath10k_wmi_event_profile_match(ar, skb); 2154 break; 2155 case WMI_DEBUG_PRINT_EVENTID: 2156 ath10k_wmi_event_debug_print(ar, skb); 2157 break; 2158 case WMI_PDEV_QVIT_EVENTID: 2159 ath10k_wmi_event_pdev_qvit(ar, skb); 2160 break; 2161 case WMI_WLAN_PROFILE_DATA_EVENTID: 2162 ath10k_wmi_event_wlan_profile_data(ar, skb); 2163 break; 2164 case WMI_RTT_MEASUREMENT_REPORT_EVENTID: 2165 ath10k_wmi_event_rtt_measurement_report(ar, skb); 2166 break; 2167 case WMI_TSF_MEASUREMENT_REPORT_EVENTID: 2168 ath10k_wmi_event_tsf_measurement_report(ar, skb); 2169 break; 2170 case WMI_RTT_ERROR_REPORT_EVENTID: 2171 ath10k_wmi_event_rtt_error_report(ar, skb); 2172 break; 2173 case WMI_WOW_WAKEUP_HOST_EVENTID: 2174 ath10k_wmi_event_wow_wakeup_host(ar, skb); 2175 break; 2176 case WMI_DCS_INTERFERENCE_EVENTID: 2177 ath10k_wmi_event_dcs_interference(ar, skb); 2178 break; 2179 case WMI_PDEV_TPC_CONFIG_EVENTID: 2180 ath10k_wmi_event_pdev_tpc_config(ar, skb); 2181 break; 2182 case WMI_PDEV_FTM_INTG_EVENTID: 2183 ath10k_wmi_event_pdev_ftm_intg(ar, skb); 2184 break; 2185 case WMI_GTK_OFFLOAD_STATUS_EVENTID: 2186 ath10k_wmi_event_gtk_offload_status(ar, skb); 2187 break; 2188 case WMI_GTK_REKEY_FAIL_EVENTID: 2189 ath10k_wmi_event_gtk_rekey_fail(ar, skb); 2190 break; 2191 case WMI_TX_DELBA_COMPLETE_EVENTID: 2192 ath10k_wmi_event_delba_complete(ar, skb); 2193 break; 2194 case WMI_TX_ADDBA_COMPLETE_EVENTID: 2195 ath10k_wmi_event_addba_complete(ar, skb); 2196 break; 2197 case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID: 2198 ath10k_wmi_event_vdev_install_key_complete(ar, skb); 2199 break; 2200 case WMI_SERVICE_READY_EVENTID: 2201 ath10k_wmi_service_ready_event_rx(ar, skb); 2202 break; 2203 case WMI_READY_EVENTID: 2204 ath10k_wmi_ready_event_rx(ar, skb); 2205 break; 2206 default: 2207 ath10k_warn("Unknown eventid: %d\n", id); 2208 break; 2209 } 2210 2211 dev_kfree_skb(skb); 2212 } 2213 2214 static void ath10k_wmi_10x_process_rx(struct ath10k *ar, struct sk_buff *skb) 2215 { 2216 struct wmi_cmd_hdr *cmd_hdr; 2217 enum wmi_10x_event_id id; 2218 u16 len; 2219 2220 cmd_hdr = (struct wmi_cmd_hdr *)skb->data; 2221 id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID); 2222 2223 if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL) 2224 return; 2225 2226 len = skb->len; 2227 2228 trace_ath10k_wmi_event(id, skb->data, skb->len); 2229 2230 switch (id) { 2231 case WMI_10X_MGMT_RX_EVENTID: 2232 ath10k_wmi_event_mgmt_rx(ar, skb); 2233 /* 
mgmt_rx() owns the skb now! */ 2234 return; 2235 case WMI_10X_SCAN_EVENTID: 2236 ath10k_wmi_event_scan(ar, skb); 2237 break; 2238 case WMI_10X_CHAN_INFO_EVENTID: 2239 ath10k_wmi_event_chan_info(ar, skb); 2240 break; 2241 case WMI_10X_ECHO_EVENTID: 2242 ath10k_wmi_event_echo(ar, skb); 2243 break; 2244 case WMI_10X_DEBUG_MESG_EVENTID: 2245 ath10k_wmi_event_debug_mesg(ar, skb); 2246 break; 2247 case WMI_10X_UPDATE_STATS_EVENTID: 2248 ath10k_wmi_event_update_stats(ar, skb); 2249 break; 2250 case WMI_10X_VDEV_START_RESP_EVENTID: 2251 ath10k_wmi_event_vdev_start_resp(ar, skb); 2252 break; 2253 case WMI_10X_VDEV_STOPPED_EVENTID: 2254 ath10k_wmi_event_vdev_stopped(ar, skb); 2255 break; 2256 case WMI_10X_PEER_STA_KICKOUT_EVENTID: 2257 ath10k_wmi_event_peer_sta_kickout(ar, skb); 2258 break; 2259 case WMI_10X_HOST_SWBA_EVENTID: 2260 ath10k_wmi_event_host_swba(ar, skb); 2261 break; 2262 case WMI_10X_TBTTOFFSET_UPDATE_EVENTID: 2263 ath10k_wmi_event_tbttoffset_update(ar, skb); 2264 break; 2265 case WMI_10X_PHYERR_EVENTID: 2266 ath10k_wmi_event_phyerr(ar, skb); 2267 break; 2268 case WMI_10X_ROAM_EVENTID: 2269 ath10k_wmi_event_roam(ar, skb); 2270 break; 2271 case WMI_10X_PROFILE_MATCH: 2272 ath10k_wmi_event_profile_match(ar, skb); 2273 break; 2274 case WMI_10X_DEBUG_PRINT_EVENTID: 2275 ath10k_wmi_event_debug_print(ar, skb); 2276 break; 2277 case WMI_10X_PDEV_QVIT_EVENTID: 2278 ath10k_wmi_event_pdev_qvit(ar, skb); 2279 break; 2280 case WMI_10X_WLAN_PROFILE_DATA_EVENTID: 2281 ath10k_wmi_event_wlan_profile_data(ar, skb); 2282 break; 2283 case WMI_10X_RTT_MEASUREMENT_REPORT_EVENTID: 2284 ath10k_wmi_event_rtt_measurement_report(ar, skb); 2285 break; 2286 case WMI_10X_TSF_MEASUREMENT_REPORT_EVENTID: 2287 ath10k_wmi_event_tsf_measurement_report(ar, skb); 2288 break; 2289 case WMI_10X_RTT_ERROR_REPORT_EVENTID: 2290 ath10k_wmi_event_rtt_error_report(ar, skb); 2291 break; 2292 case WMI_10X_WOW_WAKEUP_HOST_EVENTID: 2293 ath10k_wmi_event_wow_wakeup_host(ar, skb); 2294 break; 2295 case WMI_10X_DCS_INTERFERENCE_EVENTID: 2296 ath10k_wmi_event_dcs_interference(ar, skb); 2297 break; 2298 case WMI_10X_PDEV_TPC_CONFIG_EVENTID: 2299 ath10k_wmi_event_pdev_tpc_config(ar, skb); 2300 break; 2301 case WMI_10X_INST_RSSI_STATS_EVENTID: 2302 ath10k_wmi_event_inst_rssi_stats(ar, skb); 2303 break; 2304 case WMI_10X_VDEV_STANDBY_REQ_EVENTID: 2305 ath10k_wmi_event_vdev_standby_req(ar, skb); 2306 break; 2307 case WMI_10X_VDEV_RESUME_REQ_EVENTID: 2308 ath10k_wmi_event_vdev_resume_req(ar, skb); 2309 break; 2310 case WMI_10X_SERVICE_READY_EVENTID: 2311 ath10k_wmi_10x_service_ready_event_rx(ar, skb); 2312 break; 2313 case WMI_10X_READY_EVENTID: 2314 ath10k_wmi_ready_event_rx(ar, skb); 2315 break; 2316 default: 2317 ath10k_warn("Unknown eventid: %d\n", id); 2318 break; 2319 } 2320 2321 dev_kfree_skb(skb); 2322 } 2323 2324 2325 static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb) 2326 { 2327 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) 2328 ath10k_wmi_10x_process_rx(ar, skb); 2329 else 2330 ath10k_wmi_main_process_rx(ar, skb); 2331 } 2332 2333 /* WMI Initialization functions */ 2334 int ath10k_wmi_attach(struct ath10k *ar) 2335 { 2336 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { 2337 ar->wmi.cmd = &wmi_10x_cmd_map; 2338 ar->wmi.vdev_param = &wmi_10x_vdev_param_map; 2339 ar->wmi.pdev_param = &wmi_10x_pdev_param_map; 2340 } else { 2341 ar->wmi.cmd = &wmi_cmd_map; 2342 ar->wmi.vdev_param = &wmi_vdev_param_map; 2343 ar->wmi.pdev_param = &wmi_pdev_param_map; 2344 } 2345 2346 
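	/* From here on every command in this file is sent through the map
	 * selected above, e.g.:
	 *
	 *	ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
	 *
	 * so the same helpers work for both main and 10.x firmware, which
	 * number their WMI commands differently. */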
init_completion(&ar->wmi.service_ready); 2347 init_completion(&ar->wmi.unified_ready); 2348 init_waitqueue_head(&ar->wmi.tx_credits_wq); 2349 2350 return 0; 2351 } 2352 2353 void ath10k_wmi_detach(struct ath10k *ar) 2354 { 2355 int i; 2356 2357 /* free the host memory chunks requested by firmware */ 2358 for (i = 0; i < ar->wmi.num_mem_chunks; i++) { 2359 dma_free_coherent(ar->dev, 2360 ar->wmi.mem_chunks[i].len, 2361 ar->wmi.mem_chunks[i].vaddr, 2362 ar->wmi.mem_chunks[i].paddr); 2363 } 2364 2365 ar->wmi.num_mem_chunks = 0; 2366 } 2367 2368 int ath10k_wmi_connect_htc_service(struct ath10k *ar) 2369 { 2370 int status; 2371 struct ath10k_htc_svc_conn_req conn_req; 2372 struct ath10k_htc_svc_conn_resp conn_resp; 2373 2374 memset(&conn_req, 0, sizeof(conn_req)); 2375 memset(&conn_resp, 0, sizeof(conn_resp)); 2376 2377 /* these fields are the same for all service endpoints */ 2378 conn_req.ep_ops.ep_tx_complete = ath10k_wmi_htc_tx_complete; 2379 conn_req.ep_ops.ep_rx_complete = ath10k_wmi_process_rx; 2380 conn_req.ep_ops.ep_tx_credits = ath10k_wmi_op_ep_tx_credits; 2381 2382 /* connect to control service */ 2383 conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL; 2384 2385 status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp); 2386 if (status) { 2387 ath10k_warn("failed to connect to WMI CONTROL service status: %d\n", 2388 status); 2389 return status; 2390 } 2391 2392 ar->wmi.eid = conn_resp.eid; 2393 return 0; 2394 } 2395 2396 int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, 2397 u16 rd5g, u16 ctl2g, u16 ctl5g) 2398 { 2399 struct wmi_pdev_set_regdomain_cmd *cmd; 2400 struct sk_buff *skb; 2401 2402 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 2403 if (!skb) 2404 return -ENOMEM; 2405 2406 cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data; 2407 cmd->reg_domain = __cpu_to_le32(rd); 2408 cmd->reg_domain_2G = __cpu_to_le32(rd2g); 2409 cmd->reg_domain_5G = __cpu_to_le32(rd5g); 2410 cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g); 2411 cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g); 2412 2413 ath10k_dbg(ATH10K_DBG_WMI, 2414 "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n", 2415 rd, rd2g, rd5g, ctl2g, ctl5g); 2416 2417 return ath10k_wmi_cmd_send(ar, skb, 2418 ar->wmi.cmd->pdev_set_regdomain_cmdid); 2419 } 2420 2421 int ath10k_wmi_pdev_set_channel(struct ath10k *ar, 2422 const struct wmi_channel_arg *arg) 2423 { 2424 struct wmi_set_channel_cmd *cmd; 2425 struct sk_buff *skb; 2426 u32 ch_flags = 0; 2427 2428 if (arg->passive) 2429 return -EINVAL; 2430 2431 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 2432 if (!skb) 2433 return -ENOMEM; 2434 2435 if (arg->chan_radar) 2436 ch_flags |= WMI_CHAN_FLAG_DFS; 2437 2438 cmd = (struct wmi_set_channel_cmd *)skb->data; 2439 cmd->chan.mhz = __cpu_to_le32(arg->freq); 2440 cmd->chan.band_center_freq1 = __cpu_to_le32(arg->freq); 2441 cmd->chan.mode = arg->mode; 2442 cmd->chan.flags |= __cpu_to_le32(ch_flags); 2443 cmd->chan.min_power = arg->min_power; 2444 cmd->chan.max_power = arg->max_power; 2445 cmd->chan.reg_power = arg->max_reg_power; 2446 cmd->chan.reg_classid = arg->reg_class_id; 2447 cmd->chan.antenna_max = arg->max_antenna_gain; 2448 2449 ath10k_dbg(ATH10K_DBG_WMI, 2450 "wmi set channel mode %d freq %d\n", 2451 arg->mode, arg->freq); 2452 2453 return ath10k_wmi_cmd_send(ar, skb, 2454 ar->wmi.cmd->pdev_set_channel_cmdid); 2455 } 2456 2457 int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt) 2458 { 2459 struct wmi_pdev_suspend_cmd *cmd; 2460 struct sk_buff *skb; 2461 2462 skb = 
ath10k_wmi_alloc_skb(sizeof(*cmd)); 2463 if (!skb) 2464 return -ENOMEM; 2465 2466 cmd = (struct wmi_pdev_suspend_cmd *)skb->data; 2467 cmd->suspend_opt = __cpu_to_le32(suspend_opt); 2468 2469 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid); 2470 } 2471 2472 int ath10k_wmi_pdev_resume_target(struct ath10k *ar) 2473 { 2474 struct sk_buff *skb; 2475 2476 skb = ath10k_wmi_alloc_skb(0); 2477 if (skb == NULL) 2478 return -ENOMEM; 2479 2480 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid); 2481 } 2482 2483 int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value) 2484 { 2485 struct wmi_pdev_set_param_cmd *cmd; 2486 struct sk_buff *skb; 2487 2488 if (id == WMI_PDEV_PARAM_UNSUPPORTED) { 2489 ath10k_warn("pdev param %d not supported by firmware\n", id); 2490 return -EOPNOTSUPP; 2491 } 2492 2493 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 2494 if (!skb) 2495 return -ENOMEM; 2496 2497 cmd = (struct wmi_pdev_set_param_cmd *)skb->data; 2498 cmd->param_id = __cpu_to_le32(id); 2499 cmd->param_value = __cpu_to_le32(value); 2500 2501 ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n", 2502 id, value); 2503 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid); 2504 } 2505 2506 static int ath10k_wmi_main_cmd_init(struct ath10k *ar) 2507 { 2508 struct wmi_init_cmd *cmd; 2509 struct sk_buff *buf; 2510 struct wmi_resource_config config = {}; 2511 u32 len, val; 2512 int i; 2513 2514 config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS); 2515 config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS + TARGET_NUM_VDEVS); 2516 config.num_offload_peers = __cpu_to_le32(TARGET_NUM_OFFLOAD_PEERS); 2517 2518 config.num_offload_reorder_bufs = 2519 __cpu_to_le32(TARGET_NUM_OFFLOAD_REORDER_BUFS); 2520 2521 config.num_peer_keys = __cpu_to_le32(TARGET_NUM_PEER_KEYS); 2522 config.num_tids = __cpu_to_le32(TARGET_NUM_TIDS); 2523 config.ast_skid_limit = __cpu_to_le32(TARGET_AST_SKID_LIMIT); 2524 config.tx_chain_mask = __cpu_to_le32(TARGET_TX_CHAIN_MASK); 2525 config.rx_chain_mask = __cpu_to_le32(TARGET_RX_CHAIN_MASK); 2526 config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI); 2527 config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI); 2528 config.rx_timeout_pri_be = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI); 2529 config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_RX_TIMEOUT_HI_PRI); 2530 config.rx_decap_mode = __cpu_to_le32(TARGET_RX_DECAP_MODE); 2531 2532 config.scan_max_pending_reqs = 2533 __cpu_to_le32(TARGET_SCAN_MAX_PENDING_REQS); 2534 2535 config.bmiss_offload_max_vdev = 2536 __cpu_to_le32(TARGET_BMISS_OFFLOAD_MAX_VDEV); 2537 2538 config.roam_offload_max_vdev = 2539 __cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_VDEV); 2540 2541 config.roam_offload_max_ap_profiles = 2542 __cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES); 2543 2544 config.num_mcast_groups = __cpu_to_le32(TARGET_NUM_MCAST_GROUPS); 2545 config.num_mcast_table_elems = 2546 __cpu_to_le32(TARGET_NUM_MCAST_TABLE_ELEMS); 2547 2548 config.mcast2ucast_mode = __cpu_to_le32(TARGET_MCAST2UCAST_MODE); 2549 config.tx_dbg_log_size = __cpu_to_le32(TARGET_TX_DBG_LOG_SIZE); 2550 config.num_wds_entries = __cpu_to_le32(TARGET_NUM_WDS_ENTRIES); 2551 config.dma_burst_size = __cpu_to_le32(TARGET_DMA_BURST_SIZE); 2552 config.mac_aggr_delim = __cpu_to_le32(TARGET_MAC_AGGR_DELIM); 2553 2554 val = TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK; 2555 config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val); 2556 2557 config.vow_config = __cpu_to_le32(TARGET_VOW_CONFIG); 2558 2559 
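	/* All of the resource_config fields above and below are carried in
	 * the INIT command exactly as written here, i.e. already converted to
	 * little-endian with __cpu_to_le32(), since the struct is copied
	 * verbatim into the command buffer at the end of this function. */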
config.gtk_offload_max_vdev = 2560 __cpu_to_le32(TARGET_GTK_OFFLOAD_MAX_VDEV); 2561 2562 config.num_msdu_desc = __cpu_to_le32(TARGET_NUM_MSDU_DESC); 2563 config.max_frag_entries = __cpu_to_le32(TARGET_MAX_FRAG_ENTRIES); 2564 2565 len = sizeof(*cmd) + 2566 (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks); 2567 2568 buf = ath10k_wmi_alloc_skb(len); 2569 if (!buf) 2570 return -ENOMEM; 2571 2572 cmd = (struct wmi_init_cmd *)buf->data; 2573 2574 if (ar->wmi.num_mem_chunks == 0) { 2575 cmd->num_host_mem_chunks = 0; 2576 goto out; 2577 } 2578 2579 ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n", 2580 ar->wmi.num_mem_chunks); 2581 2582 cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks); 2583 2584 for (i = 0; i < ar->wmi.num_mem_chunks; i++) { 2585 cmd->host_mem_chunks[i].ptr = 2586 __cpu_to_le32(ar->wmi.mem_chunks[i].paddr); 2587 cmd->host_mem_chunks[i].size = 2588 __cpu_to_le32(ar->wmi.mem_chunks[i].len); 2589 cmd->host_mem_chunks[i].req_id = 2590 __cpu_to_le32(ar->wmi.mem_chunks[i].req_id); 2591 2592 ath10k_dbg(ATH10K_DBG_WMI, 2593 "wmi chunk %d len %d requested, addr 0x%llx\n", 2594 i, 2595 ar->wmi.mem_chunks[i].len, 2596 (unsigned long long)ar->wmi.mem_chunks[i].paddr); 2597 } 2598 out: 2599 memcpy(&cmd->resource_config, &config, sizeof(config)); 2600 2601 ath10k_dbg(ATH10K_DBG_WMI, "wmi init\n"); 2602 return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid); 2603 } 2604 2605 static int ath10k_wmi_10x_cmd_init(struct ath10k *ar) 2606 { 2607 struct wmi_init_cmd_10x *cmd; 2608 struct sk_buff *buf; 2609 struct wmi_resource_config_10x config = {}; 2610 u32 len, val; 2611 int i; 2612 2613 config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS); 2614 config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS); 2615 config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS); 2616 config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS); 2617 config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT); 2618 config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK); 2619 config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK); 2620 config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI); 2621 config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI); 2622 config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI); 2623 config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI); 2624 config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE); 2625 2626 config.scan_max_pending_reqs = 2627 __cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS); 2628 2629 config.bmiss_offload_max_vdev = 2630 __cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV); 2631 2632 config.roam_offload_max_vdev = 2633 __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV); 2634 2635 config.roam_offload_max_ap_profiles = 2636 __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES); 2637 2638 config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS); 2639 config.num_mcast_table_elems = 2640 __cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS); 2641 2642 config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE); 2643 config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE); 2644 config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES); 2645 config.dma_burst_size = __cpu_to_le32(TARGET_10X_DMA_BURST_SIZE); 2646 config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM); 2647 2648 val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK; 2649 config.rx_skip_defrag_timeout_dup_detection_check = 
__cpu_to_le32(val); 2650 2651 config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG); 2652 2653 config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC); 2654 config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES); 2655 2656 len = sizeof(*cmd) + 2657 (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks); 2658 2659 buf = ath10k_wmi_alloc_skb(len); 2660 if (!buf) 2661 return -ENOMEM; 2662 2663 cmd = (struct wmi_init_cmd_10x *)buf->data; 2664 2665 if (ar->wmi.num_mem_chunks == 0) { 2666 cmd->num_host_mem_chunks = 0; 2667 goto out; 2668 } 2669 2670 ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n", 2671 ar->wmi.num_mem_chunks); 2672 2673 cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks); 2674 2675 for (i = 0; i < ar->wmi.num_mem_chunks; i++) { 2676 cmd->host_mem_chunks[i].ptr = 2677 __cpu_to_le32(ar->wmi.mem_chunks[i].paddr); 2678 cmd->host_mem_chunks[i].size = 2679 __cpu_to_le32(ar->wmi.mem_chunks[i].len); 2680 cmd->host_mem_chunks[i].req_id = 2681 __cpu_to_le32(ar->wmi.mem_chunks[i].req_id); 2682 2683 ath10k_dbg(ATH10K_DBG_WMI, 2684 "wmi chunk %d len %d requested, addr 0x%llx\n", 2685 i, 2686 ar->wmi.mem_chunks[i].len, 2687 (unsigned long long)ar->wmi.mem_chunks[i].paddr); 2688 } 2689 out: 2690 memcpy(&cmd->resource_config, &config, sizeof(config)); 2691 2692 ath10k_dbg(ATH10K_DBG_WMI, "wmi init 10x\n"); 2693 return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid); 2694 } 2695 2696 int ath10k_wmi_cmd_init(struct ath10k *ar) 2697 { 2698 int ret; 2699 2700 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) 2701 ret = ath10k_wmi_10x_cmd_init(ar); 2702 else 2703 ret = ath10k_wmi_main_cmd_init(ar); 2704 2705 return ret; 2706 } 2707 2708 static int ath10k_wmi_start_scan_calc_len(struct ath10k *ar, 2709 const struct wmi_start_scan_arg *arg) 2710 { 2711 int len; 2712 2713 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) 2714 len = sizeof(struct wmi_start_scan_cmd_10x); 2715 else 2716 len = sizeof(struct wmi_start_scan_cmd); 2717 2718 if (arg->ie_len) { 2719 if (!arg->ie) 2720 return -EINVAL; 2721 if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN) 2722 return -EINVAL; 2723 2724 len += sizeof(struct wmi_ie_data); 2725 len += roundup(arg->ie_len, 4); 2726 } 2727 2728 if (arg->n_channels) { 2729 if (!arg->channels) 2730 return -EINVAL; 2731 if (arg->n_channels > ARRAY_SIZE(arg->channels)) 2732 return -EINVAL; 2733 2734 len += sizeof(struct wmi_chan_list); 2735 len += sizeof(__le32) * arg->n_channels; 2736 } 2737 2738 if (arg->n_ssids) { 2739 if (!arg->ssids) 2740 return -EINVAL; 2741 if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID) 2742 return -EINVAL; 2743 2744 len += sizeof(struct wmi_ssid_list); 2745 len += sizeof(struct wmi_ssid) * arg->n_ssids; 2746 } 2747 2748 if (arg->n_bssids) { 2749 if (!arg->bssids) 2750 return -EINVAL; 2751 if (arg->n_bssids > WLAN_SCAN_PARAMS_MAX_BSSID) 2752 return -EINVAL; 2753 2754 len += sizeof(struct wmi_bssid_list); 2755 len += sizeof(struct wmi_mac_addr) * arg->n_bssids; 2756 } 2757 2758 return len; 2759 } 2760 2761 int ath10k_wmi_start_scan(struct ath10k *ar, 2762 const struct wmi_start_scan_arg *arg) 2763 { 2764 struct wmi_start_scan_cmd *cmd; 2765 struct sk_buff *skb; 2766 struct wmi_ie_data *ie; 2767 struct wmi_chan_list *channels; 2768 struct wmi_ssid_list *ssids; 2769 struct wmi_bssid_list *bssids; 2770 u32 scan_id; 2771 u32 scan_req_id; 2772 int off; 2773 int len = 0; 2774 int i; 2775 2776 len = ath10k_wmi_start_scan_calc_len(ar, arg); 2777 if (len < 0) 2778 return len; /* len contains 
error code here */

	skb = ath10k_wmi_alloc_skb(len);
	if (!skb)
		return -ENOMEM;

	scan_id = WMI_HOST_SCAN_REQ_ID_PREFIX;
	scan_id |= arg->scan_id;

	scan_req_id = WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
	scan_req_id |= arg->scan_req_id;

	cmd = (struct wmi_start_scan_cmd *)skb->data;
	cmd->scan_id = __cpu_to_le32(scan_id);
	cmd->scan_req_id = __cpu_to_le32(scan_req_id);
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->scan_priority = __cpu_to_le32(arg->scan_priority);
	cmd->notify_scan_events = __cpu_to_le32(arg->notify_scan_events);
	cmd->dwell_time_active = __cpu_to_le32(arg->dwell_time_active);
	cmd->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive);
	cmd->min_rest_time = __cpu_to_le32(arg->min_rest_time);
	cmd->max_rest_time = __cpu_to_le32(arg->max_rest_time);
	cmd->repeat_probe_time = __cpu_to_le32(arg->repeat_probe_time);
	cmd->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time);
	cmd->idle_time = __cpu_to_le32(arg->idle_time);
	cmd->max_scan_time = __cpu_to_le32(arg->max_scan_time);
	cmd->probe_delay = __cpu_to_le32(arg->probe_delay);
	cmd->scan_ctrl_flags = __cpu_to_le32(arg->scan_ctrl_flags);

	/* TLV list starts after fields included in the struct */
	/* There's just one field that differs between the two start_scan
	 * structures - burst_duration, which we are not using anyway, so
	 * there is no point in splitting the code here; just shift the
	 * buffer offset to match the firmware in use. */
	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
		off = sizeof(struct wmi_start_scan_cmd_10x);
	else
		off = sizeof(struct wmi_start_scan_cmd);

	if (arg->n_channels) {
		channels = (void *)skb->data + off;
		channels->tag = __cpu_to_le32(WMI_CHAN_LIST_TAG);
		channels->num_chan = __cpu_to_le32(arg->n_channels);

		for (i = 0; i < arg->n_channels; i++)
			channels->channel_list[i] =
				__cpu_to_le32(arg->channels[i]);

		off += sizeof(*channels);
		off += sizeof(__le32) * arg->n_channels;
	}

	if (arg->n_ssids) {
		ssids = (void *)skb->data + off;
		ssids->tag = __cpu_to_le32(WMI_SSID_LIST_TAG);
		ssids->num_ssids = __cpu_to_le32(arg->n_ssids);

		for (i = 0; i < arg->n_ssids; i++) {
			ssids->ssids[i].ssid_len =
				__cpu_to_le32(arg->ssids[i].len);
			memcpy(&ssids->ssids[i].ssid,
			       arg->ssids[i].ssid,
			       arg->ssids[i].len);
		}

		off += sizeof(*ssids);
		off += sizeof(struct wmi_ssid) * arg->n_ssids;
	}

	if (arg->n_bssids) {
		bssids = (void *)skb->data + off;
		bssids->tag = __cpu_to_le32(WMI_BSSID_LIST_TAG);
		bssids->num_bssid = __cpu_to_le32(arg->n_bssids);

		for (i = 0; i < arg->n_bssids; i++)
			memcpy(&bssids->bssid_list[i],
			       arg->bssids[i].bssid,
			       ETH_ALEN);

		off += sizeof(*bssids);
		off += sizeof(struct wmi_mac_addr) * arg->n_bssids;
	}

	if (arg->ie_len) {
		ie = (void *)skb->data + off;
		ie->tag = __cpu_to_le32(WMI_IE_TAG);
		ie->ie_len = __cpu_to_le32(arg->ie_len);
		memcpy(ie->ie_data, arg->ie, arg->ie_len);

		off += sizeof(*ie);
		off += roundup(arg->ie_len, 4);
	}

	if (off != skb->len) {
		dev_kfree_skb(skb);
		return -EINVAL;
	}

	ath10k_dbg(ATH10K_DBG_WMI, "wmi start scan\n");
	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
}

void ath10k_wmi_start_scan_init(struct ath10k *ar,
				struct
wmi_start_scan_arg *arg) 2882 { 2883 /* setup commonly used values */ 2884 arg->scan_req_id = 1; 2885 arg->scan_priority = WMI_SCAN_PRIORITY_LOW; 2886 arg->dwell_time_active = 50; 2887 arg->dwell_time_passive = 150; 2888 arg->min_rest_time = 50; 2889 arg->max_rest_time = 500; 2890 arg->repeat_probe_time = 0; 2891 arg->probe_spacing_time = 0; 2892 arg->idle_time = 0; 2893 arg->max_scan_time = 20000; 2894 arg->probe_delay = 5; 2895 arg->notify_scan_events = WMI_SCAN_EVENT_STARTED 2896 | WMI_SCAN_EVENT_COMPLETED 2897 | WMI_SCAN_EVENT_BSS_CHANNEL 2898 | WMI_SCAN_EVENT_FOREIGN_CHANNEL 2899 | WMI_SCAN_EVENT_DEQUEUED; 2900 arg->scan_ctrl_flags |= WMI_SCAN_ADD_OFDM_RATES; 2901 arg->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT; 2902 arg->n_bssids = 1; 2903 arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF"; 2904 } 2905 2906 int ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg) 2907 { 2908 struct wmi_stop_scan_cmd *cmd; 2909 struct sk_buff *skb; 2910 u32 scan_id; 2911 u32 req_id; 2912 2913 if (arg->req_id > 0xFFF) 2914 return -EINVAL; 2915 if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF) 2916 return -EINVAL; 2917 2918 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 2919 if (!skb) 2920 return -ENOMEM; 2921 2922 scan_id = arg->u.scan_id; 2923 scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX; 2924 2925 req_id = arg->req_id; 2926 req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX; 2927 2928 cmd = (struct wmi_stop_scan_cmd *)skb->data; 2929 cmd->req_type = __cpu_to_le32(arg->req_type); 2930 cmd->vdev_id = __cpu_to_le32(arg->u.vdev_id); 2931 cmd->scan_id = __cpu_to_le32(scan_id); 2932 cmd->scan_req_id = __cpu_to_le32(req_id); 2933 2934 ath10k_dbg(ATH10K_DBG_WMI, 2935 "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n", 2936 arg->req_id, arg->req_type, arg->u.scan_id); 2937 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid); 2938 } 2939 2940 int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id, 2941 enum wmi_vdev_type type, 2942 enum wmi_vdev_subtype subtype, 2943 const u8 macaddr[ETH_ALEN]) 2944 { 2945 struct wmi_vdev_create_cmd *cmd; 2946 struct sk_buff *skb; 2947 2948 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 2949 if (!skb) 2950 return -ENOMEM; 2951 2952 cmd = (struct wmi_vdev_create_cmd *)skb->data; 2953 cmd->vdev_id = __cpu_to_le32(vdev_id); 2954 cmd->vdev_type = __cpu_to_le32(type); 2955 cmd->vdev_subtype = __cpu_to_le32(subtype); 2956 memcpy(cmd->vdev_macaddr.addr, macaddr, ETH_ALEN); 2957 2958 ath10k_dbg(ATH10K_DBG_WMI, 2959 "WMI vdev create: id %d type %d subtype %d macaddr %pM\n", 2960 vdev_id, type, subtype, macaddr); 2961 2962 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid); 2963 } 2964 2965 int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id) 2966 { 2967 struct wmi_vdev_delete_cmd *cmd; 2968 struct sk_buff *skb; 2969 2970 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 2971 if (!skb) 2972 return -ENOMEM; 2973 2974 cmd = (struct wmi_vdev_delete_cmd *)skb->data; 2975 cmd->vdev_id = __cpu_to_le32(vdev_id); 2976 2977 ath10k_dbg(ATH10K_DBG_WMI, 2978 "WMI vdev delete id %d\n", vdev_id); 2979 2980 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid); 2981 } 2982 2983 static int ath10k_wmi_vdev_start_restart(struct ath10k *ar, 2984 const struct wmi_vdev_start_request_arg *arg, 2985 u32 cmd_id) 2986 { 2987 struct wmi_vdev_start_request_cmd *cmd; 2988 struct sk_buff *skb; 2989 const char *cmdname; 2990 u32 flags = 0; 2991 u32 ch_flags = 0; 2992 2993 if (cmd_id != ar->wmi.cmd->vdev_start_request_cmdid && 2994 cmd_id != 
ar->wmi.cmd->vdev_restart_request_cmdid) 2995 return -EINVAL; 2996 if (WARN_ON(arg->ssid && arg->ssid_len == 0)) 2997 return -EINVAL; 2998 if (WARN_ON(arg->hidden_ssid && !arg->ssid)) 2999 return -EINVAL; 3000 if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid))) 3001 return -EINVAL; 3002 3003 if (cmd_id == ar->wmi.cmd->vdev_start_request_cmdid) 3004 cmdname = "start"; 3005 else if (cmd_id == ar->wmi.cmd->vdev_restart_request_cmdid) 3006 cmdname = "restart"; 3007 else 3008 return -EINVAL; /* should not happen, we already check cmd_id */ 3009 3010 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3011 if (!skb) 3012 return -ENOMEM; 3013 3014 if (arg->hidden_ssid) 3015 flags |= WMI_VDEV_START_HIDDEN_SSID; 3016 if (arg->pmf_enabled) 3017 flags |= WMI_VDEV_START_PMF_ENABLED; 3018 if (arg->channel.chan_radar) 3019 ch_flags |= WMI_CHAN_FLAG_DFS; 3020 3021 cmd = (struct wmi_vdev_start_request_cmd *)skb->data; 3022 cmd->vdev_id = __cpu_to_le32(arg->vdev_id); 3023 cmd->disable_hw_ack = __cpu_to_le32(arg->disable_hw_ack); 3024 cmd->beacon_interval = __cpu_to_le32(arg->bcn_intval); 3025 cmd->dtim_period = __cpu_to_le32(arg->dtim_period); 3026 cmd->flags = __cpu_to_le32(flags); 3027 cmd->bcn_tx_rate = __cpu_to_le32(arg->bcn_tx_rate); 3028 cmd->bcn_tx_power = __cpu_to_le32(arg->bcn_tx_power); 3029 3030 if (arg->ssid) { 3031 cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len); 3032 memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len); 3033 } 3034 3035 cmd->chan.mhz = __cpu_to_le32(arg->channel.freq); 3036 3037 cmd->chan.band_center_freq1 = 3038 __cpu_to_le32(arg->channel.band_center_freq1); 3039 3040 cmd->chan.mode = arg->channel.mode; 3041 cmd->chan.flags |= __cpu_to_le32(ch_flags); 3042 cmd->chan.min_power = arg->channel.min_power; 3043 cmd->chan.max_power = arg->channel.max_power; 3044 cmd->chan.reg_power = arg->channel.max_reg_power; 3045 cmd->chan.reg_classid = arg->channel.reg_class_id; 3046 cmd->chan.antenna_max = arg->channel.max_antenna_gain; 3047 3048 ath10k_dbg(ATH10K_DBG_WMI, 3049 "wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, " 3050 "ch_flags: 0x%0X, max_power: %d\n", cmdname, arg->vdev_id, 3051 flags, arg->channel.freq, arg->channel.mode, 3052 cmd->chan.flags, arg->channel.max_power); 3053 3054 return ath10k_wmi_cmd_send(ar, skb, cmd_id); 3055 } 3056 3057 int ath10k_wmi_vdev_start(struct ath10k *ar, 3058 const struct wmi_vdev_start_request_arg *arg) 3059 { 3060 u32 cmd_id = ar->wmi.cmd->vdev_start_request_cmdid; 3061 3062 return ath10k_wmi_vdev_start_restart(ar, arg, cmd_id); 3063 } 3064 3065 int ath10k_wmi_vdev_restart(struct ath10k *ar, 3066 const struct wmi_vdev_start_request_arg *arg) 3067 { 3068 u32 cmd_id = ar->wmi.cmd->vdev_restart_request_cmdid; 3069 3070 return ath10k_wmi_vdev_start_restart(ar, arg, cmd_id); 3071 } 3072 3073 int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id) 3074 { 3075 struct wmi_vdev_stop_cmd *cmd; 3076 struct sk_buff *skb; 3077 3078 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3079 if (!skb) 3080 return -ENOMEM; 3081 3082 cmd = (struct wmi_vdev_stop_cmd *)skb->data; 3083 cmd->vdev_id = __cpu_to_le32(vdev_id); 3084 3085 ath10k_dbg(ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id); 3086 3087 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid); 3088 } 3089 3090 int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid) 3091 { 3092 struct wmi_vdev_up_cmd *cmd; 3093 struct sk_buff *skb; 3094 3095 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3096 if (!skb) 3097 return -ENOMEM; 3098 3099 cmd = (struct wmi_vdev_up_cmd *)skb->data; 3100 
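	/* Like most simple WMI commands in this file, the payload is just the
	 * command struct itself: allocate an skb of sizeof(*cmd), fill in the
	 * little-endian fields and hand it to ath10k_wmi_cmd_send() with the
	 * cmdid looked up from ar->wmi.cmd. */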
cmd->vdev_id = __cpu_to_le32(vdev_id); 3101 cmd->vdev_assoc_id = __cpu_to_le32(aid); 3102 memcpy(&cmd->vdev_bssid.addr, bssid, ETH_ALEN); 3103 3104 ath10k_dbg(ATH10K_DBG_WMI, 3105 "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n", 3106 vdev_id, aid, bssid); 3107 3108 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid); 3109 } 3110 3111 int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id) 3112 { 3113 struct wmi_vdev_down_cmd *cmd; 3114 struct sk_buff *skb; 3115 3116 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3117 if (!skb) 3118 return -ENOMEM; 3119 3120 cmd = (struct wmi_vdev_down_cmd *)skb->data; 3121 cmd->vdev_id = __cpu_to_le32(vdev_id); 3122 3123 ath10k_dbg(ATH10K_DBG_WMI, 3124 "wmi mgmt vdev down id 0x%x\n", vdev_id); 3125 3126 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid); 3127 } 3128 3129 int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, 3130 u32 param_id, u32 param_value) 3131 { 3132 struct wmi_vdev_set_param_cmd *cmd; 3133 struct sk_buff *skb; 3134 3135 if (param_id == WMI_VDEV_PARAM_UNSUPPORTED) { 3136 ath10k_dbg(ATH10K_DBG_WMI, 3137 "vdev param %d not supported by firmware\n", 3138 param_id); 3139 return -EOPNOTSUPP; 3140 } 3141 3142 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3143 if (!skb) 3144 return -ENOMEM; 3145 3146 cmd = (struct wmi_vdev_set_param_cmd *)skb->data; 3147 cmd->vdev_id = __cpu_to_le32(vdev_id); 3148 cmd->param_id = __cpu_to_le32(param_id); 3149 cmd->param_value = __cpu_to_le32(param_value); 3150 3151 ath10k_dbg(ATH10K_DBG_WMI, 3152 "wmi vdev id 0x%x set param %d value %d\n", 3153 vdev_id, param_id, param_value); 3154 3155 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid); 3156 } 3157 3158 int ath10k_wmi_vdev_install_key(struct ath10k *ar, 3159 const struct wmi_vdev_install_key_arg *arg) 3160 { 3161 struct wmi_vdev_install_key_cmd *cmd; 3162 struct sk_buff *skb; 3163 3164 if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL) 3165 return -EINVAL; 3166 if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL) 3167 return -EINVAL; 3168 3169 skb = ath10k_wmi_alloc_skb(sizeof(*cmd) + arg->key_len); 3170 if (!skb) 3171 return -ENOMEM; 3172 3173 cmd = (struct wmi_vdev_install_key_cmd *)skb->data; 3174 cmd->vdev_id = __cpu_to_le32(arg->vdev_id); 3175 cmd->key_idx = __cpu_to_le32(arg->key_idx); 3176 cmd->key_flags = __cpu_to_le32(arg->key_flags); 3177 cmd->key_cipher = __cpu_to_le32(arg->key_cipher); 3178 cmd->key_len = __cpu_to_le32(arg->key_len); 3179 cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len); 3180 cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len); 3181 3182 if (arg->macaddr) 3183 memcpy(cmd->peer_macaddr.addr, arg->macaddr, ETH_ALEN); 3184 if (arg->key_data) 3185 memcpy(cmd->key_data, arg->key_data, arg->key_len); 3186 3187 ath10k_dbg(ATH10K_DBG_WMI, 3188 "wmi vdev install key idx %d cipher %d len %d\n", 3189 arg->key_idx, arg->key_cipher, arg->key_len); 3190 return ath10k_wmi_cmd_send(ar, skb, 3191 ar->wmi.cmd->vdev_install_key_cmdid); 3192 } 3193 3194 int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id, 3195 const u8 peer_addr[ETH_ALEN]) 3196 { 3197 struct wmi_peer_create_cmd *cmd; 3198 struct sk_buff *skb; 3199 3200 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3201 if (!skb) 3202 return -ENOMEM; 3203 3204 cmd = (struct wmi_peer_create_cmd *)skb->data; 3205 cmd->vdev_id = __cpu_to_le32(vdev_id); 3206 memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN); 3207 3208 ath10k_dbg(ATH10K_DBG_WMI, 3209 "wmi peer create vdev_id %d peer_addr %pM\n", 3210 vdev_id, 
peer_addr); 3211 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid); 3212 } 3213 3214 int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id, 3215 const u8 peer_addr[ETH_ALEN]) 3216 { 3217 struct wmi_peer_delete_cmd *cmd; 3218 struct sk_buff *skb; 3219 3220 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3221 if (!skb) 3222 return -ENOMEM; 3223 3224 cmd = (struct wmi_peer_delete_cmd *)skb->data; 3225 cmd->vdev_id = __cpu_to_le32(vdev_id); 3226 memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN); 3227 3228 ath10k_dbg(ATH10K_DBG_WMI, 3229 "wmi peer delete vdev_id %d peer_addr %pM\n", 3230 vdev_id, peer_addr); 3231 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid); 3232 } 3233 3234 int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id, 3235 const u8 peer_addr[ETH_ALEN], u32 tid_bitmap) 3236 { 3237 struct wmi_peer_flush_tids_cmd *cmd; 3238 struct sk_buff *skb; 3239 3240 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3241 if (!skb) 3242 return -ENOMEM; 3243 3244 cmd = (struct wmi_peer_flush_tids_cmd *)skb->data; 3245 cmd->vdev_id = __cpu_to_le32(vdev_id); 3246 cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap); 3247 memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN); 3248 3249 ath10k_dbg(ATH10K_DBG_WMI, 3250 "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n", 3251 vdev_id, peer_addr, tid_bitmap); 3252 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid); 3253 } 3254 3255 int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, 3256 const u8 *peer_addr, enum wmi_peer_param param_id, 3257 u32 param_value) 3258 { 3259 struct wmi_peer_set_param_cmd *cmd; 3260 struct sk_buff *skb; 3261 3262 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3263 if (!skb) 3264 return -ENOMEM; 3265 3266 cmd = (struct wmi_peer_set_param_cmd *)skb->data; 3267 cmd->vdev_id = __cpu_to_le32(vdev_id); 3268 cmd->param_id = __cpu_to_le32(param_id); 3269 cmd->param_value = __cpu_to_le32(param_value); 3270 memcpy(&cmd->peer_macaddr.addr, peer_addr, ETH_ALEN); 3271 3272 ath10k_dbg(ATH10K_DBG_WMI, 3273 "wmi vdev %d peer 0x%pM set param %d value %d\n", 3274 vdev_id, peer_addr, param_id, param_value); 3275 3276 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid); 3277 } 3278 3279 int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id, 3280 enum wmi_sta_ps_mode psmode) 3281 { 3282 struct wmi_sta_powersave_mode_cmd *cmd; 3283 struct sk_buff *skb; 3284 3285 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3286 if (!skb) 3287 return -ENOMEM; 3288 3289 cmd = (struct wmi_sta_powersave_mode_cmd *)skb->data; 3290 cmd->vdev_id = __cpu_to_le32(vdev_id); 3291 cmd->sta_ps_mode = __cpu_to_le32(psmode); 3292 3293 ath10k_dbg(ATH10K_DBG_WMI, 3294 "wmi set powersave id 0x%x mode %d\n", 3295 vdev_id, psmode); 3296 3297 return ath10k_wmi_cmd_send(ar, skb, 3298 ar->wmi.cmd->sta_powersave_mode_cmdid); 3299 } 3300 3301 int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id, 3302 enum wmi_sta_powersave_param param_id, 3303 u32 value) 3304 { 3305 struct wmi_sta_powersave_param_cmd *cmd; 3306 struct sk_buff *skb; 3307 3308 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3309 if (!skb) 3310 return -ENOMEM; 3311 3312 cmd = (struct wmi_sta_powersave_param_cmd *)skb->data; 3313 cmd->vdev_id = __cpu_to_le32(vdev_id); 3314 cmd->param_id = __cpu_to_le32(param_id); 3315 cmd->param_value = __cpu_to_le32(value); 3316 3317 ath10k_dbg(ATH10K_DBG_WMI, 3318 "wmi sta ps param vdev_id 0x%x param %d value %d\n", 3319 vdev_id, param_id, value); 3320 return ath10k_wmi_cmd_send(ar, skb, 3321 
				   ar->wmi.cmd->sta_powersave_param_cmdid);
}

int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
			       enum wmi_ap_ps_peer_param param_id, u32 value)
{
	struct wmi_ap_ps_peer_cmd *cmd;
	struct sk_buff *skb;

	if (!mac)
		return -EINVAL;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->param_id = __cpu_to_le32(param_id);
	cmd->param_value = __cpu_to_le32(value);
	memcpy(&cmd->peer_macaddr, mac, ETH_ALEN);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n",
		   vdev_id, param_id, value, mac);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->ap_ps_peer_param_cmdid);
}

int ath10k_wmi_scan_chan_list(struct ath10k *ar,
			      const struct wmi_scan_chan_list_arg *arg)
{
	struct wmi_scan_chan_list_cmd *cmd;
	struct sk_buff *skb;
	struct wmi_channel_arg *ch;
	struct wmi_channel *ci;
	int len;
	int i;

	len = sizeof(*cmd) + arg->n_channels * sizeof(struct wmi_channel);

	skb = ath10k_wmi_alloc_skb(len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
	cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);

	for (i = 0; i < arg->n_channels; i++) {
		u32 flags = 0;

		ch = &arg->channels[i];
		ci = &cmd->chan_info[i];

		if (ch->passive)
			flags |= WMI_CHAN_FLAG_PASSIVE;
		if (ch->allow_ibss)
			flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED;
		if (ch->allow_ht)
			flags |= WMI_CHAN_FLAG_ALLOW_HT;
		if (ch->allow_vht)
			flags |= WMI_CHAN_FLAG_ALLOW_VHT;
		if (ch->ht40plus)
			flags |= WMI_CHAN_FLAG_HT40_PLUS;
		if (ch->chan_radar)
			flags |= WMI_CHAN_FLAG_DFS;

		ci->mhz = __cpu_to_le32(ch->freq);
		ci->band_center_freq1 = __cpu_to_le32(ch->freq);
		ci->band_center_freq2 = 0;
		ci->min_power = ch->min_power;
		ci->max_power = ch->max_power;
		ci->reg_power = ch->max_reg_power;
		ci->antenna_max = ch->max_antenna_gain;

		/* mode & flags share storage */
		ci->mode = ch->mode;
		ci->flags |= __cpu_to_le32(flags);
	}

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
}

int ath10k_wmi_peer_assoc(struct ath10k *ar,
			  const struct wmi_peer_assoc_complete_arg *arg)
{
	struct wmi_peer_assoc_complete_cmd *cmd;
	struct sk_buff *skb;

	if (arg->peer_mpdu_density > 16)
		return -EINVAL;
	if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
		return -EINVAL;
	if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
		return -EINVAL;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_assoc_complete_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->peer_new_assoc = __cpu_to_le32(arg->peer_reassoc ?
int ath10k_wmi_peer_assoc(struct ath10k *ar,
			  const struct wmi_peer_assoc_complete_arg *arg)
{
	struct wmi_peer_assoc_complete_cmd *cmd;
	struct sk_buff *skb;

	if (arg->peer_mpdu_density > 16)
		return -EINVAL;
	if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
		return -EINVAL;
	if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
		return -EINVAL;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_assoc_complete_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->peer_new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
	cmd->peer_associd = __cpu_to_le32(arg->peer_aid);
	cmd->peer_flags = __cpu_to_le32(arg->peer_flags);
	cmd->peer_caps = __cpu_to_le32(arg->peer_caps);
	cmd->peer_listen_intval = __cpu_to_le32(arg->peer_listen_intval);
	cmd->peer_ht_caps = __cpu_to_le32(arg->peer_ht_caps);
	cmd->peer_max_mpdu = __cpu_to_le32(arg->peer_max_mpdu);
	cmd->peer_mpdu_density = __cpu_to_le32(arg->peer_mpdu_density);
	cmd->peer_rate_caps = __cpu_to_le32(arg->peer_rate_caps);
	cmd->peer_nss = __cpu_to_le32(arg->peer_num_spatial_streams);
	cmd->peer_vht_caps = __cpu_to_le32(arg->peer_vht_caps);
	cmd->peer_phymode = __cpu_to_le32(arg->peer_phymode);

	memcpy(cmd->peer_macaddr.addr, arg->addr, ETH_ALEN);

	cmd->peer_legacy_rates.num_rates =
		__cpu_to_le32(arg->peer_legacy_rates.num_rates);
	memcpy(cmd->peer_legacy_rates.rates, arg->peer_legacy_rates.rates,
	       arg->peer_legacy_rates.num_rates);

	cmd->peer_ht_rates.num_rates =
		__cpu_to_le32(arg->peer_ht_rates.num_rates);
	memcpy(cmd->peer_ht_rates.rates, arg->peer_ht_rates.rates,
	       arg->peer_ht_rates.num_rates);

	cmd->peer_vht_rates.rx_max_rate =
		__cpu_to_le32(arg->peer_vht_rates.rx_max_rate);
	cmd->peer_vht_rates.rx_mcs_set =
		__cpu_to_le32(arg->peer_vht_rates.rx_mcs_set);
	cmd->peer_vht_rates.tx_max_rate =
		__cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
	cmd->peer_vht_rates.tx_mcs_set =
		__cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi peer assoc vdev %d addr %pM\n",
		   arg->vdev_id, arg->addr);
	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
}

/* This function assumes the beacon is already DMA mapped */
int ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif)
{
	struct wmi_bcn_tx_ref_cmd *cmd;
	struct sk_buff *skb;
	struct sk_buff *beacon = arvif->beacon;
	struct ath10k *ar = arvif->ar;
	struct ieee80211_hdr *hdr;
	int ret;
	u16 fc;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	hdr = (struct ieee80211_hdr *)beacon->data;
	fc = le16_to_cpu(hdr->frame_control);

	cmd = (struct wmi_bcn_tx_ref_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(arvif->vdev_id);
	cmd->data_len = __cpu_to_le32(beacon->len);
	cmd->data_ptr = __cpu_to_le32(ATH10K_SKB_CB(beacon)->paddr);
	cmd->msdu_id = 0;
	cmd->frame_control = __cpu_to_le32(fc);
	cmd->flags = 0;

	if (ATH10K_SKB_CB(beacon)->bcn.dtim_zero)
		cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);

	if (ATH10K_SKB_CB(beacon)->bcn.deliver_cab)
		cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);

	ret = ath10k_wmi_cmd_send_nowait(ar, skb,
					 ar->wmi.cmd->pdev_send_bcn_cmdid);

	if (ret)
		dev_kfree_skb(skb);

	return ret;
}

static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params,
					  const struct wmi_wmm_params_arg *arg)
{
	params->cwmin = __cpu_to_le32(arg->cwmin);
	params->cwmax = __cpu_to_le32(arg->cwmax);
	params->aifs = __cpu_to_le32(arg->aifs);
	params->txop = __cpu_to_le32(arg->txop);
	params->acm = __cpu_to_le32(arg->acm);
	params->no_ack = __cpu_to_le32(arg->no_ack);
}
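
/*
 * Illustrative sketch, not part of the original file: the helper above
 * converts one access category; ath10k_wmi_pdev_set_wmm_params() below runs
 * it for all four ACs. A caller builds the argument roughly like this (the
 * numbers are placeholders, not recommended defaults):
 *
 *	struct wmi_pdev_set_wmm_params_arg arg = {
 *		.ac_be = { .cwmin = 15, .cwmax = 1023, .aifs = 3,
 *			   .txop = 0, .acm = 0, .no_ack = 0 },
 *		.ac_bk = { ... },
 *		.ac_vi = { ... },
 *		.ac_vo = { ... },
 *	};
 *
 *	ret = ath10k_wmi_pdev_set_wmm_params(ar, &arg);
 */
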
int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
				   const struct wmi_pdev_set_wmm_params_arg *arg)
{
	struct wmi_pdev_set_wmm_params *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_wmm_params *)skb->data;
	ath10k_wmi_pdev_set_wmm_param(&cmd->ac_be, &arg->ac_be);
	ath10k_wmi_pdev_set_wmm_param(&cmd->ac_bk, &arg->ac_bk);
	ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vi, &arg->ac_vi);
	ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);

	ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set wmm params\n");
	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_wmm_params_cmdid);
}

int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
{
	struct wmi_request_stats_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_request_stats_cmd *)skb->data;
	cmd->stats_id = __cpu_to_le32(stats_id);

	ath10k_dbg(ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id);
	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
}

int ath10k_wmi_force_fw_hang(struct ath10k *ar,
			     enum wmi_force_fw_hang_type type, u32 delay_ms)
{
	struct wmi_force_fw_hang_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
	cmd->type = __cpu_to_le32(type);
	cmd->delay_ms = __cpu_to_le32(delay_ms);

	ath10k_dbg(ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n",
		   type, delay_ms);
	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
}

int ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable)
{
	struct wmi_dbglog_cfg_cmd *cmd;
	struct sk_buff *skb;
	u32 cfg;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_dbglog_cfg_cmd *)skb->data;

	if (module_enable) {
		cfg = SM(ATH10K_DBGLOG_LEVEL_VERBOSE,
			 ATH10K_DBGLOG_CFG_LOG_LVL);
	} else {
		/* set back defaults, all modules with WARN level */
		cfg = SM(ATH10K_DBGLOG_LEVEL_WARN,
			 ATH10K_DBGLOG_CFG_LOG_LVL);
		module_enable = ~0;
	}

	cmd->module_enable = __cpu_to_le32(module_enable);
	cmd->module_valid = __cpu_to_le32(~0);
	cmd->config_enable = __cpu_to_le32(cfg);
	cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi dbglog cfg modules %08x %08x config %08x %08x\n",
		   __le32_to_cpu(cmd->module_enable),
		   __le32_to_cpu(cmd->module_valid),
		   __le32_to_cpu(cmd->config_enable),
		   __le32_to_cpu(cmd->config_valid));

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
}
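
/*
 * Illustrative usage, not part of the original file: ath10k_wmi_dbglog_cfg()
 * above takes a firmware module bitmap. A non-zero bitmap enables VERBOSE
 * logging for the selected modules; passing 0 restores the WARN-level default
 * for every module.
 *
 *	ret = ath10k_wmi_dbglog_cfg(ar, ~0u);	(verbose logging, all modules)
 *	ret = ath10k_wmi_dbglog_cfg(ar, 0);	(back to WARN-level defaults)
 */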