/*
 * Copyright (c) 2010-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "htc.h"

static const char *wmi_cmd_to_name(enum wmi_cmd_id wmi_cmd)
{
	switch (wmi_cmd) {
	case WMI_ECHO_CMDID:
		return "WMI_ECHO_CMDID";
	case WMI_ACCESS_MEMORY_CMDID:
		return "WMI_ACCESS_MEMORY_CMDID";
	case WMI_GET_FW_VERSION:
		return "WMI_GET_FW_VERSION";
	case WMI_DISABLE_INTR_CMDID:
		return "WMI_DISABLE_INTR_CMDID";
	case WMI_ENABLE_INTR_CMDID:
		return "WMI_ENABLE_INTR_CMDID";
	case WMI_ATH_INIT_CMDID:
		return "WMI_ATH_INIT_CMDID";
	case WMI_ABORT_TXQ_CMDID:
		return "WMI_ABORT_TXQ_CMDID";
	case WMI_STOP_TX_DMA_CMDID:
		return "WMI_STOP_TX_DMA_CMDID";
	case WMI_ABORT_TX_DMA_CMDID:
		return "WMI_ABORT_TX_DMA_CMDID";
	case WMI_DRAIN_TXQ_CMDID:
		return "WMI_DRAIN_TXQ_CMDID";
	case WMI_DRAIN_TXQ_ALL_CMDID:
		return "WMI_DRAIN_TXQ_ALL_CMDID";
	case WMI_START_RECV_CMDID:
		return "WMI_START_RECV_CMDID";
	case WMI_STOP_RECV_CMDID:
		return "WMI_STOP_RECV_CMDID";
	case WMI_FLUSH_RECV_CMDID:
		return "WMI_FLUSH_RECV_CMDID";
	case WMI_SET_MODE_CMDID:
		return "WMI_SET_MODE_CMDID";
	case WMI_NODE_CREATE_CMDID:
		return "WMI_NODE_CREATE_CMDID";
	case WMI_NODE_REMOVE_CMDID:
		return "WMI_NODE_REMOVE_CMDID";
	case WMI_VAP_REMOVE_CMDID:
		return "WMI_VAP_REMOVE_CMDID";
	case WMI_VAP_CREATE_CMDID:
		return "WMI_VAP_CREATE_CMDID";
	case WMI_REG_READ_CMDID:
		return "WMI_REG_READ_CMDID";
	case WMI_REG_WRITE_CMDID:
		return "WMI_REG_WRITE_CMDID";
	case WMI_REG_RMW_CMDID:
		return "WMI_REG_RMW_CMDID";
	case WMI_RC_STATE_CHANGE_CMDID:
		return "WMI_RC_STATE_CHANGE_CMDID";
	case WMI_RC_RATE_UPDATE_CMDID:
		return "WMI_RC_RATE_UPDATE_CMDID";
	case WMI_TARGET_IC_UPDATE_CMDID:
		return "WMI_TARGET_IC_UPDATE_CMDID";
	case WMI_TX_AGGR_ENABLE_CMDID:
		return "WMI_TX_AGGR_ENABLE_CMDID";
	case WMI_TGT_DETACH_CMDID:
		return "WMI_TGT_DETACH_CMDID";
	case WMI_NODE_UPDATE_CMDID:
		return "WMI_NODE_UPDATE_CMDID";
	case WMI_INT_STATS_CMDID:
		return "WMI_INT_STATS_CMDID";
	case WMI_TX_STATS_CMDID:
		return "WMI_TX_STATS_CMDID";
	case WMI_RX_STATS_CMDID:
		return "WMI_RX_STATS_CMDID";
	case WMI_BITRATE_MASK_CMDID:
		return "WMI_BITRATE_MASK_CMDID";
	}

	return "Bogus";
}

struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv)
{
	struct wmi *wmi;

	wmi = kzalloc(sizeof(struct wmi), GFP_KERNEL);
	if (!wmi)
		return NULL;

	wmi->drv_priv = priv;
	wmi->stopped = false;
	skb_queue_head_init(&wmi->wmi_event_queue);
	spin_lock_init(&wmi->wmi_lock);
	spin_lock_init(&wmi->event_lock);
	mutex_init(&wmi->op_mutex);
	mutex_init(&wmi->multi_write_mutex);
	mutex_init(&wmi->multi_rmw_mutex);
	init_completion(&wmi->cmd_wait);
	INIT_LIST_HEAD(&wmi->pending_tx_events);
	tasklet_setup(&wmi->wmi_event_tasklet, ath9k_wmi_event_tasklet);

	return wmi;
}

void ath9k_stop_wmi(struct ath9k_htc_priv *priv)
{
	struct wmi *wmi = priv->wmi;

	mutex_lock(&wmi->op_mutex);
	wmi->stopped = true;
	mutex_unlock(&wmi->op_mutex);
}

void ath9k_destroy_wmi(struct ath9k_htc_priv *priv)
{
	kfree(priv->wmi);
}

void ath9k_wmi_event_drain(struct ath9k_htc_priv *priv)
{
	unsigned long flags;

	tasklet_kill(&priv->wmi->wmi_event_tasklet);
	spin_lock_irqsave(&priv->wmi->wmi_lock, flags);
	__skb_queue_purge(&priv->wmi->wmi_event_queue);
	spin_unlock_irqrestore(&priv->wmi->wmi_lock, flags);
}

void ath9k_wmi_event_tasklet(struct tasklet_struct *t)
{
	struct wmi *wmi = from_tasklet(wmi, t, wmi_event_tasklet);
	struct ath9k_htc_priv *priv = wmi->drv_priv;
	struct wmi_cmd_hdr *hdr;
	void *wmi_event;
	struct wmi_event_swba *swba;
	struct sk_buff *skb = NULL;
	unsigned long flags;
	u16 cmd_id;

	do {
		spin_lock_irqsave(&wmi->wmi_lock, flags);
		skb = __skb_dequeue(&wmi->wmi_event_queue);
		if (!skb) {
			spin_unlock_irqrestore(&wmi->wmi_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&wmi->wmi_lock, flags);

		hdr = (struct wmi_cmd_hdr *) skb->data;
		cmd_id = be16_to_cpu(hdr->command_id);
		wmi_event = skb_pull(skb, sizeof(struct wmi_cmd_hdr));

		switch (cmd_id) {
		case WMI_SWBA_EVENTID:
			swba = wmi_event;
			ath9k_htc_swba(priv, swba);
			break;
		case WMI_FATAL_EVENTID:
			ieee80211_queue_work(wmi->drv_priv->hw,
					     &wmi->drv_priv->fatal_work);
			break;
		case WMI_TXSTATUS_EVENTID:
			/* Check if ath9k_tx_init() completed. */
			if (!data_race(priv->tx.initialized))
				break;

			spin_lock_bh(&priv->tx.tx_lock);
			if (priv->tx.flags & ATH9K_HTC_OP_TX_DRAIN) {
				spin_unlock_bh(&priv->tx.tx_lock);
				break;
			}
			spin_unlock_bh(&priv->tx.tx_lock);

			ath9k_htc_txstatus(priv, wmi_event);
			break;
		default:
			break;
		}

		kfree_skb(skb);
	} while (1);
}

void ath9k_fatal_work(struct work_struct *work)
{
	struct ath9k_htc_priv *priv = container_of(work, struct ath9k_htc_priv,
						   fatal_work);
	struct ath_common *common = ath9k_hw_common(priv->ah);

	ath_dbg(common, FATAL, "FATAL Event received, resetting device\n");
	ath9k_htc_reset(priv);
}

static void ath9k_wmi_rsp_callback(struct wmi *wmi, struct sk_buff *skb)
{
	skb_pull(skb, sizeof(struct wmi_cmd_hdr));

	if (wmi->cmd_rsp_buf != NULL && wmi->cmd_rsp_len != 0)
		memcpy(wmi->cmd_rsp_buf, skb->data, wmi->cmd_rsp_len);

	complete(&wmi->cmd_wait);
}

static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
			      enum htc_endpoint_id epid)
{
	struct wmi *wmi = priv;
	struct wmi_cmd_hdr *hdr;
	unsigned long flags;
	u16 cmd_id;

	if (unlikely(wmi->stopped))
		goto free_skb;

	hdr = (struct wmi_cmd_hdr *) skb->data;
	cmd_id = be16_to_cpu(hdr->command_id);

	if (cmd_id & 0x1000) {
		spin_lock_irqsave(&wmi->wmi_lock, flags);
		__skb_queue_tail(&wmi->wmi_event_queue, skb);
		spin_unlock_irqrestore(&wmi->wmi_lock, flags);
		tasklet_schedule(&wmi->wmi_event_tasklet);
		return;
	}

	/* Check if there has been a timeout. */
	spin_lock_irqsave(&wmi->wmi_lock, flags);
	if (be16_to_cpu(hdr->seq_no) != wmi->last_seq_id) {
		spin_unlock_irqrestore(&wmi->wmi_lock, flags);
		goto free_skb;
	}
	spin_unlock_irqrestore(&wmi->wmi_lock, flags);

	/* WMI command response */
	ath9k_wmi_rsp_callback(wmi, skb);

free_skb:
	kfree_skb(skb);
}

static void ath9k_wmi_ctrl_tx(void *priv, struct sk_buff *skb,
			      enum htc_endpoint_id epid, bool txok)
{
	kfree_skb(skb);
}

int ath9k_wmi_connect(struct htc_target *htc, struct wmi *wmi,
		      enum htc_endpoint_id *wmi_ctrl_epid)
{
	struct htc_service_connreq connect;
	int ret;

	wmi->htc = htc;

	memset(&connect, 0, sizeof(connect));

	connect.ep_callbacks.priv = wmi;
	connect.ep_callbacks.tx = ath9k_wmi_ctrl_tx;
	connect.ep_callbacks.rx = ath9k_wmi_ctrl_rx;
	connect.service_id = WMI_CONTROL_SVC;

	ret = htc_connect_service(htc, &connect, &wmi->ctrl_epid);
	if (ret)
		return ret;

	*wmi_ctrl_epid = wmi->ctrl_epid;

	return 0;
}

static int ath9k_wmi_cmd_issue(struct wmi *wmi,
			       struct sk_buff *skb,
			       enum wmi_cmd_id cmd, u16 len)
{
	struct wmi_cmd_hdr *hdr;
	unsigned long flags;

	hdr = skb_push(skb, sizeof(struct wmi_cmd_hdr));
	hdr->command_id = cpu_to_be16(cmd);
	hdr->seq_no = cpu_to_be16(++wmi->tx_seq_id);

	spin_lock_irqsave(&wmi->wmi_lock, flags);
	wmi->last_seq_id = wmi->tx_seq_id;
	spin_unlock_irqrestore(&wmi->wmi_lock, flags);

	return htc_send_epid(wmi->htc, skb, wmi->ctrl_epid);
}

int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
		  u8 *cmd_buf, u32 cmd_len,
		  u8 *rsp_buf, u32 rsp_len,
		  u32 timeout)
{
	struct ath_hw *ah = wmi->drv_priv->ah;
	struct ath_common *common = ath9k_hw_common(ah);
	u16 headroom = sizeof(struct htc_frame_hdr) +
		       sizeof(struct wmi_cmd_hdr);
	struct sk_buff *skb;
	unsigned long time_left;
	int ret = 0;

	if (ah->ah_flags & AH_UNPLUGGED)
		return 0;

	skb = alloc_skb(headroom + cmd_len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, headroom);

	if (cmd_len != 0 && cmd_buf != NULL) {
		skb_put_data(skb, cmd_buf, cmd_len);
	}

	mutex_lock(&wmi->op_mutex);

	/* check if wmi stopped flag is set */
	if (unlikely(wmi->stopped)) {
		ret = -EPROTO;
		goto out;
	}

	/* record the rsp buffer and length */
	wmi->cmd_rsp_buf = rsp_buf;
	wmi->cmd_rsp_len = rsp_len;

	ret = ath9k_wmi_cmd_issue(wmi, skb, cmd_id, cmd_len);
	if (ret)
		goto out;

	time_left = wait_for_completion_timeout(&wmi->cmd_wait, timeout);
	if (!time_left) {
		ath_dbg(common, WMI, "Timeout waiting for WMI command: %s\n",
			wmi_cmd_to_name(cmd_id));
		mutex_unlock(&wmi->op_mutex);
		return -ETIMEDOUT;
	}

	mutex_unlock(&wmi->op_mutex);

	return 0;

out:
	ath_dbg(common, WMI, "WMI failure for: %s\n", wmi_cmd_to_name(cmd_id));
	mutex_unlock(&wmi->op_mutex);
	kfree_skb(skb);

	return ret;
}
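
/*
 * Illustrative sketch only (kept under #if 0 so it is never compiled): a
 * hypothetical caller showing the typical synchronous ath9k_wmi_cmd() usage
 * pattern. The request is passed as a big-endian command buffer, the response
 * is copied into rsp_buf by ath9k_wmi_rsp_callback(), and the call blocks on
 * wmi->cmd_wait until completion or until the timeout (in jiffies) expires.
 * The function name and the HZ / 2 timeout below are assumptions made for the
 * example; they are not defined by this file.
 */
#if 0
static int example_wmi_reg_read(struct ath9k_htc_priv *priv,
				u32 reg_offset, u32 *val)
{
	__be32 be_val, be_reg = cpu_to_be32(reg_offset);
	int r;

	/* cmd_buf/cmd_len carry the request; rsp_buf/rsp_len receive the
	 * response filled in by ath9k_wmi_rsp_callback(). */
	r = ath9k_wmi_cmd(priv->wmi, WMI_REG_READ_CMDID,
			  (u8 *) &be_reg, sizeof(be_reg),
			  (u8 *) &be_val, sizeof(be_val),
			  HZ / 2);
	if (r)
		return r;

	*val = be32_to_cpu(be_val);
	return 0;
}
#endif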