/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/skbuff.h>

#include "core.h"
#include "htc.h"
#include "debug.h"
#include "wmi.h"
#include "mac.h"

void ath10k_wmi_flush_tx(struct ath10k *ar)
{
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	if (ar->state == ATH10K_STATE_WEDGED) {
		ath10k_warn("wmi flush skipped - device is wedged anyway\n");
		return;
	}

	ret = wait_event_timeout(ar->wmi.wq,
				 atomic_read(&ar->wmi.pending_tx_count) == 0,
				 5*HZ);
	if (atomic_read(&ar->wmi.pending_tx_count) == 0)
		return;

	if (ret == 0)
		ret = -ETIMEDOUT;

	if (ret < 0)
		ath10k_warn("wmi flush failed (%d)\n", ret);
}

int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
{
	int ret;

	ret = wait_for_completion_timeout(&ar->wmi.service_ready,
					  WMI_SERVICE_READY_TIMEOUT_HZ);
	return ret;
}

int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
{
	int ret;

	ret = wait_for_completion_timeout(&ar->wmi.unified_ready,
					  WMI_UNIFIED_READY_TIMEOUT_HZ);
	return ret;
}

static struct sk_buff *ath10k_wmi_alloc_skb(u32 len)
{
	struct sk_buff *skb;
	u32 round_len = roundup(len, 4);

	skb = ath10k_htc_alloc_skb(WMI_SKB_HEADROOM + round_len);
	if (!skb)
		return NULL;

	skb_reserve(skb, WMI_SKB_HEADROOM);
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn("Unaligned WMI skb\n");

	skb_put(skb, round_len);
	memset(skb->data, 0, round_len);

	return skb;
}

static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb(skb);

	if (atomic_sub_return(1, &ar->wmi.pending_tx_count) == 0)
		wake_up(&ar->wmi.wq);
}

/* WMI command API */
static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
			       enum wmi_cmd_id cmd_id)
{
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
	struct wmi_cmd_hdr *cmd_hdr;
	int status;
	u32 cmd = 0;

	if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
		return -ENOMEM;

	cmd |= SM(cmd_id, WMI_CMD_HDR_CMD_ID);

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	cmd_hdr->cmd_id = __cpu_to_le32(cmd);

	if (atomic_add_return(1, &ar->wmi.pending_tx_count) >
	    WMI_MAX_PENDING_TX_COUNT) {
		/* avoid using up memory when FW hangs */
		atomic_dec(&ar->wmi.pending_tx_count);
		return -EBUSY;
	}

	memset(skb_cb, 0, sizeof(*skb_cb));

	trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len);

	status = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
	if (status) {
		dev_kfree_skb_any(skb);
		atomic_dec(&ar->wmi.pending_tx_count);
		return status;
	}

	return 0;
}

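/*
 * Every command helper in this file follows the same build-and-send
 * pattern (an illustrative sketch only; wmi_foo_cmd and WMI_FOO_CMDID
 * are hypothetical placeholders, not real definitions):
 *
 *	struct wmi_foo_cmd *cmd;
 *	struct sk_buff *skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
 *
 *	if (!skb)
 *		return -ENOMEM;
 *
 *	cmd = (struct wmi_foo_cmd *)skb->data;
 *	cmd->some_field = __cpu_to_le32(value);
 *
 *	return ath10k_wmi_cmd_send(ar, skb, WMI_FOO_CMDID);
 *
 * ath10k_wmi_cmd_send() prepends the wmi_cmd_hdr itself, so the helpers
 * only fill in the command body.
 */
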
static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_scan_event *event = (struct wmi_scan_event *)skb->data;
	enum wmi_scan_event_type event_type;
	enum wmi_scan_completion_reason reason;
	u32 freq;
	u32 req_id;
	u32 scan_id;
	u32 vdev_id;

	event_type = __le32_to_cpu(event->event_type);
	reason = __le32_to_cpu(event->reason);
	freq = __le32_to_cpu(event->channel_freq);
	req_id = __le32_to_cpu(event->scan_req_id);
	scan_id = __le32_to_cpu(event->scan_id);
	vdev_id = __le32_to_cpu(event->vdev_id);

	ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENTID\n");
	ath10k_dbg(ATH10K_DBG_WMI,
		   "scan event type %d reason %d freq %d req_id %d scan_id %d vdev_id %d\n",
		   event_type, reason, freq, req_id, scan_id, vdev_id);

	spin_lock_bh(&ar->data_lock);

	switch (event_type) {
	case WMI_SCAN_EVENT_STARTED:
		ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_STARTED\n");
		if (ar->scan.in_progress && ar->scan.is_roc)
			ieee80211_ready_on_channel(ar->hw);

		complete(&ar->scan.started);
		break;
	case WMI_SCAN_EVENT_COMPLETED:
		ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_COMPLETED\n");
		switch (reason) {
		case WMI_SCAN_REASON_COMPLETED:
			ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_COMPLETED\n");
			break;
		case WMI_SCAN_REASON_CANCELLED:
			ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_CANCELLED\n");
			break;
		case WMI_SCAN_REASON_PREEMPTED:
			ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_PREEMPTED\n");
			break;
		case WMI_SCAN_REASON_TIMEDOUT:
			ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_TIMEDOUT\n");
			break;
		default:
			break;
		}

		ar->scan_channel = NULL;
		if (!ar->scan.in_progress) {
			ath10k_warn("no scan requested, ignoring\n");
			break;
		}

		if (ar->scan.is_roc) {
			ath10k_offchan_tx_purge(ar);

			if (!ar->scan.aborting)
				ieee80211_remain_on_channel_expired(ar->hw);
		} else {
			ieee80211_scan_completed(ar->hw, ar->scan.aborting);
		}

		del_timer(&ar->scan.timeout);
		complete_all(&ar->scan.completed);
		ar->scan.in_progress = false;
		break;
	case WMI_SCAN_EVENT_BSS_CHANNEL:
		ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_BSS_CHANNEL\n");
		ar->scan_channel = NULL;
		break;
	case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
		ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_FOREIGN_CHANNEL\n");
		ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
		if (ar->scan.in_progress && ar->scan.is_roc &&
		    ar->scan.roc_freq == freq) {
			complete(&ar->scan.on_channel);
		}
		break;
	case WMI_SCAN_EVENT_DEQUEUED:
		ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_DEQUEUED\n");
		break;
	case WMI_SCAN_EVENT_PREEMPTED:
		ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENT_PREEMPTED\n");
		break;
	case WMI_SCAN_EVENT_START_FAILED:
		ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENT_START_FAILED\n");
		break;
	default:
		break;
	}

	spin_unlock_bh(&ar->data_lock);
	return 0;
}

static inline enum ieee80211_band phy_mode_to_band(u32 phy_mode)
{
	enum ieee80211_band band;

	switch (phy_mode) {
	case MODE_11A:
	case MODE_11NA_HT20:
	case MODE_11NA_HT40:
	case MODE_11AC_VHT20:
	case MODE_11AC_VHT40:
	case MODE_11AC_VHT80:
		band = IEEE80211_BAND_5GHZ;
		break;
	case MODE_11G:
	case MODE_11B:
	case MODE_11GONLY:
	case MODE_11NG_HT20:
	case MODE_11NG_HT40:
	case MODE_11AC_VHT20_2G:
	case MODE_11AC_VHT40_2G:
	case MODE_11AC_VHT80_2G:
	default:
		band = IEEE80211_BAND_2GHZ;
	}

	return band;
}

static inline u8 get_rate_idx(u32 rate, enum ieee80211_band band)
{
	u8 rate_idx = 0;

	/* rate in Kbps */
	switch (rate) {
	case 1000:
		rate_idx = 0;
		break;
	case 2000:
		rate_idx = 1;
		break;
	case 5500:
		rate_idx = 2;
		break;
	case 11000:
		rate_idx = 3;
		break;
	case 6000:
		rate_idx = 4;
		break;
	case 9000:
		rate_idx = 5;
		break;
	case 12000:
		rate_idx = 6;
		break;
	case 18000:
		rate_idx = 7;
		break;
	case 24000:
		rate_idx = 8;
		break;
	case 36000:
		rate_idx = 9;
		break;
	case 48000:
		rate_idx = 10;
		break;
	case 54000:
		rate_idx = 11;
		break;
	default:
		break;
	}

	if (band == IEEE80211_BAND_5GHZ) {
		if (rate_idx > 3)
			/* Omit CCK rates */
			rate_idx -= 4;
		else
			rate_idx = 0;
	}

	return rate_idx;
}

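/*
 * The legacy rate table indexed above places the four CCK rates (1, 2,
 * 5.5, 11 Mbps) at indices 0-3 and the eight OFDM rates (6-54 Mbps) at
 * indices 4-11, hence the "- 4" adjustment on 5 GHz where CCK does not
 * exist. For example, rate = 54000 Kbps maps to index 11 on 2.4 GHz but
 * to index 7 on 5 GHz.
 */
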
static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_mgmt_rx_event *event = (struct wmi_mgmt_rx_event *)skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr;
	u32 rx_status;
	u32 channel;
	u32 phy_mode;
	u32 snr;
	u32 rate;
	u32 buf_len;
	u16 fc;

	channel = __le32_to_cpu(event->hdr.channel);
	buf_len = __le32_to_cpu(event->hdr.buf_len);
	rx_status = __le32_to_cpu(event->hdr.status);
	snr = __le32_to_cpu(event->hdr.snr);
	phy_mode = __le32_to_cpu(event->hdr.phy_mode);
	rate = __le32_to_cpu(event->hdr.rate);

	memset(status, 0, sizeof(*status));

	ath10k_dbg(ATH10K_DBG_MGMT,
		   "event mgmt rx status %08x\n", rx_status);

	if (rx_status & WMI_RX_STATUS_ERR_DECRYPT) {
		dev_kfree_skb(skb);
		return 0;
	}

	if (rx_status & WMI_RX_STATUS_ERR_KEY_CACHE_MISS) {
		dev_kfree_skb(skb);
		return 0;
	}

	if (rx_status & WMI_RX_STATUS_ERR_CRC)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;
	if (rx_status & WMI_RX_STATUS_ERR_MIC)
		status->flag |= RX_FLAG_MMIC_ERROR;

	status->band = phy_mode_to_band(phy_mode);
	status->freq = ieee80211_channel_to_frequency(channel, status->band);
	status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
	status->rate_idx = get_rate_idx(rate, status->band);

	skb_pull(skb, sizeof(event->hdr));

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = le16_to_cpu(hdr->frame_control);

	if (fc & IEEE80211_FCTL_PROTECTED) {
		status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED |
				RX_FLAG_MMIC_STRIPPED;
		hdr->frame_control = __cpu_to_le16(fc &
					~IEEE80211_FCTL_PROTECTED);
	}

	ath10k_dbg(ATH10K_DBG_MGMT,
		   "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
		   skb, skb->len,
		   fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);

	ath10k_dbg(ATH10K_DBG_MGMT,
		   "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
		   status->freq, status->band, status->signal,
		   status->rate_idx);

	/*
	 * Packets from HTC come aligned to 4-byte boundaries because
	 * they can originally come in along with a trailer.
	 */
	skb_trim(skb, buf_len);

	ieee80211_rx(ar->hw, skb);
	return 0;
}

static int freq_to_idx(struct ath10k *ar, int freq)
{
	struct ieee80211_supported_band *sband;
	int band, ch, idx = 0;

	for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
		sband = ar->hw->wiphy->bands[band];
		if (!sband)
			continue;

		for (ch = 0; ch < sband->n_channels; ch++, idx++)
			if (sband->channels[ch].center_freq == freq)
				goto exit;
	}

exit:
	return idx;
}

static void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_chan_info_event *ev;
	struct survey_info *survey;
	u32 err_code, freq, cmd_flags, noise_floor, rx_clear_count, cycle_count;
	int idx;

	ev = (struct wmi_chan_info_event *)skb->data;

	err_code = __le32_to_cpu(ev->err_code);
	freq = __le32_to_cpu(ev->freq);
	cmd_flags = __le32_to_cpu(ev->cmd_flags);
	noise_floor = __le32_to_cpu(ev->noise_floor);
	rx_clear_count = __le32_to_cpu(ev->rx_clear_count);
	cycle_count = __le32_to_cpu(ev->cycle_count);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n",
		   err_code, freq, cmd_flags, noise_floor, rx_clear_count,
		   cycle_count);

	spin_lock_bh(&ar->data_lock);

	if (!ar->scan.in_progress) {
		ath10k_warn("chan info event without a scan request?\n");
		goto exit;
	}

	idx = freq_to_idx(ar, freq);
	if (idx >= ARRAY_SIZE(ar->survey)) {
		ath10k_warn("chan info: invalid frequency %d (idx %d out of bounds)\n",
			    freq, idx);
		goto exit;
	}

	if (cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) {
		/* During scanning chan info is reported twice for each
		 * visited channel. The reported cycle count is global
		 * and the per-channel cycle count must be calculated. */

		cycle_count -= ar->survey_last_cycle_count;
		rx_clear_count -= ar->survey_last_rx_clear_count;

		survey = &ar->survey[idx];
		survey->channel_time = WMI_CHAN_INFO_MSEC(cycle_count);
		survey->channel_time_rx = WMI_CHAN_INFO_MSEC(rx_clear_count);
		survey->noise = noise_floor;
		survey->filled = SURVEY_INFO_CHANNEL_TIME |
				 SURVEY_INFO_CHANNEL_TIME_RX |
				 SURVEY_INFO_NOISE_DBM;
	}

	ar->survey_last_rx_clear_count = rx_clear_count;
	ar->survey_last_cycle_count = cycle_count;

exit:
	spin_unlock_bh(&ar->data_lock);
}

static void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n");
}

static void ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_DEBUG_MESG_EVENTID\n");
}

static void ath10k_wmi_event_update_stats(struct ath10k *ar,
					  struct sk_buff *skb)
{
	struct wmi_stats_event *ev = (struct wmi_stats_event *)skb->data;

	ath10k_dbg(ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n");

	ath10k_debug_read_target_stats(ar, ev);
}

static void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar,
					     struct sk_buff *skb)
{
	struct wmi_vdev_start_response_event *ev;

	ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n");

	ev = (struct wmi_vdev_start_response_event *)skb->data;

	if (WARN_ON(__le32_to_cpu(ev->status)))
		return;

	complete(&ar->vdev_setup_done);
}

static void ath10k_wmi_event_vdev_stopped(struct ath10k *ar,
					  struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n");
	complete(&ar->vdev_setup_done);
}

static void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar,
					      struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_PEER_STA_KICKOUT_EVENTID\n");
}

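/*
 * For reference in ath10k_wmi_update_tim() below, the TIM IE layout
 * defined by 802.11 is:
 *
 *	|EID|len|DTIM count|DTIM period|bitmap control|partial virtual bitmap|
 *
 * The three fixed octets between the length and the partial virtual
 * bitmap are why pvm_len is computed as ie_len - 3.
 */
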
/*
 * FIXME
 *
 * We don't report the sleep state of connected stations to mac80211.
 * Because of this, mac80211 can't fill in the TIM IE correctly.
 *
 * I know of no way of getting nullfunc frames that carry the sleep
 * transition from connected stations - these do not seem to be sent
 * from the target to the host. There also doesn't seem to be a
 * dedicated event for that. So the only way left to do this would be
 * to read tim_bitmap during SWBA.
 *
 * We could probably try using tim_bitmap from SWBA to tell mac80211
 * which stations are asleep and which are not. The problem here is
 * that calling mac80211 functions so many times could take too long
 * and make us miss the time to submit the beacon to the target.
 *
 * So as a workaround we try to extend the TIM IE if there is unicast
 * traffic buffered for stations with AID > 7 and fill it in ourselves.
 */
static void ath10k_wmi_update_tim(struct ath10k *ar,
				  struct ath10k_vif *arvif,
				  struct sk_buff *bcn,
				  struct wmi_bcn_info *bcn_info)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data;
	struct ieee80211_tim_ie *tim;
	u8 *ies, *ie;
	u8 ie_len, pvm_len;

	/* if next SWBA has no tim_changed the tim_bitmap is garbage.
	 * we must copy the bitmap upon change and reuse it later */
	if (__le32_to_cpu(bcn_info->tim_info.tim_changed)) {
		int i;

		BUILD_BUG_ON(sizeof(arvif->u.ap.tim_bitmap) !=
			     sizeof(bcn_info->tim_info.tim_bitmap));

		for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++) {
			__le32 t = bcn_info->tim_info.tim_bitmap[i / 4];
			u32 v = __le32_to_cpu(t);
			arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF;
		}

		/* FW reports either length 0 or 16
		 * so we calculate this on our own */
		arvif->u.ap.tim_len = 0;
		for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++)
			if (arvif->u.ap.tim_bitmap[i])
				arvif->u.ap.tim_len = i;

		arvif->u.ap.tim_len++;
	}

	ies = bcn->data;
	ies += ieee80211_hdrlen(hdr->frame_control);
	ies += 12; /* fixed parameters */

	ie = (u8 *)cfg80211_find_ie(WLAN_EID_TIM, ies,
				    (u8 *)skb_tail_pointer(bcn) - ies);
	if (!ie) {
		if (arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
			ath10k_warn("no tim ie found\n");
		return;
	}

	tim = (void *)ie + 2;
	ie_len = ie[1];
	pvm_len = ie_len - 3; /* exclude dtim count, dtim period, bmap ctl */

	if (pvm_len < arvif->u.ap.tim_len) {
		int expand_size = sizeof(arvif->u.ap.tim_bitmap) - pvm_len;
		int move_size = skb_tail_pointer(bcn) - (ie + 2 + ie_len);
		void *next_ie = ie + 2 + ie_len;

		if (skb_put(bcn, expand_size)) {
			memmove(next_ie + expand_size, next_ie, move_size);

			ie[1] += expand_size;
			ie_len += expand_size;
			pvm_len += expand_size;
		} else {
			ath10k_warn("tim expansion failed\n");
		}
	}

	if (pvm_len > sizeof(arvif->u.ap.tim_bitmap)) {
		ath10k_warn("tim pvm length is too great (%d)\n", pvm_len);
		return;
	}

	tim->bitmap_ctrl = !!__le32_to_cpu(bcn_info->tim_info.tim_mcast);
	memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len);

	ath10k_dbg(ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n",
		   tim->dtim_count, tim->dtim_period,
		   tim->bitmap_ctrl, pvm_len);
}

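/*
 * Layout produced by ath10k_p2p_fill_noa_ie() below: a vendor-specific
 * IE (EID 221) carrying the WFA P2P OUI and a single Notice of Absence
 * attribute:
 *
 *	data[0]   WLAN_EID_VENDOR_SPECIFIC
 *	data[1]   IE length (len - 2)
 *	data[2-4] WFA OUI (0x50 0x6f 0x9a)
 *	data[5]   WLAN_OUI_TYPE_WFA_P2P
 *	data[6]   IEEE80211_P2P_ATTR_ABSENCE_NOTICE
 *	data[7-8] attribute length (little endian)
 *	data[9-]  index, CT window/OppPS, NoA descriptors
 */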
static void ath10k_p2p_fill_noa_ie(u8 *data, u32 len,
				   struct wmi_p2p_noa_info *noa)
{
	struct ieee80211_p2p_noa_attr *noa_attr;
	u8 ctwindow_oppps = noa->ctwindow_oppps;
	u8 ctwindow = ctwindow_oppps >> WMI_P2P_OPPPS_CTWINDOW_OFFSET;
	bool oppps = !!(ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT);
	__le16 *noa_attr_len;
	u16 attr_len;
	u8 noa_descriptors = noa->num_descriptors;
	int i;

	/* P2P IE */
	data[0] = WLAN_EID_VENDOR_SPECIFIC;
	data[1] = len - 2;
	data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
	data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
	data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
	data[5] = WLAN_OUI_TYPE_WFA_P2P;

	/* NOA ATTR */
	data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE;
	noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */
	noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9];

	noa_attr->index = noa->index;
	noa_attr->oppps_ctwindow = ctwindow;
	if (oppps)
		noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT;

	for (i = 0; i < noa_descriptors; i++) {
		noa_attr->desc[i].count =
			__le32_to_cpu(noa->descriptors[i].type_count);
		noa_attr->desc[i].duration = noa->descriptors[i].duration;
		noa_attr->desc[i].interval = noa->descriptors[i].interval;
		noa_attr->desc[i].start_time = noa->descriptors[i].start_time;
	}

	attr_len = 2; /* index + oppps_ctwindow */
	attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
	*noa_attr_len = __cpu_to_le16(attr_len);
}

static u32 ath10k_p2p_calc_noa_ie_len(struct wmi_p2p_noa_info *noa)
{
	u32 len = 0;
	u8 noa_descriptors = noa->num_descriptors;
	u8 opp_ps_info = noa->ctwindow_oppps;
	bool opps_enabled = !!(opp_ps_info & WMI_P2P_OPPPS_ENABLE_BIT);

	if (!noa_descriptors && !opps_enabled)
		return len;

	len += 1 + 1 + 4; /* EID + len + OUI */
	len += 1 + 2; /* noa attr + attr len */
	len += 1 + 1; /* index + oppps_ctwindow */
	len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);

	return len;
}

static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif,
				  struct sk_buff *bcn,
				  struct wmi_bcn_info *bcn_info)
{
	struct wmi_p2p_noa_info *noa = &bcn_info->p2p_noa_info;
	u8 *new_data, *old_data = arvif->u.ap.noa_data;
	u32 new_len;

	if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
		return;

	ath10k_dbg(ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed);
	if (noa->changed & WMI_P2P_NOA_CHANGED_BIT) {
		new_len = ath10k_p2p_calc_noa_ie_len(noa);
		if (!new_len)
			goto cleanup;

		new_data = kmalloc(new_len, GFP_ATOMIC);
		if (!new_data)
			goto cleanup;

		ath10k_p2p_fill_noa_ie(new_data, new_len, noa);

		spin_lock_bh(&ar->data_lock);
		arvif->u.ap.noa_data = new_data;
		arvif->u.ap.noa_len = new_len;
		spin_unlock_bh(&ar->data_lock);
		kfree(old_data);
	}

	if (arvif->u.ap.noa_data)
		if (!pskb_expand_head(bcn, 0, arvif->u.ap.noa_len, GFP_ATOMIC))
			memcpy(skb_put(bcn, arvif->u.ap.noa_len),
			       arvif->u.ap.noa_data,
			       arvif->u.ap.noa_len);
	return;

cleanup:
	spin_lock_bh(&ar->data_lock);
	arvif->u.ap.noa_data = NULL;
	arvif->u.ap.noa_len = 0;
	spin_unlock_bh(&ar->data_lock);
	kfree(old_data);
}

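/*
 * SWBA ("software beacon alert") events carry a bitmap of beaconing
 * vdevs. bcn_info[] entries are packed in vdev id order, e.g. a
 * vdev_map of 0x5 means bcn_info[0] belongs to vdev 0 and bcn_info[1]
 * to vdev 2; the loop below walks the map bit by bit to recover that
 * association.
 */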
static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_host_swba_event *ev;
	u32 map;
	int i = -1;
	struct wmi_bcn_info *bcn_info;
	struct ath10k_vif *arvif;
	struct wmi_bcn_tx_arg arg;
	struct sk_buff *bcn;
	int vdev_id = 0;
	int ret;

	ath10k_dbg(ATH10K_DBG_MGMT, "WMI_HOST_SWBA_EVENTID\n");

	ev = (struct wmi_host_swba_event *)skb->data;
	map = __le32_to_cpu(ev->vdev_map);

	ath10k_dbg(ATH10K_DBG_MGMT, "host swba:\n"
		   "-vdev map 0x%x\n",
		   map);

	for (; map; map >>= 1, vdev_id++) {
		if (!(map & 0x1))
			continue;

		i++;

		if (i >= WMI_MAX_AP_VDEV) {
			ath10k_warn("swba has corrupted vdev map\n");
			break;
		}

		bcn_info = &ev->bcn_info[i];

		ath10k_dbg(ATH10K_DBG_MGMT,
			   "-bcn_info[%d]:\n"
			   "--tim_len %d\n"
			   "--tim_mcast %d\n"
			   "--tim_changed %d\n"
			   "--tim_num_ps_pending %d\n"
			   "--tim_bitmap 0x%08x%08x%08x%08x\n",
			   i,
			   __le32_to_cpu(bcn_info->tim_info.tim_len),
			   __le32_to_cpu(bcn_info->tim_info.tim_mcast),
			   __le32_to_cpu(bcn_info->tim_info.tim_changed),
			   __le32_to_cpu(bcn_info->tim_info.tim_num_ps_pending),
			   __le32_to_cpu(bcn_info->tim_info.tim_bitmap[3]),
			   __le32_to_cpu(bcn_info->tim_info.tim_bitmap[2]),
			   __le32_to_cpu(bcn_info->tim_info.tim_bitmap[1]),
			   __le32_to_cpu(bcn_info->tim_info.tim_bitmap[0]));

		arvif = ath10k_get_arvif(ar, vdev_id);
		if (arvif == NULL) {
			ath10k_warn("no vif for vdev_id %d found\n", vdev_id);
			continue;
		}

		bcn = ieee80211_beacon_get(ar->hw, arvif->vif);
		if (!bcn) {
			ath10k_warn("could not get mac80211 beacon\n");
			continue;
		}

		ath10k_tx_h_seq_no(bcn);
		ath10k_wmi_update_tim(ar, arvif, bcn, bcn_info);
		ath10k_wmi_update_noa(ar, arvif, bcn, bcn_info);

		arg.vdev_id = arvif->vdev_id;
		arg.tx_rate = 0;
		arg.tx_power = 0;
		arg.bcn = bcn->data;
		arg.bcn_len = bcn->len;

		ret = ath10k_wmi_beacon_send(ar, &arg);
		if (ret)
			ath10k_warn("could not send beacon (%d)\n", ret);

		dev_kfree_skb_any(bcn);
	}
}

static void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar,
					       struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n");
}

static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_PHYERR_EVENTID\n");
}

static void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_ROAM_EVENTID\n");
}

static void ath10k_wmi_event_profile_match(struct ath10k *ar,
					   struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n");
}

static void ath10k_wmi_event_debug_print(struct ath10k *ar,
					 struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_DEBUG_PRINT_EVENTID\n");
}

static void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n");
}

static void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar,
					       struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n");
}

static void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar,
						    struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n");
}

static void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar,
						    struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n");
}

static void ath10k_wmi_event_rtt_error_report(struct ath10k *ar,
					      struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n");
}

static void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar,
					     struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_WOW_WAKEUP_HOST_EVENTID\n");
}

static void ath10k_wmi_event_dcs_interference(struct ath10k *ar,
					      struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n");
}

static void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar,
					     struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_TPC_CONFIG_EVENTID\n");
}

static void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar,
					   struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n");
}

static void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar,
						struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n");
}

static void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar,
					    struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n");
}

static void ath10k_wmi_event_delba_complete(struct ath10k *ar,
					    struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n");
}

static void ath10k_wmi_event_addba_complete(struct ath10k *ar,
					    struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n");
}

static void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
						       struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n");
}

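/*
 * The firmware packs its version into two 32-bit words:
 * sw_version   = major (8 bits) << 24 | minor (24 bits)
 * sw_version_1 = release (16 bits) << 16 | build (16 bits)
 * e.g. sw_version 0x01000023 decodes as major 1, minor 0x23. The
 * handler below unpacks these into ar->fw_version_*.
 */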
static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
					      struct sk_buff *skb)
{
	struct wmi_service_ready_event *ev = (void *)skb->data;

	if (skb->len < sizeof(*ev)) {
		ath10k_warn("Service ready event was %d B but expected %zu B. Wrong firmware version?\n",
			    skb->len, sizeof(*ev));
		return;
	}

	ar->hw_min_tx_power = __le32_to_cpu(ev->hw_min_tx_power);
	ar->hw_max_tx_power = __le32_to_cpu(ev->hw_max_tx_power);
	ar->ht_cap_info = __le32_to_cpu(ev->ht_cap_info);
	ar->vht_cap_info = __le32_to_cpu(ev->vht_cap_info);
	ar->fw_version_major =
		(__le32_to_cpu(ev->sw_version) & 0xff000000) >> 24;
	ar->fw_version_minor = (__le32_to_cpu(ev->sw_version) & 0x00ffffff);
	ar->fw_version_release =
		(__le32_to_cpu(ev->sw_version_1) & 0xffff0000) >> 16;
	ar->fw_version_build = (__le32_to_cpu(ev->sw_version_1) & 0x0000ffff);
	ar->phy_capability = __le32_to_cpu(ev->phy_capability);
	ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains);

	if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
		ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n",
			    ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
		ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
	}

	ar->ath_common.regulatory.current_rd =
		__le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd);

	ath10k_debug_read_service_map(ar, ev->wmi_service_bitmap,
				      sizeof(ev->wmi_service_bitmap));

	if (strlen(ar->hw->wiphy->fw_version) == 0) {
		snprintf(ar->hw->wiphy->fw_version,
			 sizeof(ar->hw->wiphy->fw_version),
			 "%u.%u.%u.%u",
			 ar->fw_version_major,
			 ar->fw_version_minor,
			 ar->fw_version_release,
			 ar->fw_version_build);
	}

	/* FIXME: we should probably support this */
	if (__le32_to_cpu(ev->num_mem_reqs) > 0) {
		ath10k_warn("target requested %d memory chunks; ignoring\n",
			    __le32_to_cpu(ev->num_mem_reqs));
	}

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi event service ready sw_ver 0x%08x sw_ver1 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_mcs 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n",
		   __le32_to_cpu(ev->sw_version),
		   __le32_to_cpu(ev->sw_version_1),
		   __le32_to_cpu(ev->abi_version),
		   __le32_to_cpu(ev->phy_capability),
		   __le32_to_cpu(ev->ht_cap_info),
		   __le32_to_cpu(ev->vht_cap_info),
		   __le32_to_cpu(ev->vht_supp_mcs),
		   __le32_to_cpu(ev->sys_cap_info),
		   __le32_to_cpu(ev->num_mem_reqs),
		   __le32_to_cpu(ev->num_rf_chains));

	complete(&ar->wmi.service_ready);
}

static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_ready_event *ev = (struct wmi_ready_event *)skb->data;

	if (WARN_ON(skb->len < sizeof(*ev)))
		return -EINVAL;

	memcpy(ar->mac_addr, ev->mac_addr.addr, ETH_ALEN);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d\n",
		   __le32_to_cpu(ev->sw_version),
		   __le32_to_cpu(ev->abi_version),
		   ev->mac_addr.addr,
		   __le32_to_cpu(ev->status));

	complete(&ar->wmi.unified_ready);
	return 0;
}

static void ath10k_wmi_event_process(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_cmd_hdr *cmd_hdr;
	enum wmi_event_id id;
	u16 len;

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);

	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
		return;

	len = skb->len;

	trace_ath10k_wmi_event(id, skb->data, skb->len);

	switch (id) {
	case WMI_MGMT_RX_EVENTID:
		ath10k_wmi_event_mgmt_rx(ar, skb);
		/* mgmt_rx() owns the skb now! */
		return;
	case WMI_SCAN_EVENTID:
		ath10k_wmi_event_scan(ar, skb);
		break;
	case WMI_CHAN_INFO_EVENTID:
		ath10k_wmi_event_chan_info(ar, skb);
		break;
	case WMI_ECHO_EVENTID:
		ath10k_wmi_event_echo(ar, skb);
		break;
	case WMI_DEBUG_MESG_EVENTID:
		ath10k_wmi_event_debug_mesg(ar, skb);
		break;
	case WMI_UPDATE_STATS_EVENTID:
		ath10k_wmi_event_update_stats(ar, skb);
		break;
	case WMI_VDEV_START_RESP_EVENTID:
		ath10k_wmi_event_vdev_start_resp(ar, skb);
		break;
	case WMI_VDEV_STOPPED_EVENTID:
		ath10k_wmi_event_vdev_stopped(ar, skb);
		break;
	case WMI_PEER_STA_KICKOUT_EVENTID:
		ath10k_wmi_event_peer_sta_kickout(ar, skb);
		break;
	case WMI_HOST_SWBA_EVENTID:
		ath10k_wmi_event_host_swba(ar, skb);
		break;
	case WMI_TBTTOFFSET_UPDATE_EVENTID:
		ath10k_wmi_event_tbttoffset_update(ar, skb);
		break;
	case WMI_PHYERR_EVENTID:
		ath10k_wmi_event_phyerr(ar, skb);
		break;
	case WMI_ROAM_EVENTID:
		ath10k_wmi_event_roam(ar, skb);
		break;
	case WMI_PROFILE_MATCH:
		ath10k_wmi_event_profile_match(ar, skb);
		break;
	case WMI_DEBUG_PRINT_EVENTID:
		ath10k_wmi_event_debug_print(ar, skb);
		break;
	case WMI_PDEV_QVIT_EVENTID:
		ath10k_wmi_event_pdev_qvit(ar, skb);
		break;
	case WMI_WLAN_PROFILE_DATA_EVENTID:
		ath10k_wmi_event_wlan_profile_data(ar, skb);
		break;
	case WMI_RTT_MEASUREMENT_REPORT_EVENTID:
		ath10k_wmi_event_rtt_measurement_report(ar, skb);
		break;
	case WMI_TSF_MEASUREMENT_REPORT_EVENTID:
		ath10k_wmi_event_tsf_measurement_report(ar, skb);
		break;
	case WMI_RTT_ERROR_REPORT_EVENTID:
		ath10k_wmi_event_rtt_error_report(ar, skb);
		break;
	case WMI_WOW_WAKEUP_HOST_EVENTID:
		ath10k_wmi_event_wow_wakeup_host(ar, skb);
		break;
	case WMI_DCS_INTERFERENCE_EVENTID:
		ath10k_wmi_event_dcs_interference(ar, skb);
		break;
	case WMI_PDEV_TPC_CONFIG_EVENTID:
		ath10k_wmi_event_pdev_tpc_config(ar, skb);
		break;
	case WMI_PDEV_FTM_INTG_EVENTID:
		ath10k_wmi_event_pdev_ftm_intg(ar, skb);
		break;
	case WMI_GTK_OFFLOAD_STATUS_EVENTID:
		ath10k_wmi_event_gtk_offload_status(ar, skb);
		break;
	case WMI_GTK_REKEY_FAIL_EVENTID:
		ath10k_wmi_event_gtk_rekey_fail(ar, skb);
		break;
	case WMI_TX_DELBA_COMPLETE_EVENTID:
		ath10k_wmi_event_delba_complete(ar, skb);
		break;
	case WMI_TX_ADDBA_COMPLETE_EVENTID:
		ath10k_wmi_event_addba_complete(ar, skb);
		break;
	case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
		ath10k_wmi_event_vdev_install_key_complete(ar, skb);
		break;
	case WMI_SERVICE_READY_EVENTID:
		ath10k_wmi_service_ready_event_rx(ar, skb);
		break;
	case WMI_READY_EVENTID:
		ath10k_wmi_ready_event_rx(ar, skb);
		break;
	default:
		ath10k_warn("Unknown eventid: %d\n", id);
		break;
	}

	dev_kfree_skb(skb);
}

static void ath10k_wmi_event_work(struct work_struct *work)
{
	struct ath10k *ar = container_of(work, struct ath10k,
					 wmi.wmi_event_work);
	struct sk_buff *skb;

	for (;;) {
		skb = skb_dequeue(&ar->wmi.wmi_event_list);
		if (!skb)
			break;

		ath10k_wmi_event_process(ar, skb);
	}
}

static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_cmd_hdr *cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	enum wmi_event_id event_id;

	event_id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);

	/* some events must be handled ASAP
	 * and thus can't be deferred to a worker thread */
	switch (event_id) {
	case WMI_HOST_SWBA_EVENTID:
	case WMI_MGMT_RX_EVENTID:
		ath10k_wmi_event_process(ar, skb);
		return;
	default:
		break;
	}

	skb_queue_tail(&ar->wmi.wmi_event_list, skb);
	queue_work(ar->workqueue, &ar->wmi.wmi_event_work);
}

/* WMI Initialization functions */
int ath10k_wmi_attach(struct ath10k *ar)
{
	init_completion(&ar->wmi.service_ready);
	init_completion(&ar->wmi.unified_ready);
	init_waitqueue_head(&ar->wmi.wq);

	skb_queue_head_init(&ar->wmi.wmi_event_list);
	INIT_WORK(&ar->wmi.wmi_event_work, ath10k_wmi_event_work);

	return 0;
}

void ath10k_wmi_detach(struct ath10k *ar)
{
	/* HTC should've drained the packets already */
	if (WARN_ON(atomic_read(&ar->wmi.pending_tx_count) > 0))
		ath10k_warn("there are still pending packets\n");

	cancel_work_sync(&ar->wmi.wmi_event_work);
	skb_queue_purge(&ar->wmi.wmi_event_list);
}

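/*
 * Typical bring-up order, as driven from the core code (a sketch; the
 * exact call sites live outside this file):
 *
 *	ath10k_wmi_attach(ar);
 *	ath10k_wmi_connect_htc_service(ar);
 *	ath10k_wmi_wait_for_service_ready(ar);
 *	ath10k_wmi_cmd_init(ar);
 *	ath10k_wmi_wait_for_unified_ready(ar);
 */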
int ath10k_wmi_connect_htc_service(struct ath10k *ar)
{
	int status;
	struct ath10k_htc_svc_conn_req conn_req;
	struct ath10k_htc_svc_conn_resp conn_resp;

	memset(&conn_req, 0, sizeof(conn_req));
	memset(&conn_resp, 0, sizeof(conn_resp));

	/* these fields are the same for all service endpoints */
	conn_req.ep_ops.ep_tx_complete = ath10k_wmi_htc_tx_complete;
	conn_req.ep_ops.ep_rx_complete = ath10k_wmi_process_rx;

	/* connect to control service */
	conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;

	status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
	if (status) {
		ath10k_warn("failed to connect to WMI CONTROL service status: %d\n",
			    status);
		return status;
	}

	ar->wmi.eid = conn_resp.eid;
	return 0;
}

int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
				  u16 rd5g, u16 ctl2g, u16 ctl5g)
{
	struct wmi_pdev_set_regdomain_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
	cmd->reg_domain = __cpu_to_le32(rd);
	cmd->reg_domain_2G = __cpu_to_le32(rd2g);
	cmd->reg_domain_5G = __cpu_to_le32(rd5g);
	cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
	cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n",
		   rd, rd2g, rd5g, ctl2g, ctl5g);

	return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_REGDOMAIN_CMDID);
}

int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
				const struct wmi_channel_arg *arg)
{
	struct wmi_set_channel_cmd *cmd;
	struct sk_buff *skb;

	if (arg->passive)
		return -EINVAL;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_set_channel_cmd *)skb->data;
	cmd->chan.mhz = __cpu_to_le32(arg->freq);
	cmd->chan.band_center_freq1 = __cpu_to_le32(arg->freq);
	cmd->chan.mode = arg->mode;
	cmd->chan.min_power = arg->min_power;
	cmd->chan.max_power = arg->max_power;
	cmd->chan.reg_power = arg->max_reg_power;
	cmd->chan.reg_classid = arg->reg_class_id;
	cmd->chan.antenna_max = arg->max_antenna_gain;

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi set channel mode %d freq %d\n",
		   arg->mode, arg->freq);

	return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_CHANNEL_CMDID);
}

int ath10k_wmi_pdev_suspend_target(struct ath10k *ar)
{
	struct wmi_pdev_suspend_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
	cmd->suspend_opt = __cpu_to_le32(WMI_PDEV_SUSPEND);

	return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SUSPEND_CMDID);
}

int ath10k_wmi_pdev_resume_target(struct ath10k *ar)
{
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(0);
	if (skb == NULL)
		return -ENOMEM;

	return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_RESUME_CMDID);
}

int ath10k_wmi_pdev_set_param(struct ath10k *ar, enum wmi_pdev_param id,
			      u32 value)
{
	struct wmi_pdev_set_param_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
	cmd->param_id = __cpu_to_le32(id);
	cmd->param_value = __cpu_to_le32(value);

	ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n",
		   id, value);
	return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_PARAM_CMDID);
}

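/*
 * WMI_INIT_CMDID is the first command sent after the service ready
 * event: it hands the firmware its resource budget (vdevs, peers,
 * tids, ...). Host memory chunks requested via num_mem_reqs are not
 * provided here (see the warning in the service ready handler above).
 */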
int ath10k_wmi_cmd_init(struct ath10k *ar)
{
	struct wmi_init_cmd *cmd;
	struct sk_buff *buf;
	struct wmi_resource_config config = {};
	u32 val;

	config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS);
	config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS + TARGET_NUM_VDEVS);
	config.num_offload_peers = __cpu_to_le32(TARGET_NUM_OFFLOAD_PEERS);

	config.num_offload_reorder_bufs =
		__cpu_to_le32(TARGET_NUM_OFFLOAD_REORDER_BUFS);

	config.num_peer_keys = __cpu_to_le32(TARGET_NUM_PEER_KEYS);
	config.num_tids = __cpu_to_le32(TARGET_NUM_TIDS);
	config.ast_skid_limit = __cpu_to_le32(TARGET_AST_SKID_LIMIT);
	config.tx_chain_mask = __cpu_to_le32(TARGET_TX_CHAIN_MASK);
	config.rx_chain_mask = __cpu_to_le32(TARGET_RX_CHAIN_MASK);
	config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_be = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_RX_TIMEOUT_HI_PRI);
	config.rx_decap_mode = __cpu_to_le32(TARGET_RX_DECAP_MODE);

	config.scan_max_pending_reqs =
		__cpu_to_le32(TARGET_SCAN_MAX_PENDING_REQS);

	config.bmiss_offload_max_vdev =
		__cpu_to_le32(TARGET_BMISS_OFFLOAD_MAX_VDEV);

	config.roam_offload_max_vdev =
		__cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_VDEV);

	config.roam_offload_max_ap_profiles =
		__cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES);

	config.num_mcast_groups = __cpu_to_le32(TARGET_NUM_MCAST_GROUPS);
	config.num_mcast_table_elems =
		__cpu_to_le32(TARGET_NUM_MCAST_TABLE_ELEMS);

	config.mcast2ucast_mode = __cpu_to_le32(TARGET_MCAST2UCAST_MODE);
	config.tx_dbg_log_size = __cpu_to_le32(TARGET_TX_DBG_LOG_SIZE);
	config.num_wds_entries = __cpu_to_le32(TARGET_NUM_WDS_ENTRIES);
	config.dma_burst_size = __cpu_to_le32(TARGET_DMA_BURST_SIZE);
	config.mac_aggr_delim = __cpu_to_le32(TARGET_MAC_AGGR_DELIM);

	val = TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
	config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);

	config.vow_config = __cpu_to_le32(TARGET_VOW_CONFIG);

	config.gtk_offload_max_vdev =
		__cpu_to_le32(TARGET_GTK_OFFLOAD_MAX_VDEV);

	config.num_msdu_desc = __cpu_to_le32(TARGET_NUM_MSDU_DESC);
	config.max_frag_entries = __cpu_to_le32(TARGET_MAX_FRAG_ENTRIES);

	buf = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!buf)
		return -ENOMEM;

	cmd = (struct wmi_init_cmd *)buf->data;
	cmd->num_host_mem_chunks = 0;
	memcpy(&cmd->resource_config, &config, sizeof(config));

	ath10k_dbg(ATH10K_DBG_WMI, "wmi init\n");
	return ath10k_wmi_cmd_send(ar, buf, WMI_INIT_CMDID);
}

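/*
 * The start scan command is variable length: a fixed wmi_start_scan_cmd
 * followed by optional TLVs in this order - channel list, SSID list,
 * BSSID list, IE data. ath10k_wmi_start_scan_calc_len() sizes the
 * buffer and ath10k_wmi_start_scan() fills it; the two must agree or
 * the final off != skb->len check fails.
 */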
static int ath10k_wmi_start_scan_calc_len(const struct wmi_start_scan_arg *arg)
{
	int len;

	len = sizeof(struct wmi_start_scan_cmd);

	if (arg->ie_len) {
		if (!arg->ie)
			return -EINVAL;
		if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN)
			return -EINVAL;

		len += sizeof(struct wmi_ie_data);
		len += roundup(arg->ie_len, 4);
	}

	if (arg->n_channels) {
		if (!arg->channels)
			return -EINVAL;
		if (arg->n_channels > ARRAY_SIZE(arg->channels))
			return -EINVAL;

		len += sizeof(struct wmi_chan_list);
		len += sizeof(__le32) * arg->n_channels;
	}

	if (arg->n_ssids) {
		if (!arg->ssids)
			return -EINVAL;
		if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID)
			return -EINVAL;

		len += sizeof(struct wmi_ssid_list);
		len += sizeof(struct wmi_ssid) * arg->n_ssids;
	}

	if (arg->n_bssids) {
		if (!arg->bssids)
			return -EINVAL;
		if (arg->n_bssids > WLAN_SCAN_PARAMS_MAX_BSSID)
			return -EINVAL;

		len += sizeof(struct wmi_bssid_list);
		len += sizeof(struct wmi_mac_addr) * arg->n_bssids;
	}

	return len;
}

int ath10k_wmi_start_scan(struct ath10k *ar,
			  const struct wmi_start_scan_arg *arg)
{
	struct wmi_start_scan_cmd *cmd;
	struct sk_buff *skb;
	struct wmi_ie_data *ie;
	struct wmi_chan_list *channels;
	struct wmi_ssid_list *ssids;
	struct wmi_bssid_list *bssids;
	u32 scan_id;
	u32 scan_req_id;
	int off;
	int len;
	int i;

	len = ath10k_wmi_start_scan_calc_len(arg);
	if (len < 0)
		return len; /* len contains error code here */

	skb = ath10k_wmi_alloc_skb(len);
	if (!skb)
		return -ENOMEM;

	scan_id = WMI_HOST_SCAN_REQ_ID_PREFIX;
	scan_id |= arg->scan_id;

	scan_req_id = WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
	scan_req_id |= arg->scan_req_id;

	cmd = (struct wmi_start_scan_cmd *)skb->data;
	cmd->scan_id = __cpu_to_le32(scan_id);
	cmd->scan_req_id = __cpu_to_le32(scan_req_id);
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->scan_priority = __cpu_to_le32(arg->scan_priority);
	cmd->notify_scan_events = __cpu_to_le32(arg->notify_scan_events);
	cmd->dwell_time_active = __cpu_to_le32(arg->dwell_time_active);
	cmd->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive);
	cmd->min_rest_time = __cpu_to_le32(arg->min_rest_time);
	cmd->max_rest_time = __cpu_to_le32(arg->max_rest_time);
	cmd->repeat_probe_time = __cpu_to_le32(arg->repeat_probe_time);
	cmd->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time);
	cmd->idle_time = __cpu_to_le32(arg->idle_time);
	cmd->max_scan_time = __cpu_to_le32(arg->max_scan_time);
	cmd->probe_delay = __cpu_to_le32(arg->probe_delay);
	cmd->scan_ctrl_flags = __cpu_to_le32(arg->scan_ctrl_flags);

	/* TLV list starts after fields included in the struct */
	off = sizeof(*cmd);

	if (arg->n_channels) {
		channels = (void *)skb->data + off;
		channels->tag = __cpu_to_le32(WMI_CHAN_LIST_TAG);
		channels->num_chan = __cpu_to_le32(arg->n_channels);

		for (i = 0; i < arg->n_channels; i++)
			channels->channel_list[i] =
				__cpu_to_le32(arg->channels[i]);

		off += sizeof(*channels);
		off += sizeof(__le32) * arg->n_channels;
	}

	if (arg->n_ssids) {
		ssids = (void *)skb->data + off;
		ssids->tag = __cpu_to_le32(WMI_SSID_LIST_TAG);
		ssids->num_ssids = __cpu_to_le32(arg->n_ssids);

		for (i = 0; i < arg->n_ssids; i++) {
			ssids->ssids[i].ssid_len =
				__cpu_to_le32(arg->ssids[i].len);
			memcpy(&ssids->ssids[i].ssid,
			       arg->ssids[i].ssid,
			       arg->ssids[i].len);
		}

		off += sizeof(*ssids);
		off += sizeof(struct wmi_ssid) * arg->n_ssids;
	}

	if (arg->n_bssids) {
		bssids = (void *)skb->data + off;
		bssids->tag = __cpu_to_le32(WMI_BSSID_LIST_TAG);
		bssids->num_bssid = __cpu_to_le32(arg->n_bssids);

		for (i = 0; i < arg->n_bssids; i++)
			memcpy(&bssids->bssid_list[i],
			       arg->bssids[i].bssid,
			       ETH_ALEN);

		off += sizeof(*bssids);
		off += sizeof(struct wmi_mac_addr) * arg->n_bssids;
	}

	if (arg->ie_len) {
		ie = (void *)skb->data + off;
		ie->tag = __cpu_to_le32(WMI_IE_TAG);
		ie->ie_len = __cpu_to_le32(arg->ie_len);
		memcpy(ie->ie_data, arg->ie, arg->ie_len);

		off += sizeof(*ie);
		off += roundup(arg->ie_len, 4);
	}

	if (off != skb->len) {
		dev_kfree_skb(skb);
		return -EINVAL;
	}

	ath10k_dbg(ATH10K_DBG_WMI, "wmi start scan\n");
	return ath10k_wmi_cmd_send(ar, skb, WMI_START_SCAN_CMDID);
}

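/*
 * Illustrative use of ath10k_wmi_start_scan() above together with the
 * defaults helper below (a sketch of the expected calling pattern;
 * field values here are made up):
 *
 *	struct wmi_start_scan_arg arg = {};
 *
 *	ath10k_wmi_start_scan_init(ar, &arg);
 *	arg.vdev_id = arvif->vdev_id;
 *	arg.scan_id |= 1;
 *	ret = ath10k_wmi_start_scan(ar, &arg);
 */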
void ath10k_wmi_start_scan_init(struct ath10k *ar,
				struct wmi_start_scan_arg *arg)
{
	/* setup commonly used values */
	arg->scan_req_id = 1;
	arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
	arg->dwell_time_active = 50;
	arg->dwell_time_passive = 150;
	arg->min_rest_time = 50;
	arg->max_rest_time = 500;
	arg->repeat_probe_time = 0;
	arg->probe_spacing_time = 0;
	arg->idle_time = 0;
	arg->max_scan_time = 5000;
	arg->probe_delay = 5;
	arg->notify_scan_events = WMI_SCAN_EVENT_STARTED
		| WMI_SCAN_EVENT_COMPLETED
		| WMI_SCAN_EVENT_BSS_CHANNEL
		| WMI_SCAN_EVENT_FOREIGN_CHANNEL
		| WMI_SCAN_EVENT_DEQUEUED;
	arg->scan_ctrl_flags |= WMI_SCAN_ADD_OFDM_RATES;
	arg->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT;
	arg->n_bssids = 1;
	arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF";
}

int ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
{
	struct wmi_stop_scan_cmd *cmd;
	struct sk_buff *skb;
	u32 scan_id;
	u32 req_id;

	if (arg->req_id > 0xFFF)
		return -EINVAL;
	if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
		return -EINVAL;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	scan_id = arg->u.scan_id;
	scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;

	req_id = arg->req_id;
	req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;

	cmd = (struct wmi_stop_scan_cmd *)skb->data;
	cmd->req_type = __cpu_to_le32(arg->req_type);
	cmd->vdev_id = __cpu_to_le32(arg->u.vdev_id);
	cmd->scan_id = __cpu_to_le32(scan_id);
	cmd->scan_req_id = __cpu_to_le32(req_id);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n",
		   arg->req_id, arg->req_type, arg->u.scan_id);
	return ath10k_wmi_cmd_send(ar, skb, WMI_STOP_SCAN_CMDID);
}

int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
			   enum wmi_vdev_type type,
			   enum wmi_vdev_subtype subtype,
			   const u8 macaddr[ETH_ALEN])
{
	struct wmi_vdev_create_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_create_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->vdev_type = __cpu_to_le32(type);
	cmd->vdev_subtype = __cpu_to_le32(subtype);
	memcpy(cmd->vdev_macaddr.addr, macaddr, ETH_ALEN);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "WMI vdev create: id %d type %d subtype %d macaddr %pM\n",
		   vdev_id, type, subtype, macaddr);

	return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_CREATE_CMDID);
}

int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
{
	struct wmi_vdev_delete_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_delete_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "WMI vdev delete id %d\n", vdev_id);

	return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_DELETE_CMDID);
}

static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
					 const struct wmi_vdev_start_request_arg *arg,
					 enum wmi_cmd_id cmd_id)
{
	struct wmi_vdev_start_request_cmd *cmd;
	struct sk_buff *skb;
	const char *cmdname;
	u32 flags = 0;

	if (cmd_id != WMI_VDEV_START_REQUEST_CMDID &&
	    cmd_id != WMI_VDEV_RESTART_REQUEST_CMDID)
		return -EINVAL;
	if (WARN_ON(arg->ssid && arg->ssid_len == 0))
		return -EINVAL;
	if (WARN_ON(arg->hidden_ssid && !arg->ssid))
		return -EINVAL;
	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
		return -EINVAL;

	if (cmd_id == WMI_VDEV_START_REQUEST_CMDID)
		cmdname = "start";
	else if (cmd_id == WMI_VDEV_RESTART_REQUEST_CMDID)
		cmdname = "restart";
	else
		return -EINVAL; /* should not happen; cmd_id was validated above */

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	if (arg->hidden_ssid)
		flags |= WMI_VDEV_START_HIDDEN_SSID;
	if (arg->pmf_enabled)
		flags |= WMI_VDEV_START_PMF_ENABLED;

	cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->disable_hw_ack = __cpu_to_le32(arg->disable_hw_ack);
	cmd->beacon_interval = __cpu_to_le32(arg->bcn_intval);
	cmd->dtim_period = __cpu_to_le32(arg->dtim_period);
	cmd->flags = __cpu_to_le32(flags);
	cmd->bcn_tx_rate = __cpu_to_le32(arg->bcn_tx_rate);
	cmd->bcn_tx_power = __cpu_to_le32(arg->bcn_tx_power);

	if (arg->ssid) {
		cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len);
		memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
	}

	cmd->chan.mhz = __cpu_to_le32(arg->channel.freq);

	cmd->chan.band_center_freq1 =
		__cpu_to_le32(arg->channel.band_center_freq1);

	cmd->chan.mode = arg->channel.mode;
	cmd->chan.min_power = arg->channel.min_power;
	cmd->chan.max_power = arg->channel.max_power;
	cmd->chan.reg_power = arg->channel.max_reg_power;
	cmd->chan.reg_classid = arg->channel.reg_class_id;
	cmd->chan.antenna_max = arg->channel.max_antenna_gain;

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi vdev %s id 0x%x freq %d, mode %d, ch_flags: 0x%0X, max_power: %d\n",
		   cmdname, arg->vdev_id, arg->channel.freq,
		   arg->channel.mode, flags, arg->channel.max_power);

	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

int ath10k_wmi_vdev_start(struct ath10k *ar,
			  const struct wmi_vdev_start_request_arg *arg)
{
	return ath10k_wmi_vdev_start_restart(ar, arg,
					     WMI_VDEV_START_REQUEST_CMDID);
}

int ath10k_wmi_vdev_restart(struct ath10k *ar,
			    const struct wmi_vdev_start_request_arg *arg)
{
	return ath10k_wmi_vdev_start_restart(ar, arg,
					     WMI_VDEV_RESTART_REQUEST_CMDID);
}

int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
{
	struct wmi_vdev_stop_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_stop_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);

	ath10k_dbg(ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id);

	return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_STOP_CMDID);
}

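/*
 * Expected vdev lifecycle around these helpers (a sketch; the actual
 * transitions are sequenced by the mac layer code): create -> start ->
 * up when bringing a vdev into service, and down -> stop -> delete
 * when tearing it down.
 */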
int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
{
	struct wmi_vdev_up_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_up_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->vdev_assoc_id = __cpu_to_le32(aid);
	memcpy(&cmd->vdev_bssid.addr, bssid, ETH_ALEN);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
		   vdev_id, aid, bssid);

	return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_UP_CMDID);
}

int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
{
	struct wmi_vdev_down_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_down_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi mgmt vdev down id 0x%x\n", vdev_id);

	return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_DOWN_CMDID);
}

int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
			      enum wmi_vdev_param param_id, u32 param_value)
{
	struct wmi_vdev_set_param_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->param_id = __cpu_to_le32(param_id);
	cmd->param_value = __cpu_to_le32(param_value);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi vdev id 0x%x set param %d value %d\n",
		   vdev_id, param_id, param_value);

	return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_SET_PARAM_CMDID);
}

int ath10k_wmi_vdev_install_key(struct ath10k *ar,
				const struct wmi_vdev_install_key_arg *arg)
{
	struct wmi_vdev_install_key_cmd *cmd;
	struct sk_buff *skb;

	if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL)
		return -EINVAL;
	if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL)
		return -EINVAL;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd) + arg->key_len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->key_idx = __cpu_to_le32(arg->key_idx);
	cmd->key_flags = __cpu_to_le32(arg->key_flags);
	cmd->key_cipher = __cpu_to_le32(arg->key_cipher);
	cmd->key_len = __cpu_to_le32(arg->key_len);
	cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len);
	cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len);

	if (arg->macaddr)
		memcpy(cmd->peer_macaddr.addr, arg->macaddr, ETH_ALEN);
	if (arg->key_data)
		memcpy(cmd->key_data, arg->key_data, arg->key_len);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi vdev install key idx %d cipher %d len %d\n",
		   arg->key_idx, arg->key_cipher, arg->key_len);
	return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_INSTALL_KEY_CMDID);
}

int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
			   const u8 peer_addr[ETH_ALEN])
{
	struct wmi_peer_create_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_create_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi peer create vdev_id %d peer_addr %pM\n",
		   vdev_id, peer_addr);
	return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_CREATE_CMDID);
}

int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
			   const u8 peer_addr[ETH_ALEN])
{
	struct wmi_peer_delete_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_delete_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi peer delete vdev_id %d peer_addr %pM\n",
		   vdev_id, peer_addr);
	return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_DELETE_CMDID);
}

int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
			  const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
{
	struct wmi_peer_flush_tids_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
	memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n",
		   vdev_id, peer_addr, tid_bitmap);
	return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_FLUSH_TIDS_CMDID);
}

int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,
			      const u8 *peer_addr, enum wmi_peer_param param_id,
			      u32 param_value)
{
	struct wmi_peer_set_param_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_set_param_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->param_id = __cpu_to_le32(param_id);
	cmd->param_value = __cpu_to_le32(param_value);
	memcpy(&cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi vdev %d peer 0x%pM set param %d value %d\n",
		   vdev_id, peer_addr, param_id, param_value);

	return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_SET_PARAM_CMDID);
}

int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
			  enum wmi_sta_ps_mode psmode)
{
	struct wmi_sta_powersave_mode_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_sta_powersave_mode_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->sta_ps_mode = __cpu_to_le32(psmode);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi set powersave id 0x%x mode %d\n",
		   vdev_id, psmode);

	return ath10k_wmi_cmd_send(ar, skb, WMI_STA_POWERSAVE_MODE_CMDID);
}

int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
				enum wmi_sta_powersave_param param_id,
				u32 value)
{
	struct wmi_sta_powersave_param_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->param_id = __cpu_to_le32(param_id);
	cmd->param_value = __cpu_to_le32(value);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi sta ps param vdev_id 0x%x param %d value %d\n",
		   vdev_id, param_id, value);
	return ath10k_wmi_cmd_send(ar, skb, WMI_STA_POWERSAVE_PARAM_CMDID);
}

int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
			       enum wmi_ap_ps_peer_param param_id, u32 value)
{
	struct wmi_ap_ps_peer_cmd *cmd;
	struct sk_buff *skb;

	if (!mac)
		return -EINVAL;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->param_id = __cpu_to_le32(param_id);
	cmd->param_value = __cpu_to_le32(value);
	memcpy(&cmd->peer_macaddr, mac, ETH_ALEN);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi ap ps param vdev_id 0x%x param %d value %d mac_addr %pM\n",
		   vdev_id, param_id, value, mac);

	return ath10k_wmi_cmd_send(ar, skb, WMI_AP_PS_PEER_PARAM_CMDID);
}

int ath10k_wmi_scan_chan_list(struct ath10k *ar,
			      const struct wmi_scan_chan_list_arg *arg)
{
	struct wmi_scan_chan_list_cmd *cmd;
	struct sk_buff *skb;
	struct wmi_channel_arg *ch;
	struct wmi_channel *ci;
	int len;
	int i;

	len = sizeof(*cmd) + arg->n_channels * sizeof(struct wmi_channel);

	skb = ath10k_wmi_alloc_skb(len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
	cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);

	for (i = 0; i < arg->n_channels; i++) {
		u32 flags = 0;

		ch = &arg->channels[i];
		ci = &cmd->chan_info[i];

		if (ch->passive)
			flags |= WMI_CHAN_FLAG_PASSIVE;
		if (ch->allow_ibss)
			flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED;
		if (ch->allow_ht)
			flags |= WMI_CHAN_FLAG_ALLOW_HT;
		if (ch->allow_vht)
			flags |= WMI_CHAN_FLAG_ALLOW_VHT;
		if (ch->ht40plus)
			flags |= WMI_CHAN_FLAG_HT40_PLUS;

		ci->mhz = __cpu_to_le32(ch->freq);
		ci->band_center_freq1 = __cpu_to_le32(ch->freq);
		ci->band_center_freq2 = 0;
		ci->min_power = ch->min_power;
		ci->max_power = ch->max_power;
		ci->reg_power = ch->max_reg_power;
		ci->antenna_max = ch->max_antenna_gain;

		/* mode & flags share storage */
		ci->mode = ch->mode;
		ci->flags |= __cpu_to_le32(flags);
	}

	return ath10k_wmi_cmd_send(ar, skb, WMI_SCAN_CHAN_LIST_CMDID);
}
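/*
 * Illustrative sketch, assuming a caller that translates channel
 * definitions into wmi_channel_arg entries before pushing the list to
 * firmware. Only the wmi_scan_chan_list_arg fields used above are real;
 * the surrounding code is hypothetical:
 *
 *	struct wmi_scan_chan_list_arg arg = {};
 *	struct wmi_channel_arg *ch;
 *
 *	arg.n_channels = 1;
 *	arg.channels = kcalloc(arg.n_channels, sizeof(*ch), GFP_KERNEL);
 *	if (!arg.channels)
 *		return -ENOMEM;
 *
 *	ch = &arg.channels[0];
 *	ch->freq = 2412;
 *	ch->passive = true;
 *	ch->allow_ht = true;
 *
 *	ret = ath10k_wmi_scan_chan_list(ar, &arg);
 *	kfree(arg.channels);
 */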
int ath10k_wmi_peer_assoc(struct ath10k *ar,
			  const struct wmi_peer_assoc_complete_arg *arg)
{
	struct wmi_peer_assoc_complete_cmd *cmd;
	struct sk_buff *skb;

	if (arg->peer_mpdu_density > 16)
		return -EINVAL;
	if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
		return -EINVAL;
	if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
		return -EINVAL;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_assoc_complete_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->peer_new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
	cmd->peer_associd = __cpu_to_le32(arg->peer_aid);
	cmd->peer_flags = __cpu_to_le32(arg->peer_flags);
	cmd->peer_caps = __cpu_to_le32(arg->peer_caps);
	cmd->peer_listen_intval = __cpu_to_le32(arg->peer_listen_intval);
	cmd->peer_ht_caps = __cpu_to_le32(arg->peer_ht_caps);
	cmd->peer_max_mpdu = __cpu_to_le32(arg->peer_max_mpdu);
	cmd->peer_mpdu_density = __cpu_to_le32(arg->peer_mpdu_density);
	cmd->peer_rate_caps = __cpu_to_le32(arg->peer_rate_caps);
	cmd->peer_nss = __cpu_to_le32(arg->peer_num_spatial_streams);
	cmd->peer_vht_caps = __cpu_to_le32(arg->peer_vht_caps);
	cmd->peer_phymode = __cpu_to_le32(arg->peer_phymode);

	memcpy(cmd->peer_macaddr.addr, arg->addr, ETH_ALEN);

	cmd->peer_legacy_rates.num_rates =
		__cpu_to_le32(arg->peer_legacy_rates.num_rates);
	memcpy(cmd->peer_legacy_rates.rates, arg->peer_legacy_rates.rates,
	       arg->peer_legacy_rates.num_rates);

	cmd->peer_ht_rates.num_rates =
		__cpu_to_le32(arg->peer_ht_rates.num_rates);
	memcpy(cmd->peer_ht_rates.rates, arg->peer_ht_rates.rates,
	       arg->peer_ht_rates.num_rates);

	cmd->peer_vht_rates.rx_max_rate =
		__cpu_to_le32(arg->peer_vht_rates.rx_max_rate);
	cmd->peer_vht_rates.rx_mcs_set =
		__cpu_to_le32(arg->peer_vht_rates.rx_mcs_set);
	cmd->peer_vht_rates.tx_max_rate =
		__cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
	cmd->peer_vht_rates.tx_mcs_set =
		__cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi peer assoc vdev %d addr %pM\n",
		   arg->vdev_id, arg->addr);

	return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_ASSOC_CMDID);
}

int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg)
{
	struct wmi_bcn_tx_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd) + arg->bcn_len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_bcn_tx_cmd *)skb->data;
	cmd->hdr.vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->hdr.tx_rate = __cpu_to_le32(arg->tx_rate);
	cmd->hdr.tx_power = __cpu_to_le32(arg->tx_power);
	cmd->hdr.bcn_len = __cpu_to_le32(arg->bcn_len);
	memcpy(cmd->bcn, arg->bcn, arg->bcn_len);

	return ath10k_wmi_cmd_send(ar, skb, WMI_BCN_TX_CMDID);
}

static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params,
					  const struct wmi_wmm_params_arg *arg)
{
	params->cwmin = __cpu_to_le32(arg->cwmin);
	params->cwmax = __cpu_to_le32(arg->cwmax);
	params->aifs = __cpu_to_le32(arg->aifs);
	params->txop = __cpu_to_le32(arg->txop);
	params->acm = __cpu_to_le32(arg->acm);
	params->no_ack = __cpu_to_le32(arg->no_ack);
}

int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
				   const struct wmi_pdev_set_wmm_params_arg *arg)
{
	struct wmi_pdev_set_wmm_params *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_wmm_params *)skb->data;
	ath10k_wmi_pdev_set_wmm_param(&cmd->ac_be, &arg->ac_be);
	ath10k_wmi_pdev_set_wmm_param(&cmd->ac_bk, &arg->ac_bk);
	ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vi, &arg->ac_vi);
	ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);

	ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set wmm params\n");
	return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_WMM_PARAMS_CMDID);
}
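/*
 * Illustrative sketch (hypothetical caller, not from this file): the
 * four access categories above map naturally onto mac80211's WMM
 * parameters, so a conf_tx handler could fill one wmi_wmm_params_arg
 * per AC like so, where params is a struct ieee80211_tx_queue_params
 * and any unit conversion for txop is deliberately omitted:
 *
 *	struct wmi_pdev_set_wmm_params_arg arg = {};
 *	struct wmi_wmm_params_arg *p = &arg.ac_be;
 *
 *	p->cwmin = params->cw_min;
 *	p->cwmax = params->cw_max;
 *	p->aifs = params->aifs;
 *	p->txop = params->txop;
 *	p->acm = params->acm;
 *	p->no_ack = 0;
 *
 *	ret = ath10k_wmi_pdev_set_wmm_params(ar, &arg);
 */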
int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
{
	struct wmi_request_stats_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_request_stats_cmd *)skb->data;
	cmd->stats_id = __cpu_to_le32(stats_id);

	ath10k_dbg(ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id);
	return ath10k_wmi_cmd_send(ar, skb, WMI_REQUEST_STATS_CMDID);
}

int ath10k_wmi_force_fw_hang(struct ath10k *ar,
			     enum wmi_force_fw_hang_type type, u32 delay_ms)
{
	struct wmi_force_fw_hang_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
	cmd->type = __cpu_to_le32(type);
	cmd->delay_ms = __cpu_to_le32(delay_ms);

	ath10k_dbg(ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n",
		   type, delay_ms);
	return ath10k_wmi_cmd_send(ar, skb, WMI_FORCE_FW_HANG_CMDID);
}
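/*
 * Illustrative note: ath10k_wmi_force_fw_hang() is a test hook for
 * exercising the driver's firmware-crash recovery path (e.g. from a
 * debugfs trigger), not for normal operation. A minimal sketch,
 * assuming the enum value from wmi.h and taking delay_ms == 0 to mean
 * "act immediately":
 *
 *	ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0);
 *	if (ret)
 *		ath10k_warn("failed to force fw hang (%d)\n", ret);
 */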