1 /* 2 * Copyright (c) 2005-2011 Atheros Communications Inc. 3 * Copyright (c) 2011-2014 Qualcomm Atheros, Inc. 4 * 5 * Permission to use, copy, modify, and/or distribute this software for any 6 * purpose with or without fee is hereby granted, provided that the above 7 * copyright notice and this permission notice appear in all copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 16 */ 17 #include "core.h" 18 #include "debug.h" 19 #include "mac.h" 20 #include "hw.h" 21 #include "mac.h" 22 #include "wmi.h" 23 #include "wmi-ops.h" 24 #include "wmi-tlv.h" 25 #include "p2p.h" 26 27 /***************/ 28 /* TLV helpers */ 29 /**************/ 30 31 struct wmi_tlv_policy { 32 size_t min_len; 33 }; 34 35 static const struct wmi_tlv_policy wmi_tlv_policies[] = { 36 [WMI_TLV_TAG_ARRAY_BYTE] 37 = { .min_len = 0 }, 38 [WMI_TLV_TAG_ARRAY_UINT32] 39 = { .min_len = 0 }, 40 [WMI_TLV_TAG_STRUCT_SCAN_EVENT] 41 = { .min_len = sizeof(struct wmi_scan_event) }, 42 [WMI_TLV_TAG_STRUCT_MGMT_RX_HDR] 43 = { .min_len = sizeof(struct wmi_tlv_mgmt_rx_ev) }, 44 [WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT] 45 = { .min_len = sizeof(struct wmi_chan_info_event) }, 46 [WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT] 47 = { .min_len = sizeof(struct wmi_vdev_start_response_event) }, 48 [WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT] 49 = { .min_len = sizeof(struct wmi_peer_sta_kickout_event) }, 50 [WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT] 51 = { .min_len = sizeof(struct wmi_host_swba_event) }, 52 [WMI_TLV_TAG_STRUCT_TIM_INFO] 53 = { .min_len = sizeof(struct wmi_tim_info) }, 54 [WMI_TLV_TAG_STRUCT_P2P_NOA_INFO] 55 = { .min_len = sizeof(struct wmi_p2p_noa_info) }, 56 [WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT] 57 = { .min_len = sizeof(struct wmi_tlv_svc_rdy_ev) }, 58 [WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES] 59 = { .min_len = sizeof(struct hal_reg_capabilities) }, 60 [WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ] 61 = { .min_len = sizeof(struct wlan_host_mem_req) }, 62 [WMI_TLV_TAG_STRUCT_READY_EVENT] 63 = { .min_len = sizeof(struct wmi_tlv_rdy_ev) }, 64 [WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT] 65 = { .min_len = sizeof(struct wmi_tlv_bcn_tx_status_ev) }, 66 [WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT] 67 = { .min_len = sizeof(struct wmi_tlv_diag_data_ev) }, 68 [WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT] 69 = { .min_len = sizeof(struct wmi_tlv_p2p_noa_ev) }, 70 [WMI_TLV_TAG_STRUCT_ROAM_EVENT] 71 = { .min_len = sizeof(struct wmi_tlv_roam_ev) }, 72 [WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO] 73 = { .min_len = sizeof(struct wmi_tlv_wow_event_info) }, 74 [WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT] 75 = { .min_len = sizeof(struct wmi_tlv_tx_pause_ev) }, 76 }; 77 78 static int 79 ath10k_wmi_tlv_iter(struct ath10k *ar, const void *ptr, size_t len, 80 int (*iter)(struct ath10k *ar, u16 tag, u16 len, 81 const void *ptr, void *data), 82 void *data) 83 { 84 const void *begin = ptr; 85 const struct wmi_tlv *tlv; 86 u16 tlv_tag, tlv_len; 87 int ret; 88 89 while (len > 0) { 90 if (len < sizeof(*tlv)) { 91 ath10k_dbg(ar, ATH10K_DBG_WMI, 92 "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n", 93 ptr - 
begin, len, sizeof(*tlv)); 94 return -EINVAL; 95 } 96 97 tlv = ptr; 98 tlv_tag = __le16_to_cpu(tlv->tag); 99 tlv_len = __le16_to_cpu(tlv->len); 100 ptr += sizeof(*tlv); 101 len -= sizeof(*tlv); 102 103 if (tlv_len > len) { 104 ath10k_dbg(ar, ATH10K_DBG_WMI, 105 "wmi tlv parse failure of tag %hhu at byte %zd (%zu bytes left, %hhu expected)\n", 106 tlv_tag, ptr - begin, len, tlv_len); 107 return -EINVAL; 108 } 109 110 if (tlv_tag < ARRAY_SIZE(wmi_tlv_policies) && 111 wmi_tlv_policies[tlv_tag].min_len && 112 wmi_tlv_policies[tlv_tag].min_len > tlv_len) { 113 ath10k_dbg(ar, ATH10K_DBG_WMI, 114 "wmi tlv parse failure of tag %hhu at byte %zd (%hhu bytes is less than min length %zu)\n", 115 tlv_tag, ptr - begin, tlv_len, 116 wmi_tlv_policies[tlv_tag].min_len); 117 return -EINVAL; 118 } 119 120 ret = iter(ar, tlv_tag, tlv_len, ptr, data); 121 if (ret) 122 return ret; 123 124 ptr += tlv_len; 125 len -= tlv_len; 126 } 127 128 return 0; 129 } 130 131 static int ath10k_wmi_tlv_iter_parse(struct ath10k *ar, u16 tag, u16 len, 132 const void *ptr, void *data) 133 { 134 const void **tb = data; 135 136 if (tag < WMI_TLV_TAG_MAX) 137 tb[tag] = ptr; 138 139 return 0; 140 } 141 142 static int ath10k_wmi_tlv_parse(struct ath10k *ar, const void **tb, 143 const void *ptr, size_t len) 144 { 145 return ath10k_wmi_tlv_iter(ar, ptr, len, ath10k_wmi_tlv_iter_parse, 146 (void *)tb); 147 } 148 149 static const void ** 150 ath10k_wmi_tlv_parse_alloc(struct ath10k *ar, const void *ptr, 151 size_t len, gfp_t gfp) 152 { 153 const void **tb; 154 int ret; 155 156 tb = kzalloc(sizeof(*tb) * WMI_TLV_TAG_MAX, gfp); 157 if (!tb) 158 return ERR_PTR(-ENOMEM); 159 160 ret = ath10k_wmi_tlv_parse(ar, tb, ptr, len); 161 if (ret) { 162 kfree(tb); 163 return ERR_PTR(ret); 164 } 165 166 return tb; 167 } 168 169 static u16 ath10k_wmi_tlv_len(const void *ptr) 170 { 171 return __le16_to_cpu((((const struct wmi_tlv *)ptr) - 1)->len); 172 } 173 174 /**************/ 175 /* TLV events */ 176 /**************/ 177 static int ath10k_wmi_tlv_event_bcn_tx_status(struct ath10k *ar, 178 struct sk_buff *skb) 179 { 180 const void **tb; 181 const struct wmi_tlv_bcn_tx_status_ev *ev; 182 struct ath10k_vif *arvif; 183 u32 vdev_id, tx_status; 184 int ret; 185 186 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); 187 if (IS_ERR(tb)) { 188 ret = PTR_ERR(tb); 189 ath10k_warn(ar, "failed to parse tlv: %d\n", ret); 190 return ret; 191 } 192 193 ev = tb[WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT]; 194 if (!ev) { 195 kfree(tb); 196 return -EPROTO; 197 } 198 199 tx_status = __le32_to_cpu(ev->tx_status); 200 vdev_id = __le32_to_cpu(ev->vdev_id); 201 202 switch (tx_status) { 203 case WMI_TLV_BCN_TX_STATUS_OK: 204 break; 205 case WMI_TLV_BCN_TX_STATUS_XRETRY: 206 case WMI_TLV_BCN_TX_STATUS_DROP: 207 case WMI_TLV_BCN_TX_STATUS_FILTERED: 208 /* FIXME: It's probably worth telling mac80211 to stop the 209 * interface as it is crippled. 
210 */ 211 ath10k_warn(ar, "received bcn tmpl tx status on vdev %i: %d", 212 vdev_id, tx_status); 213 break; 214 } 215 216 arvif = ath10k_get_arvif(ar, vdev_id); 217 if (arvif && arvif->is_up && arvif->vif->csa_active) 218 ieee80211_queue_work(ar->hw, &arvif->ap_csa_work); 219 220 kfree(tb); 221 return 0; 222 } 223 224 static int ath10k_wmi_tlv_event_diag_data(struct ath10k *ar, 225 struct sk_buff *skb) 226 { 227 const void **tb; 228 const struct wmi_tlv_diag_data_ev *ev; 229 const struct wmi_tlv_diag_item *item; 230 const void *data; 231 int ret, num_items, len; 232 233 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); 234 if (IS_ERR(tb)) { 235 ret = PTR_ERR(tb); 236 ath10k_warn(ar, "failed to parse tlv: %d\n", ret); 237 return ret; 238 } 239 240 ev = tb[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT]; 241 data = tb[WMI_TLV_TAG_ARRAY_BYTE]; 242 if (!ev || !data) { 243 kfree(tb); 244 return -EPROTO; 245 } 246 247 num_items = __le32_to_cpu(ev->num_items); 248 len = ath10k_wmi_tlv_len(data); 249 250 while (num_items--) { 251 if (len == 0) 252 break; 253 if (len < sizeof(*item)) { 254 ath10k_warn(ar, "failed to parse diag data: can't fit item header\n"); 255 break; 256 } 257 258 item = data; 259 260 if (len < sizeof(*item) + __le16_to_cpu(item->len)) { 261 ath10k_warn(ar, "failed to parse diag data: item is too long\n"); 262 break; 263 } 264 265 trace_ath10k_wmi_diag_container(ar, 266 item->type, 267 __le32_to_cpu(item->timestamp), 268 __le32_to_cpu(item->code), 269 __le16_to_cpu(item->len), 270 item->payload); 271 272 len -= sizeof(*item); 273 len -= roundup(__le16_to_cpu(item->len), 4); 274 275 data += sizeof(*item); 276 data += roundup(__le16_to_cpu(item->len), 4); 277 } 278 279 if (num_items != -1 || len != 0) 280 ath10k_warn(ar, "failed to parse diag data event: num_items %d len %d\n", 281 num_items, len); 282 283 kfree(tb); 284 return 0; 285 } 286 287 static int ath10k_wmi_tlv_event_diag(struct ath10k *ar, 288 struct sk_buff *skb) 289 { 290 const void **tb; 291 const void *data; 292 int ret, len; 293 294 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); 295 if (IS_ERR(tb)) { 296 ret = PTR_ERR(tb); 297 ath10k_warn(ar, "failed to parse tlv: %d\n", ret); 298 return ret; 299 } 300 301 data = tb[WMI_TLV_TAG_ARRAY_BYTE]; 302 if (!data) { 303 kfree(tb); 304 return -EPROTO; 305 } 306 len = ath10k_wmi_tlv_len(data); 307 308 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv diag event len %d\n", len); 309 trace_ath10k_wmi_diag(ar, data, len); 310 311 kfree(tb); 312 return 0; 313 } 314 315 static int ath10k_wmi_tlv_event_p2p_noa(struct ath10k *ar, 316 struct sk_buff *skb) 317 { 318 const void **tb; 319 const struct wmi_tlv_p2p_noa_ev *ev; 320 const struct wmi_p2p_noa_info *noa; 321 int ret, vdev_id; 322 323 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); 324 if (IS_ERR(tb)) { 325 ret = PTR_ERR(tb); 326 ath10k_warn(ar, "failed to parse tlv: %d\n", ret); 327 return ret; 328 } 329 330 ev = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT]; 331 noa = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_INFO]; 332 333 if (!ev || !noa) { 334 kfree(tb); 335 return -EPROTO; 336 } 337 338 vdev_id = __le32_to_cpu(ev->vdev_id); 339 340 ath10k_dbg(ar, ATH10K_DBG_WMI, 341 "wmi tlv p2p noa vdev_id %i descriptors %hhu\n", 342 vdev_id, noa->num_descriptors); 343 344 ath10k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa); 345 kfree(tb); 346 return 0; 347 } 348 349 static int ath10k_wmi_tlv_event_tx_pause(struct ath10k *ar, 350 struct sk_buff *skb) 351 { 352 const void **tb; 353 const struct 
wmi_tlv_tx_pause_ev *ev; 354 int ret, vdev_id; 355 u32 pause_id, action, vdev_map, peer_id, tid_map; 356 357 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); 358 if (IS_ERR(tb)) { 359 ret = PTR_ERR(tb); 360 ath10k_warn(ar, "failed to parse tlv: %d\n", ret); 361 return ret; 362 } 363 364 ev = tb[WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT]; 365 if (!ev) { 366 kfree(tb); 367 return -EPROTO; 368 } 369 370 pause_id = __le32_to_cpu(ev->pause_id); 371 action = __le32_to_cpu(ev->action); 372 vdev_map = __le32_to_cpu(ev->vdev_map); 373 peer_id = __le32_to_cpu(ev->peer_id); 374 tid_map = __le32_to_cpu(ev->tid_map); 375 376 ath10k_dbg(ar, ATH10K_DBG_WMI, 377 "wmi tlv tx pause pause_id %u action %u vdev_map 0x%08x peer_id %u tid_map 0x%08x\n", 378 pause_id, action, vdev_map, peer_id, tid_map); 379 380 switch (pause_id) { 381 case WMI_TLV_TX_PAUSE_ID_MCC: 382 case WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA: 383 case WMI_TLV_TX_PAUSE_ID_P2P_GO_PS: 384 case WMI_TLV_TX_PAUSE_ID_AP_PS: 385 case WMI_TLV_TX_PAUSE_ID_IBSS_PS: 386 for (vdev_id = 0; vdev_map; vdev_id++) { 387 if (!(vdev_map & BIT(vdev_id))) 388 continue; 389 390 vdev_map &= ~BIT(vdev_id); 391 ath10k_mac_handle_tx_pause_vdev(ar, vdev_id, pause_id, 392 action); 393 } 394 break; 395 case WMI_TLV_TX_PAUSE_ID_AP_PEER_PS: 396 case WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD: 397 case WMI_TLV_TX_PAUSE_ID_STA_ADD_BA: 398 case WMI_TLV_TX_PAUSE_ID_HOST: 399 ath10k_dbg(ar, ATH10K_DBG_MAC, 400 "mac ignoring unsupported tx pause id %d\n", 401 pause_id); 402 break; 403 default: 404 ath10k_dbg(ar, ATH10K_DBG_MAC, 405 "mac ignoring unknown tx pause vdev %d\n", 406 pause_id); 407 break; 408 } 409 410 kfree(tb); 411 return 0; 412 } 413 414 /***********/ 415 /* TLV ops */ 416 /***********/ 417 418 static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb) 419 { 420 struct wmi_cmd_hdr *cmd_hdr; 421 enum wmi_tlv_event_id id; 422 423 cmd_hdr = (struct wmi_cmd_hdr *)skb->data; 424 id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID); 425 426 if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL) 427 goto out; 428 429 trace_ath10k_wmi_event(ar, id, skb->data, skb->len); 430 431 switch (id) { 432 case WMI_TLV_MGMT_RX_EVENTID: 433 ath10k_wmi_event_mgmt_rx(ar, skb); 434 /* mgmt_rx() owns the skb now! 
*/ 435 return; 436 case WMI_TLV_SCAN_EVENTID: 437 ath10k_wmi_event_scan(ar, skb); 438 break; 439 case WMI_TLV_CHAN_INFO_EVENTID: 440 ath10k_wmi_event_chan_info(ar, skb); 441 break; 442 case WMI_TLV_ECHO_EVENTID: 443 ath10k_wmi_event_echo(ar, skb); 444 break; 445 case WMI_TLV_DEBUG_MESG_EVENTID: 446 ath10k_wmi_event_debug_mesg(ar, skb); 447 break; 448 case WMI_TLV_UPDATE_STATS_EVENTID: 449 ath10k_wmi_event_update_stats(ar, skb); 450 break; 451 case WMI_TLV_VDEV_START_RESP_EVENTID: 452 ath10k_wmi_event_vdev_start_resp(ar, skb); 453 break; 454 case WMI_TLV_VDEV_STOPPED_EVENTID: 455 ath10k_wmi_event_vdev_stopped(ar, skb); 456 break; 457 case WMI_TLV_PEER_STA_KICKOUT_EVENTID: 458 ath10k_wmi_event_peer_sta_kickout(ar, skb); 459 break; 460 case WMI_TLV_HOST_SWBA_EVENTID: 461 ath10k_wmi_event_host_swba(ar, skb); 462 break; 463 case WMI_TLV_TBTTOFFSET_UPDATE_EVENTID: 464 ath10k_wmi_event_tbttoffset_update(ar, skb); 465 break; 466 case WMI_TLV_PHYERR_EVENTID: 467 ath10k_wmi_event_phyerr(ar, skb); 468 break; 469 case WMI_TLV_ROAM_EVENTID: 470 ath10k_wmi_event_roam(ar, skb); 471 break; 472 case WMI_TLV_PROFILE_MATCH: 473 ath10k_wmi_event_profile_match(ar, skb); 474 break; 475 case WMI_TLV_DEBUG_PRINT_EVENTID: 476 ath10k_wmi_event_debug_print(ar, skb); 477 break; 478 case WMI_TLV_PDEV_QVIT_EVENTID: 479 ath10k_wmi_event_pdev_qvit(ar, skb); 480 break; 481 case WMI_TLV_WLAN_PROFILE_DATA_EVENTID: 482 ath10k_wmi_event_wlan_profile_data(ar, skb); 483 break; 484 case WMI_TLV_RTT_MEASUREMENT_REPORT_EVENTID: 485 ath10k_wmi_event_rtt_measurement_report(ar, skb); 486 break; 487 case WMI_TLV_TSF_MEASUREMENT_REPORT_EVENTID: 488 ath10k_wmi_event_tsf_measurement_report(ar, skb); 489 break; 490 case WMI_TLV_RTT_ERROR_REPORT_EVENTID: 491 ath10k_wmi_event_rtt_error_report(ar, skb); 492 break; 493 case WMI_TLV_WOW_WAKEUP_HOST_EVENTID: 494 ath10k_wmi_event_wow_wakeup_host(ar, skb); 495 break; 496 case WMI_TLV_DCS_INTERFERENCE_EVENTID: 497 ath10k_wmi_event_dcs_interference(ar, skb); 498 break; 499 case WMI_TLV_PDEV_TPC_CONFIG_EVENTID: 500 ath10k_wmi_event_pdev_tpc_config(ar, skb); 501 break; 502 case WMI_TLV_PDEV_FTM_INTG_EVENTID: 503 ath10k_wmi_event_pdev_ftm_intg(ar, skb); 504 break; 505 case WMI_TLV_GTK_OFFLOAD_STATUS_EVENTID: 506 ath10k_wmi_event_gtk_offload_status(ar, skb); 507 break; 508 case WMI_TLV_GTK_REKEY_FAIL_EVENTID: 509 ath10k_wmi_event_gtk_rekey_fail(ar, skb); 510 break; 511 case WMI_TLV_TX_DELBA_COMPLETE_EVENTID: 512 ath10k_wmi_event_delba_complete(ar, skb); 513 break; 514 case WMI_TLV_TX_ADDBA_COMPLETE_EVENTID: 515 ath10k_wmi_event_addba_complete(ar, skb); 516 break; 517 case WMI_TLV_VDEV_INSTALL_KEY_COMPLETE_EVENTID: 518 ath10k_wmi_event_vdev_install_key_complete(ar, skb); 519 break; 520 case WMI_TLV_SERVICE_READY_EVENTID: 521 ath10k_wmi_event_service_ready(ar, skb); 522 break; 523 case WMI_TLV_READY_EVENTID: 524 ath10k_wmi_event_ready(ar, skb); 525 break; 526 case WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID: 527 ath10k_wmi_tlv_event_bcn_tx_status(ar, skb); 528 break; 529 case WMI_TLV_DIAG_DATA_CONTAINER_EVENTID: 530 ath10k_wmi_tlv_event_diag_data(ar, skb); 531 break; 532 case WMI_TLV_DIAG_EVENTID: 533 ath10k_wmi_tlv_event_diag(ar, skb); 534 break; 535 case WMI_TLV_P2P_NOA_EVENTID: 536 ath10k_wmi_tlv_event_p2p_noa(ar, skb); 537 break; 538 case WMI_TLV_TX_PAUSE_EVENTID: 539 ath10k_wmi_tlv_event_tx_pause(ar, skb); 540 break; 541 default: 542 ath10k_warn(ar, "Unknown eventid: %d\n", id); 543 break; 544 } 545 546 out: 547 dev_kfree_skb(skb); 548 } 549 550 static int ath10k_wmi_tlv_op_pull_scan_ev(struct ath10k *ar, 
551 struct sk_buff *skb, 552 struct wmi_scan_ev_arg *arg) 553 { 554 const void **tb; 555 const struct wmi_scan_event *ev; 556 int ret; 557 558 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); 559 if (IS_ERR(tb)) { 560 ret = PTR_ERR(tb); 561 ath10k_warn(ar, "failed to parse tlv: %d\n", ret); 562 return ret; 563 } 564 565 ev = tb[WMI_TLV_TAG_STRUCT_SCAN_EVENT]; 566 if (!ev) { 567 kfree(tb); 568 return -EPROTO; 569 } 570 571 arg->event_type = ev->event_type; 572 arg->reason = ev->reason; 573 arg->channel_freq = ev->channel_freq; 574 arg->scan_req_id = ev->scan_req_id; 575 arg->scan_id = ev->scan_id; 576 arg->vdev_id = ev->vdev_id; 577 578 kfree(tb); 579 return 0; 580 } 581 582 static int ath10k_wmi_tlv_op_pull_mgmt_rx_ev(struct ath10k *ar, 583 struct sk_buff *skb, 584 struct wmi_mgmt_rx_ev_arg *arg) 585 { 586 const void **tb; 587 const struct wmi_tlv_mgmt_rx_ev *ev; 588 const u8 *frame; 589 u32 msdu_len; 590 int ret; 591 592 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); 593 if (IS_ERR(tb)) { 594 ret = PTR_ERR(tb); 595 ath10k_warn(ar, "failed to parse tlv: %d\n", ret); 596 return ret; 597 } 598 599 ev = tb[WMI_TLV_TAG_STRUCT_MGMT_RX_HDR]; 600 frame = tb[WMI_TLV_TAG_ARRAY_BYTE]; 601 602 if (!ev || !frame) { 603 kfree(tb); 604 return -EPROTO; 605 } 606 607 arg->channel = ev->channel; 608 arg->buf_len = ev->buf_len; 609 arg->status = ev->status; 610 arg->snr = ev->snr; 611 arg->phy_mode = ev->phy_mode; 612 arg->rate = ev->rate; 613 614 msdu_len = __le32_to_cpu(arg->buf_len); 615 616 if (skb->len < (frame - skb->data) + msdu_len) { 617 kfree(tb); 618 return -EPROTO; 619 } 620 621 /* shift the sk_buff to point to `frame` */ 622 skb_trim(skb, 0); 623 skb_put(skb, frame - skb->data); 624 skb_pull(skb, frame - skb->data); 625 skb_put(skb, msdu_len); 626 627 kfree(tb); 628 return 0; 629 } 630 631 static int ath10k_wmi_tlv_op_pull_ch_info_ev(struct ath10k *ar, 632 struct sk_buff *skb, 633 struct wmi_ch_info_ev_arg *arg) 634 { 635 const void **tb; 636 const struct wmi_chan_info_event *ev; 637 int ret; 638 639 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); 640 if (IS_ERR(tb)) { 641 ret = PTR_ERR(tb); 642 ath10k_warn(ar, "failed to parse tlv: %d\n", ret); 643 return ret; 644 } 645 646 ev = tb[WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT]; 647 if (!ev) { 648 kfree(tb); 649 return -EPROTO; 650 } 651 652 arg->err_code = ev->err_code; 653 arg->freq = ev->freq; 654 arg->cmd_flags = ev->cmd_flags; 655 arg->noise_floor = ev->noise_floor; 656 arg->rx_clear_count = ev->rx_clear_count; 657 arg->cycle_count = ev->cycle_count; 658 659 kfree(tb); 660 return 0; 661 } 662 663 static int 664 ath10k_wmi_tlv_op_pull_vdev_start_ev(struct ath10k *ar, struct sk_buff *skb, 665 struct wmi_vdev_start_ev_arg *arg) 666 { 667 const void **tb; 668 const struct wmi_vdev_start_response_event *ev; 669 int ret; 670 671 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); 672 if (IS_ERR(tb)) { 673 ret = PTR_ERR(tb); 674 ath10k_warn(ar, "failed to parse tlv: %d\n", ret); 675 return ret; 676 } 677 678 ev = tb[WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT]; 679 if (!ev) { 680 kfree(tb); 681 return -EPROTO; 682 } 683 684 skb_pull(skb, sizeof(*ev)); 685 arg->vdev_id = ev->vdev_id; 686 arg->req_id = ev->req_id; 687 arg->resp_type = ev->resp_type; 688 arg->status = ev->status; 689 690 kfree(tb); 691 return 0; 692 } 693 694 static int ath10k_wmi_tlv_op_pull_peer_kick_ev(struct ath10k *ar, 695 struct sk_buff *skb, 696 struct wmi_peer_kick_ev_arg *arg) 697 { 698 const void 
**tb; 699 const struct wmi_peer_sta_kickout_event *ev; 700 int ret; 701 702 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); 703 if (IS_ERR(tb)) { 704 ret = PTR_ERR(tb); 705 ath10k_warn(ar, "failed to parse tlv: %d\n", ret); 706 return ret; 707 } 708 709 ev = tb[WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT]; 710 if (!ev) { 711 kfree(tb); 712 return -EPROTO; 713 } 714 715 arg->mac_addr = ev->peer_macaddr.addr; 716 717 kfree(tb); 718 return 0; 719 } 720 721 struct wmi_tlv_swba_parse { 722 const struct wmi_host_swba_event *ev; 723 bool tim_done; 724 bool noa_done; 725 size_t n_tim; 726 size_t n_noa; 727 struct wmi_swba_ev_arg *arg; 728 }; 729 730 static int ath10k_wmi_tlv_swba_tim_parse(struct ath10k *ar, u16 tag, u16 len, 731 const void *ptr, void *data) 732 { 733 struct wmi_tlv_swba_parse *swba = data; 734 struct wmi_tim_info_arg *tim_info_arg; 735 const struct wmi_tim_info *tim_info_ev = ptr; 736 737 if (tag != WMI_TLV_TAG_STRUCT_TIM_INFO) 738 return -EPROTO; 739 740 if (swba->n_tim >= ARRAY_SIZE(swba->arg->tim_info)) 741 return -ENOBUFS; 742 743 if (__le32_to_cpu(tim_info_ev->tim_len) > 744 sizeof(tim_info_ev->tim_bitmap)) { 745 ath10k_warn(ar, "refusing to parse invalid swba structure\n"); 746 return -EPROTO; 747 } 748 749 tim_info_arg = &swba->arg->tim_info[swba->n_tim]; 750 tim_info_arg->tim_len = tim_info_ev->tim_len; 751 tim_info_arg->tim_mcast = tim_info_ev->tim_mcast; 752 tim_info_arg->tim_bitmap = tim_info_ev->tim_bitmap; 753 tim_info_arg->tim_changed = tim_info_ev->tim_changed; 754 tim_info_arg->tim_num_ps_pending = tim_info_ev->tim_num_ps_pending; 755 756 swba->n_tim++; 757 758 return 0; 759 } 760 761 static int ath10k_wmi_tlv_swba_noa_parse(struct ath10k *ar, u16 tag, u16 len, 762 const void *ptr, void *data) 763 { 764 struct wmi_tlv_swba_parse *swba = data; 765 766 if (tag != WMI_TLV_TAG_STRUCT_P2P_NOA_INFO) 767 return -EPROTO; 768 769 if (swba->n_noa >= ARRAY_SIZE(swba->arg->noa_info)) 770 return -ENOBUFS; 771 772 swba->arg->noa_info[swba->n_noa++] = ptr; 773 return 0; 774 } 775 776 static int ath10k_wmi_tlv_swba_parse(struct ath10k *ar, u16 tag, u16 len, 777 const void *ptr, void *data) 778 { 779 struct wmi_tlv_swba_parse *swba = data; 780 int ret; 781 782 switch (tag) { 783 case WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT: 784 swba->ev = ptr; 785 break; 786 case WMI_TLV_TAG_ARRAY_STRUCT: 787 if (!swba->tim_done) { 788 swba->tim_done = true; 789 ret = ath10k_wmi_tlv_iter(ar, ptr, len, 790 ath10k_wmi_tlv_swba_tim_parse, 791 swba); 792 if (ret) 793 return ret; 794 } else if (!swba->noa_done) { 795 swba->noa_done = true; 796 ret = ath10k_wmi_tlv_iter(ar, ptr, len, 797 ath10k_wmi_tlv_swba_noa_parse, 798 swba); 799 if (ret) 800 return ret; 801 } 802 break; 803 default: 804 break; 805 } 806 return 0; 807 } 808 809 static int ath10k_wmi_tlv_op_pull_swba_ev(struct ath10k *ar, 810 struct sk_buff *skb, 811 struct wmi_swba_ev_arg *arg) 812 { 813 struct wmi_tlv_swba_parse swba = { .arg = arg }; 814 u32 map; 815 size_t n_vdevs; 816 int ret; 817 818 ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len, 819 ath10k_wmi_tlv_swba_parse, &swba); 820 if (ret) { 821 ath10k_warn(ar, "failed to parse tlv: %d\n", ret); 822 return ret; 823 } 824 825 if (!swba.ev) 826 return -EPROTO; 827 828 arg->vdev_map = swba.ev->vdev_map; 829 830 for (map = __le32_to_cpu(arg->vdev_map), n_vdevs = 0; map; map >>= 1) 831 if (map & BIT(0)) 832 n_vdevs++; 833 834 if (n_vdevs != swba.n_tim || 835 n_vdevs != swba.n_noa) 836 return -EPROTO; 837 838 return 0; 839 } 840 841 static int 
ath10k_wmi_tlv_op_pull_phyerr_ev(struct ath10k *ar, 842 struct sk_buff *skb, 843 struct wmi_phyerr_ev_arg *arg) 844 { 845 const void **tb; 846 const struct wmi_tlv_phyerr_ev *ev; 847 const void *phyerrs; 848 int ret; 849 850 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); 851 if (IS_ERR(tb)) { 852 ret = PTR_ERR(tb); 853 ath10k_warn(ar, "failed to parse tlv: %d\n", ret); 854 return ret; 855 } 856 857 ev = tb[WMI_TLV_TAG_STRUCT_COMB_PHYERR_RX_HDR]; 858 phyerrs = tb[WMI_TLV_TAG_ARRAY_BYTE]; 859 860 if (!ev || !phyerrs) { 861 kfree(tb); 862 return -EPROTO; 863 } 864 865 arg->num_phyerrs = ev->num_phyerrs; 866 arg->tsf_l32 = ev->tsf_l32; 867 arg->tsf_u32 = ev->tsf_u32; 868 arg->buf_len = ev->buf_len; 869 arg->phyerrs = phyerrs; 870 871 kfree(tb); 872 return 0; 873 } 874 875 #define WMI_TLV_ABI_VER_NS0 0x5F414351 876 #define WMI_TLV_ABI_VER_NS1 0x00004C4D 877 #define WMI_TLV_ABI_VER_NS2 0x00000000 878 #define WMI_TLV_ABI_VER_NS3 0x00000000 879 880 #define WMI_TLV_ABI_VER0_MAJOR 1 881 #define WMI_TLV_ABI_VER0_MINOR 0 882 #define WMI_TLV_ABI_VER0 ((((WMI_TLV_ABI_VER0_MAJOR) << 24) & 0xFF000000) | \ 883 (((WMI_TLV_ABI_VER0_MINOR) << 0) & 0x00FFFFFF)) 884 #define WMI_TLV_ABI_VER1 53 885 886 static int 887 ath10k_wmi_tlv_parse_mem_reqs(struct ath10k *ar, u16 tag, u16 len, 888 const void *ptr, void *data) 889 { 890 struct wmi_svc_rdy_ev_arg *arg = data; 891 int i; 892 893 if (tag != WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ) 894 return -EPROTO; 895 896 for (i = 0; i < ARRAY_SIZE(arg->mem_reqs); i++) { 897 if (!arg->mem_reqs[i]) { 898 arg->mem_reqs[i] = ptr; 899 return 0; 900 } 901 } 902 903 return -ENOMEM; 904 } 905 906 static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar, 907 struct sk_buff *skb, 908 struct wmi_svc_rdy_ev_arg *arg) 909 { 910 const void **tb; 911 const struct hal_reg_capabilities *reg; 912 const struct wmi_tlv_svc_rdy_ev *ev; 913 const __le32 *svc_bmap; 914 const struct wlan_host_mem_req *mem_reqs; 915 int ret; 916 917 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); 918 if (IS_ERR(tb)) { 919 ret = PTR_ERR(tb); 920 ath10k_warn(ar, "failed to parse tlv: %d\n", ret); 921 return ret; 922 } 923 924 ev = tb[WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT]; 925 reg = tb[WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES]; 926 svc_bmap = tb[WMI_TLV_TAG_ARRAY_UINT32]; 927 mem_reqs = tb[WMI_TLV_TAG_ARRAY_STRUCT]; 928 929 if (!ev || !reg || !svc_bmap || !mem_reqs) { 930 kfree(tb); 931 return -EPROTO; 932 } 933 934 /* This is an internal ABI compatibility check for WMI TLV so check it 935 * here instead of the generic WMI code. 
936 */ 937 ath10k_dbg(ar, ATH10K_DBG_WMI, 938 "wmi tlv abi 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x\n", 939 __le32_to_cpu(ev->abi.abi_ver0), WMI_TLV_ABI_VER0, 940 __le32_to_cpu(ev->abi.abi_ver_ns0), WMI_TLV_ABI_VER_NS0, 941 __le32_to_cpu(ev->abi.abi_ver_ns1), WMI_TLV_ABI_VER_NS1, 942 __le32_to_cpu(ev->abi.abi_ver_ns2), WMI_TLV_ABI_VER_NS2, 943 __le32_to_cpu(ev->abi.abi_ver_ns3), WMI_TLV_ABI_VER_NS3); 944 945 if (__le32_to_cpu(ev->abi.abi_ver0) != WMI_TLV_ABI_VER0 || 946 __le32_to_cpu(ev->abi.abi_ver_ns0) != WMI_TLV_ABI_VER_NS0 || 947 __le32_to_cpu(ev->abi.abi_ver_ns1) != WMI_TLV_ABI_VER_NS1 || 948 __le32_to_cpu(ev->abi.abi_ver_ns2) != WMI_TLV_ABI_VER_NS2 || 949 __le32_to_cpu(ev->abi.abi_ver_ns3) != WMI_TLV_ABI_VER_NS3) { 950 kfree(tb); 951 return -ENOTSUPP; 952 } 953 954 arg->min_tx_power = ev->hw_min_tx_power; 955 arg->max_tx_power = ev->hw_max_tx_power; 956 arg->ht_cap = ev->ht_cap_info; 957 arg->vht_cap = ev->vht_cap_info; 958 arg->sw_ver0 = ev->abi.abi_ver0; 959 arg->sw_ver1 = ev->abi.abi_ver1; 960 arg->fw_build = ev->fw_build_vers; 961 arg->phy_capab = ev->phy_capability; 962 arg->num_rf_chains = ev->num_rf_chains; 963 arg->eeprom_rd = reg->eeprom_rd; 964 arg->num_mem_reqs = ev->num_mem_reqs; 965 arg->service_map = svc_bmap; 966 arg->service_map_len = ath10k_wmi_tlv_len(svc_bmap); 967 968 ret = ath10k_wmi_tlv_iter(ar, mem_reqs, ath10k_wmi_tlv_len(mem_reqs), 969 ath10k_wmi_tlv_parse_mem_reqs, arg); 970 if (ret) { 971 kfree(tb); 972 ath10k_warn(ar, "failed to parse mem_reqs tlv: %d\n", ret); 973 return ret; 974 } 975 976 kfree(tb); 977 return 0; 978 } 979 980 static int ath10k_wmi_tlv_op_pull_rdy_ev(struct ath10k *ar, 981 struct sk_buff *skb, 982 struct wmi_rdy_ev_arg *arg) 983 { 984 const void **tb; 985 const struct wmi_tlv_rdy_ev *ev; 986 int ret; 987 988 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); 989 if (IS_ERR(tb)) { 990 ret = PTR_ERR(tb); 991 ath10k_warn(ar, "failed to parse tlv: %d\n", ret); 992 return ret; 993 } 994 995 ev = tb[WMI_TLV_TAG_STRUCT_READY_EVENT]; 996 if (!ev) { 997 kfree(tb); 998 return -EPROTO; 999 } 1000 1001 arg->sw_version = ev->abi.abi_ver0; 1002 arg->abi_version = ev->abi.abi_ver1; 1003 arg->status = ev->status; 1004 arg->mac_addr = ev->mac_addr.addr; 1005 1006 kfree(tb); 1007 return 0; 1008 } 1009 1010 static void ath10k_wmi_tlv_pull_vdev_stats(const struct wmi_tlv_vdev_stats *src, 1011 struct ath10k_fw_stats_vdev *dst) 1012 { 1013 int i; 1014 1015 dst->vdev_id = __le32_to_cpu(src->vdev_id); 1016 dst->beacon_snr = __le32_to_cpu(src->beacon_snr); 1017 dst->data_snr = __le32_to_cpu(src->data_snr); 1018 dst->num_rx_frames = __le32_to_cpu(src->num_rx_frames); 1019 dst->num_rts_fail = __le32_to_cpu(src->num_rts_fail); 1020 dst->num_rts_success = __le32_to_cpu(src->num_rts_success); 1021 dst->num_rx_err = __le32_to_cpu(src->num_rx_err); 1022 dst->num_rx_discard = __le32_to_cpu(src->num_rx_discard); 1023 dst->num_tx_not_acked = __le32_to_cpu(src->num_tx_not_acked); 1024 1025 for (i = 0; i < ARRAY_SIZE(src->num_tx_frames); i++) 1026 dst->num_tx_frames[i] = 1027 __le32_to_cpu(src->num_tx_frames[i]); 1028 1029 for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_retries); i++) 1030 dst->num_tx_frames_retries[i] = 1031 __le32_to_cpu(src->num_tx_frames_retries[i]); 1032 1033 for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_failures); i++) 1034 dst->num_tx_frames_failures[i] = 1035 __le32_to_cpu(src->num_tx_frames_failures[i]); 1036 1037 for (i = 0; i < ARRAY_SIZE(src->tx_rate_history); i++) 1038 
dst->tx_rate_history[i] = 1039 __le32_to_cpu(src->tx_rate_history[i]); 1040 1041 for (i = 0; i < ARRAY_SIZE(src->beacon_rssi_history); i++) 1042 dst->beacon_rssi_history[i] = 1043 __le32_to_cpu(src->beacon_rssi_history[i]); 1044 } 1045 1046 static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar, 1047 struct sk_buff *skb, 1048 struct ath10k_fw_stats *stats) 1049 { 1050 const void **tb; 1051 const struct wmi_tlv_stats_ev *ev; 1052 const void *data; 1053 u32 num_pdev_stats; 1054 u32 num_vdev_stats; 1055 u32 num_peer_stats; 1056 u32 num_bcnflt_stats; 1057 u32 num_chan_stats; 1058 size_t data_len; 1059 int ret; 1060 int i; 1061 1062 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); 1063 if (IS_ERR(tb)) { 1064 ret = PTR_ERR(tb); 1065 ath10k_warn(ar, "failed to parse tlv: %d\n", ret); 1066 return ret; 1067 } 1068 1069 ev = tb[WMI_TLV_TAG_STRUCT_STATS_EVENT]; 1070 data = tb[WMI_TLV_TAG_ARRAY_BYTE]; 1071 1072 if (!ev || !data) { 1073 kfree(tb); 1074 return -EPROTO; 1075 } 1076 1077 data_len = ath10k_wmi_tlv_len(data); 1078 num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats); 1079 num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats); 1080 num_peer_stats = __le32_to_cpu(ev->num_peer_stats); 1081 num_bcnflt_stats = __le32_to_cpu(ev->num_bcnflt_stats); 1082 num_chan_stats = __le32_to_cpu(ev->num_chan_stats); 1083 1084 ath10k_dbg(ar, ATH10K_DBG_WMI, 1085 "wmi tlv stats update pdev %i vdev %i peer %i bcnflt %i chan %i\n", 1086 num_pdev_stats, num_vdev_stats, num_peer_stats, 1087 num_bcnflt_stats, num_chan_stats); 1088 1089 for (i = 0; i < num_pdev_stats; i++) { 1090 const struct wmi_pdev_stats *src; 1091 struct ath10k_fw_stats_pdev *dst; 1092 1093 src = data; 1094 if (data_len < sizeof(*src)) 1095 return -EPROTO; 1096 1097 data += sizeof(*src); 1098 data_len -= sizeof(*src); 1099 1100 dst = kzalloc(sizeof(*dst), GFP_ATOMIC); 1101 if (!dst) 1102 continue; 1103 1104 ath10k_wmi_pull_pdev_stats_base(&src->base, dst); 1105 ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst); 1106 ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst); 1107 list_add_tail(&dst->list, &stats->pdevs); 1108 } 1109 1110 for (i = 0; i < num_vdev_stats; i++) { 1111 const struct wmi_tlv_vdev_stats *src; 1112 struct ath10k_fw_stats_vdev *dst; 1113 1114 src = data; 1115 if (data_len < sizeof(*src)) 1116 return -EPROTO; 1117 1118 data += sizeof(*src); 1119 data_len -= sizeof(*src); 1120 1121 dst = kzalloc(sizeof(*dst), GFP_ATOMIC); 1122 if (!dst) 1123 continue; 1124 1125 ath10k_wmi_tlv_pull_vdev_stats(src, dst); 1126 list_add_tail(&dst->list, &stats->vdevs); 1127 } 1128 1129 for (i = 0; i < num_peer_stats; i++) { 1130 const struct wmi_10x_peer_stats *src; 1131 struct ath10k_fw_stats_peer *dst; 1132 1133 src = data; 1134 if (data_len < sizeof(*src)) 1135 return -EPROTO; 1136 1137 data += sizeof(*src); 1138 data_len -= sizeof(*src); 1139 1140 dst = kzalloc(sizeof(*dst), GFP_ATOMIC); 1141 if (!dst) 1142 continue; 1143 1144 ath10k_wmi_pull_peer_stats(&src->old, dst); 1145 dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate); 1146 list_add_tail(&dst->list, &stats->peers); 1147 } 1148 1149 kfree(tb); 1150 return 0; 1151 } 1152 1153 static int ath10k_wmi_tlv_op_pull_roam_ev(struct ath10k *ar, 1154 struct sk_buff *skb, 1155 struct wmi_roam_ev_arg *arg) 1156 { 1157 const void **tb; 1158 const struct wmi_tlv_roam_ev *ev; 1159 int ret; 1160 1161 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); 1162 if (IS_ERR(tb)) { 1163 ret = PTR_ERR(tb); 1164 ath10k_warn(ar, "failed to parse tlv: %d\n", ret); 1165 return ret; 1166 } 
1167 1168 ev = tb[WMI_TLV_TAG_STRUCT_ROAM_EVENT]; 1169 if (!ev) { 1170 kfree(tb); 1171 return -EPROTO; 1172 } 1173 1174 arg->vdev_id = ev->vdev_id; 1175 arg->reason = ev->reason; 1176 arg->rssi = ev->rssi; 1177 1178 kfree(tb); 1179 return 0; 1180 } 1181 1182 static int 1183 ath10k_wmi_tlv_op_pull_wow_ev(struct ath10k *ar, struct sk_buff *skb, 1184 struct wmi_wow_ev_arg *arg) 1185 { 1186 const void **tb; 1187 const struct wmi_tlv_wow_event_info *ev; 1188 int ret; 1189 1190 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); 1191 if (IS_ERR(tb)) { 1192 ret = PTR_ERR(tb); 1193 ath10k_warn(ar, "failed to parse tlv: %d\n", ret); 1194 return ret; 1195 } 1196 1197 ev = tb[WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO]; 1198 if (!ev) { 1199 kfree(tb); 1200 return -EPROTO; 1201 } 1202 1203 arg->vdev_id = __le32_to_cpu(ev->vdev_id); 1204 arg->flag = __le32_to_cpu(ev->flag); 1205 arg->wake_reason = __le32_to_cpu(ev->wake_reason); 1206 arg->data_len = __le32_to_cpu(ev->data_len); 1207 1208 kfree(tb); 1209 return 0; 1210 } 1211 1212 static struct sk_buff * 1213 ath10k_wmi_tlv_op_gen_pdev_suspend(struct ath10k *ar, u32 opt) 1214 { 1215 struct wmi_tlv_pdev_suspend *cmd; 1216 struct wmi_tlv *tlv; 1217 struct sk_buff *skb; 1218 1219 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); 1220 if (!skb) 1221 return ERR_PTR(-ENOMEM); 1222 1223 tlv = (void *)skb->data; 1224 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SUSPEND_CMD); 1225 tlv->len = __cpu_to_le16(sizeof(*cmd)); 1226 cmd = (void *)tlv->value; 1227 cmd->opt = __cpu_to_le32(opt); 1228 1229 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev suspend\n"); 1230 return skb; 1231 } 1232 1233 static struct sk_buff * 1234 ath10k_wmi_tlv_op_gen_pdev_resume(struct ath10k *ar) 1235 { 1236 struct wmi_tlv_resume_cmd *cmd; 1237 struct wmi_tlv *tlv; 1238 struct sk_buff *skb; 1239 1240 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); 1241 if (!skb) 1242 return ERR_PTR(-ENOMEM); 1243 1244 tlv = (void *)skb->data; 1245 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_RESUME_CMD); 1246 tlv->len = __cpu_to_le16(sizeof(*cmd)); 1247 cmd = (void *)tlv->value; 1248 cmd->reserved = __cpu_to_le32(0); 1249 1250 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev resume\n"); 1251 return skb; 1252 } 1253 1254 static struct sk_buff * 1255 ath10k_wmi_tlv_op_gen_pdev_set_rd(struct ath10k *ar, 1256 u16 rd, u16 rd2g, u16 rd5g, 1257 u16 ctl2g, u16 ctl5g, 1258 enum wmi_dfs_region dfs_reg) 1259 { 1260 struct wmi_tlv_pdev_set_rd_cmd *cmd; 1261 struct wmi_tlv *tlv; 1262 struct sk_buff *skb; 1263 1264 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); 1265 if (!skb) 1266 return ERR_PTR(-ENOMEM); 1267 1268 tlv = (void *)skb->data; 1269 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_REGDOMAIN_CMD); 1270 tlv->len = __cpu_to_le16(sizeof(*cmd)); 1271 cmd = (void *)tlv->value; 1272 cmd->regd = __cpu_to_le32(rd); 1273 cmd->regd_2ghz = __cpu_to_le32(rd2g); 1274 cmd->regd_5ghz = __cpu_to_le32(rd5g); 1275 cmd->conform_limit_2ghz = __cpu_to_le32(rd2g); 1276 cmd->conform_limit_5ghz = __cpu_to_le32(rd5g); 1277 1278 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set rd\n"); 1279 return skb; 1280 } 1281 1282 static struct sk_buff * 1283 ath10k_wmi_tlv_op_gen_pdev_set_param(struct ath10k *ar, u32 param_id, 1284 u32 param_value) 1285 { 1286 struct wmi_tlv_pdev_set_param_cmd *cmd; 1287 struct wmi_tlv *tlv; 1288 struct sk_buff *skb; 1289 1290 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); 1291 if (!skb) 1292 return ERR_PTR(-ENOMEM); 1293 1294 tlv = (void 
*)skb->data; 1295 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_PARAM_CMD); 1296 tlv->len = __cpu_to_le16(sizeof(*cmd)); 1297 cmd = (void *)tlv->value; 1298 cmd->param_id = __cpu_to_le32(param_id); 1299 cmd->param_value = __cpu_to_le32(param_value); 1300 1301 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set param\n"); 1302 return skb; 1303 } 1304 1305 static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar) 1306 { 1307 struct sk_buff *skb; 1308 struct wmi_tlv *tlv; 1309 struct wmi_tlv_init_cmd *cmd; 1310 struct wmi_tlv_resource_config *cfg; 1311 struct wmi_host_mem_chunks *chunks; 1312 size_t len, chunks_len; 1313 void *ptr; 1314 1315 chunks_len = ar->wmi.num_mem_chunks * sizeof(struct host_memory_chunk); 1316 len = (sizeof(*tlv) + sizeof(*cmd)) + 1317 (sizeof(*tlv) + sizeof(*cfg)) + 1318 (sizeof(*tlv) + chunks_len); 1319 1320 skb = ath10k_wmi_alloc_skb(ar, len); 1321 if (!skb) 1322 return ERR_PTR(-ENOMEM); 1323 1324 ptr = skb->data; 1325 1326 tlv = ptr; 1327 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_INIT_CMD); 1328 tlv->len = __cpu_to_le16(sizeof(*cmd)); 1329 cmd = (void *)tlv->value; 1330 ptr += sizeof(*tlv); 1331 ptr += sizeof(*cmd); 1332 1333 tlv = ptr; 1334 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_RESOURCE_CONFIG); 1335 tlv->len = __cpu_to_le16(sizeof(*cfg)); 1336 cfg = (void *)tlv->value; 1337 ptr += sizeof(*tlv); 1338 ptr += sizeof(*cfg); 1339 1340 tlv = ptr; 1341 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT); 1342 tlv->len = __cpu_to_le16(chunks_len); 1343 chunks = (void *)tlv->value; 1344 1345 ptr += sizeof(*tlv); 1346 ptr += chunks_len; 1347 1348 cmd->abi.abi_ver0 = __cpu_to_le32(WMI_TLV_ABI_VER0); 1349 cmd->abi.abi_ver1 = __cpu_to_le32(WMI_TLV_ABI_VER1); 1350 cmd->abi.abi_ver_ns0 = __cpu_to_le32(WMI_TLV_ABI_VER_NS0); 1351 cmd->abi.abi_ver_ns1 = __cpu_to_le32(WMI_TLV_ABI_VER_NS1); 1352 cmd->abi.abi_ver_ns2 = __cpu_to_le32(WMI_TLV_ABI_VER_NS2); 1353 cmd->abi.abi_ver_ns3 = __cpu_to_le32(WMI_TLV_ABI_VER_NS3); 1354 cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks); 1355 1356 cfg->num_vdevs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS); 1357 cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS); 1358 1359 if (test_bit(WMI_SERVICE_RX_FULL_REORDER, ar->wmi.svc_map)) { 1360 cfg->num_offload_peers = __cpu_to_le32(TARGET_TLV_NUM_VDEVS); 1361 cfg->num_offload_reorder_bufs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS); 1362 } else { 1363 cfg->num_offload_peers = __cpu_to_le32(0); 1364 cfg->num_offload_reorder_bufs = __cpu_to_le32(0); 1365 } 1366 1367 cfg->num_peer_keys = __cpu_to_le32(2); 1368 cfg->num_tids = __cpu_to_le32(TARGET_TLV_NUM_TIDS); 1369 cfg->ast_skid_limit = __cpu_to_le32(0x10); 1370 cfg->tx_chain_mask = __cpu_to_le32(0x7); 1371 cfg->rx_chain_mask = __cpu_to_le32(0x7); 1372 cfg->rx_timeout_pri[0] = __cpu_to_le32(0x64); 1373 cfg->rx_timeout_pri[1] = __cpu_to_le32(0x64); 1374 cfg->rx_timeout_pri[2] = __cpu_to_le32(0x64); 1375 cfg->rx_timeout_pri[3] = __cpu_to_le32(0x28); 1376 cfg->rx_decap_mode = __cpu_to_le32(1); 1377 cfg->scan_max_pending_reqs = __cpu_to_le32(4); 1378 cfg->bmiss_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS); 1379 cfg->roam_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS); 1380 cfg->roam_offload_max_ap_profiles = __cpu_to_le32(8); 1381 cfg->num_mcast_groups = __cpu_to_le32(0); 1382 cfg->num_mcast_table_elems = __cpu_to_le32(0); 1383 cfg->mcast2ucast_mode = __cpu_to_le32(0); 1384 cfg->tx_dbg_log_size = __cpu_to_le32(0x400); 1385 cfg->num_wds_entries = __cpu_to_le32(0x20); 1386 cfg->dma_burst_size = __cpu_to_le32(0); 1387 
cfg->mac_aggr_delim = __cpu_to_le32(0); 1388 cfg->rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(0); 1389 cfg->vow_config = __cpu_to_le32(0); 1390 cfg->gtk_offload_max_vdev = __cpu_to_le32(2); 1391 cfg->num_msdu_desc = __cpu_to_le32(TARGET_TLV_NUM_MSDU_DESC); 1392 cfg->max_frag_entries = __cpu_to_le32(2); 1393 cfg->num_tdls_vdevs = __cpu_to_le32(TARGET_TLV_NUM_TDLS_VDEVS); 1394 cfg->num_tdls_conn_table_entries = __cpu_to_le32(0x20); 1395 cfg->beacon_tx_offload_max_vdev = __cpu_to_le32(2); 1396 cfg->num_multicast_filter_entries = __cpu_to_le32(5); 1397 cfg->num_wow_filters = __cpu_to_le32(ar->wow.max_num_patterns); 1398 cfg->num_keep_alive_pattern = __cpu_to_le32(6); 1399 cfg->keep_alive_pattern_size = __cpu_to_le32(0); 1400 cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1); 1401 cfg->max_tdls_concurrent_buffer_sta = __cpu_to_le32(1); 1402 1403 ath10k_wmi_put_host_mem_chunks(ar, chunks); 1404 1405 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv init\n"); 1406 return skb; 1407 } 1408 1409 static struct sk_buff * 1410 ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar, 1411 const struct wmi_start_scan_arg *arg) 1412 { 1413 struct wmi_tlv_start_scan_cmd *cmd; 1414 struct wmi_tlv *tlv; 1415 struct sk_buff *skb; 1416 size_t len, chan_len, ssid_len, bssid_len, ie_len; 1417 __le32 *chans; 1418 struct wmi_ssid *ssids; 1419 struct wmi_mac_addr *addrs; 1420 void *ptr; 1421 int i, ret; 1422 1423 ret = ath10k_wmi_start_scan_verify(arg); 1424 if (ret) 1425 return ERR_PTR(ret); 1426 1427 chan_len = arg->n_channels * sizeof(__le32); 1428 ssid_len = arg->n_ssids * sizeof(struct wmi_ssid); 1429 bssid_len = arg->n_bssids * sizeof(struct wmi_mac_addr); 1430 ie_len = roundup(arg->ie_len, 4); 1431 len = (sizeof(*tlv) + sizeof(*cmd)) + 1432 (arg->n_channels ? sizeof(*tlv) + chan_len : 0) + 1433 (arg->n_ssids ? sizeof(*tlv) + ssid_len : 0) + 1434 (arg->n_bssids ? sizeof(*tlv) + bssid_len : 0) + 1435 (arg->ie_len ? sizeof(*tlv) + ie_len : 0); 1436 1437 skb = ath10k_wmi_alloc_skb(ar, len); 1438 if (!skb) 1439 return ERR_PTR(-ENOMEM); 1440 1441 ptr = (void *)skb->data; 1442 tlv = ptr; 1443 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_START_SCAN_CMD); 1444 tlv->len = __cpu_to_le16(sizeof(*cmd)); 1445 cmd = (void *)tlv->value; 1446 1447 ath10k_wmi_put_start_scan_common(&cmd->common, arg); 1448 cmd->burst_duration_ms = __cpu_to_le32(arg->burst_duration_ms); 1449 cmd->num_channels = __cpu_to_le32(arg->n_channels); 1450 cmd->num_ssids = __cpu_to_le32(arg->n_ssids); 1451 cmd->num_bssids = __cpu_to_le32(arg->n_bssids); 1452 cmd->ie_len = __cpu_to_le32(arg->ie_len); 1453 cmd->num_probes = __cpu_to_le32(3); 1454 1455 /* FIXME: There are some scan flag inconsistencies across firmwares, 1456 * e.g. WMI-TLV inverts the logic behind the following flag. 
1457 */ 1458 cmd->common.scan_ctrl_flags ^= __cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ); 1459 1460 ptr += sizeof(*tlv); 1461 ptr += sizeof(*cmd); 1462 1463 tlv = ptr; 1464 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32); 1465 tlv->len = __cpu_to_le16(chan_len); 1466 chans = (void *)tlv->value; 1467 for (i = 0; i < arg->n_channels; i++) 1468 chans[i] = __cpu_to_le32(arg->channels[i]); 1469 1470 ptr += sizeof(*tlv); 1471 ptr += chan_len; 1472 1473 tlv = ptr; 1474 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_FIXED_STRUCT); 1475 tlv->len = __cpu_to_le16(ssid_len); 1476 ssids = (void *)tlv->value; 1477 for (i = 0; i < arg->n_ssids; i++) { 1478 ssids[i].ssid_len = __cpu_to_le32(arg->ssids[i].len); 1479 memcpy(ssids[i].ssid, arg->ssids[i].ssid, arg->ssids[i].len); 1480 } 1481 1482 ptr += sizeof(*tlv); 1483 ptr += ssid_len; 1484 1485 tlv = ptr; 1486 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_FIXED_STRUCT); 1487 tlv->len = __cpu_to_le16(bssid_len); 1488 addrs = (void *)tlv->value; 1489 for (i = 0; i < arg->n_bssids; i++) 1490 ether_addr_copy(addrs[i].addr, arg->bssids[i].bssid); 1491 1492 ptr += sizeof(*tlv); 1493 ptr += bssid_len; 1494 1495 tlv = ptr; 1496 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE); 1497 tlv->len = __cpu_to_le16(ie_len); 1498 memcpy(tlv->value, arg->ie, arg->ie_len); 1499 1500 ptr += sizeof(*tlv); 1501 ptr += ie_len; 1502 1503 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv start scan\n"); 1504 return skb; 1505 } 1506 1507 static struct sk_buff * 1508 ath10k_wmi_tlv_op_gen_stop_scan(struct ath10k *ar, 1509 const struct wmi_stop_scan_arg *arg) 1510 { 1511 struct wmi_stop_scan_cmd *cmd; 1512 struct wmi_tlv *tlv; 1513 struct sk_buff *skb; 1514 u32 scan_id; 1515 u32 req_id; 1516 1517 if (arg->req_id > 0xFFF) 1518 return ERR_PTR(-EINVAL); 1519 if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF) 1520 return ERR_PTR(-EINVAL); 1521 1522 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); 1523 if (!skb) 1524 return ERR_PTR(-ENOMEM); 1525 1526 scan_id = arg->u.scan_id; 1527 scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX; 1528 1529 req_id = arg->req_id; 1530 req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX; 1531 1532 tlv = (void *)skb->data; 1533 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STOP_SCAN_CMD); 1534 tlv->len = __cpu_to_le16(sizeof(*cmd)); 1535 cmd = (void *)tlv->value; 1536 cmd->req_type = __cpu_to_le32(arg->req_type); 1537 cmd->vdev_id = __cpu_to_le32(arg->u.vdev_id); 1538 cmd->scan_id = __cpu_to_le32(scan_id); 1539 cmd->scan_req_id = __cpu_to_le32(req_id); 1540 1541 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv stop scan\n"); 1542 return skb; 1543 } 1544 1545 static struct sk_buff * 1546 ath10k_wmi_tlv_op_gen_vdev_create(struct ath10k *ar, 1547 u32 vdev_id, 1548 enum wmi_vdev_type vdev_type, 1549 enum wmi_vdev_subtype vdev_subtype, 1550 const u8 mac_addr[ETH_ALEN]) 1551 { 1552 struct wmi_vdev_create_cmd *cmd; 1553 struct wmi_tlv *tlv; 1554 struct sk_buff *skb; 1555 1556 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); 1557 if (!skb) 1558 return ERR_PTR(-ENOMEM); 1559 1560 tlv = (void *)skb->data; 1561 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_CREATE_CMD); 1562 tlv->len = __cpu_to_le16(sizeof(*cmd)); 1563 cmd = (void *)tlv->value; 1564 cmd->vdev_id = __cpu_to_le32(vdev_id); 1565 cmd->vdev_type = __cpu_to_le32(vdev_type); 1566 cmd->vdev_subtype = __cpu_to_le32(vdev_subtype); 1567 ether_addr_copy(cmd->vdev_macaddr.addr, mac_addr); 1568 1569 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev create\n"); 1570 return skb; 1571 } 1572 1573 static struct sk_buff * 1574 
ath10k_wmi_tlv_op_gen_vdev_delete(struct ath10k *ar, u32 vdev_id) 1575 { 1576 struct wmi_vdev_delete_cmd *cmd; 1577 struct wmi_tlv *tlv; 1578 struct sk_buff *skb; 1579 1580 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); 1581 if (!skb) 1582 return ERR_PTR(-ENOMEM); 1583 1584 tlv = (void *)skb->data; 1585 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_DELETE_CMD); 1586 tlv->len = __cpu_to_le16(sizeof(*cmd)); 1587 cmd = (void *)tlv->value; 1588 cmd->vdev_id = __cpu_to_le32(vdev_id); 1589 1590 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev delete\n"); 1591 return skb; 1592 } 1593 1594 static struct sk_buff * 1595 ath10k_wmi_tlv_op_gen_vdev_start(struct ath10k *ar, 1596 const struct wmi_vdev_start_request_arg *arg, 1597 bool restart) 1598 { 1599 struct wmi_tlv_vdev_start_cmd *cmd; 1600 struct wmi_channel *ch; 1601 struct wmi_p2p_noa_descriptor *noa; 1602 struct wmi_tlv *tlv; 1603 struct sk_buff *skb; 1604 size_t len; 1605 void *ptr; 1606 u32 flags = 0; 1607 1608 if (WARN_ON(arg->hidden_ssid && !arg->ssid)) 1609 return ERR_PTR(-EINVAL); 1610 if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid))) 1611 return ERR_PTR(-EINVAL); 1612 1613 len = (sizeof(*tlv) + sizeof(*cmd)) + 1614 (sizeof(*tlv) + sizeof(*ch)) + 1615 (sizeof(*tlv) + 0); 1616 skb = ath10k_wmi_alloc_skb(ar, len); 1617 if (!skb) 1618 return ERR_PTR(-ENOMEM); 1619 1620 if (arg->hidden_ssid) 1621 flags |= WMI_VDEV_START_HIDDEN_SSID; 1622 if (arg->pmf_enabled) 1623 flags |= WMI_VDEV_START_PMF_ENABLED; 1624 1625 ptr = (void *)skb->data; 1626 1627 tlv = ptr; 1628 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_START_REQUEST_CMD); 1629 tlv->len = __cpu_to_le16(sizeof(*cmd)); 1630 cmd = (void *)tlv->value; 1631 cmd->vdev_id = __cpu_to_le32(arg->vdev_id); 1632 cmd->bcn_intval = __cpu_to_le32(arg->bcn_intval); 1633 cmd->dtim_period = __cpu_to_le32(arg->dtim_period); 1634 cmd->flags = __cpu_to_le32(flags); 1635 cmd->bcn_tx_rate = __cpu_to_le32(arg->bcn_tx_rate); 1636 cmd->bcn_tx_power = __cpu_to_le32(arg->bcn_tx_power); 1637 cmd->disable_hw_ack = __cpu_to_le32(arg->disable_hw_ack); 1638 1639 if (arg->ssid) { 1640 cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len); 1641 memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len); 1642 } 1643 1644 ptr += sizeof(*tlv); 1645 ptr += sizeof(*cmd); 1646 1647 tlv = ptr; 1648 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL); 1649 tlv->len = __cpu_to_le16(sizeof(*ch)); 1650 ch = (void *)tlv->value; 1651 ath10k_wmi_put_wmi_channel(ch, &arg->channel); 1652 1653 ptr += sizeof(*tlv); 1654 ptr += sizeof(*ch); 1655 1656 tlv = ptr; 1657 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT); 1658 tlv->len = 0; 1659 noa = (void *)tlv->value; 1660 1661 /* Note: This is a nested TLV containing: 1662 * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv].. 
1663 */ 1664 1665 ptr += sizeof(*tlv); 1666 ptr += 0; 1667 1668 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev start\n"); 1669 return skb; 1670 } 1671 1672 static struct sk_buff * 1673 ath10k_wmi_tlv_op_gen_vdev_stop(struct ath10k *ar, u32 vdev_id) 1674 { 1675 struct wmi_vdev_stop_cmd *cmd; 1676 struct wmi_tlv *tlv; 1677 struct sk_buff *skb; 1678 1679 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); 1680 if (!skb) 1681 return ERR_PTR(-ENOMEM); 1682 1683 tlv = (void *)skb->data; 1684 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_STOP_CMD); 1685 tlv->len = __cpu_to_le16(sizeof(*cmd)); 1686 cmd = (void *)tlv->value; 1687 cmd->vdev_id = __cpu_to_le32(vdev_id); 1688 1689 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev stop\n"); 1690 return skb; 1691 } 1692 1693 static struct sk_buff * 1694 ath10k_wmi_tlv_op_gen_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, 1695 const u8 *bssid) 1696 1697 { 1698 struct wmi_vdev_up_cmd *cmd; 1699 struct wmi_tlv *tlv; 1700 struct sk_buff *skb; 1701 1702 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); 1703 if (!skb) 1704 return ERR_PTR(-ENOMEM); 1705 1706 tlv = (void *)skb->data; 1707 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_UP_CMD); 1708 tlv->len = __cpu_to_le16(sizeof(*cmd)); 1709 cmd = (void *)tlv->value; 1710 cmd->vdev_id = __cpu_to_le32(vdev_id); 1711 cmd->vdev_assoc_id = __cpu_to_le32(aid); 1712 ether_addr_copy(cmd->vdev_bssid.addr, bssid); 1713 1714 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev up\n"); 1715 return skb; 1716 } 1717 1718 static struct sk_buff * 1719 ath10k_wmi_tlv_op_gen_vdev_down(struct ath10k *ar, u32 vdev_id) 1720 { 1721 struct wmi_vdev_down_cmd *cmd; 1722 struct wmi_tlv *tlv; 1723 struct sk_buff *skb; 1724 1725 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); 1726 if (!skb) 1727 return ERR_PTR(-ENOMEM); 1728 1729 tlv = (void *)skb->data; 1730 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_DOWN_CMD); 1731 tlv->len = __cpu_to_le16(sizeof(*cmd)); 1732 cmd = (void *)tlv->value; 1733 cmd->vdev_id = __cpu_to_le32(vdev_id); 1734 1735 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev down\n"); 1736 return skb; 1737 } 1738 1739 static struct sk_buff * 1740 ath10k_wmi_tlv_op_gen_vdev_set_param(struct ath10k *ar, u32 vdev_id, 1741 u32 param_id, u32 param_value) 1742 { 1743 struct wmi_vdev_set_param_cmd *cmd; 1744 struct wmi_tlv *tlv; 1745 struct sk_buff *skb; 1746 1747 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); 1748 if (!skb) 1749 return ERR_PTR(-ENOMEM); 1750 1751 tlv = (void *)skb->data; 1752 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SET_PARAM_CMD); 1753 tlv->len = __cpu_to_le16(sizeof(*cmd)); 1754 cmd = (void *)tlv->value; 1755 cmd->vdev_id = __cpu_to_le32(vdev_id); 1756 cmd->param_id = __cpu_to_le32(param_id); 1757 cmd->param_value = __cpu_to_le32(param_value); 1758 1759 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev set param\n"); 1760 return skb; 1761 } 1762 1763 static struct sk_buff * 1764 ath10k_wmi_tlv_op_gen_vdev_install_key(struct ath10k *ar, 1765 const struct wmi_vdev_install_key_arg *arg) 1766 { 1767 struct wmi_vdev_install_key_cmd *cmd; 1768 struct wmi_tlv *tlv; 1769 struct sk_buff *skb; 1770 size_t len; 1771 void *ptr; 1772 1773 if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL) 1774 return ERR_PTR(-EINVAL); 1775 if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL) 1776 return ERR_PTR(-EINVAL); 1777 1778 len = sizeof(*tlv) + sizeof(*cmd) + 1779 sizeof(*tlv) + roundup(arg->key_len, sizeof(__le32)); 1780 skb = ath10k_wmi_alloc_skb(ar, len); 1781 
if (!skb) 1782 return ERR_PTR(-ENOMEM); 1783 1784 ptr = (void *)skb->data; 1785 tlv = ptr; 1786 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_INSTALL_KEY_CMD); 1787 tlv->len = __cpu_to_le16(sizeof(*cmd)); 1788 cmd = (void *)tlv->value; 1789 cmd->vdev_id = __cpu_to_le32(arg->vdev_id); 1790 cmd->key_idx = __cpu_to_le32(arg->key_idx); 1791 cmd->key_flags = __cpu_to_le32(arg->key_flags); 1792 cmd->key_cipher = __cpu_to_le32(arg->key_cipher); 1793 cmd->key_len = __cpu_to_le32(arg->key_len); 1794 cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len); 1795 cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len); 1796 1797 if (arg->macaddr) 1798 ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr); 1799 1800 ptr += sizeof(*tlv); 1801 ptr += sizeof(*cmd); 1802 1803 tlv = ptr; 1804 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE); 1805 tlv->len = __cpu_to_le16(roundup(arg->key_len, sizeof(__le32))); 1806 if (arg->key_data) 1807 memcpy(tlv->value, arg->key_data, arg->key_len); 1808 1809 ptr += sizeof(*tlv); 1810 ptr += roundup(arg->key_len, sizeof(__le32)); 1811 1812 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev install key\n"); 1813 return skb; 1814 } 1815 1816 static void *ath10k_wmi_tlv_put_uapsd_ac(struct ath10k *ar, void *ptr, 1817 const struct wmi_sta_uapsd_auto_trig_arg *arg) 1818 { 1819 struct wmi_sta_uapsd_auto_trig_param *ac; 1820 struct wmi_tlv *tlv; 1821 1822 tlv = ptr; 1823 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_PARAM); 1824 tlv->len = __cpu_to_le16(sizeof(*ac)); 1825 ac = (void *)tlv->value; 1826 1827 ac->wmm_ac = __cpu_to_le32(arg->wmm_ac); 1828 ac->user_priority = __cpu_to_le32(arg->user_priority); 1829 ac->service_interval = __cpu_to_le32(arg->service_interval); 1830 ac->suspend_interval = __cpu_to_le32(arg->suspend_interval); 1831 ac->delay_interval = __cpu_to_le32(arg->delay_interval); 1832 1833 ath10k_dbg(ar, ATH10K_DBG_WMI, 1834 "wmi tlv vdev sta uapsd auto trigger ac %d prio %d svc int %d susp int %d delay int %d\n", 1835 ac->wmm_ac, ac->user_priority, ac->service_interval, 1836 ac->suspend_interval, ac->delay_interval); 1837 1838 return ptr + sizeof(*tlv) + sizeof(*ac); 1839 } 1840 1841 static struct sk_buff * 1842 ath10k_wmi_tlv_op_gen_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id, 1843 const u8 peer_addr[ETH_ALEN], 1844 const struct wmi_sta_uapsd_auto_trig_arg *args, 1845 u32 num_ac) 1846 { 1847 struct wmi_sta_uapsd_auto_trig_cmd_fixed_param *cmd; 1848 struct wmi_sta_uapsd_auto_trig_param *ac; 1849 struct wmi_tlv *tlv; 1850 struct sk_buff *skb; 1851 size_t len; 1852 size_t ac_tlv_len; 1853 void *ptr; 1854 int i; 1855 1856 ac_tlv_len = num_ac * (sizeof(*tlv) + sizeof(*ac)); 1857 len = sizeof(*tlv) + sizeof(*cmd) + 1858 sizeof(*tlv) + ac_tlv_len; 1859 skb = ath10k_wmi_alloc_skb(ar, len); 1860 if (!skb) 1861 return ERR_PTR(-ENOMEM); 1862 1863 ptr = (void *)skb->data; 1864 tlv = ptr; 1865 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_CMD); 1866 tlv->len = __cpu_to_le16(sizeof(*cmd)); 1867 cmd = (void *)tlv->value; 1868 cmd->vdev_id = __cpu_to_le32(vdev_id); 1869 cmd->num_ac = __cpu_to_le32(num_ac); 1870 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); 1871 1872 ptr += sizeof(*tlv); 1873 ptr += sizeof(*cmd); 1874 1875 tlv = ptr; 1876 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT); 1877 tlv->len = __cpu_to_le16(ac_tlv_len); 1878 ac = (void *)tlv->value; 1879 1880 ptr += sizeof(*tlv); 1881 for (i = 0; i < num_ac; i++) 1882 ptr = ath10k_wmi_tlv_put_uapsd_ac(ar, ptr, &args[i]); 1883 1884 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev 
sta uapsd auto trigger\n"); 1885 return skb; 1886 } 1887 1888 static void *ath10k_wmi_tlv_put_wmm(void *ptr, 1889 const struct wmi_wmm_params_arg *arg) 1890 { 1891 struct wmi_wmm_params *wmm; 1892 struct wmi_tlv *tlv; 1893 1894 tlv = ptr; 1895 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WMM_PARAMS); 1896 tlv->len = __cpu_to_le16(sizeof(*wmm)); 1897 wmm = (void *)tlv->value; 1898 ath10k_wmi_set_wmm_param(wmm, arg); 1899 1900 return ptr + sizeof(*tlv) + sizeof(*wmm); 1901 } 1902 1903 static struct sk_buff * 1904 ath10k_wmi_tlv_op_gen_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id, 1905 const struct wmi_wmm_params_all_arg *arg) 1906 { 1907 struct wmi_tlv_vdev_set_wmm_cmd *cmd; 1908 struct wmi_tlv *tlv; 1909 struct sk_buff *skb; 1910 size_t len; 1911 void *ptr; 1912 1913 len = sizeof(*tlv) + sizeof(*cmd); 1914 skb = ath10k_wmi_alloc_skb(ar, len); 1915 if (!skb) 1916 return ERR_PTR(-ENOMEM); 1917 1918 ptr = (void *)skb->data; 1919 tlv = ptr; 1920 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SET_WMM_PARAMS_CMD); 1921 tlv->len = __cpu_to_le16(sizeof(*cmd)); 1922 cmd = (void *)tlv->value; 1923 cmd->vdev_id = __cpu_to_le32(vdev_id); 1924 1925 ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[0].params, &arg->ac_be); 1926 ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[1].params, &arg->ac_bk); 1927 ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[2].params, &arg->ac_vi); 1928 ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[3].params, &arg->ac_vo); 1929 1930 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev wmm conf\n"); 1931 return skb; 1932 } 1933 1934 static struct sk_buff * 1935 ath10k_wmi_tlv_op_gen_sta_keepalive(struct ath10k *ar, 1936 const struct wmi_sta_keepalive_arg *arg) 1937 { 1938 struct wmi_tlv_sta_keepalive_cmd *cmd; 1939 struct wmi_sta_keepalive_arp_resp *arp; 1940 struct sk_buff *skb; 1941 struct wmi_tlv *tlv; 1942 void *ptr; 1943 size_t len; 1944 1945 len = sizeof(*tlv) + sizeof(*cmd) + 1946 sizeof(*tlv) + sizeof(*arp); 1947 skb = ath10k_wmi_alloc_skb(ar, len); 1948 if (!skb) 1949 return ERR_PTR(-ENOMEM); 1950 1951 ptr = (void *)skb->data; 1952 tlv = ptr; 1953 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALIVE_CMD); 1954 tlv->len = __cpu_to_le16(sizeof(*cmd)); 1955 cmd = (void *)tlv->value; 1956 cmd->vdev_id = __cpu_to_le32(arg->vdev_id); 1957 cmd->enabled = __cpu_to_le32(arg->enabled); 1958 cmd->method = __cpu_to_le32(arg->method); 1959 cmd->interval = __cpu_to_le32(arg->interval); 1960 1961 ptr += sizeof(*tlv); 1962 ptr += sizeof(*cmd); 1963 1964 tlv = ptr; 1965 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALVE_ARP_RESPONSE); 1966 tlv->len = __cpu_to_le16(sizeof(*arp)); 1967 arp = (void *)tlv->value; 1968 1969 arp->src_ip4_addr = arg->src_ip4_addr; 1970 arp->dest_ip4_addr = arg->dest_ip4_addr; 1971 ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr); 1972 1973 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv sta keepalive vdev %d enabled %d method %d inverval %d\n", 1974 arg->vdev_id, arg->enabled, arg->method, arg->interval); 1975 return skb; 1976 } 1977 1978 static struct sk_buff * 1979 ath10k_wmi_tlv_op_gen_peer_create(struct ath10k *ar, u32 vdev_id, 1980 const u8 peer_addr[ETH_ALEN], 1981 enum wmi_peer_type peer_type) 1982 { 1983 struct wmi_tlv_peer_create_cmd *cmd; 1984 struct wmi_tlv *tlv; 1985 struct sk_buff *skb; 1986 1987 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); 1988 if (!skb) 1989 return ERR_PTR(-ENOMEM); 1990 1991 tlv = (void *)skb->data; 1992 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_CREATE_CMD); 1993 tlv->len = 
__cpu_to_le16(sizeof(*cmd)); 1994 cmd = (void *)tlv->value; 1995 cmd->vdev_id = __cpu_to_le32(vdev_id); 1996 cmd->peer_type = __cpu_to_le32(peer_type); 1997 ether_addr_copy(cmd->peer_addr.addr, peer_addr); 1998 1999 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer create\n"); 2000 return skb; 2001 } 2002 2003 static struct sk_buff * 2004 ath10k_wmi_tlv_op_gen_peer_delete(struct ath10k *ar, u32 vdev_id, 2005 const u8 peer_addr[ETH_ALEN]) 2006 { 2007 struct wmi_peer_delete_cmd *cmd; 2008 struct wmi_tlv *tlv; 2009 struct sk_buff *skb; 2010 2011 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); 2012 if (!skb) 2013 return ERR_PTR(-ENOMEM); 2014 2015 tlv = (void *)skb->data; 2016 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_DELETE_CMD); 2017 tlv->len = __cpu_to_le16(sizeof(*cmd)); 2018 cmd = (void *)tlv->value; 2019 cmd->vdev_id = __cpu_to_le32(vdev_id); 2020 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); 2021 2022 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer delete\n"); 2023 return skb; 2024 } 2025 2026 static struct sk_buff * 2027 ath10k_wmi_tlv_op_gen_peer_flush(struct ath10k *ar, u32 vdev_id, 2028 const u8 peer_addr[ETH_ALEN], u32 tid_bitmap) 2029 { 2030 struct wmi_peer_flush_tids_cmd *cmd; 2031 struct wmi_tlv *tlv; 2032 struct sk_buff *skb; 2033 2034 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); 2035 if (!skb) 2036 return ERR_PTR(-ENOMEM); 2037 2038 tlv = (void *)skb->data; 2039 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_FLUSH_TIDS_CMD); 2040 tlv->len = __cpu_to_le16(sizeof(*cmd)); 2041 cmd = (void *)tlv->value; 2042 cmd->vdev_id = __cpu_to_le32(vdev_id); 2043 cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap); 2044 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); 2045 2046 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer flush\n"); 2047 return skb; 2048 } 2049 2050 static struct sk_buff * 2051 ath10k_wmi_tlv_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id, 2052 const u8 *peer_addr, 2053 enum wmi_peer_param param_id, 2054 u32 param_value) 2055 { 2056 struct wmi_peer_set_param_cmd *cmd; 2057 struct wmi_tlv *tlv; 2058 struct sk_buff *skb; 2059 2060 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); 2061 if (!skb) 2062 return ERR_PTR(-ENOMEM); 2063 2064 tlv = (void *)skb->data; 2065 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_SET_PARAM_CMD); 2066 tlv->len = __cpu_to_le16(sizeof(*cmd)); 2067 cmd = (void *)tlv->value; 2068 cmd->vdev_id = __cpu_to_le32(vdev_id); 2069 cmd->param_id = __cpu_to_le32(param_id); 2070 cmd->param_value = __cpu_to_le32(param_value); 2071 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); 2072 2073 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer set param\n"); 2074 return skb; 2075 } 2076 2077 static struct sk_buff * 2078 ath10k_wmi_tlv_op_gen_peer_assoc(struct ath10k *ar, 2079 const struct wmi_peer_assoc_complete_arg *arg) 2080 { 2081 struct wmi_tlv_peer_assoc_cmd *cmd; 2082 struct wmi_vht_rate_set *vht_rate; 2083 struct wmi_tlv *tlv; 2084 struct sk_buff *skb; 2085 size_t len, legacy_rate_len, ht_rate_len; 2086 void *ptr; 2087 2088 if (arg->peer_mpdu_density > 16) 2089 return ERR_PTR(-EINVAL); 2090 if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES) 2091 return ERR_PTR(-EINVAL); 2092 if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES) 2093 return ERR_PTR(-EINVAL); 2094 2095 legacy_rate_len = roundup(arg->peer_legacy_rates.num_rates, 2096 sizeof(__le32)); 2097 ht_rate_len = roundup(arg->peer_ht_rates.num_rates, sizeof(__le32)); 2098 len = (sizeof(*tlv) + sizeof(*cmd)) + 2099 (sizeof(*tlv) + legacy_rate_len) + 
2100 (sizeof(*tlv) + ht_rate_len) + 2101 (sizeof(*tlv) + sizeof(*vht_rate)); 2102 skb = ath10k_wmi_alloc_skb(ar, len); 2103 if (!skb) 2104 return ERR_PTR(-ENOMEM); 2105 2106 ptr = (void *)skb->data; 2107 tlv = ptr; 2108 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_ASSOC_COMPLETE_CMD); 2109 tlv->len = __cpu_to_le16(sizeof(*cmd)); 2110 cmd = (void *)tlv->value; 2111 2112 cmd->vdev_id = __cpu_to_le32(arg->vdev_id); 2113 cmd->new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1); 2114 cmd->assoc_id = __cpu_to_le32(arg->peer_aid); 2115 cmd->flags = __cpu_to_le32(arg->peer_flags); 2116 cmd->caps = __cpu_to_le32(arg->peer_caps); 2117 cmd->listen_intval = __cpu_to_le32(arg->peer_listen_intval); 2118 cmd->ht_caps = __cpu_to_le32(arg->peer_ht_caps); 2119 cmd->max_mpdu = __cpu_to_le32(arg->peer_max_mpdu); 2120 cmd->mpdu_density = __cpu_to_le32(arg->peer_mpdu_density); 2121 cmd->rate_caps = __cpu_to_le32(arg->peer_rate_caps); 2122 cmd->nss = __cpu_to_le32(arg->peer_num_spatial_streams); 2123 cmd->vht_caps = __cpu_to_le32(arg->peer_vht_caps); 2124 cmd->phy_mode = __cpu_to_le32(arg->peer_phymode); 2125 cmd->num_legacy_rates = __cpu_to_le32(arg->peer_legacy_rates.num_rates); 2126 cmd->num_ht_rates = __cpu_to_le32(arg->peer_ht_rates.num_rates); 2127 ether_addr_copy(cmd->mac_addr.addr, arg->addr); 2128 2129 ptr += sizeof(*tlv); 2130 ptr += sizeof(*cmd); 2131 2132 tlv = ptr; 2133 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE); 2134 tlv->len = __cpu_to_le16(legacy_rate_len); 2135 memcpy(tlv->value, arg->peer_legacy_rates.rates, 2136 arg->peer_legacy_rates.num_rates); 2137 2138 ptr += sizeof(*tlv); 2139 ptr += legacy_rate_len; 2140 2141 tlv = ptr; 2142 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE); 2143 tlv->len = __cpu_to_le16(ht_rate_len); 2144 memcpy(tlv->value, arg->peer_ht_rates.rates, 2145 arg->peer_ht_rates.num_rates); 2146 2147 ptr += sizeof(*tlv); 2148 ptr += ht_rate_len; 2149 2150 tlv = ptr; 2151 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VHT_RATE_SET); 2152 tlv->len = __cpu_to_le16(sizeof(*vht_rate)); 2153 vht_rate = (void *)tlv->value; 2154 2155 vht_rate->rx_max_rate = __cpu_to_le32(arg->peer_vht_rates.rx_max_rate); 2156 vht_rate->rx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.rx_mcs_set); 2157 vht_rate->tx_max_rate = __cpu_to_le32(arg->peer_vht_rates.tx_max_rate); 2158 vht_rate->tx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set); 2159 2160 ptr += sizeof(*tlv); 2161 ptr += sizeof(*vht_rate); 2162 2163 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer assoc\n"); 2164 return skb; 2165 } 2166 2167 static struct sk_buff * 2168 ath10k_wmi_tlv_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id, 2169 enum wmi_sta_ps_mode psmode) 2170 { 2171 struct wmi_sta_powersave_mode_cmd *cmd; 2172 struct wmi_tlv *tlv; 2173 struct sk_buff *skb; 2174 2175 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); 2176 if (!skb) 2177 return ERR_PTR(-ENOMEM); 2178 2179 tlv = (void *)skb->data; 2180 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_POWERSAVE_MODE_CMD); 2181 tlv->len = __cpu_to_le16(sizeof(*cmd)); 2182 cmd = (void *)tlv->value; 2183 cmd->vdev_id = __cpu_to_le32(vdev_id); 2184 cmd->sta_ps_mode = __cpu_to_le32(psmode); 2185 2186 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set psmode\n"); 2187 return skb; 2188 } 2189 2190 static struct sk_buff * 2191 ath10k_wmi_tlv_op_gen_set_sta_ps(struct ath10k *ar, u32 vdev_id, 2192 enum wmi_sta_powersave_param param_id, 2193 u32 param_value) 2194 { 2195 struct wmi_sta_powersave_param_cmd *cmd; 2196 struct wmi_tlv *tlv; 2197 struct sk_buff *skb; 2198 2199 skb = 
ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); 2200 if (!skb) 2201 return ERR_PTR(-ENOMEM); 2202 2203 tlv = (void *)skb->data; 2204 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_POWERSAVE_PARAM_CMD); 2205 tlv->len = __cpu_to_le16(sizeof(*cmd)); 2206 cmd = (void *)tlv->value; 2207 cmd->vdev_id = __cpu_to_le32(vdev_id); 2208 cmd->param_id = __cpu_to_le32(param_id); 2209 cmd->param_value = __cpu_to_le32(param_value); 2210 2211 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set sta ps\n"); 2212 return skb; 2213 } 2214 2215 static struct sk_buff * 2216 ath10k_wmi_tlv_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac, 2217 enum wmi_ap_ps_peer_param param_id, u32 value) 2218 { 2219 struct wmi_ap_ps_peer_cmd *cmd; 2220 struct wmi_tlv *tlv; 2221 struct sk_buff *skb; 2222 2223 if (!mac) 2224 return ERR_PTR(-EINVAL); 2225 2226 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); 2227 if (!skb) 2228 return ERR_PTR(-ENOMEM); 2229 2230 tlv = (void *)skb->data; 2231 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_AP_PS_PEER_CMD); 2232 tlv->len = __cpu_to_le16(sizeof(*cmd)); 2233 cmd = (void *)tlv->value; 2234 cmd->vdev_id = __cpu_to_le32(vdev_id); 2235 cmd->param_id = __cpu_to_le32(param_id); 2236 cmd->param_value = __cpu_to_le32(value); 2237 ether_addr_copy(cmd->peer_macaddr.addr, mac); 2238 2239 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv ap ps param\n"); 2240 return skb; 2241 } 2242 2243 static struct sk_buff * 2244 ath10k_wmi_tlv_op_gen_scan_chan_list(struct ath10k *ar, 2245 const struct wmi_scan_chan_list_arg *arg) 2246 { 2247 struct wmi_tlv_scan_chan_list_cmd *cmd; 2248 struct wmi_channel *ci; 2249 struct wmi_channel_arg *ch; 2250 struct wmi_tlv *tlv; 2251 struct sk_buff *skb; 2252 size_t chans_len, len; 2253 int i; 2254 void *ptr, *chans; 2255 2256 chans_len = arg->n_channels * (sizeof(*tlv) + sizeof(*ci)); 2257 len = (sizeof(*tlv) + sizeof(*cmd)) + 2258 (sizeof(*tlv) + chans_len); 2259 2260 skb = ath10k_wmi_alloc_skb(ar, len); 2261 if (!skb) 2262 return ERR_PTR(-ENOMEM); 2263 2264 ptr = (void *)skb->data; 2265 tlv = ptr; 2266 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_SCAN_CHAN_LIST_CMD); 2267 tlv->len = __cpu_to_le16(sizeof(*cmd)); 2268 cmd = (void *)tlv->value; 2269 cmd->num_scan_chans = __cpu_to_le32(arg->n_channels); 2270 2271 ptr += sizeof(*tlv); 2272 ptr += sizeof(*cmd); 2273 2274 tlv = ptr; 2275 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT); 2276 tlv->len = __cpu_to_le16(chans_len); 2277 chans = (void *)tlv->value; 2278 2279 for (i = 0; i < arg->n_channels; i++) { 2280 ch = &arg->channels[i]; 2281 2282 tlv = chans; 2283 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL); 2284 tlv->len = __cpu_to_le16(sizeof(*ci)); 2285 ci = (void *)tlv->value; 2286 2287 ath10k_wmi_put_wmi_channel(ci, ch); 2288 2289 chans += sizeof(*tlv); 2290 chans += sizeof(*ci); 2291 } 2292 2293 ptr += sizeof(*tlv); 2294 ptr += chans_len; 2295 2296 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv scan chan list\n"); 2297 return skb; 2298 } 2299 2300 static struct sk_buff * 2301 ath10k_wmi_tlv_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id, 2302 const void *bcn, size_t bcn_len, 2303 u32 bcn_paddr, bool dtim_zero, 2304 bool deliver_cab) 2305 2306 { 2307 struct wmi_bcn_tx_ref_cmd *cmd; 2308 struct wmi_tlv *tlv; 2309 struct sk_buff *skb; 2310 struct ieee80211_hdr *hdr; 2311 u16 fc; 2312 2313 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); 2314 if (!skb) 2315 return ERR_PTR(-ENOMEM); 2316 2317 hdr = (struct ieee80211_hdr *)bcn; 2318 fc = le16_to_cpu(hdr->frame_control); 2319 2320 tlv = (void 
*)skb->data; 2321 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_SEND_FROM_HOST_CMD); 2322 tlv->len = __cpu_to_le16(sizeof(*cmd)); 2323 cmd = (void *)tlv->value; 2324 cmd->vdev_id = __cpu_to_le32(vdev_id); 2325 cmd->data_len = __cpu_to_le32(bcn_len); 2326 cmd->data_ptr = __cpu_to_le32(bcn_paddr); 2327 cmd->msdu_id = 0; 2328 cmd->frame_control = __cpu_to_le32(fc); 2329 cmd->flags = 0; 2330 2331 if (dtim_zero) 2332 cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO); 2333 2334 if (deliver_cab) 2335 cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB); 2336 2337 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv beacon dma\n"); 2338 return skb; 2339 } 2340 2341 static struct sk_buff * 2342 ath10k_wmi_tlv_op_gen_pdev_set_wmm(struct ath10k *ar, 2343 const struct wmi_wmm_params_all_arg *arg) 2344 { 2345 struct wmi_tlv_pdev_set_wmm_cmd *cmd; 2346 struct wmi_wmm_params *wmm; 2347 struct wmi_tlv *tlv; 2348 struct sk_buff *skb; 2349 size_t len; 2350 void *ptr; 2351 2352 len = (sizeof(*tlv) + sizeof(*cmd)) + 2353 (4 * (sizeof(*tlv) + sizeof(*wmm))); 2354 skb = ath10k_wmi_alloc_skb(ar, len); 2355 if (!skb) 2356 return ERR_PTR(-ENOMEM); 2357 2358 ptr = (void *)skb->data; 2359 2360 tlv = ptr; 2361 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_WMM_PARAMS_CMD); 2362 tlv->len = __cpu_to_le16(sizeof(*cmd)); 2363 cmd = (void *)tlv->value; 2364 2365 /* nothing to set here */ 2366 2367 ptr += sizeof(*tlv); 2368 ptr += sizeof(*cmd); 2369 2370 ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_be); 2371 ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_bk); 2372 ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vi); 2373 ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vo); 2374 2375 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set wmm\n"); 2376 return skb; 2377 } 2378 2379 static struct sk_buff * 2380 ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask) 2381 { 2382 struct wmi_request_stats_cmd *cmd; 2383 struct wmi_tlv *tlv; 2384 struct sk_buff *skb; 2385 2386 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); 2387 if (!skb) 2388 return ERR_PTR(-ENOMEM); 2389 2390 tlv = (void *)skb->data; 2391 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_REQUEST_STATS_CMD); 2392 tlv->len = __cpu_to_le16(sizeof(*cmd)); 2393 cmd = (void *)tlv->value; 2394 cmd->stats_id = __cpu_to_le32(stats_mask); 2395 2396 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv request stats\n"); 2397 return skb; 2398 } 2399 2400 static struct sk_buff * 2401 ath10k_wmi_tlv_op_gen_force_fw_hang(struct ath10k *ar, 2402 enum wmi_force_fw_hang_type type, 2403 u32 delay_ms) 2404 { 2405 struct wmi_force_fw_hang_cmd *cmd; 2406 struct wmi_tlv *tlv; 2407 struct sk_buff *skb; 2408 2409 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); 2410 if (!skb) 2411 return ERR_PTR(-ENOMEM); 2412 2413 tlv = (void *)skb->data; 2414 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_FORCE_FW_HANG_CMD); 2415 tlv->len = __cpu_to_le16(sizeof(*cmd)); 2416 cmd = (void *)tlv->value; 2417 cmd->type = __cpu_to_le32(type); 2418 cmd->delay_ms = __cpu_to_le32(delay_ms); 2419 2420 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv force fw hang\n"); 2421 return skb; 2422 } 2423 2424 static struct sk_buff * 2425 ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable, 2426 u32 log_level) { 2427 struct wmi_tlv_dbglog_cmd *cmd; 2428 struct wmi_tlv *tlv; 2429 struct sk_buff *skb; 2430 size_t len, bmap_len; 2431 u32 value; 2432 void *ptr; 2433 2434 if (module_enable) { 2435 value = WMI_TLV_DBGLOG_LOG_LEVEL_VALUE( 2436 module_enable, 2437 WMI_TLV_DBGLOG_LOG_LEVEL_VERBOSE); 2438 } else 
{ 2439 value = WMI_TLV_DBGLOG_LOG_LEVEL_VALUE( 2440 WMI_TLV_DBGLOG_ALL_MODULES, 2441 WMI_TLV_DBGLOG_LOG_LEVEL_WARN); 2442 } 2443 2444 bmap_len = 0; 2445 len = sizeof(*tlv) + sizeof(*cmd) + sizeof(*tlv) + bmap_len; 2446 skb = ath10k_wmi_alloc_skb(ar, len); 2447 if (!skb) 2448 return ERR_PTR(-ENOMEM); 2449 2450 ptr = (void *)skb->data; 2451 2452 tlv = ptr; 2453 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_DEBUG_LOG_CONFIG_CMD); 2454 tlv->len = __cpu_to_le16(sizeof(*cmd)); 2455 cmd = (void *)tlv->value; 2456 cmd->param = __cpu_to_le32(WMI_TLV_DBGLOG_PARAM_LOG_LEVEL); 2457 cmd->value = __cpu_to_le32(value); 2458 2459 ptr += sizeof(*tlv); 2460 ptr += sizeof(*cmd); 2461 2462 tlv = ptr; 2463 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32); 2464 tlv->len = __cpu_to_le16(bmap_len); 2465 2466 /* nothing to do here */ 2467 2468 ptr += sizeof(*tlv); 2469 ptr += sizeof(bmap_len); 2470 2471 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv dbglog value 0x%08x\n", value); 2472 return skb; 2473 } 2474 2475 static struct sk_buff * 2476 ath10k_wmi_tlv_op_gen_pktlog_enable(struct ath10k *ar, u32 filter) 2477 { 2478 struct wmi_tlv_pktlog_enable *cmd; 2479 struct wmi_tlv *tlv; 2480 struct sk_buff *skb; 2481 void *ptr; 2482 size_t len; 2483 2484 len = sizeof(*tlv) + sizeof(*cmd); 2485 skb = ath10k_wmi_alloc_skb(ar, len); 2486 if (!skb) 2487 return ERR_PTR(-ENOMEM); 2488 2489 ptr = (void *)skb->data; 2490 tlv = ptr; 2491 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_ENABLE_CMD); 2492 tlv->len = __cpu_to_le16(sizeof(*cmd)); 2493 cmd = (void *)tlv->value; 2494 cmd->filter = __cpu_to_le32(filter); 2495 2496 ptr += sizeof(*tlv); 2497 ptr += sizeof(*cmd); 2498 2499 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pktlog enable filter 0x%08x\n", 2500 filter); 2501 return skb; 2502 } 2503 2504 static struct sk_buff * 2505 ath10k_wmi_tlv_op_gen_pktlog_disable(struct ath10k *ar) 2506 { 2507 struct wmi_tlv_pktlog_disable *cmd; 2508 struct wmi_tlv *tlv; 2509 struct sk_buff *skb; 2510 void *ptr; 2511 size_t len; 2512 2513 len = sizeof(*tlv) + sizeof(*cmd); 2514 skb = ath10k_wmi_alloc_skb(ar, len); 2515 if (!skb) 2516 return ERR_PTR(-ENOMEM); 2517 2518 ptr = (void *)skb->data; 2519 tlv = ptr; 2520 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_DISABLE_CMD); 2521 tlv->len = __cpu_to_le16(sizeof(*cmd)); 2522 cmd = (void *)tlv->value; 2523 2524 ptr += sizeof(*tlv); 2525 ptr += sizeof(*cmd); 2526 2527 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pktlog disable\n"); 2528 return skb; 2529 } 2530 2531 static struct sk_buff * 2532 ath10k_wmi_tlv_op_gen_bcn_tmpl(struct ath10k *ar, u32 vdev_id, 2533 u32 tim_ie_offset, struct sk_buff *bcn, 2534 u32 prb_caps, u32 prb_erp, void *prb_ies, 2535 size_t prb_ies_len) 2536 { 2537 struct wmi_tlv_bcn_tmpl_cmd *cmd; 2538 struct wmi_tlv_bcn_prb_info *info; 2539 struct wmi_tlv *tlv; 2540 struct sk_buff *skb; 2541 void *ptr; 2542 size_t len; 2543 2544 if (WARN_ON(prb_ies_len > 0 && !prb_ies)) 2545 return ERR_PTR(-EINVAL); 2546 2547 len = sizeof(*tlv) + sizeof(*cmd) + 2548 sizeof(*tlv) + sizeof(*info) + prb_ies_len + 2549 sizeof(*tlv) + roundup(bcn->len, 4); 2550 skb = ath10k_wmi_alloc_skb(ar, len); 2551 if (!skb) 2552 return ERR_PTR(-ENOMEM); 2553 2554 ptr = (void *)skb->data; 2555 tlv = ptr; 2556 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_TMPL_CMD); 2557 tlv->len = __cpu_to_le16(sizeof(*cmd)); 2558 cmd = (void *)tlv->value; 2559 cmd->vdev_id = __cpu_to_le32(vdev_id); 2560 cmd->tim_ie_offset = __cpu_to_le32(tim_ie_offset); 2561 cmd->buf_len = __cpu_to_le32(bcn->len); 2562 2563 ptr += sizeof(*tlv); 
	ptr += sizeof(*cmd);

	/* FIXME: prb_ies_len should probably be aligned to a 4-byte boundary,
	 * but then it is impossible to pass the original IE length.
	 * This chunk is not used yet, so if setting the probe response
	 * template yields problems with beaconing or crashes firmware,
	 * look here.
	 */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
	tlv->len = __cpu_to_le16(sizeof(*info) + prb_ies_len);
	info = (void *)tlv->value;
	info->caps = __cpu_to_le32(prb_caps);
	info->erp = __cpu_to_le32(prb_erp);
	memcpy(info->ies, prb_ies, prb_ies_len);

	ptr += sizeof(*tlv);
	ptr += sizeof(*info);
	ptr += prb_ies_len;

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
	tlv->len = __cpu_to_le16(roundup(bcn->len, 4));
	memcpy(tlv->value, bcn->data, bcn->len);

	/* FIXME: Adjust TSF? */

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv bcn tmpl vdev_id %i\n",
		   vdev_id);
	return skb;
}

static struct sk_buff *
ath10k_wmi_tlv_op_gen_prb_tmpl(struct ath10k *ar, u32 vdev_id,
			       struct sk_buff *prb)
{
	struct wmi_tlv_prb_tmpl_cmd *cmd;
	struct wmi_tlv_bcn_prb_info *info;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	size_t len;

	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) + sizeof(*info) +
	      sizeof(*tlv) + roundup(prb->len, 4);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PRB_TMPL_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->buf_len = __cpu_to_le32(prb->len);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
	tlv->len = __cpu_to_le16(sizeof(*info));
	info = (void *)tlv->value;
	info->caps = 0;
	info->erp = 0;

	ptr += sizeof(*tlv);
	ptr += sizeof(*info);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
	tlv->len = __cpu_to_le16(roundup(prb->len, 4));
	memcpy(tlv->value, prb->data, prb->len);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv prb tmpl vdev_id %i\n",
		   vdev_id);
	return skb;
}

static struct sk_buff *
ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id,
				    const u8 *p2p_ie)
{
	struct wmi_tlv_p2p_go_bcn_ie *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	size_t len;

	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) + roundup(p2p_ie[1] + 2, 4);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_P2P_GO_SET_BEACON_IE);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->ie_len = __cpu_to_le32(p2p_ie[1] + 2);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
	tlv->len = __cpu_to_le16(roundup(p2p_ie[1] + 2, 4));
	memcpy(tlv->value, p2p_ie, p2p_ie[1] + 2);

	ptr += sizeof(*tlv);
	ptr += roundup(p2p_ie[1] + 2, 4);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv p2p go bcn ie for vdev %i\n",
		   vdev_id);
2681 return skb; 2682 } 2683 2684 static struct sk_buff * 2685 ath10k_wmi_tlv_op_gen_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id, 2686 enum wmi_tdls_state state) 2687 { 2688 struct wmi_tdls_set_state_cmd *cmd; 2689 struct wmi_tlv *tlv; 2690 struct sk_buff *skb; 2691 void *ptr; 2692 size_t len; 2693 /* Set to options from wmi_tlv_tdls_options, 2694 * for now none of them are enabled. 2695 */ 2696 u32 options = 0; 2697 2698 len = sizeof(*tlv) + sizeof(*cmd); 2699 skb = ath10k_wmi_alloc_skb(ar, len); 2700 if (!skb) 2701 return ERR_PTR(-ENOMEM); 2702 2703 ptr = (void *)skb->data; 2704 tlv = ptr; 2705 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_SET_STATE_CMD); 2706 tlv->len = __cpu_to_le16(sizeof(*cmd)); 2707 2708 cmd = (void *)tlv->value; 2709 cmd->vdev_id = __cpu_to_le32(vdev_id); 2710 cmd->state = __cpu_to_le32(state); 2711 cmd->notification_interval_ms = __cpu_to_le32(5000); 2712 cmd->tx_discovery_threshold = __cpu_to_le32(100); 2713 cmd->tx_teardown_threshold = __cpu_to_le32(5); 2714 cmd->rssi_teardown_threshold = __cpu_to_le32(-75); 2715 cmd->rssi_delta = __cpu_to_le32(-20); 2716 cmd->tdls_options = __cpu_to_le32(options); 2717 cmd->tdls_peer_traffic_ind_window = __cpu_to_le32(2); 2718 cmd->tdls_peer_traffic_response_timeout_ms = __cpu_to_le32(5000); 2719 cmd->tdls_puapsd_mask = __cpu_to_le32(0xf); 2720 cmd->tdls_puapsd_inactivity_time_ms = __cpu_to_le32(0); 2721 cmd->tdls_puapsd_rx_frame_threshold = __cpu_to_le32(10); 2722 2723 ptr += sizeof(*tlv); 2724 ptr += sizeof(*cmd); 2725 2726 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv update fw tdls state %d for vdev %i\n", 2727 state, vdev_id); 2728 return skb; 2729 } 2730 2731 static u32 ath10k_wmi_tlv_prepare_peer_qos(u8 uapsd_queues, u8 sp) 2732 { 2733 u32 peer_qos = 0; 2734 2735 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) 2736 peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VO; 2737 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) 2738 peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VI; 2739 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) 2740 peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BK; 2741 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) 2742 peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BE; 2743 2744 peer_qos |= SM(sp, WMI_TLV_TDLS_PEER_SP); 2745 2746 return peer_qos; 2747 } 2748 2749 static struct sk_buff * 2750 ath10k_wmi_tlv_op_gen_tdls_peer_update(struct ath10k *ar, 2751 const struct wmi_tdls_peer_update_cmd_arg *arg, 2752 const struct wmi_tdls_peer_capab_arg *cap, 2753 const struct wmi_channel_arg *chan_arg) 2754 { 2755 struct wmi_tdls_peer_update_cmd *cmd; 2756 struct wmi_tdls_peer_capab *peer_cap; 2757 struct wmi_channel *chan; 2758 struct wmi_tlv *tlv; 2759 struct sk_buff *skb; 2760 u32 peer_qos; 2761 void *ptr; 2762 int len; 2763 int i; 2764 2765 len = sizeof(*tlv) + sizeof(*cmd) + 2766 sizeof(*tlv) + sizeof(*peer_cap) + 2767 sizeof(*tlv) + cap->peer_chan_len * sizeof(*chan); 2768 2769 skb = ath10k_wmi_alloc_skb(ar, len); 2770 if (!skb) 2771 return ERR_PTR(-ENOMEM); 2772 2773 ptr = (void *)skb->data; 2774 tlv = ptr; 2775 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_UPDATE_CMD); 2776 tlv->len = __cpu_to_le16(sizeof(*cmd)); 2777 2778 cmd = (void *)tlv->value; 2779 cmd->vdev_id = __cpu_to_le32(arg->vdev_id); 2780 ether_addr_copy(cmd->peer_macaddr.addr, arg->addr); 2781 cmd->peer_state = __cpu_to_le32(arg->peer_state); 2782 2783 ptr += sizeof(*tlv); 2784 ptr += sizeof(*cmd); 2785 2786 tlv = ptr; 2787 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_CAPABILITIES); 2788 tlv->len = __cpu_to_le16(sizeof(*peer_cap)); 
2789 peer_cap = (void *)tlv->value; 2790 peer_qos = ath10k_wmi_tlv_prepare_peer_qos(cap->peer_uapsd_queues, 2791 cap->peer_max_sp); 2792 peer_cap->peer_qos = __cpu_to_le32(peer_qos); 2793 peer_cap->buff_sta_support = __cpu_to_le32(cap->buff_sta_support); 2794 peer_cap->off_chan_support = __cpu_to_le32(cap->off_chan_support); 2795 peer_cap->peer_curr_operclass = __cpu_to_le32(cap->peer_curr_operclass); 2796 peer_cap->self_curr_operclass = __cpu_to_le32(cap->self_curr_operclass); 2797 peer_cap->peer_chan_len = __cpu_to_le32(cap->peer_chan_len); 2798 peer_cap->peer_operclass_len = __cpu_to_le32(cap->peer_operclass_len); 2799 2800 for (i = 0; i < WMI_TDLS_MAX_SUPP_OPER_CLASSES; i++) 2801 peer_cap->peer_operclass[i] = cap->peer_operclass[i]; 2802 2803 peer_cap->is_peer_responder = __cpu_to_le32(cap->is_peer_responder); 2804 peer_cap->pref_offchan_num = __cpu_to_le32(cap->pref_offchan_num); 2805 peer_cap->pref_offchan_bw = __cpu_to_le32(cap->pref_offchan_bw); 2806 2807 ptr += sizeof(*tlv); 2808 ptr += sizeof(*peer_cap); 2809 2810 tlv = ptr; 2811 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT); 2812 tlv->len = __cpu_to_le16(cap->peer_chan_len * sizeof(*chan)); 2813 2814 ptr += sizeof(*tlv); 2815 2816 for (i = 0; i < cap->peer_chan_len; i++) { 2817 tlv = ptr; 2818 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL); 2819 tlv->len = __cpu_to_le16(sizeof(*chan)); 2820 chan = (void *)tlv->value; 2821 ath10k_wmi_put_wmi_channel(chan, &chan_arg[i]); 2822 2823 ptr += sizeof(*tlv); 2824 ptr += sizeof(*chan); 2825 } 2826 2827 ath10k_dbg(ar, ATH10K_DBG_WMI, 2828 "wmi tlv tdls peer update vdev %i state %d n_chans %u\n", 2829 arg->vdev_id, arg->peer_state, cap->peer_chan_len); 2830 return skb; 2831 } 2832 2833 static struct sk_buff * 2834 ath10k_wmi_tlv_op_gen_wow_enable(struct ath10k *ar) 2835 { 2836 struct wmi_tlv_wow_enable_cmd *cmd; 2837 struct wmi_tlv *tlv; 2838 struct sk_buff *skb; 2839 size_t len; 2840 2841 len = sizeof(*tlv) + sizeof(*cmd); 2842 skb = ath10k_wmi_alloc_skb(ar, len); 2843 if (!skb) 2844 return ERR_PTR(-ENOMEM); 2845 2846 tlv = (struct wmi_tlv *)skb->data; 2847 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ENABLE_CMD); 2848 tlv->len = __cpu_to_le16(sizeof(*cmd)); 2849 cmd = (void *)tlv->value; 2850 2851 cmd->enable = __cpu_to_le32(1); 2852 2853 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow enable\n"); 2854 return skb; 2855 } 2856 2857 static struct sk_buff * 2858 ath10k_wmi_tlv_op_gen_wow_add_wakeup_event(struct ath10k *ar, 2859 u32 vdev_id, 2860 enum wmi_wow_wakeup_event event, 2861 u32 enable) 2862 { 2863 struct wmi_tlv_wow_add_del_event_cmd *cmd; 2864 struct wmi_tlv *tlv; 2865 struct sk_buff *skb; 2866 size_t len; 2867 2868 len = sizeof(*tlv) + sizeof(*cmd); 2869 skb = ath10k_wmi_alloc_skb(ar, len); 2870 if (!skb) 2871 return ERR_PTR(-ENOMEM); 2872 2873 tlv = (struct wmi_tlv *)skb->data; 2874 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_DEL_EVT_CMD); 2875 tlv->len = __cpu_to_le16(sizeof(*cmd)); 2876 cmd = (void *)tlv->value; 2877 2878 cmd->vdev_id = __cpu_to_le32(vdev_id); 2879 cmd->is_add = __cpu_to_le32(enable); 2880 cmd->event_bitmap = __cpu_to_le32(1 << event); 2881 2882 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n", 2883 wow_wakeup_event(event), enable, vdev_id); 2884 return skb; 2885 } 2886 2887 static struct sk_buff * 2888 ath10k_wmi_tlv_gen_wow_host_wakeup_ind(struct ath10k *ar) 2889 { 2890 struct wmi_tlv_wow_host_wakeup_ind *cmd; 2891 struct wmi_tlv *tlv; 2892 struct sk_buff *skb; 2893 size_t len; 2894 2895 len = 
sizeof(*tlv) + sizeof(*cmd); 2896 skb = ath10k_wmi_alloc_skb(ar, len); 2897 if (!skb) 2898 return ERR_PTR(-ENOMEM); 2899 2900 tlv = (struct wmi_tlv *)skb->data; 2901 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_HOSTWAKEUP_FROM_SLEEP_CMD); 2902 tlv->len = __cpu_to_le16(sizeof(*cmd)); 2903 cmd = (void *)tlv->value; 2904 2905 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow host wakeup ind\n"); 2906 return skb; 2907 } 2908 2909 static struct sk_buff * 2910 ath10k_wmi_tlv_op_gen_wow_add_pattern(struct ath10k *ar, u32 vdev_id, 2911 u32 pattern_id, const u8 *pattern, 2912 const u8 *bitmask, int pattern_len, 2913 int pattern_offset) 2914 { 2915 struct wmi_tlv_wow_add_pattern_cmd *cmd; 2916 struct wmi_tlv_wow_bitmap_pattern *bitmap; 2917 struct wmi_tlv *tlv; 2918 struct sk_buff *skb; 2919 void *ptr; 2920 size_t len; 2921 2922 len = sizeof(*tlv) + sizeof(*cmd) + 2923 sizeof(*tlv) + /* array struct */ 2924 sizeof(*tlv) + sizeof(*bitmap) + /* bitmap */ 2925 sizeof(*tlv) + /* empty ipv4 sync */ 2926 sizeof(*tlv) + /* empty ipv6 sync */ 2927 sizeof(*tlv) + /* empty magic */ 2928 sizeof(*tlv) + /* empty info timeout */ 2929 sizeof(*tlv) + sizeof(u32); /* ratelimit interval */ 2930 2931 skb = ath10k_wmi_alloc_skb(ar, len); 2932 if (!skb) 2933 return ERR_PTR(-ENOMEM); 2934 2935 /* cmd */ 2936 ptr = (void *)skb->data; 2937 tlv = ptr; 2938 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_PATTERN_CMD); 2939 tlv->len = __cpu_to_le16(sizeof(*cmd)); 2940 cmd = (void *)tlv->value; 2941 2942 cmd->vdev_id = __cpu_to_le32(vdev_id); 2943 cmd->pattern_id = __cpu_to_le32(pattern_id); 2944 cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN); 2945 2946 ptr += sizeof(*tlv); 2947 ptr += sizeof(*cmd); 2948 2949 /* bitmap */ 2950 tlv = ptr; 2951 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT); 2952 tlv->len = __cpu_to_le16(sizeof(*tlv) + sizeof(*bitmap)); 2953 2954 ptr += sizeof(*tlv); 2955 2956 tlv = ptr; 2957 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_BITMAP_PATTERN_T); 2958 tlv->len = __cpu_to_le16(sizeof(*bitmap)); 2959 bitmap = (void *)tlv->value; 2960 2961 memcpy(bitmap->patternbuf, pattern, pattern_len); 2962 memcpy(bitmap->bitmaskbuf, bitmask, pattern_len); 2963 bitmap->pattern_offset = __cpu_to_le32(pattern_offset); 2964 bitmap->pattern_len = __cpu_to_le32(pattern_len); 2965 bitmap->bitmask_len = __cpu_to_le32(pattern_len); 2966 bitmap->pattern_id = __cpu_to_le32(pattern_id); 2967 2968 ptr += sizeof(*tlv); 2969 ptr += sizeof(*bitmap); 2970 2971 /* ipv4 sync */ 2972 tlv = ptr; 2973 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT); 2974 tlv->len = __cpu_to_le16(0); 2975 2976 ptr += sizeof(*tlv); 2977 2978 /* ipv6 sync */ 2979 tlv = ptr; 2980 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT); 2981 tlv->len = __cpu_to_le16(0); 2982 2983 ptr += sizeof(*tlv); 2984 2985 /* magic */ 2986 tlv = ptr; 2987 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT); 2988 tlv->len = __cpu_to_le16(0); 2989 2990 ptr += sizeof(*tlv); 2991 2992 /* pattern info timeout */ 2993 tlv = ptr; 2994 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32); 2995 tlv->len = __cpu_to_le16(0); 2996 2997 ptr += sizeof(*tlv); 2998 2999 /* ratelimit interval */ 3000 tlv = ptr; 3001 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32); 3002 tlv->len = __cpu_to_le16(sizeof(u32)); 3003 3004 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d, pattern_offset %d\n", 3005 vdev_id, pattern_id, pattern_offset); 3006 return skb; 3007 } 3008 3009 static struct sk_buff * 3010 ath10k_wmi_tlv_op_gen_wow_del_pattern(struct ath10k 
*ar, u32 vdev_id, 3011 u32 pattern_id) 3012 { 3013 struct wmi_tlv_wow_del_pattern_cmd *cmd; 3014 struct wmi_tlv *tlv; 3015 struct sk_buff *skb; 3016 size_t len; 3017 3018 len = sizeof(*tlv) + sizeof(*cmd); 3019 skb = ath10k_wmi_alloc_skb(ar, len); 3020 if (!skb) 3021 return ERR_PTR(-ENOMEM); 3022 3023 tlv = (struct wmi_tlv *)skb->data; 3024 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_DEL_PATTERN_CMD); 3025 tlv->len = __cpu_to_le16(sizeof(*cmd)); 3026 cmd = (void *)tlv->value; 3027 3028 cmd->vdev_id = __cpu_to_le32(vdev_id); 3029 cmd->pattern_id = __cpu_to_le32(pattern_id); 3030 cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN); 3031 3032 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n", 3033 vdev_id, pattern_id); 3034 return skb; 3035 } 3036 3037 static struct sk_buff * 3038 ath10k_wmi_tlv_op_gen_adaptive_qcs(struct ath10k *ar, bool enable) 3039 { 3040 struct wmi_tlv_adaptive_qcs *cmd; 3041 struct wmi_tlv *tlv; 3042 struct sk_buff *skb; 3043 void *ptr; 3044 size_t len; 3045 3046 len = sizeof(*tlv) + sizeof(*cmd); 3047 skb = ath10k_wmi_alloc_skb(ar, len); 3048 if (!skb) 3049 return ERR_PTR(-ENOMEM); 3050 3051 ptr = (void *)skb->data; 3052 tlv = ptr; 3053 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_RESMGR_ADAPTIVE_OCS_CMD); 3054 tlv->len = __cpu_to_le16(sizeof(*cmd)); 3055 cmd = (void *)tlv->value; 3056 cmd->enable = __cpu_to_le32(enable ? 1 : 0); 3057 3058 ptr += sizeof(*tlv); 3059 ptr += sizeof(*cmd); 3060 3061 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv adaptive qcs %d\n", enable); 3062 return skb; 3063 } 3064 3065 /****************/ 3066 /* TLV mappings */ 3067 /****************/ 3068 3069 static struct wmi_cmd_map wmi_tlv_cmd_map = { 3070 .init_cmdid = WMI_TLV_INIT_CMDID, 3071 .start_scan_cmdid = WMI_TLV_START_SCAN_CMDID, 3072 .stop_scan_cmdid = WMI_TLV_STOP_SCAN_CMDID, 3073 .scan_chan_list_cmdid = WMI_TLV_SCAN_CHAN_LIST_CMDID, 3074 .scan_sch_prio_tbl_cmdid = WMI_TLV_SCAN_SCH_PRIO_TBL_CMDID, 3075 .pdev_set_regdomain_cmdid = WMI_TLV_PDEV_SET_REGDOMAIN_CMDID, 3076 .pdev_set_channel_cmdid = WMI_TLV_PDEV_SET_CHANNEL_CMDID, 3077 .pdev_set_param_cmdid = WMI_TLV_PDEV_SET_PARAM_CMDID, 3078 .pdev_pktlog_enable_cmdid = WMI_TLV_PDEV_PKTLOG_ENABLE_CMDID, 3079 .pdev_pktlog_disable_cmdid = WMI_TLV_PDEV_PKTLOG_DISABLE_CMDID, 3080 .pdev_set_wmm_params_cmdid = WMI_TLV_PDEV_SET_WMM_PARAMS_CMDID, 3081 .pdev_set_ht_cap_ie_cmdid = WMI_TLV_PDEV_SET_HT_CAP_IE_CMDID, 3082 .pdev_set_vht_cap_ie_cmdid = WMI_TLV_PDEV_SET_VHT_CAP_IE_CMDID, 3083 .pdev_set_dscp_tid_map_cmdid = WMI_TLV_PDEV_SET_DSCP_TID_MAP_CMDID, 3084 .pdev_set_quiet_mode_cmdid = WMI_TLV_PDEV_SET_QUIET_MODE_CMDID, 3085 .pdev_green_ap_ps_enable_cmdid = WMI_TLV_PDEV_GREEN_AP_PS_ENABLE_CMDID, 3086 .pdev_get_tpc_config_cmdid = WMI_TLV_PDEV_GET_TPC_CONFIG_CMDID, 3087 .pdev_set_base_macaddr_cmdid = WMI_TLV_PDEV_SET_BASE_MACADDR_CMDID, 3088 .vdev_create_cmdid = WMI_TLV_VDEV_CREATE_CMDID, 3089 .vdev_delete_cmdid = WMI_TLV_VDEV_DELETE_CMDID, 3090 .vdev_start_request_cmdid = WMI_TLV_VDEV_START_REQUEST_CMDID, 3091 .vdev_restart_request_cmdid = WMI_TLV_VDEV_RESTART_REQUEST_CMDID, 3092 .vdev_up_cmdid = WMI_TLV_VDEV_UP_CMDID, 3093 .vdev_stop_cmdid = WMI_TLV_VDEV_STOP_CMDID, 3094 .vdev_down_cmdid = WMI_TLV_VDEV_DOWN_CMDID, 3095 .vdev_set_param_cmdid = WMI_TLV_VDEV_SET_PARAM_CMDID, 3096 .vdev_install_key_cmdid = WMI_TLV_VDEV_INSTALL_KEY_CMDID, 3097 .peer_create_cmdid = WMI_TLV_PEER_CREATE_CMDID, 3098 .peer_delete_cmdid = WMI_TLV_PEER_DELETE_CMDID, 3099 .peer_flush_tids_cmdid = WMI_TLV_PEER_FLUSH_TIDS_CMDID, 3100 
.peer_set_param_cmdid = WMI_TLV_PEER_SET_PARAM_CMDID, 3101 .peer_assoc_cmdid = WMI_TLV_PEER_ASSOC_CMDID, 3102 .peer_add_wds_entry_cmdid = WMI_TLV_PEER_ADD_WDS_ENTRY_CMDID, 3103 .peer_remove_wds_entry_cmdid = WMI_TLV_PEER_REMOVE_WDS_ENTRY_CMDID, 3104 .peer_mcast_group_cmdid = WMI_TLV_PEER_MCAST_GROUP_CMDID, 3105 .bcn_tx_cmdid = WMI_TLV_BCN_TX_CMDID, 3106 .pdev_send_bcn_cmdid = WMI_TLV_PDEV_SEND_BCN_CMDID, 3107 .bcn_tmpl_cmdid = WMI_TLV_BCN_TMPL_CMDID, 3108 .bcn_filter_rx_cmdid = WMI_TLV_BCN_FILTER_RX_CMDID, 3109 .prb_req_filter_rx_cmdid = WMI_TLV_PRB_REQ_FILTER_RX_CMDID, 3110 .mgmt_tx_cmdid = WMI_TLV_MGMT_TX_CMDID, 3111 .prb_tmpl_cmdid = WMI_TLV_PRB_TMPL_CMDID, 3112 .addba_clear_resp_cmdid = WMI_TLV_ADDBA_CLEAR_RESP_CMDID, 3113 .addba_send_cmdid = WMI_TLV_ADDBA_SEND_CMDID, 3114 .addba_status_cmdid = WMI_TLV_ADDBA_STATUS_CMDID, 3115 .delba_send_cmdid = WMI_TLV_DELBA_SEND_CMDID, 3116 .addba_set_resp_cmdid = WMI_TLV_ADDBA_SET_RESP_CMDID, 3117 .send_singleamsdu_cmdid = WMI_TLV_SEND_SINGLEAMSDU_CMDID, 3118 .sta_powersave_mode_cmdid = WMI_TLV_STA_POWERSAVE_MODE_CMDID, 3119 .sta_powersave_param_cmdid = WMI_TLV_STA_POWERSAVE_PARAM_CMDID, 3120 .sta_mimo_ps_mode_cmdid = WMI_TLV_STA_MIMO_PS_MODE_CMDID, 3121 .pdev_dfs_enable_cmdid = WMI_TLV_PDEV_DFS_ENABLE_CMDID, 3122 .pdev_dfs_disable_cmdid = WMI_TLV_PDEV_DFS_DISABLE_CMDID, 3123 .roam_scan_mode = WMI_TLV_ROAM_SCAN_MODE, 3124 .roam_scan_rssi_threshold = WMI_TLV_ROAM_SCAN_RSSI_THRESHOLD, 3125 .roam_scan_period = WMI_TLV_ROAM_SCAN_PERIOD, 3126 .roam_scan_rssi_change_threshold = 3127 WMI_TLV_ROAM_SCAN_RSSI_CHANGE_THRESHOLD, 3128 .roam_ap_profile = WMI_TLV_ROAM_AP_PROFILE, 3129 .ofl_scan_add_ap_profile = WMI_TLV_ROAM_AP_PROFILE, 3130 .ofl_scan_remove_ap_profile = WMI_TLV_OFL_SCAN_REMOVE_AP_PROFILE, 3131 .ofl_scan_period = WMI_TLV_OFL_SCAN_PERIOD, 3132 .p2p_dev_set_device_info = WMI_TLV_P2P_DEV_SET_DEVICE_INFO, 3133 .p2p_dev_set_discoverability = WMI_TLV_P2P_DEV_SET_DISCOVERABILITY, 3134 .p2p_go_set_beacon_ie = WMI_TLV_P2P_GO_SET_BEACON_IE, 3135 .p2p_go_set_probe_resp_ie = WMI_TLV_P2P_GO_SET_PROBE_RESP_IE, 3136 .p2p_set_vendor_ie_data_cmdid = WMI_TLV_P2P_SET_VENDOR_IE_DATA_CMDID, 3137 .ap_ps_peer_param_cmdid = WMI_TLV_AP_PS_PEER_PARAM_CMDID, 3138 .ap_ps_peer_uapsd_coex_cmdid = WMI_TLV_AP_PS_PEER_UAPSD_COEX_CMDID, 3139 .peer_rate_retry_sched_cmdid = WMI_TLV_PEER_RATE_RETRY_SCHED_CMDID, 3140 .wlan_profile_trigger_cmdid = WMI_TLV_WLAN_PROFILE_TRIGGER_CMDID, 3141 .wlan_profile_set_hist_intvl_cmdid = 3142 WMI_TLV_WLAN_PROFILE_SET_HIST_INTVL_CMDID, 3143 .wlan_profile_get_profile_data_cmdid = 3144 WMI_TLV_WLAN_PROFILE_GET_PROFILE_DATA_CMDID, 3145 .wlan_profile_enable_profile_id_cmdid = 3146 WMI_TLV_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID, 3147 .wlan_profile_list_profile_id_cmdid = 3148 WMI_TLV_WLAN_PROFILE_LIST_PROFILE_ID_CMDID, 3149 .pdev_suspend_cmdid = WMI_TLV_PDEV_SUSPEND_CMDID, 3150 .pdev_resume_cmdid = WMI_TLV_PDEV_RESUME_CMDID, 3151 .add_bcn_filter_cmdid = WMI_TLV_ADD_BCN_FILTER_CMDID, 3152 .rmv_bcn_filter_cmdid = WMI_TLV_RMV_BCN_FILTER_CMDID, 3153 .wow_add_wake_pattern_cmdid = WMI_TLV_WOW_ADD_WAKE_PATTERN_CMDID, 3154 .wow_del_wake_pattern_cmdid = WMI_TLV_WOW_DEL_WAKE_PATTERN_CMDID, 3155 .wow_enable_disable_wake_event_cmdid = 3156 WMI_TLV_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID, 3157 .wow_enable_cmdid = WMI_TLV_WOW_ENABLE_CMDID, 3158 .wow_hostwakeup_from_sleep_cmdid = 3159 WMI_TLV_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID, 3160 .rtt_measreq_cmdid = WMI_TLV_RTT_MEASREQ_CMDID, 3161 .rtt_tsf_cmdid = WMI_TLV_RTT_TSF_CMDID, 3162 .vdev_spectral_scan_configure_cmdid = 
WMI_TLV_SPECTRAL_SCAN_CONF_CMDID, 3163 .vdev_spectral_scan_enable_cmdid = WMI_TLV_SPECTRAL_SCAN_ENABLE_CMDID, 3164 .request_stats_cmdid = WMI_TLV_REQUEST_STATS_CMDID, 3165 .set_arp_ns_offload_cmdid = WMI_TLV_SET_ARP_NS_OFFLOAD_CMDID, 3166 .network_list_offload_config_cmdid = 3167 WMI_TLV_NETWORK_LIST_OFFLOAD_CONFIG_CMDID, 3168 .gtk_offload_cmdid = WMI_TLV_GTK_OFFLOAD_CMDID, 3169 .csa_offload_enable_cmdid = WMI_TLV_CSA_OFFLOAD_ENABLE_CMDID, 3170 .csa_offload_chanswitch_cmdid = WMI_TLV_CSA_OFFLOAD_CHANSWITCH_CMDID, 3171 .chatter_set_mode_cmdid = WMI_TLV_CHATTER_SET_MODE_CMDID, 3172 .peer_tid_addba_cmdid = WMI_TLV_PEER_TID_ADDBA_CMDID, 3173 .peer_tid_delba_cmdid = WMI_TLV_PEER_TID_DELBA_CMDID, 3174 .sta_dtim_ps_method_cmdid = WMI_TLV_STA_DTIM_PS_METHOD_CMDID, 3175 .sta_uapsd_auto_trig_cmdid = WMI_TLV_STA_UAPSD_AUTO_TRIG_CMDID, 3176 .sta_keepalive_cmd = WMI_TLV_STA_KEEPALIVE_CMDID, 3177 .echo_cmdid = WMI_TLV_ECHO_CMDID, 3178 .pdev_utf_cmdid = WMI_TLV_PDEV_UTF_CMDID, 3179 .dbglog_cfg_cmdid = WMI_TLV_DBGLOG_CFG_CMDID, 3180 .pdev_qvit_cmdid = WMI_TLV_PDEV_QVIT_CMDID, 3181 .pdev_ftm_intg_cmdid = WMI_TLV_PDEV_FTM_INTG_CMDID, 3182 .vdev_set_keepalive_cmdid = WMI_TLV_VDEV_SET_KEEPALIVE_CMDID, 3183 .vdev_get_keepalive_cmdid = WMI_TLV_VDEV_GET_KEEPALIVE_CMDID, 3184 .force_fw_hang_cmdid = WMI_TLV_FORCE_FW_HANG_CMDID, 3185 .gpio_config_cmdid = WMI_TLV_GPIO_CONFIG_CMDID, 3186 .gpio_output_cmdid = WMI_TLV_GPIO_OUTPUT_CMDID, 3187 .pdev_get_temperature_cmdid = WMI_TLV_CMD_UNSUPPORTED, 3188 .vdev_set_wmm_params_cmdid = WMI_TLV_VDEV_SET_WMM_PARAMS_CMDID, 3189 .tdls_set_state_cmdid = WMI_TLV_TDLS_SET_STATE_CMDID, 3190 .tdls_peer_update_cmdid = WMI_TLV_TDLS_PEER_UPDATE_CMDID, 3191 .adaptive_qcs_cmdid = WMI_TLV_RESMGR_ADAPTIVE_OCS_CMDID, 3192 .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED, 3193 .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED, 3194 .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED, 3195 .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED, 3196 .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED, 3197 .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED, 3198 .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED, 3199 .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED, 3200 .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED, 3201 .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED, 3202 .oem_req_cmdid = WMI_CMD_UNSUPPORTED, 3203 .nan_cmdid = WMI_CMD_UNSUPPORTED, 3204 .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED, 3205 .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED, 3206 .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED, 3207 .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED, 3208 .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED, 3209 .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED, 3210 .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED, 3211 .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED, 3212 .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED, 3213 .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED, 3214 .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED, 3215 .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED, 3216 .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED, 3217 .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED, 3218 .fwtest_cmdid = WMI_CMD_UNSUPPORTED, 3219 .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED, 3220 .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED, 3221 .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED, 3222 .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED, 3223 .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED, 
3224 }; 3225 3226 static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = { 3227 .tx_chain_mask = WMI_TLV_PDEV_PARAM_TX_CHAIN_MASK, 3228 .rx_chain_mask = WMI_TLV_PDEV_PARAM_RX_CHAIN_MASK, 3229 .txpower_limit2g = WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT2G, 3230 .txpower_limit5g = WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT5G, 3231 .txpower_scale = WMI_TLV_PDEV_PARAM_TXPOWER_SCALE, 3232 .beacon_gen_mode = WMI_TLV_PDEV_PARAM_BEACON_GEN_MODE, 3233 .beacon_tx_mode = WMI_TLV_PDEV_PARAM_BEACON_TX_MODE, 3234 .resmgr_offchan_mode = WMI_TLV_PDEV_PARAM_RESMGR_OFFCHAN_MODE, 3235 .protection_mode = WMI_TLV_PDEV_PARAM_PROTECTION_MODE, 3236 .dynamic_bw = WMI_TLV_PDEV_PARAM_DYNAMIC_BW, 3237 .non_agg_sw_retry_th = WMI_TLV_PDEV_PARAM_NON_AGG_SW_RETRY_TH, 3238 .agg_sw_retry_th = WMI_TLV_PDEV_PARAM_AGG_SW_RETRY_TH, 3239 .sta_kickout_th = WMI_TLV_PDEV_PARAM_STA_KICKOUT_TH, 3240 .ac_aggrsize_scaling = WMI_TLV_PDEV_PARAM_AC_AGGRSIZE_SCALING, 3241 .ltr_enable = WMI_TLV_PDEV_PARAM_LTR_ENABLE, 3242 .ltr_ac_latency_be = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BE, 3243 .ltr_ac_latency_bk = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BK, 3244 .ltr_ac_latency_vi = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VI, 3245 .ltr_ac_latency_vo = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VO, 3246 .ltr_ac_latency_timeout = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT, 3247 .ltr_sleep_override = WMI_TLV_PDEV_PARAM_LTR_SLEEP_OVERRIDE, 3248 .ltr_rx_override = WMI_TLV_PDEV_PARAM_LTR_RX_OVERRIDE, 3249 .ltr_tx_activity_timeout = WMI_TLV_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT, 3250 .l1ss_enable = WMI_TLV_PDEV_PARAM_L1SS_ENABLE, 3251 .dsleep_enable = WMI_TLV_PDEV_PARAM_DSLEEP_ENABLE, 3252 .pcielp_txbuf_flush = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_FLUSH, 3253 .pcielp_txbuf_watermark = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN, 3254 .pcielp_txbuf_tmo_en = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN, 3255 .pcielp_txbuf_tmo_value = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE, 3256 .pdev_stats_update_period = WMI_TLV_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD, 3257 .vdev_stats_update_period = WMI_TLV_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD, 3258 .peer_stats_update_period = WMI_TLV_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD, 3259 .bcnflt_stats_update_period = 3260 WMI_TLV_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD, 3261 .pmf_qos = WMI_TLV_PDEV_PARAM_PMF_QOS, 3262 .arp_ac_override = WMI_TLV_PDEV_PARAM_ARP_AC_OVERRIDE, 3263 .dcs = WMI_TLV_PDEV_PARAM_DCS, 3264 .ani_enable = WMI_TLV_PDEV_PARAM_ANI_ENABLE, 3265 .ani_poll_period = WMI_TLV_PDEV_PARAM_ANI_POLL_PERIOD, 3266 .ani_listen_period = WMI_TLV_PDEV_PARAM_ANI_LISTEN_PERIOD, 3267 .ani_ofdm_level = WMI_TLV_PDEV_PARAM_ANI_OFDM_LEVEL, 3268 .ani_cck_level = WMI_TLV_PDEV_PARAM_ANI_CCK_LEVEL, 3269 .dyntxchain = WMI_TLV_PDEV_PARAM_DYNTXCHAIN, 3270 .proxy_sta = WMI_TLV_PDEV_PARAM_PROXY_STA, 3271 .idle_ps_config = WMI_TLV_PDEV_PARAM_IDLE_PS_CONFIG, 3272 .power_gating_sleep = WMI_TLV_PDEV_PARAM_POWER_GATING_SLEEP, 3273 .fast_channel_reset = WMI_TLV_PDEV_PARAM_UNSUPPORTED, 3274 .burst_dur = WMI_TLV_PDEV_PARAM_BURST_DUR, 3275 .burst_enable = WMI_TLV_PDEV_PARAM_BURST_ENABLE, 3276 .cal_period = WMI_PDEV_PARAM_UNSUPPORTED, 3277 .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED, 3278 .rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED, 3279 .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED, 3280 .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED, 3281 .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED, 3282 .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED, 3283 .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED, 3284 .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED, 3285 .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED, 3286 
.set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED, 3287 .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED, 3288 .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED, 3289 .peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED, 3290 .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED, 3291 .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED, 3292 .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, 3293 .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, 3294 .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, 3295 .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, 3296 .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, 3297 .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, 3298 .en_stats = WMI_PDEV_PARAM_UNSUPPORTED, 3299 .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED, 3300 .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED, 3301 .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED, 3302 .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED, 3303 .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED, 3304 .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED, 3305 .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED, 3306 .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED, 3307 .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED, 3308 .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED, 3309 .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED, 3310 .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED, 3311 .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED, 3312 .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED, 3313 .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED, 3314 .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED, 3315 .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED, 3316 .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED, 3317 .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED, 3318 .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED, 3319 }; 3320 3321 static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = { 3322 .rts_threshold = WMI_TLV_VDEV_PARAM_RTS_THRESHOLD, 3323 .fragmentation_threshold = WMI_TLV_VDEV_PARAM_FRAGMENTATION_THRESHOLD, 3324 .beacon_interval = WMI_TLV_VDEV_PARAM_BEACON_INTERVAL, 3325 .listen_interval = WMI_TLV_VDEV_PARAM_LISTEN_INTERVAL, 3326 .multicast_rate = WMI_TLV_VDEV_PARAM_MULTICAST_RATE, 3327 .mgmt_tx_rate = WMI_TLV_VDEV_PARAM_MGMT_TX_RATE, 3328 .slot_time = WMI_TLV_VDEV_PARAM_SLOT_TIME, 3329 .preamble = WMI_TLV_VDEV_PARAM_PREAMBLE, 3330 .swba_time = WMI_TLV_VDEV_PARAM_SWBA_TIME, 3331 .wmi_vdev_stats_update_period = WMI_TLV_VDEV_STATS_UPDATE_PERIOD, 3332 .wmi_vdev_pwrsave_ageout_time = WMI_TLV_VDEV_PWRSAVE_AGEOUT_TIME, 3333 .wmi_vdev_host_swba_interval = WMI_TLV_VDEV_HOST_SWBA_INTERVAL, 3334 .dtim_period = WMI_TLV_VDEV_PARAM_DTIM_PERIOD, 3335 .wmi_vdev_oc_scheduler_air_time_limit = 3336 WMI_TLV_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT, 3337 .wds = WMI_TLV_VDEV_PARAM_WDS, 3338 .atim_window = WMI_TLV_VDEV_PARAM_ATIM_WINDOW, 3339 .bmiss_count_max = WMI_TLV_VDEV_PARAM_BMISS_COUNT_MAX, 3340 .bmiss_first_bcnt = WMI_TLV_VDEV_PARAM_BMISS_FIRST_BCNT, 3341 .bmiss_final_bcnt = WMI_TLV_VDEV_PARAM_BMISS_FINAL_BCNT, 3342 .feature_wmm = WMI_TLV_VDEV_PARAM_FEATURE_WMM, 3343 .chwidth = WMI_TLV_VDEV_PARAM_CHWIDTH, 3344 .chextoffset = WMI_TLV_VDEV_PARAM_CHEXTOFFSET, 3345 .disable_htprotection = WMI_TLV_VDEV_PARAM_DISABLE_HTPROTECTION, 3346 .sta_quickkickout = WMI_TLV_VDEV_PARAM_STA_QUICKKICKOUT, 3347 .mgmt_rate = WMI_TLV_VDEV_PARAM_MGMT_RATE, 3348 .protection_mode = WMI_TLV_VDEV_PARAM_PROTECTION_MODE, 3349 .fixed_rate = WMI_TLV_VDEV_PARAM_FIXED_RATE, 3350 .sgi = WMI_TLV_VDEV_PARAM_SGI, 3351 .ldpc = WMI_TLV_VDEV_PARAM_LDPC, 3352 .tx_stbc = WMI_TLV_VDEV_PARAM_TX_STBC, 3353 
.rx_stbc = WMI_TLV_VDEV_PARAM_RX_STBC, 3354 .intra_bss_fwd = WMI_TLV_VDEV_PARAM_INTRA_BSS_FWD, 3355 .def_keyid = WMI_TLV_VDEV_PARAM_DEF_KEYID, 3356 .nss = WMI_TLV_VDEV_PARAM_NSS, 3357 .bcast_data_rate = WMI_TLV_VDEV_PARAM_BCAST_DATA_RATE, 3358 .mcast_data_rate = WMI_TLV_VDEV_PARAM_MCAST_DATA_RATE, 3359 .mcast_indicate = WMI_TLV_VDEV_PARAM_MCAST_INDICATE, 3360 .dhcp_indicate = WMI_TLV_VDEV_PARAM_DHCP_INDICATE, 3361 .unknown_dest_indicate = WMI_TLV_VDEV_PARAM_UNKNOWN_DEST_INDICATE, 3362 .ap_keepalive_min_idle_inactive_time_secs = 3363 WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS, 3364 .ap_keepalive_max_idle_inactive_time_secs = 3365 WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS, 3366 .ap_keepalive_max_unresponsive_time_secs = 3367 WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS, 3368 .ap_enable_nawds = WMI_TLV_VDEV_PARAM_AP_ENABLE_NAWDS, 3369 .mcast2ucast_set = WMI_TLV_VDEV_PARAM_UNSUPPORTED, 3370 .enable_rtscts = WMI_TLV_VDEV_PARAM_ENABLE_RTSCTS, 3371 .txbf = WMI_TLV_VDEV_PARAM_TXBF, 3372 .packet_powersave = WMI_TLV_VDEV_PARAM_PACKET_POWERSAVE, 3373 .drop_unencry = WMI_TLV_VDEV_PARAM_DROP_UNENCRY, 3374 .tx_encap_type = WMI_TLV_VDEV_PARAM_TX_ENCAP_TYPE, 3375 .ap_detect_out_of_sync_sleeping_sta_time_secs = 3376 WMI_TLV_VDEV_PARAM_UNSUPPORTED, 3377 .rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED, 3378 .cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED, 3379 .mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED, 3380 .rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED, 3381 .vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED, 3382 .vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED, 3383 .early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED, 3384 .early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED, 3385 .early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED, 3386 .early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED, 3387 .early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED, 3388 .early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED, 3389 .proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED, 3390 .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED, 3391 .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED, 3392 .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED, 3393 }; 3394 3395 static const struct wmi_ops wmi_tlv_ops = { 3396 .rx = ath10k_wmi_tlv_op_rx, 3397 .map_svc = wmi_tlv_svc_map, 3398 3399 .pull_scan = ath10k_wmi_tlv_op_pull_scan_ev, 3400 .pull_mgmt_rx = ath10k_wmi_tlv_op_pull_mgmt_rx_ev, 3401 .pull_ch_info = ath10k_wmi_tlv_op_pull_ch_info_ev, 3402 .pull_vdev_start = ath10k_wmi_tlv_op_pull_vdev_start_ev, 3403 .pull_peer_kick = ath10k_wmi_tlv_op_pull_peer_kick_ev, 3404 .pull_swba = ath10k_wmi_tlv_op_pull_swba_ev, 3405 .pull_phyerr = ath10k_wmi_tlv_op_pull_phyerr_ev, 3406 .pull_svc_rdy = ath10k_wmi_tlv_op_pull_svc_rdy_ev, 3407 .pull_rdy = ath10k_wmi_tlv_op_pull_rdy_ev, 3408 .pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats, 3409 .pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev, 3410 .pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev, 3411 3412 .gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend, 3413 .gen_pdev_resume = ath10k_wmi_tlv_op_gen_pdev_resume, 3414 .gen_pdev_set_rd = ath10k_wmi_tlv_op_gen_pdev_set_rd, 3415 .gen_pdev_set_param = ath10k_wmi_tlv_op_gen_pdev_set_param, 3416 .gen_init = ath10k_wmi_tlv_op_gen_init, 3417 .gen_start_scan = ath10k_wmi_tlv_op_gen_start_scan, 3418 .gen_stop_scan = ath10k_wmi_tlv_op_gen_stop_scan, 3419 .gen_vdev_create = ath10k_wmi_tlv_op_gen_vdev_create, 3420 .gen_vdev_delete = ath10k_wmi_tlv_op_gen_vdev_delete, 3421 .gen_vdev_start = ath10k_wmi_tlv_op_gen_vdev_start, 3422 .gen_vdev_stop = 
ath10k_wmi_tlv_op_gen_vdev_stop, 3423 .gen_vdev_up = ath10k_wmi_tlv_op_gen_vdev_up, 3424 .gen_vdev_down = ath10k_wmi_tlv_op_gen_vdev_down, 3425 .gen_vdev_set_param = ath10k_wmi_tlv_op_gen_vdev_set_param, 3426 .gen_vdev_install_key = ath10k_wmi_tlv_op_gen_vdev_install_key, 3427 .gen_vdev_wmm_conf = ath10k_wmi_tlv_op_gen_vdev_wmm_conf, 3428 .gen_peer_create = ath10k_wmi_tlv_op_gen_peer_create, 3429 .gen_peer_delete = ath10k_wmi_tlv_op_gen_peer_delete, 3430 .gen_peer_flush = ath10k_wmi_tlv_op_gen_peer_flush, 3431 .gen_peer_set_param = ath10k_wmi_tlv_op_gen_peer_set_param, 3432 .gen_peer_assoc = ath10k_wmi_tlv_op_gen_peer_assoc, 3433 .gen_set_psmode = ath10k_wmi_tlv_op_gen_set_psmode, 3434 .gen_set_sta_ps = ath10k_wmi_tlv_op_gen_set_sta_ps, 3435 .gen_set_ap_ps = ath10k_wmi_tlv_op_gen_set_ap_ps, 3436 .gen_scan_chan_list = ath10k_wmi_tlv_op_gen_scan_chan_list, 3437 .gen_beacon_dma = ath10k_wmi_tlv_op_gen_beacon_dma, 3438 .gen_pdev_set_wmm = ath10k_wmi_tlv_op_gen_pdev_set_wmm, 3439 .gen_request_stats = ath10k_wmi_tlv_op_gen_request_stats, 3440 .gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang, 3441 /* .gen_mgmt_tx = not implemented; HTT is used */ 3442 .gen_dbglog_cfg = ath10k_wmi_tlv_op_gen_dbglog_cfg, 3443 .gen_pktlog_enable = ath10k_wmi_tlv_op_gen_pktlog_enable, 3444 .gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable, 3445 /* .gen_pdev_set_quiet_mode not implemented */ 3446 /* .gen_pdev_get_temperature not implemented */ 3447 /* .gen_addba_clear_resp not implemented */ 3448 /* .gen_addba_send not implemented */ 3449 /* .gen_addba_set_resp not implemented */ 3450 /* .gen_delba_send not implemented */ 3451 .gen_bcn_tmpl = ath10k_wmi_tlv_op_gen_bcn_tmpl, 3452 .gen_prb_tmpl = ath10k_wmi_tlv_op_gen_prb_tmpl, 3453 .gen_p2p_go_bcn_ie = ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie, 3454 .gen_vdev_sta_uapsd = ath10k_wmi_tlv_op_gen_vdev_sta_uapsd, 3455 .gen_sta_keepalive = ath10k_wmi_tlv_op_gen_sta_keepalive, 3456 .gen_wow_enable = ath10k_wmi_tlv_op_gen_wow_enable, 3457 .gen_wow_add_wakeup_event = ath10k_wmi_tlv_op_gen_wow_add_wakeup_event, 3458 .gen_wow_host_wakeup_ind = ath10k_wmi_tlv_gen_wow_host_wakeup_ind, 3459 .gen_wow_add_pattern = ath10k_wmi_tlv_op_gen_wow_add_pattern, 3460 .gen_wow_del_pattern = ath10k_wmi_tlv_op_gen_wow_del_pattern, 3461 .gen_update_fw_tdls_state = ath10k_wmi_tlv_op_gen_update_fw_tdls_state, 3462 .gen_tdls_peer_update = ath10k_wmi_tlv_op_gen_tdls_peer_update, 3463 .gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs, 3464 }; 3465 3466 /************/ 3467 /* TLV init */ 3468 /************/ 3469 3470 void ath10k_wmi_tlv_attach(struct ath10k *ar) 3471 { 3472 ar->wmi.cmd = &wmi_tlv_cmd_map; 3473 ar->wmi.vdev_param = &wmi_tlv_vdev_param_map; 3474 ar->wmi.pdev_param = &wmi_tlv_pdev_param_map; 3475 ar->wmi.ops = &wmi_tlv_ops; 3476 } 3477
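
/*
 * Note on the command builders above (illustrative sketch only, not a new
 * helper in this file): every gen_* routine fills a freshly allocated skb
 * with one or more TLV sections, each a struct wmi_tlv header immediately
 * followed by its payload, with all multi-byte fields converted to little
 * endian. The recurring pattern is roughly:
 *
 *	tlv = ptr;
 *	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_FOO);  // hypothetical tag
 *	tlv->len = __cpu_to_le16(sizeof(*payload));
 *	payload = (void *)tlv->value;
 *	payload->field = __cpu_to_le32(value);
 *	ptr += sizeof(*tlv) + sizeof(*payload);
 *
 * Byte-array TLVs (rate sets, key material, beacon/probe templates, IEs)
 * additionally round their declared length up to a 4-byte boundary when
 * sizing the buffer and the TLV header.
 */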